gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from math import pi, degrees, radians, sin, cos, atan2, asin, fabs
from mymath import pfmod
from angle import Angle, Longitude, Latitude
from coordinates import Ecliptical, Equatorial
from calendar import Date, Time, JulianDayNumber
from constants import epoch_j2000, julian_century
from ecliptic import Ecliptic
class Sun:
    """Low-accuracy solar position (Meeus, "Astronomical Algorithms", ch. 25).

    For the instant given by jdn_, computes the Sun's geometric and
    apparent ecliptical longitude, equatorial coordinates referred to the
    mean and the *true* equinox of the date, the radius vector (AU) and
    rectangular coordinates.  All stored angles are in radians.
    """
    def __init__(self, jdn_):
        assert isinstance(jdn_, JulianDayNumber), \
            'jdn_ should be a JulianDayNumber'
        two_pi = 2*pi
        # Time in Julian centuries since J2000.0
        T = (jdn_.jdn - epoch_j2000.jdn)/julian_century
        # Mean Longitude referred to the mean equinox (degrees)
        L_0 = 280.46646 + (36000.76983 + 0.0003032*T)*T
        L_0 = pfmod(L_0, 360)
        L_0 = radians(L_0)
        # Mean anomaly (degrees)
        M = 357.52911 + (35999.05029 - 0.0001537*T)*T
        M = pfmod(M, 360)
        M = radians(M)
        # Eccentricity of the Earth's orbit (dimensionless)
        e = 0.016708634 - (0.000042037 + 0.0000001267*T)*T
        # Equation of the center (degrees); may be negative, but wrapping
        # with pfmod is harmless since it is only used modulo 360.
        C = (1.914602 - (0.004817 + 0.000014*T)*T)*sin(M) \
            + (0.019993 - 0.000101*T)*sin(2*M) \
            + 0.000289*sin(3*M)
        C = pfmod(C, 360)
        C = radians(C)
        # True geometric Longitude referred to the mean equinox of the date
        self.L = pfmod(L_0 + C, two_pi)
        # True geometric latitude referred to the mean equinox
        # (identically zero in this low-accuracy theory)
        self.beta = 0
        # True anomaly
        self.v = pfmod(M + C, two_pi)
        # Radius vector (AU)
        self.R = 1.000001018*(1 - e**2)/(1 + e*cos(self.v))
        # Longitude of ascending node (see more accurate expression in ecliptic)
        Omega = radians(125.04 - 1934.136*T)
        # Apparent Longitude referred to the *true* equinox of the date
        # (corrected for nutation and aberration)
        self.lambda_apparent = degrees(self.L) - 0.00569 - 0.00478*sin(Omega)
        self.lambda_apparent = pfmod(self.lambda_apparent, 360)
        self.lambda_apparent = radians(self.lambda_apparent)
        # Apparent Latitude referred to the *true* equinox of the date
        self.beta_apparent = 0
        # Equatorial coordinates referred to the mean equinox of the date
        ecliptic = Ecliptic(jdn_)
        w_uncorrected = ecliptic.get_obliquity_uncorrected().rads
        self.alpha = atan2(cos(w_uncorrected)*sin(self.L), cos(self.L))
        self.alpha = pfmod(self.alpha, two_pi)
        self.delta = asin(sin(w_uncorrected)*sin(self.L))
        # NOTE(review): pfmod maps a negative declination into [0, 2*pi);
        # presumably Latitude re-normalizes this - confirm.
        self.delta = pfmod(self.delta, two_pi)
        # Equatorial coordinates referred to the *true* equinox of the date
        w_corrected = w_uncorrected + radians(0.00256*cos(Omega))
        # Bug fix: the apparent right ascension must use the *apparent*
        # longitude in both the numerator and the denominator
        # (Meeus eq. 25.8); the original used cos(self.L) here.
        self.alpha_apparent = \
            atan2(cos(w_corrected)*sin(self.lambda_apparent),
                  cos(self.lambda_apparent))
        self.alpha_apparent = pfmod(self.alpha_apparent, two_pi)
        self.delta_apparent = asin(sin(w_corrected)*sin(self.lambda_apparent))
        self.delta_apparent = pfmod(self.delta_apparent, two_pi)
        # Rectangular Coordinates referred to the mean equinox of the date
        self.X = self.R*cos(self.beta)*cos(self.L)
        self.Y = self.R*(cos(self.beta)*sin(self.L)*cos(w_uncorrected) \
                         - sin(self.beta)*sin(w_uncorrected))
        self.Z = self.R*(cos(self.beta)*sin(self.L)*sin(w_uncorrected) \
                         + sin(self.beta)*cos(w_uncorrected))
    def get_ecliptical(self):
        """Geometric ecliptical coordinates (mean equinox of the date)."""
        return Ecliptical(Longitude(self.L), Latitude(0))
    def get_ecliptical_apparent(self):
        """Apparent ecliptical coordinates (true equinox of the date)."""
        return Ecliptical(Longitude(self.lambda_apparent), Latitude(0))
    def get_equatorial(self):
        """Equatorial coordinates (mean equinox of the date)."""
        return Equatorial(Longitude(self.alpha), Latitude(self.delta))
    def get_equatorial_apparent(self):
        """Apparent equatorial coordinates (true equinox of the date)."""
        return Equatorial(Longitude(self.alpha_apparent),
                          Latitude(self.delta_apparent))
    def get_rectangular(self):
        """Rectangular (X, Y, Z) coordinates, mean equinox of the date."""
        return (self.X, self.Y, self.Z)
########
# Polynomials (Meeus table 27.A) giving the "mean" instant (JDE) of the
# equinoxes and solstices for the years before +1000.
# Note: Y = year/1000
########
def spring_table27a(Y):
    """Mean JDE of the March (spring) equinox; Y = year/1000."""
    return (((-0.00071*Y + 0.00111)*Y + 0.06134)*Y + 365242.13740)*Y \
           + 1721139.29189
def summer_table27a(Y):
    """Mean JDE of the June (summer) solstice; Y = year/1000."""
    return (((0.00025*Y + 0.00907)*Y - 0.05323)*Y + 365241.72562)*Y \
           + 1721233.25401
def autumn_table27a(Y):
    """Mean JDE of the September (autumn) equinox; Y = year/1000."""
    return (((0.00074*Y - 0.00297)*Y - 0.11677)*Y + 365242.49558)*Y \
           + 1721325.70455
def winter_table27a(Y):
    """Mean JDE of the December (winter) solstice; Y = year/1000."""
    return (((-0.00006*Y - 0.00933)*Y - 0.00769)*Y + 365242.88257)*Y \
           + 1721414.39987
# Indexed by event: 0=spring, 1=summer, 2=autumn, 3=winter
table27a = (
    spring_table27a,  # Spring Equinox
    summer_table27a,  # Summer Solstice
    autumn_table27a,  # Autumn Equinox
    winter_table27a   # Winter Solstice
)
########
# Polynomials (Meeus table 27.B) giving the "mean" instant (JDE) of the
# equinoxes and solstices for the years after +1000.
# Note: Y = (year-2000)/1000
########
def spring_table27b(Y):
    """Mean JDE of the March (spring) equinox; Y = (year-2000)/1000."""
    return (((-0.00057*Y - 0.00411)*Y + 0.05169)*Y + 365242.37404)*Y \
           + 2451623.80984
def summer_table27b(Y):
    """Mean JDE of the June (summer) solstice; Y = (year-2000)/1000."""
    return (((-0.00030*Y + 0.00888)*Y + 0.00325)*Y + 365241.62603)*Y \
           + 2451716.56767
def autumn_table27b(Y):
    """Mean JDE of the September (autumn) equinox; Y = (year-2000)/1000."""
    return (((0.00078*Y + 0.00337)*Y - 0.11575)*Y + 365242.01767)*Y \
           + 2451810.21715
def winter_table27b(Y):
    """Mean JDE of the December (winter) solstice; Y = (year-2000)/1000."""
    return (((0.00032*Y - 0.00823)*Y - 0.06223)*Y + 365242.74049)*Y \
           + 2451900.05952
# Indexed by event: 0=spring, 1=summer, 2=autumn, 3=winter
table27b = (
    spring_table27b,  # Spring Equinox
    summer_table27b,  # Summer Solstice
    autumn_table27b,  # Autumn Equinox
    winter_table27b   # Winter Solstice
)
########
# Table of coefficients for use with equinox and solstice computation
########
# Periodic terms (Meeus table 27.C).  Each row is (A, B, C) and
# contributes A*cos(B + C*T) to the correction sum S used in
# equinox_solstice_helper; B and C are in degrees, T is in Julian
# centuries from J2000.0.
table27c = (
    (485, 324.96, 1934.136),
    (203, 337.23, 32964.467),
    (199, 342.08, 20.186),
    (182, 27.85, 445267.112),
    (156, 73.14, 45036.886),
    (136, 171.52, 22518.443),
    ( 77, 222.54, 65928.934),
    ( 74, 296.72, 3034.906),
    ( 70, 243.58, 9037.513),
    ( 58, 119.81, 33718.147),
    ( 52, 297.17, 150.678),
    ( 50, 21.02, 2281.226),
    ( 45, 247.54, 29929.562),
    ( 44, 325.15, 31555.956),
    ( 29, 60.93, 4443.417),
    ( 18, 155.12, 67555.328),
    ( 17, 288.79, 4562.452),
    ( 16, 198.04, 62894.029),
    ( 14, 199.76, 31436.921),
    ( 12, 95.39, 14577.848),
    ( 12, 287.11, 31931.756),
    ( 12, 320.81, 34777.259),
    ( 9, 227.73, 1222.114),
    ( 8, 15.45, 16859.074)
)
def mean_equinox_solstice(year_, idx_):
    """Return the 'mean' (uncorrected) JDE of an equinox or solstice.

    idx_: 0=spring equinox, 1=summer solstice, 2=autumn equinox,
    3=winter solstice -- the indexing shared by table27a/table27b.
    """
    y = int(year_)
    if y <= 1000:
        return table27a[idx_](y/1000.0)
    return table27b[idx_]((y - 2000)/1000.0)
def equinox_solstice_helper(year_, idx_):
    """Return a JulianDayNumber for the equinox/solstice given by idx_.

    Applies the periodic corrections of table27c to the mean instant
    (Meeus ch. 27, non-iterative method).
    """
    mean_jde = mean_equinox_solstice(year_, idx_)
    T = (mean_jde - epoch_j2000.jdn)/julian_century
    W = radians(35999.373*T - 2.47)
    dlambda = 1 + 0.0334*cos(W) + 0.0007*cos(2*W)
    # Sum of the 24 periodic terms
    S = sum(A*cos(radians(B + C*T)) for (A, B, C) in table27c)
    # Roundabout way of creating a JDN: build one, then overwrite .jdn
    result = JulianDayNumber(Date(year_, 1, 1), Time(0, 0, 0))
    result.jdn = mean_jde + (0.00001*S)/dlambda
    return result
########
# @todo - The iterative method is not accurate until we have the 'higher
# accuracy' calculations for the sun's circumstances
########
def equinox_solstice_iterative(year_, idx_, prec_):
    """Iteratively refine the mean equinox/solstice instant until the
    correction falls below prec_ (in days).  idx_ selects the event:
    0=spring, 1=summer, 2=autumn, 3=winter."""
    jde_0 = mean_equinox_solstice(year_, idx_)
    jdn_to_ret = JulianDayNumber(Date(year_,1,1), Time(0,0,0))
    jdn_to_ret.jdn = jde_0 # Hack: overwrite the jdn field directly
    while True:
        sun = Sun(jdn_to_ret)
        # Low-accuracy nutation in longitude (radians)
        dpsi = Ecliptic(jdn_to_ret).get_dpsi_low().rads
        # FK5 frame correction, arcseconds -> radians
        fk5_correction = -radians(0.09033/3600.0)
        # Annual aberration, scaled by the radius vector
        aberration = -radians(20.4898/3600.0/sun.R)
        lambda_app = sun.L + dpsi + fk5_correction + aberration
        # Correction in days toward the target longitude idx_*90deg
        # (58 days per radian of offset, per Meeus ch. 27)
        dday = 58*sin(idx_*pi/2 - lambda_app)
        if fabs(dday) <= fabs(prec_): break
        jdn_to_ret.jdn += dday
    return jdn_to_ret
def spring_equinox(year_):
    """Return the JulianDayNumber of the March (spring) equinox of year_."""
    return equinox_solstice_helper(year_, idx_=0)
def summer_solstice(year_):
    """Return the JulianDayNumber of the June (summer) solstice of year_."""
    return equinox_solstice_helper(year_, idx_=1)
def autumn_equinox(year_):
    """Return the JulianDayNumber of the September (autumn) equinox of year_."""
    return equinox_solstice_helper(year_, idx_=2)
def winter_solstice(year_):
    """Return the JulianDayNumber of the December (winter) solstice of year_."""
    return equinox_solstice_helper(year_, idx_=3)
if __name__ == "__main__":
    # Demo / smoke test (Python 2 print statements).
    # Sun tests -- expected values in the comments are from Meeus ex. 25.a
    sooraj = Sun(JulianDayNumber(Date(1992,10,13),Time(0,0,0)))
    print sooraj.get_ecliptical() # 199deg54'36", 0
    print sooraj.get_ecliptical_apparent() # 199deg54'32", 0
    print sooraj.get_equatorial()
    print sooraj.get_equatorial_apparent() # 13h13m31.4s, -7deg47'06"
    print sooraj.get_rectangular()
    print
    # Equinox/Solstice tests
    years = [ 1962, 1975, 1979, 2013, 2017, 2018, 2019, 2020 ]
    prec = 1e-6 # days; unused below -- only the non-iterative helper is exercised
    for year in years:
        print 'Year', year
        instant = spring_equinox(year)
        print 'Spring Equinox :', instant.get_date(), instant.get_time()
        instant = summer_solstice(year)
        print 'Summer Solstice:', instant.get_date(), instant.get_time()
        instant = autumn_equinox(year)
        print 'Autumn Equinox :', instant.get_date(), instant.get_time()
        instant = winter_solstice(year)
        print 'Winter Solstice:', instant.get_date(), instant.get_time()
        print
| |
import collections
import os
import shelve
import threading
import time
from contextlib import closing
from .block import Block, BlockHeader
from .serialize import Serialize
from .script import Script
from .util import *
class Blockchain:
    """SPV header-chain manager.

    Blocks are represented as plain dicts ("block links", see
    create_block_link).  The most recent `saved_blockchain_length` links of
    the best chain are persisted in a `shelve` database as a circular
    buffer under 'blockchain' ('start', 'count', 'links').

    Bug fix in create_block_link: an explicitly passed `work` value used to
    be discarded and reset to 0 (the old `else: work = 0` branch fired for
    any non-None `work`), so links loaded from disk lost their cumulative
    work.  `work` is now honored when given.
    """
    SAVED_BLOCKCHAIN_LENGTH = 100

    def __init__(self, spv):
        # The checkpoint must sit on a retarget boundary so difficulty can
        # be computed for blocks that follow it.
        assert (spv.coin.CHECKPOINT_BLOCK_HEIGHT % spv.coin.WORK_INTERVAL) == 0
        self.spv = spv
        self.saved_blockchain_length = max(Blockchain.SAVED_BLOCKCHAIN_LENGTH, self.spv.coin.WORK_INTERVAL) # Save at least WORK_INTERVAL blocks
        self.blockchain_db_file = spv.config.get_file("blockchain")
        self.blockchain_lock = threading.Lock() # TODO use RLock?
        genesis = self.create_block_link(hash=self.spv.coin.GENESIS_BLOCK_HASH, height=0, main=True, connected=True, header=BlockHeader(spv.coin, timestamp=self.spv.coin.GENESIS_BLOCK_TIMESTAMP, bits=self.spv.coin.GENESIS_BLOCK_BITS))
        checkpoint = self.create_block_link(hash=self.spv.coin.CHECKPOINT_BLOCK_HASH, height=self.spv.coin.CHECKPOINT_BLOCK_HEIGHT, main=True, connected=True, header=BlockHeader(spv.coin, timestamp=self.spv.coin.CHECKPOINT_BLOCK_TIMESTAMP, bits=self.spv.coin.CHECKPOINT_BLOCK_BITS))
        self.blocks = {
            self.spv.coin.GENESIS_BLOCK_HASH  : genesis,
            self.spv.coin.CHECKPOINT_BLOCK_HASH: checkpoint,
        }
        # prev_block_hash -> set of hashes of blocks referencing it that
        # are not yet connected to the chain (orphan index)
        self.unknown_referenced_blocks = collections.defaultdict(set)
        with self.blockchain_lock:
            with closing(shelve.open(self.blockchain_db_file)) as db:
                if 'needs_headers' not in db or self.spv.args.resync:
                    db['needs_headers'] = True
                # Make sure sync_block_start is consistent between restarts
                if 'sync_block_start' not in db:
                    db['sync_block_start'] = None
                if self.spv.sync_block_start is not None:
                    db['sync_block_start'] = self.spv.sync_block_start
                self.sync_block_start = db['sync_block_start']
                # Start from the checkpoint unless the requested sync start
                # predates it, in which case fall back to genesis.
                self.best_chain = (checkpoint if (self.sync_block_start is None or self.sync_block_start >= checkpoint['height']) else genesis)
                if 'blockchain' not in db or self.spv.args.resync:
                    db['blockchain'] = {
                        'start': 0,
                        'count': 0,
                        'links': [None] * self.saved_blockchain_length,
                    }
                self.needs_headers = db['needs_headers']
                # load blocks from db (circular buffer order: start..start+count)
                start = db['blockchain']['start']
                links = db['blockchain']['links']
                start_time = time.time()
                if self.spv.logging_level <= INFO:
                    print('[BLOCKCHAIN] loading blockchain headers...')
                for i in range(db['blockchain']['count']):
                    index = (start + i) % self.saved_blockchain_length
                    link = links[index]
                    header, _ = BlockHeader.unserialize(link['header'], self.spv.coin)
                    block_link = self.create_block_link(header.hash(), height=link['height'], work=link['work'], header=header)
                    # changes are ignored when loading (blockchain arg is None)
                    self.__connect_block_link(None, block_link, skip_validation=True)
                    # First block has to be manually connected. The rest of the blocks will connect normally
                    if i == 0:
                        block_link['connected'] = True
                        block_link['main'] = True
                        block_link['height'] = link['height']
                        self.best_chain = block_link
                    else:
                        if self.best_chain is not block_link:
                            raise Exception("Uh oh. Blockchain state is corrupted. Loaded {} blocks to height {}.".format(i, self.best_chain['height']))
                if self.spv.logging_level <= INFO:
                    print('[BLOCKCHAIN] done ({:5.3f} sec)'.format(time.time()-start_time))

    def create_block_link(self, hash, height=0, main=False, connected=False, prev=None, header=None, work=None):
        """Build the dict that represents one block in the header tree.

        If `work` is not supplied it is derived from the header (0 when no
        header is given).  An explicitly supplied `work` is kept as-is.
        """
        if work is None:
            work = header.work() if header is not None else 0
        return {
            'hash'     : hash,
            'height'   : height,
            'main'     : main,       # True if on the current best chain
            'connected': connected,  # True once linked to a connected parent
            'prev'     : prev,
            'header'   : header,
            'work'     : work,       # cumulative chain work up to this block
        }

    def get_block_locator_comment_anchor(self):
        # (no-op placeholder removed)
        raise NotImplementedError

    def get_best_chain_locator(self):
        """Return a BlockLocator describing the current best chain tip."""
        return BlockLocator(self.best_chain)

    def get_best_chain_height(self):
        return self.best_chain['height']

    def get_needs_headers(self):
        with self.blockchain_lock:
            return self.needs_headers

    def add_block_headers(self, block_headers):
        """Attach a batch of headers received during headers-first sync.

        The incoming headers must ALL connect, but they are allowed to
        disconnect some blocks before connecting into the chain.  Returns
        True when the batch was accepted.
        """
        if len(block_headers) == 0:
            print('no headers')
            return False
        # First, link all the block_headers that were given
        new_block_links = []
        new_block_links.append(self.create_block_link(hash=block_headers[0].hash(), header=block_headers[0]))
        for i in range(1, len(block_headers)):
            new_block_links.append(self.create_block_link(hash=block_headers[i].hash(), header=block_headers[i]))
            if block_headers[i].prev_block_hash != new_block_links[i-1]['hash']:
                # The chain of headers we were just given doesn't connect to eachother
                return False
            new_block_links[i]['prev'] = new_block_links[i-1]
        changes = []
        with self.blockchain_lock:
            if not self.needs_headers:
                print('doesnt need headers')
                return False
            # All of the blocks must be new
            if any(block_link['hash'] in self.blocks for block_link in new_block_links):
                print('seen some of the block headers before')
                return False
            # make sure the first block connects
            prev = self.blocks.get(block_headers[0].prev_block_hash, None)
            if prev is None:
                print('first block doesnt connect')
                return False
            with closing(shelve.open(self.blockchain_db_file)) as db:
                blockchain = db['blockchain'].copy()
                # Connect it and update the rest of the blocks
                new_block_links[0]['prev'] = prev
                for new_block_link in new_block_links:
                    changes = changes + self.__connect_block_link(blockchain, new_block_link)
                    assert self.best_chain is new_block_link
                    # Switch to full-block sync once headers reach the
                    # wallet's creation time (with a one-day margin) or the
                    # requested sync start height.
                    if (self.best_chain['header'].timestamp >= self.spv.wallet.creation_time - (24 * 60 * 60)) or (self.sync_block_start is not None and self.best_chain['height'] >= self.sync_block_start):
                        print('headers sync done, switching to full blocks')
                        self.needs_headers = db['needs_headers'] = False
                        break
                db['blockchain'] = blockchain
        # Notify outside the lock (blockchain_lock is not reentrant)
        self.__run_changes(changes)
        if self.spv.logging_level <= INFO:
            print("[BLOCKCHAIN] added {} headers (new height = {})".format(len(new_block_links), self.best_chain['height']))
        return True

    def add_block(self, block):
        """Attach one full block (header plus transactions) to the tree."""
        if not block.check():
            return
        block_link = self.create_block_link(hash=block.header.hash(), header=block.header)
        # __connect_block_link drops the block data after its connected to the block tree
        block_link['block'] = block
        with self.blockchain_lock:
            with closing(shelve.open(self.blockchain_db_file)) as db:
                blockchain = db['blockchain'].copy()
                changes = self.__connect_block_link(blockchain, block_link)
                db['blockchain'] = blockchain
        self.__run_changes(changes)

    def __run_changes(self, changes):
        """Dispatch recorded ('removed'/'added', header, height) events to spv."""
        for change in changes:
            if change[0] == 'removed':
                self.spv.on_block_removed(*change[1:])
            elif change[0] == 'added':
                self.spv.on_block_added(*change[1:])

    def __connect_block_link(self, blockchain, block_link, skip_validation=False):
        """Insert block_link into the tree and connect any blocks it unblocks.

        Returns the list of chain changes produced (for __run_changes).
        When `blockchain` is None (initial load) persistence is skipped.
        """
        changes = []
        if block_link['hash'] in self.blocks:
            if self.spv.logging_level <= DEBUG:
                print("[BLOCKCHAIN] already have {}".format(bytes_to_hexstring(block_link['hash'])))
            return []
        self.blocks[block_link['hash']] = block_link
        self.unknown_referenced_blocks[block_link['header'].prev_block_hash].add(block_link['hash'])
        # See if this block causes any chains to be created or extended
        hashes_to_check = collections.deque([block_link['header'].prev_block_hash])
        while len(hashes_to_check) != 0:
            hash_to_check = hashes_to_check.popleft()
            if hash_to_check not in self.unknown_referenced_blocks or hash_to_check not in self.blocks or not self.blocks[hash_to_check]['connected']:
                continue
            for referenced_by_block_hash in self.unknown_referenced_blocks.pop(hash_to_check):
                referenced_by_block_link = self.blocks[referenced_by_block_hash]
                assert referenced_by_block_link['header'].prev_block_hash == hash_to_check
                assert not referenced_by_block_link['connected']
                error = None
                height = self.blocks[hash_to_check]['height'] + 1
                block_time = referenced_by_block_link['header'].timestamp
                # One-shot validation "block" (while used as a breakable if)
                while not skip_validation:
                    # The block must meet proof of work requirements
                    next_work = self.__get_next_work(self.blocks[hash_to_check], referenced_by_block_link['header'].timestamp)
                    if next_work != referenced_by_block_link['header'].bits:
                        error = "proof of work error: new block has bits = {:x} but it should be {:x}".format(referenced_by_block_link['header'].bits, next_work)
                        break
                    # The block timestamp must be at least after the previous block timestamp minus the median block time
                    if referenced_by_block_link['header'].timestamp <= self.__get_median_time_past(self.blocks[hash_to_check]):
                        error = "timestamp error: new block has timestamp = {} but it should be less than {}".format(referenced_by_block_link['header'].timestamp, self.__get_median_time_past(self.blocks[hash_to_check]))
                        break
                    # All transactions in the block have to be final
                    block = referenced_by_block_link.get('block', None)
                    if block is not None:
                        if not all(tx.is_final(height, block_time) for tx in block.transactions):
                            error = "not all transactions in block are final"
                            break
                    # Reject version 2 and higher blocks if at least 750 of the past 1000 blocks are version 2 or later and
                    # if the coinbase doesn't start with the serialized block height
                    if referenced_by_block_link['header'].version >= 2:
                        if (not self.spv.testnet and self.__is_block_majority(2, self.blocks[hash_to_check], 750, 1000)) or \
                           (self.spv.testnet and self.__is_block_majority(2, self.blocks[hash_to_check], 51, 100)):
                            # Serialize the height little-endian, as a script push
                            # NOTE(review): assumes 'block' is present here (full-block
                            # mode); a header-only link would raise on block.transactions
                            # -- confirm this path is unreachable during headers sync.
                            v = []
                            t = height
                            while t != 0:
                                v.append(t % 256)
                                t //= 256
                            s = Script()
                            s.push_bytes(bytes(v))
                            s = s.serialize()
                            coinbase_script = block.transactions[0].inputs[0].script
                            if coinbase_script.serialize()[:len(s)] != s:
                                error = "coinbase doesn't have encoded block height"
                                break
                    # Reject version 1 blocks if at least 950 of the past 1000 blocks are version 2 or later
                    if referenced_by_block_link['header'].version < 2:
                        if (not self.spv.testnet and self.__is_block_majority(2, self.blocks[hash_to_check], 950, 1000)) or \
                           (self.spv.testnet and self.__is_block_majority(2, self.blocks[hash_to_check], 75, 100)):
                            error = "block should not be version 1"
                            break
                    break
                if error is None:
                    if 'block' in referenced_by_block_link:
                        # Save memory: we don't need the transactions anymore
                        referenced_by_block_link.pop('block')
                    referenced_by_block_link['prev'] = self.blocks[hash_to_check]
                    referenced_by_block_link['height'] = self.blocks[hash_to_check]['height'] + 1
                    referenced_by_block_link['connected'] = True
                    referenced_by_block_link['main'] = False
                    referenced_by_block_link['work'] = self.blocks[hash_to_check]['work'] + referenced_by_block_link['header'].work()
                    changes = changes + self.__set_best_chain(blockchain, referenced_by_block_link)
                    # This block may in turn unblock blocks referencing it
                    hashes_to_check.append(referenced_by_block_hash)
                else:
                    # This block is bad. Remove it from everything.
                    print('[BLOCKCHAIN] invalid block {}: {}'.format(bytes_to_hexstring(referenced_by_block_hash), error))
                    self.blocks.pop(referenced_by_block_hash)
                    if referenced_by_block_hash in self.unknown_referenced_blocks:
                        self.unknown_referenced_blocks.pop(referenced_by_block_hash)
        if self.spv.logging_level <= INFO and not block_link['connected'] and blockchain is not None:
            # TODO Should we store orphaned blocks on disk so that they aren't fetched from the network
            # upon restarting?
            print("[BLOCKCHAIN] orphaned {}".format(bytes_to_hexstring(block_link['hash'])))
        return changes

    def get_next_work(self, next_block_link_timestamp):
        """Required 'bits' for a block extending the current best chain."""
        with self.blockchain_lock:
            return self.__get_next_work(self.best_chain, next_block_link_timestamp)

    def get_next_reward(self):
        """Block subsidy for the next block (halves every BLOCK_REWARD_HALVING)."""
        return self.spv.coin.STARTING_BLOCK_REWARD >> ((self.best_chain['height'] + 1) // self.spv.coin.BLOCK_REWARD_HALVING)

    def __get_next_work(self, prev_block_link, next_block_link_timestamp):
        """Compute the required 'bits' for the block after prev_block_link."""
        if ((prev_block_link['height'] + 1) % self.spv.coin.WORK_INTERVAL) != 0:
            # special retargetting rules for testnet
            if self.spv.testnet:
                if next_block_link_timestamp > (prev_block_link['header'].timestamp + (self.spv.coin.TARGET_BLOCK_SPACING * 2)):
                    # min-difficulty allowed after a 2x block-spacing gap
                    return target_to_bits(Block.BLOCK_DIFFICULTY_LIMIT)
                else:
                    # return the last block that did not fall under the special min-difficulty rule
                    p = prev_block_link
                    bits_limit = target_to_bits(Block.BLOCK_DIFFICULTY_LIMIT)
                    while p['prev'] is not None and (p['height'] % self.spv.coin.WORK_INTERVAL) != 0 and p['header'].bits == bits_limit:
                        p = p['prev']
                    return p['header'].bits
            else:
                return prev_block_link['header'].bits
        # Get the block at the beginning of the adjustment interval
        p = prev_block_link
        for _ in range(self.spv.coin.WORK_INTERVAL - 1):
            p = p['prev']
            if p is None:
                raise Exception("There are not enough blocks in our blockchain to compute proof of work. That's a problem")
        # Clamp target to limited range
        timespan = prev_block_link['header'].timestamp - p['header'].timestamp
        timespan = max(timespan, self.spv.coin.TARGET_BLOCK_TIMESPAN // 4)
        timespan = min(timespan, self.spv.coin.TARGET_BLOCK_TIMESPAN * 4)
        target = bits_to_target(prev_block_link['header'].bits) * timespan
        target = target // self.spv.coin.TARGET_BLOCK_TIMESPAN
        target = min(target, Block.BLOCK_DIFFICULTY_LIMIT)
        bits = target_to_bits(target)
        if self.spv.logging_level <= DEBUG:
            print("[BLOCKCHAIN] block work retarget!!")
            print("[BLOCKCHAIN] target timespan = {} actual timespan = {}".format(self.spv.coin.TARGET_BLOCK_TIMESPAN, timespan))
            print("[BLOCKCHAIN] before: {:08x} {:064x}".format(prev_block_link['header'].bits, bits_to_target(prev_block_link['header'].bits)))
            print("[BLOCKCHAIN] after: {:08x} {:064x} change: {:5.3f}%".format(bits, bits_to_target(bits), (target - bits_to_target(prev_block_link['header'].bits)) / bits_to_target(prev_block_link['header'].bits) * 100))
        return bits

    def __get_median_time_past(self, block_link):
        """Median timestamp of up to MEDIAN_TIME_SPAN ancestors (memoized)."""
        if 'median_time_past' in block_link:
            return block_link['median_time_past']
        times = []
        p = block_link
        for _ in range(self.spv.coin.MEDIAN_TIME_SPAN):
            times.append(p['header'].timestamp)
            p = p['prev']
            if p is None:
                break
        times.sort()
        block_link['median_time_past'] = times[len(times)//2]
        return block_link['median_time_past']

    def __is_block_majority(self, min_version, block_link_start, target_block_count, block_population_count):
        """True when >= target_block_count of the previous
        block_population_count blocks have version >= min_version."""
        found = 0
        for _ in range(block_population_count):
            if block_link_start['header'].version >= min_version:
                found += 1
            block_link_start = block_link_start['prev']
            if block_link_start is None:
                break
        return found >= target_block_count

    def __set_best_chain(self, blockchain, block_link):
        """Make block_link's chain the best chain when it has more work.

        Records 'removed'/'added' changes for the reorganized span and
        updates the persisted circular buffer when `blockchain` is given.
        """
        assert block_link['connected']
        changes = []
        # New block has to have more work
        if block_link['work'] <= self.best_chain['work']:
            return []
        new_best_chain = block_link
        old_best_chain = self.best_chain
        self.best_chain = new_best_chain
        # Old chain being longer is actually a rare case
        while old_best_chain['height'] > new_best_chain['height']:
            if blockchain is not None:
                # drop count by one and notify SPV that a block was removed from the chain
                count = blockchain['count']
                count -= 1
                assert count >= 0, "this is bad."
                blockchain['count'] = count
                changes.append(('removed', old_best_chain['header'], old_best_chain['height']))
            old_best_chain['main'] = False
            old_best_chain = old_best_chain['prev']
        while new_best_chain['height'] > old_best_chain['height']:
            new_best_chain['main'] = True
            new_best_chain = new_best_chain['prev']
        # At this point, new_best_chain['height'] == old_best_chain['height']
        assert new_best_chain['height'] == old_best_chain['height']
        if new_best_chain is not old_best_chain:
            # Walk both sides back to the fork point
            while new_best_chain['hash'] != old_best_chain['hash']:
                if blockchain is not None:
                    # drop count by one and notify SPV that a block was removed from the chain
                    count = blockchain['count']
                    count -= 1
                    assert count >= 0, "this is bad."
                    blockchain['count'] = count
                    changes.append(('removed', old_best_chain['header'], old_best_chain['height']))
                old_best_chain['main'] = False
                old_best_chain = old_best_chain['prev']
                new_best_chain['main'] = True
                new_best_chain = new_best_chain['prev']
        # add the new chain (in order) and notify spv
        notify_block_links = []
        chain_fork = new_best_chain
        end_fork = self.best_chain
        while end_fork is not chain_fork:
            notify_block_links.append(end_fork)
            end_fork = end_fork['prev']
        while len(notify_block_links):
            notify_block_link = notify_block_links.pop()
            if blockchain is not None:
                start = blockchain['start']
                count = blockchain['count']
                index = (start + count) % self.saved_blockchain_length
                links = blockchain['links']
                links[index] = {
                    'work'  : notify_block_link['work'],
                    'height': notify_block_link['height'],
                    'hash'  : notify_block_link['hash'],
                    'header': notify_block_link['header'].serialize(),
                }
                if count != self.saved_blockchain_length:
                    count += 1
                    blockchain['count'] = count
                else:
                    # buffer full: advance start instead of growing count
                    start = (start + 1) % self.saved_blockchain_length
                    blockchain['start'] = start
                blockchain['links'] = links
                changes.append(('added', notify_block_link['header'], notify_block_link['height']))
        if self.spv.logging_level <= INFO and blockchain is not None:
            print('[BLOCKCHAIN] new best chain = {} (height={})'.format(bytes_to_hexstring(self.best_chain['hash']), self.best_chain['height']))
        return changes
class BlockLocator:
    """Compact chain descriptor: the tip hash, then back-hashes whose
    spacing doubles once ten entries have been collected, ending at the
    chain's first block."""
    def __init__(self, block_link):
        link = block_link
        self.hashes = [link['hash']]
        stride = 1
        while link['prev'] is not None:
            if len(self.hashes) >= 10:
                stride *= 2
            # Walk back up to `stride` links, stopping at the chain start
            steps_left = stride
            while steps_left and link['prev'] is not None:
                link = link['prev']
                steps_left -= 1
            self.hashes.append(link['hash'])
    def serialize(self):
        """Wire format: varint count followed by the raw hashes."""
        count = Serialize.serialize_variable_int(len(self.hashes))
        return count + b''.join(self.hashes)
    def __str__(self):
        lines = ['{}'.format(bytes_to_hexstring(h)) for h in self.hashes]
        return '<block_locator\n\t' + '\n\t'.join(reversed(lines)) + '>'
| |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Fast binary heap (min-heap) implementation using ``numba``.
See https://en.wikipedia.org/wiki/Binary_heap
(implementation is based on german version at https://de.wikipedia.org/wiki/Bin%C3%A4rer_Heap)
"""
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
from typing import Union, Tuple, Optional
import numba
import numpy as np
# Scalar type of heap keys (Python-level view; the arrays hold C equivalents).
KeyType = Union[int, float]
# Scalar type of heap values (usually indices into external data).
ValueType = Union[bool, int, float]
# numpy array holding the heap's keys.
KeyArray = np.ndarray
# numpy array holding the heap's values.
ValueArray = np.ndarray
@numba.jit(nopython=True)
def build(keys: KeyArray, values: ValueArray, size: int) -> None:
    """
    Establish the min-heap property on the first *size* elements by
    sifting down every internal node, deepest node first.
    """
    assert 0 <= size <= keys.size, "size out of bounds"
    if size < 2:
        return
    # Internal nodes are at indices 0 .. (size >> 1) - 1
    for index in range((size >> 1) - 1, -1, -1):
        _heapify(keys, values, size, index)
@numba.jit(nopython=True)
def add(keys: KeyArray, values: ValueArray, size: int,
        max_key: KeyType, new_key: KeyType, new_value: ValueType) -> int:
    """
    Add a new value to the heap.
    :param keys: The heap's keys, ``0 <= size <= keys.size``.
    :param values: The heap's values, ``0 <= size <= values.size``.
    :param size: The heap's current size.
    :param max_key: The maximum key value.
    :param new_key: The new key to be inserted.
    :param new_value: The value associated with *new_key*.
    :return: The new heap size.
    """
    assert 0 <= size < keys.size, "size out of bounds"
    # Append a sentinel (max_key) element, then decrease it to new_key so
    # that _decrease's sift-up restores the heap property.
    index = size
    size += 1
    keys[index] = max_key
    values[index] = 0
    _decrease(keys, values, size, index, new_key, new_value)
    return size
@numba.jit(nopython=True)
def remove(keys: KeyArray, values: ValueArray, size: int,
           min_key: KeyType, index: int) -> int:
    """
    Remove a value from the heap.
    :param keys: The heap's keys, ``0 <= size <= keys.size``.
    :param values: The heap's values, ``0 <= size <= values.size``.
    :param size: The heap's current size.
    :param min_key: The minimum key value.
    :param index: The index of the element to be removed.
    :return: The new heap size
    """
    assert 0 <= size <= keys.size, "size out of bounds"
    assert 0 <= index < size, "index out of bounds"
    # Move the last element into the removed slot, then restore the heap
    # property around that slot.
    last_i = size - 1
    size = last_i
    if index != last_i:
        _swap(keys, values, index, last_i)
        # TODO (forman): make sure (test!) that size arg is correct here. Is it the old size (size + 1)?
        if index == 0 or keys[index] > keys[_parent(index)]:
            # Moved element may be too large for this subtree: sift down.
            _heapify(keys, values, size, index)
        else:
            # Moved element may be smaller than its parent: sift up by
            # "decreasing" it to min_key.
            # decrease does nothing, if h[i] == h[parent(i)]
            _decrease(keys, values, size, index, min_key, 0)
    return size
@numba.jit(nopython=True)
def remove_min(keys: KeyArray, values: ValueArray, size: int,
               min_key: KeyType) -> int:
    """
    Pop the heap's minimum element, which always sits at index 0.
    :param keys: The heap's keys, ``0 <= size <= keys.size``.
    :param values: The heap's values, ``0 <= size <= values.size``.
    :param size: The heap's current size.
    :param min_key: The minimum key value.
    :return: The new heap size
    """
    root_index = 0
    return remove(keys, values, size, min_key, root_index)
@numba.jit(nopython=True)
def _heapify(keys: KeyArray, values: ValueArray, size: int, index: int) -> None:
    """
    Sift the element at *index* down until neither child has a smaller key.
    :param keys: The heap's keys, ``0 <= size <= keys.size``.
    :param values: The heap's values.
    :param size: The heap's current size.
    :param index: The index, ``0 <= index < size``.
    """
    assert 0 <= size <= keys.size, "size out of bounds"
    assert 0 <= index < size, "index out of bounds"
    current = index
    while True:
        smallest = current
        child = _left(current)
        if child < size and keys[child] < keys[smallest]:
            smallest = child
        child = _right(current)
        if child < size and keys[child] < keys[smallest]:
            smallest = child
        if smallest == current:
            break
        _swap(keys, values, current, smallest)
        current = smallest
@numba.jit(nopython=True)
def _decrease(keys: KeyArray, values: ValueArray, size: int, index: int,
              new_key: KeyType, new_value: ValueType):
    """
    Lower the key at *index* to *new_key*, store *new_value*, then sift the
    element up until its parent's key is not larger.
    :param keys: The heap's keys, ``1 <= size <= keys.size``.
    :param values: The heap's values, ``1 <= size <= values.size``.
    :param size: The heap's current size.
    :param index: The index, ``0 <= index < size``.
    :param new_key: The new key, ``keys[index] >= new_key``.
    :param new_value: Any new value
    """
    assert 1 <= size <= keys.size, "size out of bounds of keys"
    assert 1 <= size <= values.size, "size out of bounds of values"
    assert 0 <= index < size, "index out of bounds"
    assert keys[index] >= new_key
    keys[index] = new_key
    values[index] = new_value
    i = index
    while i > 0:
        parent = _parent(i)
        if keys[i] < keys[parent]:
            _swap(keys, values, i, parent)
            i = parent
        else:
            # Heap property holds (equal keys do not move)
            break
@numba.jit(nopython=True)
def _swap(keys: KeyArray, values: ValueArray, index1: int, index2: int) -> None:
    """Exchange both the keys and the values at *index1* and *index2*."""
    keys[index1], keys[index2] = keys[index2], keys[index1]
    values[index1], values[index2] = values[index2], values[index1]
@numba.jit(nopython=True)
def _parent(index: int) -> int:
    """Index of the parent of node *index*."""
    return (index - 1) // 2
@numba.jit(nopython=True)
def _left(index: int) -> int:
    """Index of the left child of node *index*."""
    return 2 * index + 1
@numba.jit(nopython=True)
def _right(index: int) -> int:
    """Index of the right child of node *index*."""
    return 2 * index + 2
class MinHeap:
    """
    A min-heap.

    :param keys: Initial key array. The size of this array determines the maximum capacity of the min-heap.
    :param values: Initial values. This is usually an array of indices into the actual values.
    :param size: Initial heap size.
    :param min_key: The smallest possible key value.
    :param max_key: The largest possible key value.
    """

    def __init__(self,
                 keys: KeyArray,
                 values: Optional[ValueArray] = None,
                 size: Optional[int] = None,
                 min_key: Optional[KeyType] = None,
                 max_key: Optional[KeyType] = None):
        if keys is None:
            raise ValueError('keys must be given')
        if keys.size == 0:
            raise ValueError('keys must not be empty')
        if values is not None and values.size < keys.size:
            # Fix: original message was ungrammatical ("must greater than").
            raise ValueError('values.size must be greater than or equal to keys.size')
        if size is not None and size > keys.size:
            raise ValueError('size must be less than or equal to keys.size')
        if min_key is not None and max_key is not None and max_key <= min_key:
            raise ValueError('min_key must be less than max_key')
        if values is None:
            # By default each key's value is its original position in `keys`.
            values = np.arange(keys.size, dtype=np.uint32)
        if size is None:
            size = keys.size
        if min_key is None:
            # Fall back to the most extreme representable value for the dtype.
            dtype = keys.dtype
            min_key = np.iinfo(dtype).min if np.issubdtype(dtype, np.integer) else np.finfo(dtype).min
        if max_key is None:
            dtype = keys.dtype
            max_key = np.iinfo(dtype).max if np.issubdtype(dtype, np.integer) else np.finfo(dtype).max
        # Establish the heap invariant in place over the first `size` entries.
        build(keys, values, size)
        self._keys = keys
        self._values = values
        self._size = size
        self._min_key = min_key
        self._max_key = max_key

    @property
    def keys(self) -> KeyArray:
        """The backing key array; only the first `size` entries are live."""
        return self._keys

    @property
    def values(self) -> ValueArray:
        """The backing value array, parallel to `keys`."""
        return self._values

    @property
    def size(self) -> int:
        """The number of entries currently on the heap."""
        return self._size

    @property
    def min(self) -> Tuple[KeyType, ValueType]:
        """The (key, value) pair at the heap root, i.e. with the smallest key."""
        return self._keys[0], self._values[0]

    @property
    def min_key(self) -> KeyType:
        """The smallest key on the heap."""
        return self._keys[0]

    @property
    def min_value(self) -> ValueType:
        """The value stored with the smallest key."""
        return self._values[0]

    def get(self, index: int) -> Tuple[KeyType, ValueType]:
        """Return the (key, value) pair stored at `index`."""
        return self._keys[index], self._values[index]

    def get_key(self, index: int) -> KeyType:
        """Return the key stored at `index`."""
        return self._keys[index]

    def get_value(self, index: int) -> ValueType:
        """Return the value stored at `index`."""
        return self._values[index]

    def add(self, new_key: KeyType, new_value: Optional[ValueType] = None) -> None:
        """Insert a new entry; `new_value` defaults to the insertion position."""
        if new_value is None:
            new_value = self._size
        self._size = add(self._keys, self._values, self._size, self._max_key, new_key, new_value)

    def remove(self, index: int) -> Tuple[KeyType, ValueType]:
        """Remove the entry at `index` and return its (key, value) pair."""
        old_entry = self._keys[index], self._values[index]
        self._size = remove(self._keys, self._values, self._size, self._min_key, index)
        return old_entry

    def remove_min(self) -> Tuple[KeyType, ValueType]:
        """Remove and return the (key, value) pair with the smallest key."""
        return self.remove(0)
| |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import re
from fake_switches.command_processing.switch_tftp_parser import SwitchTftpParser
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor
from fake_switches.cisco.command_processor.config import ConfigCommandProcessor
from fake_switches.switch_configuration import VlanPort
from fake_switches import group_sequences
class EnabledCommandProcessor(BaseCommandProcessor):
    """Fake Cisco privileged-EXEC ("enable") mode command processor.

    Serves the commands available at the ``hostname#`` prompt (show, configure,
    copy, terminal, write, exit) against the in-memory switch configuration.
    """

    def get_prompt(self):
        # Privileged-EXEC prompt: hostname followed by "#".
        return self.switch_configuration.name + "#"

    def do_enable(self, *args):
        # Already in enabled mode; accept the command silently.
        pass

    def do_configure(self, *_):
        """Enter global configuration mode."""
        self.write_line("Enter configuration commands, one per line. End with CNTL/Z.")
        self.move_to(ConfigCommandProcessor)

    def do_show(self, *args):
        """Dispatch `show ...` subcommands.

        Keyword matching uses `str.startswith` so abbreviated input (e.g.
        "sh run int") is accepted, mimicking Cisco command abbreviation.
        """
        if "running-config".startswith(args[0]):
            if len(args) < 2:
                # Bare "show running-config": dump the whole configuration.
                self.show_run()
            elif "vlan".startswith(args[1]):
                # show running-config vlan <number>
                self.write_line("Building configuration...")
                self.write_line("")
                self.write_line("Current configuration:")
                for vlan in self.switch_configuration.vlans:
                    if vlan.number == int(args[2]):
                        self.write_line("\n".join(["!"] + build_running_vlan(vlan)))
                self.write_line("end")
                self.write_line("")
            elif "interface".startswith(args[1]):
                # show running-config interface <name>; the interface name may
                # be split across several arguments (e.g. "Fa 0/1").
                if_name = "".join(args[2:])
                port = self.switch_configuration.get_port_by_partial_name(if_name)
                if port:
                    self.write_line("Building configuration...")
                    self.write_line("")
                    data = ["!"] + build_running_interface(port) + ["end", ""]
                    # Byte count includes a trailing newline, hence the +1.
                    self.write_line("Current configuration : %i bytes" % (len("\n".join(data)) + 1))
                    [self.write_line(l) for l in data]
                else:
                    self.write_line(" ^")
                    self.write_line("% Invalid input detected at '^' marker.")
                    self.write_line("")
        elif "vlan".startswith(args[0]):
            # show vlan [brief]: one row per VLAN with its access ports.
            self.write_line("")
            self.write_line("VLAN Name Status Ports")
            self.write_line("---- -------------------------------- --------- -------------------------------")
            for vlan in sorted(self.switch_configuration.vlans, key=lambda v: v.number):
                # Ports with no explicit access vlan implicitly belong to VLAN 1.
                ports = [port.get_subname(length=2) for port in self.switch_configuration.ports
                         if port.access_vlan == vlan.number or (vlan.number == 1 and port.access_vlan is None)]
                self.write_line("%-4s %-32s %s%s" % (
                    vlan.number,
                    vlan_name(vlan) if vlan_name(vlan) else "VLAN%s" % vlan.number,
                    "active",
                    (" " + ", ".join(ports)) if ports else ""
                ))
            if len(args) == 1:
                # Plain "show vlan" additionally prints the type/SPAN sections.
                self.write_line("")
                self.write_line("VLAN Type SAID MTU Parent RingNo BridgeNo Stp BrdgMode Trans1 Trans2")
                self.write_line("---- ----- ---------- ----- ------ ------ -------- ---- -------- ------ ------")
                for vlan in sorted(self.switch_configuration.vlans, key=lambda v: v.number):
                    self.write_line("%-4s enet 10%04d 1500 - - - - - 0 0" % (vlan.number, vlan.number))
                self.write_line("")
                self.write_line("Remote SPAN VLANs")
                self.write_line("------------------------------------------------------------------------------")
                self.write_line("")
                self.write_line("")
                self.write_line("Primary Secondary Type Ports")
                self.write_line("------- --------- ----------------- ------------------------------------------")
                self.write_line("")
        elif "ip".startswith(args[0]):
            if "interface".startswith(args[1]):
                if_list = None
                if len(args) > 2:
                    # show ip interface <name>: a single interface.
                    interface = self.switch_configuration.get_port_by_partial_name("".join(args[2:]))
                    if interface:
                        if_list = [interface]
                    else:
                        self.write_line(" ^")
                        self.write_line("% Invalid input detected at '^' marker.")
                        self.write_line("")
                else:
                    # All interfaces, VLAN interfaces sorted first.
                    if_list = sorted(self.switch_configuration.ports, key=lambda e: ("a" if isinstance(e, VlanPort) else "b") + e.name)
                if if_list:
                    for interface in if_list:
                        self.write_line("%s is down, line protocol is down" % interface.name)
                        if not isinstance(interface, VlanPort):
                            # Physical ports have no IP configuration here.
                            self.write_line(" Internet protocol processing disabled")
                        else:
                            if len(interface.ips) == 0:
                                self.write_line(" Internet protocol processing disabled")
                            else:
                                # First address is primary, the rest secondary.
                                self.write_line(" Internet address is %s" % interface.ips[0])
                                for ip in interface.ips[1:]:
                                    self.write_line(" Secondary address %s" % ip)
                                self.write_line(" Outgoing access list is %s" % (interface.access_group_out if interface.access_group_out else "not set"))
                                self.write_line(" Inbound access list is %s" % (interface.access_group_in if interface.access_group_in else "not set"))
                                if interface.vrf is not None:
                                    self.write_line(" VPN Routing/Forwarding \"%s\"" % interface.vrf.name)

    def do_copy(self, source_url, destination_url):
        """Start a `copy <source> <dest>` and prompt for the filename."""
        dest_protocol, dest_file = destination_url.split(":")
        self.write("Destination filename [%s]? " % strip_leading_slash(dest_file))
        # The actual transfer happens once the user confirms the filename.
        self.continue_to(partial(self.continue_validate_copy, source_url))

    def continue_validate_copy(self, source_url, _):
        """Fetch and apply a configuration file referenced by a tftp:// URL."""
        self.write_line("Accessing %s..." % source_url)
        try:
            url, filename = re.match('tftp://([^/]*)/(.*)', source_url).group(1, 2)
            SwitchTftpParser(self.switch_configuration).parse(url, filename, ConfigCommandProcessor)
            self.write_line("Done (or some official message...)")
        except Exception as e:
            # Any failure is reported to the user as a generic timeout,
            # mirroring real-device behavior; details go to the log.
            self.logger.warning("tftp parsing went wrong : %s" % str(e))
            self.write_line("Error opening %s (Timed out)" % source_url)

    def do_terminal(self, *args):
        # Terminal settings are irrelevant for the fake switch; accept silently.
        pass

    def do_write(self, *args):
        # Persisting the config is a no-op for the fake switch.
        pass

    def do_exit(self):
        self.is_done = True

    def show_run(self):
        """Emit the full running configuration: header, vlans, interfaces."""
        all_data = [
            "version 12.1",
            "!",
            "hostname %s" % self.switch_configuration.name,
            "!",
            "!",
        ]
        for vlan in self.switch_configuration.vlans:
            all_data = all_data + build_running_vlan(vlan) + ["!"]
        # Physical ports first, VLAN interfaces last (opposite of `show ip interface`).
        for interface in sorted(self.switch_configuration.ports, key=lambda e: ("b" if isinstance(e, VlanPort) else "a") + e.name):
            all_data = all_data + build_running_interface(interface) + ["!"]
        all_data += ["end", ""]
        self.write_line("Building configuration...")
        self.write_line("")
        # Byte count includes a trailing newline, hence the +1.
        self.write_line("Current configuration : %i bytes" % (len("\n".join(all_data)) + 1))
        [self.write_line(l) for l in all_data]
def strip_leading_slash(dest_file):
    """Return *dest_file* without its first character (the leading '/')."""
    remainder = dest_file[1:]
    return remainder
def build_running_vlan(vlan):
    """Return the running-config lines describing *vlan*."""
    lines = ["vlan %s" % vlan.number]
    if vlan.name:
        lines += [" name %s" % vlan.name]
    return lines
def build_running_interface(port):
    """Return the running-config lines for *port*.

    Works for both physical ports and VLAN interfaces; VLAN interfaces
    additionally get their IP-level configuration (addresses, access groups,
    standby/VRRP groups, helper addresses).
    """
    data = [
        "interface %s" % port.name
    ]
    if port.description:
        data.append(" description %s" % port.description)
    # VLAN 1 is the implicit default access vlan, so it is never rendered.
    if port.access_vlan and port.access_vlan != 1:
        data.append(" switchport access vlan %s" % port.access_vlan)
    if port.trunk_encapsulation_mode is not None:
        data.append(" switchport trunk encapsulation %s" % port.trunk_encapsulation_mode)
    if port.trunk_native_vlan is not None:
        data.append(" switchport trunk native vlan %s" % port.trunk_native_vlan)
    # A full 4096-vlan list means "all vlans allowed" and is omitted.
    if port.trunk_vlans is not None and len(port.trunk_vlans) < 4096 :
        data.append(" switchport trunk allowed vlan %s" % to_vlan_ranges(port.trunk_vlans))
    if port.mode:
        data.append(" switchport mode %s" % port.mode)
    if port.shutdown:
        data.append(" shutdown")
    if port.aggregation_membership:
        # NOTE(review): uses the last character of the aggregation name as the
        # channel-group number -- assumes single-digit group ids; confirm.
        data.append(" channel-group %s mode active" % port.aggregation_membership[-1])
    if port.vrf:
        data.append(" ip vrf forwarding %s" % port.vrf.name)
    if isinstance(port, VlanPort):
        if len(port.ips) > 0:
            # Secondary addresses are emitted before the primary one.
            for ip in port.ips[1:]:
                data.append(" ip address %s %s secondary" % (ip.ip, ip.netmask))
            data.append(" ip address %s %s" % (port.ips[0].ip, port.ips[0].netmask))
        else:
            data.append(" no ip address")
        if port.access_group_in:
            data.append(" ip access-group %s in" % port.access_group_in)
        if port.access_group_out:
            data.append(" ip access-group %s out" % port.access_group_out)
        if "no ip redirects" in port.vendor_specific:
            data.append(" no ip redirects")
        # NOTE(review): these objects are called "vrrp" but are rendered as
        # Cisco "standby" (HSRP-style) commands -- presumably intentional.
        for vrrp in port.vrrps:
            group = vrrp.group_id
            for i, ip_address in enumerate(vrrp.ip_addresses):
                # Only the first address of a group is primary.
                data.append(" standby {group} ip {ip_address}{secondary}".format(group=group, ip_address=ip_address,
                                                                                secondary=' secondary' if i > 0 else ''))
            if vrrp.timers_hello is not None and vrrp.timers_hold is not None:
                data.append(" standby {group} timers {hello_time} {hold_time}".format(group=group, hello_time=vrrp.timers_hello, hold_time=vrrp.timers_hold))
            if vrrp.priority is not None:
                data.append(" standby {group} priority {priority}".format(group=group, priority=vrrp.priority))
            if vrrp.preempt is not None:
                if vrrp.preempt_delay_minimum is not None:
                    data.append(" standby {group} preempt delay minimum {delay}".format(group=group, delay=vrrp.preempt_delay_minimum))
                else:
                    data.append(" standby {group} preempt".format(group=group))
            if vrrp.authentication is not None:
                data.append(" standby {group} authentication {authentication}".format(group=group, authentication=vrrp.authentication))
            # Tracks are sorted for a deterministic output order.
            for track, decrement in sorted(vrrp.track.items()):
                data.append(" standby {group} track {track} decrement {decrement}".format(group=group, track=track, decrement=decrement))
        for ip_address in port.ip_helpers:
            data.append(" ip helper-address {}".format(ip_address))
    return data
def vlan_name(vlan):
    """Display name for *vlan*: its configured name, "default" for VLAN 1, else None."""
    if vlan.name:
        return vlan.name
    if vlan.number == 1:
        return "default"
    return None
def to_vlan_ranges(vlans):
    """Render *vlans* as a Cisco allowed-vlan list, e.g. "1-3,7"."""
    if not vlans:
        return "none"
    consecutive = group_sequences(vlans, are_in_sequence=lambda a, b: a + 1 == b)
    return ",".join(to_range_string(run) for run in consecutive)
def to_range_string(array_range):
    """Collapse a run of 3+ consecutive numbers to "first-last"; shorter runs stay comma-separated."""
    if len(array_range) >= 3:
        return "%s-%s" % (array_range[0], array_range[-1])
    return ",".join(str(n) for n in array_range)
| |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Library of common metric aggregators."""
import collections
import dataclasses
import math
from typing import Optional, OrderedDict, Tuple, Union
import tensorflow as tf
import tree
from tensorflow_federated.python.aggregators import secure
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.learning import model as model_lib
class InternalError(Exception):
  """Raised on an error internal to TFF. Please file a bug report."""
def _check_finalizers_matches_unfinalized_metrics(
    metric_finalizers: model_lib.MetricFinalizersType,
    local_unfinalized_metrics_type: computation_types.StructWithPythonType):
  """Verifies compatibility of the finalizers and the unfinalized metrics.

  Args:
    metric_finalizers: The finalizers to validate.
    local_unfinalized_metrics_type: The unfinalized metrics type to validate.

  Raises:
    ValueError: If `metric_finalizers` cannot finalize a variable structure
      with type `local_unfinalized_metrics_type`.
  """
  metric_names_in_metric_finalizers = set(metric_finalizers.keys())
  metric_names_in_local_unfinalized_metrics = set(
      structure.name_list(local_unfinalized_metrics_type))
  if (metric_names_in_metric_finalizers !=
      metric_names_in_local_unfinalized_metrics):
    # Report the symmetric difference in both directions so the user can see
    # exactly which names are missing on each side.
    difference_1 = (
        metric_names_in_metric_finalizers -
        metric_names_in_local_unfinalized_metrics)
    difference_2 = (
        metric_names_in_local_unfinalized_metrics -
        metric_names_in_metric_finalizers)
    # Fix: the first two concatenated fragments previously rendered as
    # "...`metric_finalizers`but not..." (missing separating space).
    raise ValueError(
        'The metric names in `metric_finalizers` do not match those in the '
        '`local_unfinalized_metrics`. Metric names in the `metric_finalizers` '
        f'but not the `local_unfinalized_metrics`: {difference_1}. '
        'Metric names in the `local_unfinalized_metrics` but not the '
        f'`metric_finalizers`: {difference_2}.\n'
        'Metrics names in the `metric_finalizers`: '
        f'{metric_names_in_metric_finalizers}. Metric names in the '
        '`local_unfinalized_metrics`: '
        f'{metric_names_in_local_unfinalized_metrics}.')
def _check_metric_finalizers(metric_finalizers: model_lib.MetricFinalizersType):
  """Validates `metric_finalizers`, raising an error on failure.

  Args:
    metric_finalizers: The finalizers to validate.

  Raises:
    TypeError: If `metric_finalizers` is not a `collections.OrderedDict` or
      any key is not a `str` type, or value is not a `callable`.
  """
  py_typecheck.check_type(metric_finalizers, collections.OrderedDict,
                          'metric_finalizers')
  for metric_name, finalizer in metric_finalizers.items():
    py_typecheck.check_type(metric_name, str,
                            f'metric_finalizers key {metric_name}')
    py_typecheck.check_callable(finalizer, f'metric_finalizers value {finalizer}')
def _check_local_unfinalzied_metrics_type(
    local_unfinalized_metrics_type: computation_types.StructWithPythonType):
  """Validates `local_unfinalized_metrics_type`, raising an error on failure.

  Args:
    local_unfinalized_metrics_type: The unfinalized metrics type to validate.

  Raises:
    TypeError: If `local_unfinalized_metrics_type` is not a
      `tff.types.StructWithPythonType` or has a `.container` attribute that is
      not the `collections.OrderedDict` type.
  """
  # A manual isinstance check (instead of `py_typecheck`) keeps the expected
  # type rendered as `tff.types.StructWithPythonType` in the error message.
  if not isinstance(local_unfinalized_metrics_type,
                    computation_types.StructWithPythonType):
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType`, found '
        f'{py_typecheck.type_string(type(local_unfinalized_metrics_type))}.')
  container = local_unfinalized_metrics_type.python_container
  if container is not collections.OrderedDict:
    raise TypeError(
        'Expected the input `local_unfinalized_metrics_type` to be a '
        '`tff.types.StructWithPythonType` with `collections.OrderedDict` as '
        'the Python container, found a `tff.types.StructWithPythonType` with '
        f'Python container {py_typecheck.type_string(container)}.'
    )
def sum_then_finalize(
    metric_finalizers: model_lib.MetricFinalizersType,
    local_unfinalized_metrics_type: computation_types.StructWithPythonType
) -> computation_base.Computation:
  """Creates a TFF computation that aggregates metrics via `sum_then_finalize`.

  The returned federated TFF computation has the following type signature:
  `local_unfinalized_metrics@CLIENTS -> aggregated_metrics@SERVER`, where the
  input is given by `tff.learning.Model.report_local_unfinalized_metrics()` at
  `CLIENTS`, and the output is computed by first summing the unfinalized metrics
  from `CLIENTS`, followed by applying the finalizers at `SERVER`.

  Args:
    metric_finalizers: An `OrderedDict` of `string` metric names to finalizer
      functions returned by `tff.learning.Model.metric_finalizers()`. It should
      have the same keys (i.e., metric names) as the `OrderedDict` returned by
      `tff.learning.Model.report_local_unfinalized_metrics()`. A finalizer is a
      callable (typically `tf.function` or `tff.tf_computation` decorated
      function) that takes in a metric's unfinalized values, and returns the
      finalized values.
    local_unfinalized_metrics_type: A `tff.types.StructWithPythonType` (with
      `OrderedDict` as the Python container) of a client's local unfinalized
      metrics. Let `local_unfinalized_metrics` be the output of
      `tff.learning.Model.report_local_unfinalized_metrics()`. Its type can be
      obtained by `tff.framework.type_from_tensors(local_unfinalized_metrics)`.

  Returns:
    A federated TFF computation that sums the unfinalized metrics from
    `CLIENTS`, and applies the corresponding finalizers at `SERVER`.

  Raises:
    TypeError: If the inputs are of the wrong types.
    ValueError: If the keys (i.e., metric names) in `metric_finalizers` are not
      the same as those expected by `local_unfinalized_metrics_type`.
  """
  # Validate each argument, then their mutual consistency.
  _check_metric_finalizers(metric_finalizers)
  _check_local_unfinalzied_metrics_type(local_unfinalized_metrics_type)
  _check_finalizers_matches_unfinalized_metrics(metric_finalizers,
                                                local_unfinalized_metrics_type)

  @computations.federated_computation(
      computation_types.at_clients(local_unfinalized_metrics_type))
  def aggregator_computation(client_local_unfinalized_metrics):
    # Element-wise sum of every client's unfinalized metric values.
    unfinalized_metrics_sum = intrinsics.federated_sum(
        client_local_unfinalized_metrics)

    @computations.tf_computation(local_unfinalized_metrics_type)
    def finalizer_computation(unfinalized_metrics):
      # Apply each metric's finalizer to its summed unfinalized values.
      finalized_metrics = collections.OrderedDict()
      for metric_name, metric_finalizer in metric_finalizers.items():
        finalized_metrics[metric_name] = metric_finalizer(
            unfinalized_metrics[metric_name])
      return finalized_metrics

    # Finalization runs once, on the server-side sum.
    return intrinsics.federated_map(finalizer_computation,
                                    unfinalized_metrics_sum)

  return aggregator_computation
# A metric value range is a closed interval expressed as a (lower, upper)
# 2-tuple of two floats or two ints (types must not be mixed).
MetricValueRange = Union[Tuple[float, float], Tuple[int, int]]
# A (possibly nested) mapping from metric name to value range, mirroring the
# structure of the unfinalized metrics.
MetricValueRangeDict = OrderedDict[str, Union[MetricValueRange,
                                              'MetricValueRangeDict']]
def _check_range(user_value: MetricValueRange):
  """Validates a value is a valid range.

  Args:
    user_value: The value to validate.

  Raises:
    TypeError: If `user_value` is not a `tuple` or its elements are not `int` or
      `float` type.
    ValueError: If `user_value` has length other than two.
  """
  py_typecheck.check_type(user_value, tuple)
  if len(user_value) != 2:
    raise ValueError('Ranges must be defined as a 2-tuple, got a tuple of '
                     f'length {len(user_value)}.')
  for bound in user_value:
    py_typecheck.check_type(bound, (int, float))
@dataclasses.dataclass(frozen=True)
class _MetricRange:
"""An opaque structure defining a closed range.
This is used as an opaque object in a nested structure to prevent
`tree.map_structure` from traversing to the numeric leaves.
"""
lower: Union[int, float]
upper: Union[int, float]
def __eq__(self, other):
"""A type-aware equality that prevents int/float conversion."""
return (type(self.upper) is type(other.upper) and
self.upper == other.upper and
type(self.lower) is type(other.lower) and self.lower == other.lower)
class UnquantizableDTypeError(Exception):
  """Raised when a tensor dtype cannot be quantized for secure summation."""
# Default clipping range for secure-sum quantization: values are clipped to
# [DEFAULT_SECURE_LOWER_BOUND, DEFAULT_SECURE_UPPER_BOUND] before encoding.
DEFAULT_SECURE_LOWER_BOUND = 0
# Use a power of 2 minus one to more accurately encode floating dtypes that
# actually contain integer values. 2 ^ 20 gives us approximately a range of
# [0, 1 million].
DEFAULT_SECURE_UPPER_BOUND = 2**20 - 1
def create_default_secure_sum_quantization_ranges(
    local_unfinalized_metrics_type: computation_types.StructWithPythonType,
    lower_bound: Union[int, float] = DEFAULT_SECURE_LOWER_BOUND,
    upper_bound: Union[int, float] = DEFAULT_SECURE_UPPER_BOUND
) -> MetricValueRangeDict:
  """Create a nested structure of quantization ranges for secure sum encoding.

  Args:
    local_unfinalized_metrics_type: The `tff.Type` structure to generate default
      secure sum quantization ranges from. Must be a `tff.Type` tree containing
      only `tff.TensorType` and `tff.StructType`. Each `tff.TensorType` must be
      of floating point or integer dtype.
    lower_bound: An optional integer or floating point lower bound for the
      secure sum quantization range. Values smaller than this will be clipped to
      this value. By default is `0`. If a `float`, any `tff.TensorType` in
      `local_unfinalized_metrics_type` with an integer dtype will use
      `math.ceil(lower_bound)` as a bound.
    upper_bound: An optional integer or floating point upper bound for the
      secure sum quantization range. Values larger than this will be clipped to
      this value. By default is `2^20 - 1` (~1 million). If a `float`, any
      `tff.TensorType` in `local_unfinalized_metrics_type` with an integer dtype
      will use `math.floor(upper_bound)` as a bound.

  Returns:
    A nested structure matching the structure of
    `local_unfinalized_metrics_type` where each `tf.TensorType` has been
    replaced with a 2-tuple of lower bound and upper bound, where the tuple
    elements are `float` for floating dtypes, and `int` for integer dtypes.

  Raises:
    UnquantizableDTypeError: If a `tff.TensorType` in
      `local_unfinalized_metrics_type` has a non-float or non-integer dtype.
    ValueError: If an integer dtype in `local_unfinalized_metrics_type` will
      have a zero range (e.g. `math.floor(upper_bound) - math.ceil(lower_bound)
      < 1`).
  """
  py_typecheck.check_type(upper_bound, (int, float))
  py_typecheck.check_type(lower_bound, (int, float))
  if lower_bound >= upper_bound:
    raise ValueError('`upper_bound` must be greater than `lower_bound`.')
  # Width of the integer quantization window; integer tensors need at least
  # one representable value in [ceil(lower), floor(upper)].
  integer_range_width = math.floor(upper_bound) - math.ceil(lower_bound)

  def create_default_range(
      type_spec: computation_types.TensorType) -> MetricValueRange:
    # Float tensors use the bounds as-is; integer tensors use the bounds
    # rounded inward to integers.
    if type_spec.dtype.is_floating:
      return float(lower_bound), float(upper_bound)
    elif type_spec.dtype.is_integer:
      if integer_range_width < 1:
        raise ValueError(
            'Encounter an integer tensor in the type, but quantization range '
            f'[{lower_bound}, {upper_bound}] is not wide enough to quantize '
            f'any integers (becomes [{int(lower_bound)}, {int(upper_bound)}]).')
      return math.ceil(lower_bound), math.floor(upper_bound)
    else:
      raise UnquantizableDTypeError(
          'Do not know how to create a default range for dtype '
          f'{type_spec.dtype}. Only floating or integer types are supported.')

  return type_conversions.structure_from_tensor_type_tree(
      create_default_range, local_unfinalized_metrics_type)
# Helper functions for factory keys used in `secure_sum_then_finalize`.
# A factory key is uniquely defined by three values: lower bound, upper bound,
# and tensor dtype. In `secure_sum_then_finalize`, we will create an aggregation
# process for each factory key. Metric values sharing the same factory key will
# be aggregated together.
_DELIMITER = '/'


# TODO(b/222112465): Avoid converting floats to strings as it may cause problems.
def _create_factory_key(lower: Union[int, float], upper: Union[int, float],
                        tensor_dtype: tf.dtypes.DType) -> str:
  """Returns the '<lower>/<upper>/<dtype enum>' key identifying an aggregator."""
  parts = [lower, upper, tensor_dtype.as_datatype_enum]
  return _DELIMITER.join(str(part) for part in parts)
def secure_sum_then_finalize(
metric_finalizers: model_lib.MetricFinalizersType,
local_unfinalized_metrics_type: computation_types.StructWithPythonType,
metric_value_ranges: Optional[MetricValueRangeDict] = None
) -> computation_base.Computation:
"""Creates a TFF computation that aggregates metrics using secure summation.
The returned federated TFF computation has the following type signature:
```
(local_unfinalized_metrics@CLIENTS ->
<aggregated_metrics@SERVER, secure_sum_measurements@SERVER)
```
where the input is given by
`tff.learning.Model.report_local_unfinalized_metrics()` at `CLIENTS`, and the
first output (`aggregated_metrics`) is computed by first securely summing the
unfinalized metrics from `CLIENTS`, followed by applying the finalizers at
`SERVER`. The second output (`secure_sum_measurements`) is an `OrderedDict`
that maps from `factory_key`s to the secure summation measurements (e.g. the
number of clients gets clipped. See `tff.aggregators.SecureSumFactory` for
details). A `factory_key` is uniquely defined by three scalars: lower bound,
upper bound, and tensor dtype (denoted as datatype enum). Metric values of the
same `factory_key` are grouped and aggegrated together (and hence, the
`secure_sum_measurements` are also computed at a group level).
Since secure summation works in fixed-point arithmetic space, floating point
numbers must be encoding using integer quantization. By default, each tensor
in `local_unfinalized_metrics_type` will be clipped to `[0, 2**20 - 1]` and
encoded to integers inside `tff.aggregators.SecureSumFactory`. Callers can
change this range by setting `metric_value_ranges`, which may be a partial
tree matching the structure of `local_unfinalized_metrics_type`.
Example partial value range specification:
>>> finalizers = ...
>>> metrics_type = tff.to_type(collections.OrderedDict(
a=tff.types.TensorType(tf.int32),
b=tff.types.TensorType(tf.float32),
c=[tff.types.TensorType(tf.float32), tff.types.TensorType(tf.float32)])
>>> value_ranges = collections.OrderedDict(
b=(0.0, 1.0),
c=[None, (0.0, 1.0)])
>>> aggregator = tff.learning.metrics.secure_sum_then_finalize(
finalizers, metrics_type, value_ranges)
This sets the range of the *second* tensor of `b` in the dictionary, using the
range for the first tensor, and the `a` tensor.
Args:
metric_finalizers: An `OrderedDict` of `string` metric names to finalizer
functions returned by `tff.learning.Model.metric_finalizers()`. It should
have the same keys (i.e., metric names) as the `OrderedDict` returned by
`tff.learning.Model.report_local_unfinalized_metrics()`. A finalizer is a
callable (typically `tf.function` or `tff.tf_computation` decoreated
function) that takes in a metric's unfinalized values, and returns the
finalized values.
local_unfinalized_metrics_type: A `tff.types.StructWithPythonType` (with
`OrderedDict` as the Python container) of a client's local unfinalized
metrics. Let `local_unfinalized_metrics` be the output of
`tff.learning.Model.report_local_unfinalized_metrics()`. Its type can be
obtained by `tff.framework.type_from_tensors(local_unfinalized_metrics)`.
metric_value_ranges: A `collections.OrderedDict` that matches the structure
of `local_unfinalized_metrics_type` (a value for each
`tff.types.TensorType` in the type tree). Each leaf in the tree should
have a 2-tuple that defines the range of expected values for that variable
in the metric. If the entire structure is `None`, a default range of
`[0.0, 2.0**20 - 1]` will be applied to all variables. Each leaf may also
be `None`, which will also get the default range; allowing partial user
sepcialization. At runtime, values that fall outside the ranges specified
at the leaves, those values will be clipped to within the range.
Returns:
A federated TFF computation that securely sums the unfinalized metrics from
`CLIENTS`, and applies the correponding finalizers at `SERVER`.
Raises:
TypeError: If the inputs are of the wrong types.
ValueError: If the keys (i.e., metric names) in `metric_finalizers` are not
the same as those expected by `local_unfinalized_metrics_type`.
"""
_check_metric_finalizers(metric_finalizers)
_check_local_unfinalzied_metrics_type(local_unfinalized_metrics_type)
_check_finalizers_matches_unfinalized_metrics(metric_finalizers,
local_unfinalized_metrics_type)
default_metric_value_ranges = create_default_secure_sum_quantization_ranges(
local_unfinalized_metrics_type)
if metric_value_ranges is None:
metric_value_ranges = default_metric_value_ranges
# Walk the incoming `metric_value_ranges` and `default_metric_value_ranges`
# and fill in any missing ranges using the defaults.
def fill_missing_values_with_defaults(default_values, user_values):
if isinstance(default_values, collections.abc.Mapping):
if user_values is None:
user_values = {}
return type(default_values)(
(key,
fill_missing_values_with_defaults(default_value, user_values.get(
key))) for key, default_value in default_values.items())
elif isinstance(default_values, list):
if user_values is None:
user_values = [None] * len(default_values)
return [
fill_missing_values_with_defaults(default_value, user_values[idx])
for idx, default_value in enumerate(default_values)
]
elif user_values is None:
return _MetricRange(*default_values)
else:
_check_range(user_values)
return _MetricRange(*user_values)
try:
metric_value_ranges = fill_missing_values_with_defaults(
default_metric_value_ranges, metric_value_ranges)
except TypeError as e:
raise TypeError('Failed to create encoding value range from: '
f'{metric_value_ranges}') from e
# Create an aggregator factory for each unique value range, rather than each
# leaf tensor (which could introduce a lot of duplication).
aggregator_factories = {
value_range: secure.SecureSumFactory(value_range.upper, value_range.lower)
for value_range in set(tree.flatten(metric_value_ranges))
}
# Construct a python container of `tff.TensorType` so we can traverse it in
# parallel with the value ranges during AggregationProcess construction.
# Otherwise we have a `tff.Type` but `metric_value_ranges` is a Python
# container which are difficult to traverse in parallel.
structure_of_tensor_types = type_conversions.structure_from_tensor_type_tree(
lambda t: t, local_unfinalized_metrics_type)
# We will construct groups of tensors with the same dtype and quantization
# value range so that we can construct fewer aggregations-of-structures,
# rather than a large structure-of-aggregations. Without this, the TFF
# compiler pipeline results in large slow downs (see b/218312198).
factory_key_by_path = collections.OrderedDict()
value_range_by_factory_key = collections.OrderedDict()
path_list_by_factory_key = collections.defaultdict(list)
# Maintain a flattened list of paths. This is useful to flatten the aggregated
# values, which will then be used by `tf.nest.pack_sequence_as`.
flattened_path_list = []
for (path, tensor_spec), (_, value_range) in zip(
tree.flatten_with_path(structure_of_tensor_types),
tree.flatten_with_path(metric_value_ranges)):
factory_key = _create_factory_key(value_range.lower, value_range.upper,
tensor_spec.dtype)
factory_key_by_path[path] = factory_key
value_range_by_factory_key[factory_key] = value_range
path_list_by_factory_key[factory_key].append(path)
flattened_path_list.append(path)
@computations.tf_computation(local_unfinalized_metrics_type)
def group_value_by_factory_key(local_unfinalized_metrics):
    """Groups client local metrics into a map of `factory_key` to value list."""
    # We cannot use `collections.defaultdict(list)` here because its result is
    # incompatible with `structure_from_tensor_type_tree`.
    value_list_by_factory_key = collections.OrderedDict()
    for path, value in tree.flatten_with_path(local_unfinalized_metrics):
        # `factory_key_by_path` (built in the enclosing function) maps each leaf
        # path to the key derived from its (lower, upper, dtype) triple, so all
        # leaves sharing a quantization range and dtype land in the same list.
        factory_key = factory_key_by_path[path]
        if factory_key in value_list_by_factory_key:
            value_list_by_factory_key[factory_key].append(value)
        else:
            value_list_by_factory_key[factory_key] = [value]
    # NOTE(review): insertion order of keys follows the first occurrence of each
    # factory key in the flattened metric structure.
    return value_list_by_factory_key
def flatten_grouped_values(value_list_by_factory_key):
    """Flatten the values in the same order as in `flattened_path_list`."""
    # Invert the grouping: recover the leaf path for each aggregated value by
    # zipping each group against the path list recorded for its factory key.
    value_by_path = collections.OrderedDict()
    for factory_key, grouped_values in value_list_by_factory_key.items():
        paths_for_key = path_list_by_factory_key[factory_key]
        for leaf_path, leaf_value in zip(paths_for_key, grouped_values):
            value_by_path[leaf_path] = leaf_value
    # Emit values in the canonical traversal order captured at construction.
    return [value_by_path[leaf_path] for leaf_path in flattened_path_list]
# Create a aggregation process for each factory key.
aggregation_process_by_factory_key = collections.OrderedDict()
# Construct a python container of `tff.TensorType` so we can traverse it and
# create aggregation processes from the factories.
tensor_type_list_by_factory_key = (
type_conversions.structure_from_tensor_type_tree(
lambda t: t, group_value_by_factory_key.type_signature.result))
for factory_key, tensor_type_list in tensor_type_list_by_factory_key.items():
value_range = value_range_by_factory_key[factory_key]
aggregation_process_by_factory_key[factory_key] = aggregator_factories.get(
value_range).create(computation_types.to_type(tensor_type_list))
@computations.federated_computation(
    computation_types.at_clients(local_unfinalized_metrics_type))
def aggregator_computation(client_local_unfinalized_metrics):
    """Securely sums client metrics per factory-key group, then finalizes them."""
    # The secure-sum aggregation processes are stateless, but
    # `AggregationProcess.next` still requires a server-placed state argument.
    unused_state = intrinsics.federated_value((), placements.SERVER)
    # Regroup each client's metric leaves by factory key (range + dtype).
    client_local_grouped_unfinalized_metrics = intrinsics.federated_map(
        group_value_by_factory_key, client_local_unfinalized_metrics)
    # Run one aggregation per group; fewer, larger aggregations avoid the TFF
    # compiler slow-downs mentioned in the enclosing function.
    metrics_aggregation_output = collections.OrderedDict()
    for factory_key, process in aggregation_process_by_factory_key.items():
        metrics_aggregation_output[factory_key] = process.next(
            unused_state, client_local_grouped_unfinalized_metrics[factory_key])
    # Zip the per-group outputs into a single server-placed structure.
    metrics_aggregation_output = intrinsics.federated_zip(
        metrics_aggregation_output)
    @computations.tf_computation(
        metrics_aggregation_output.type_signature.member)
    def finalizer_computation(grouped_aggregation_output):
        """Applies the metric finalizers to the summed (unfinalized) values."""
        # One minor downside of grouping the aggregation processes is that the
        # SecAgg measurements (e.g., clipped_count) are computed at a group level
        # (a group means all metric values belonging to the same `factory_key`).
        secure_sum_measurements = collections.OrderedDict(
            (factory_key, output.measurements)
            for factory_key, output in grouped_aggregation_output.items())
        finalized_metrics = collections.OrderedDict(
            secure_sum_measurements=secure_sum_measurements)
        # Un-group the summed values back into the original metric structure.
        grouped_unfinalized_metrics = collections.OrderedDict(
            (factory_key, output.result)
            for factory_key, output in grouped_aggregation_output.items())
        flattened_unfinalized_metrics_list = flatten_grouped_values(
            grouped_unfinalized_metrics)
        unfinalized_metrics = tf.nest.pack_sequence_as(
            structure_of_tensor_types, flattened_unfinalized_metrics_list)
        # Apply each user-supplied finalizer to its metric's summed values.
        for metric_name, metric_finalizer in metric_finalizers.items():
            finalized_metrics[metric_name] = metric_finalizer(
                unfinalized_metrics[metric_name])
        return finalized_metrics
    return intrinsics.federated_map(finalizer_computation,
                                    metrics_aggregation_output)
return aggregator_computation
| |
from toontown.coghq.SpecImports import *
# NOTE(review): auto-generated level-spec data (Lawbot HQ zone LB_Zone08a),
# presumably emitted by the in-game level editor — maps entity id to the
# attribute dict consumed by the level/entity system. Edit with care.
GlobalEntities = {1000: {'type': 'levelMgr',  # level manager: zone model + global settings
 'name': 'LevelMgr',
 'comment': '',
 'parentEntId': 0,
 'cogLevel': 0,
 'farPlaneDistance': 1500,
 'modelFilename': 'phase_11/models/lawbotHQ/LB_Zone08a',
 'wantDoors': 1},
 1001: {'type': 'editMgr',  # in-game editor hooks (insert/remove/save requests)
 'name': 'EditMgr',
 'parentEntId': 0,
 'insertEntity': None,
 'removeEntity': None,
 'requestNewEntity': None,
 'requestSave': None},
 0: {'type': 'zone',  # root zone entity; parentEntId 0 entities attach here
 'name': 'UberZone',
 'comment': '',
 'parentEntId': 0,
 'scale': 1,
 'description': '',
 'visibility': []},
100014: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-25.1777, 5.83836, 0.03),
'hpr': Vec3(92.8624, 0, 0),
'scale': Vec3(1, 1, 1),
'gagLevel': 5,
'gagLevelMax': 5,
'gagTrack': 'random',
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
100035: {'type': 'gagBarrel',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(16.7354, -42.9601, 0.03),
'hpr': Vec3(151.049, 0, 0),
'scale': Vec3(1, 1, 1),
'gagLevel': 5,
'gagLevelMax': 5,
'gagTrack': 'random',
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
100013: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100012,
'pos': Point3(0, 0, 0.591849),
'hpr': Vec3(147.995, 0, 0),
'scale': Vec3(1, 1, 1),
'rewardPerGrab': 15,
'rewardPerGrabMax': 0},
100016: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100015,
'pos': Point3(1.77609, -41.5342, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100018: {'type': 'model',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100015,
'pos': Point3(27.5451, -41.4709, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100019: {'type': 'model',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(36.4846, -38.3301, 0),
'hpr': Vec3(270.526, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100020: {'type': 'model',
'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(10.7887, -37.8558, 0),
'hpr': Vec3(270.526, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100022: {'type': 'model',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100015,
'pos': Point3(32.4792, -42.2737, 4.71821),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.6, 1.6, 1.6),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100023: {'type': 'model',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(5.52344, -42.2737, 4.71821),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.6, 1.6, 1.6),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100024: {'type': 'model',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(-39.2286, -39.5741, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBoxX2'},
100025: {'type': 'model',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(-62.8751, -40.0794, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(8, 8, 8),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_pottedplantA'},
100026: {'type': 'model',
'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(-39.2286, -33.027, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100027: {'type': 'model',
'name': 'copy of <unnamed> (4)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(-31.2652, -39.7321, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100028: {'type': 'model',
'name': 'copy of <unnamed> (5)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(-22.0578, -39.3922, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100029: {'type': 'model',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100015,
'pos': Point3(1.77609, -21.8266, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100030: {'type': 'model',
'name': 'copy of <unnamed> (4)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(10.7887, -18.0704, 0),
'hpr': Vec3(270.526, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100031: {'type': 'model',
'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(36.6874, -16.1212, 0),
'hpr': Vec3(270.526, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100032: {'type': 'model',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(27.5451, -20.5329, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.5, 1.5, 1.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_deskA'},
100033: {'type': 'model',
'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(5.52344, -22.4424, 4.71821),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.6, 1.6, 1.6),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100034: {'type': 'model',
'name': 'copy of <unnamed> (4)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(32.445, -22.0763, 4.71821),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1.6, 1.6, 1.6),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_torch_lampA'},
100036: {'type': 'model',
'name': 'copy of <unnamed> (5)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(15.0906, 40.376, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100037: {'type': 'model',
'name': 'copy of <unnamed> (6)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(23.8805, 40.376, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100038: {'type': 'model',
'name': 'copy of <unnamed> (7)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(31.7416, 40.376, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBox'},
100039: {'type': 'model',
'name': 'copy of <unnamed> (8)',
'comment': '',
'parentEntId': 100015,
'pos': Point3(31.7416, 33.2113, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_CardBoardBoxX2'},
100003: {'type': 'nodepath',
'name': 'light target 2',
'comment': '',
'parentEntId': 0,
'pos': Point3(-46.465, -27.1019, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100004: {'type': 'nodepath',
'name': 'light target1',
'comment': '',
'parentEntId': 0,
'pos': Point3(45.4612, -33.6397, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100006: {'type': 'nodepath',
'name': 'copy of light target 2',
'comment': '',
'parentEntId': 0,
'pos': Point3(-46.465, 31.2292, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100007: {'type': 'nodepath',
'name': 'copy of light target1',
'comment': '',
'parentEntId': 0,
'pos': Point3(22.3708, 14.195, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100008: {'type': 'nodepath',
'name': 'stompergroup',
'comment': '',
'parentEntId': 0,
'pos': Point3(-45.2964, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100015: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0.05),
'hpr': Vec3(0, 0, 0),
'scale': 1},
100011: {'type': 'platform',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100008,
'pos': Point3(-8.92462, 5.26364, 19.9994),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'floorName': 'platformcollision',
'modelPath': 'phase_9/models/cogHQ/platform1',
'modelScale': Vec3(1, 1, 1),
'motion': 'noBlend',
'offset': Point3(-10, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'waitPercent': 0.1},
100012: {'type': 'platform',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100008,
'pos': Point3(-18.1468, -5.11, 20),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'floorName': 'platformcollision',
'modelPath': 'phase_9/models/cogHQ/platform1',
'modelScale': Vec3(1, 1, 1),
'motion': 'noBlend',
'offset': Point3(0, 0, 0),
'period': 2,
'phaseShift': 0.0,
'waitPercent': 0.1},
100002: {'type': 'securityCamera',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-5.84843, -50.8043, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 5.0,
'damPow': 10,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 10.0,
'switchId': 0,
'trackTarget1': 100004,
'trackTarget2': 100003,
'trackTarget3': 0},
100005: {'type': 'securityCamera',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-5.38565, 34.1311, 0.1),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'accel': 5.0,
'damPow': 8,
'hideModel': 0,
'maxVel': 15.0,
'modelPath': 0,
'projector': Point3(6, 6, 25),
'radius': 10.0,
'switchId': 0,
'trackTarget1': 100006,
'trackTarget2': 100007,
'trackTarget3': 0},
100000: {'type': 'stomper',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100008,
'pos': Point3(5, 5, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'animateShadow': 1,
'cogStyle': 1,
'crushCellId': None,
'damage': 10,
'headScale': Point3(8, 4, 8),
'modelPath': 0,
'motion': 4,
'period': 2.0,
'phaseShift': 0.0,
'range': 18.0,
'removeCamBarrierCollisions': 0,
'removeHeadFloor': 0,
'shaftScale': Point3(0.5, 10, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
100001: {'type': 'stomper',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100008,
'pos': Point3(34.0608, 5, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'animateShadow': 1,
'cogStyle': 1,
'crushCellId': None,
'damage': 10,
'headScale': Point3(8, 4, 8),
'modelPath': 0,
'motion': 4,
'period': 2.0,
'phaseShift': 0.75,
'range': 12.0,
'removeCamBarrierCollisions': 0,
'removeHeadFloor': 0,
'shaftScale': Point3(0.5, 10, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
100009: {'type': 'stomper',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 100008,
'pos': Point3(19.6858, 21.6045, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'animateShadow': 1,
'cogStyle': 1,
'crushCellId': None,
'damage': 10,
'headScale': Point3(8, 4, 8),
'modelPath': 0,
'motion': 4,
'period': 2.0,
'phaseShift': 0.5,
'range': 12.0,
'removeCamBarrierCollisions': 0,
'removeHeadFloor': 0,
'shaftScale': Point3(0.5, 10, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
100010: {'type': 'stomper',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 100008,
'pos': Point3(19.6858, -11.5601, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'animateShadow': 1,
'cogStyle': 1,
'crushCellId': None,
'damage': 10,
'headScale': Point3(8, 4, 8),
'modelPath': 0,
'motion': 4,
'period': 2.0,
'phaseShift': 0.25,
'range': 12.0,
'removeCamBarrierCollisions': 0,
'removeHeadFloor': 0,
'shaftScale': Point3(0.5, 10, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0}}
# No scenario-specific entity overrides; the single scenario uses the
# global entities unchanged.
Scenario0 = {}
# Top-level spec consumed by the level loader.
levelSpec = {'globalEntities': GlobalEntities,
 'scenarios': [Scenario0]}
| |
"""
**********
Resistance
**********
Resistance matrix. Renormalized version, as well as conductance and commute matrices.
"""
import networkx as nx
from numpy import linalg as la
from scipy import linalg as spla
import numpy as np
from scipy.sparse import issparse
from netcomp.linalg.matrices import laplacian_matrix
from netcomp.exception import UndefinedException
def resistance_matrix(A, check_connected=True):
    """Return the resistance matrix of the graph with adjacency matrix A.

    Parameters
    ----------
    A : NumPy matrix or SciPy sparse matrix
        Adjacency matrix of a graph.
    check_connected : Boolean, optional (default=True)
        If False, then the resistance matrix will be computed even for
        disconnected matrices. See Notes.

    Returns
    -------
    R : NumPy matrix
        Matrix of pairwise resistances between nodes.

    Notes
    -----
    Uses the formula for the resistance matrix R in terms of the Moore-Penrose
    pseudoinverse of the (non-normalized) graph Laplacian. See e.g. Theorem
    2.1 in [1]. This formula can be computed even for disconnected graphs,
    although the interpretation in this case is unclear. Thus, the usage of
    check_connected=False is recommended only to reduce computation time in a
    scenario in which the user is confident the graph in question is, in fact,
    connected.

    Since we do not expect the pseudoinverse of the Laplacian to be sparse, we
    convert L to dense form before running np.linalg.pinv(). The returned
    resistance matrix is dense.

    See Also
    --------
    nx.laplacian_matrix

    References
    ----------
    .. [1] W. Ellens, et al. (2011)
       Effective graph resistance.
       Linear Algebra and its Applications, 435 (2011)
    """
    n, m = A.shape
    # Check if the graph is connected; the resistance between nodes in
    # different components is formally infinite.
    if check_connected:
        if issparse(A):
            G = nx.from_scipy_sparse_matrix(A)
        else:
            G = nx.from_numpy_matrix(A)
        if not nx.is_connected(G):
            raise UndefinedException('Graph is not connected. '
                                     'Resistance matrix is undefined.')
    L = laplacian_matrix(A)
    # pinv needs a dense array. Only densify when L is actually sparse; the
    # previous bare `try/except: pass` could silently swallow unrelated errors.
    if issparse(L):
        L = L.todense()
    M = la.pinv(L)
    # Calculate R in terms of M: R_ij = M_ii + M_jj - M_ij - M_ji.
    d = np.reshape(np.diag(M), (n, 1))
    ones = np.ones((n, 1))
    R = np.dot(d, ones.T) + np.dot(ones, d.T) - M - M.T
    return R
def commute_matrix(A):
    """Return the commute matrix of the graph associated with adj. matrix A.

    Parameters
    ----------
    A : NumPy matrix or SciPy sparse matrix
        Adjacency matrix of a graph.

    Returns
    -------
    C : NumPy matrix
        Matrix of pairwise commute times between nodes.

    Notes
    -----
    The commute time matrix is a rescaling of the resistance matrix,

        C = 2 * |E| * R

    where |E| is the number of edges in the graph. See e.g. Theorem 2.8
    in [1].

    See Also
    --------
    laplacian_matrix
    resistance_matrix

    References
    ----------
    .. [1] W. Ellens, et al. (2011)
       Effective graph resistance.
       Linear Algebra and its Applications, 435 (2011)
    """
    # Each undirected edge contributes twice to the adjacency-matrix sum.
    num_edges = A.sum() / 2
    return 2 * num_edges * resistance_matrix(A)
def renormalized_res_mat(A, beta=1):
    """Return the renormalized resistance matrix of graph associated with A.

    To renormalize a resistance R, we apply the function

        R' = R / (R + beta)

    In this way, the renormalized resistance of nodes in disconnected
    components is 1. The parameter beta determines the penalty for
    disconnection. If we set beta to be approximately the maximum resistance
    found in the network, then the penalty for disconnection is at least 1/2.

    Parameters
    ----------
    A : NumPy matrix or SciPy sparse matrix
        Adjacency matrix of a graph.
    beta : float, optional
        Scaling parameter in renormalization. Must be greater than or equal
        to 1. Determines how heavily disconnection is penalized.

    Returns
    -------
    R : NumPy array
        Matrix of pairwise renormalized resistances between nodes.

    Notes
    -----
    This function converts to a NetworkX graph, as it uses the algorithms
    therein for identifying connected components.

    See Also
    --------
    resistance_matrix
    """
    if issparse(A):
        G = nx.from_scipy_sparse_matrix(A)
    else:
        G = nx.from_numpy_matrix(A)
    n = len(G)
    # Build the resistance matrix of each connected component and assemble
    # them block-diagonally. (`nx.connected_component_subgraphs` was removed
    # in NetworkX 2.4; constructing the subgraphs explicitly works on both
    # old and new versions.)
    subgraphR = []
    for component in nx.connected_components(G):
        subgraph = G.subgraph(component)
        a_sub = nx.adjacency_matrix(subgraph)
        subgraphR.append(resistance_matrix(a_sub))
    R = spla.block_diag(*subgraphR)
    # Now, resort R so that it matches the original node list. Iterating
    # `nx.connected_components` again yields components in the same order.
    component_order = []
    for component in nx.connected_components(G):
        component_order += list(component)
    component_order = list(np.argsort(component_order))
    R = R[component_order, :]
    R = R[:, component_order]
    # Renormalize each entry: r -> r / (r + beta), mapping [0, inf) to [0, 1).
    renorm = np.vectorize(lambda r: r / (r + beta))
    R = renorm(R)
    # Entries between different components are still 0 after block_diag;
    # set the resistance for different components to 1.
    R[R == 0] = 1
    R = R - np.eye(n)  # don't want diagonal to be 1
    return R
def conductance_matrix(A):
    """Return the conductance matrix of the graph with adjacency matrix A.

    The conductance matrix is the element-wise inverse of the resistance
    matrix. The diagonal is set to 0, although it is formally infinite. Nodes
    in disconnected components have 0 conductance.

    Parameters
    ----------
    A : NumPy matrix or SciPy sparse matrix
        Adjacency matrix of a graph.

    Returns
    -------
    C : NumPy array
        Matrix of pairwise conductances between nodes.

    See Also
    --------
    resistance_matrix
    renormalized_res_mat
    """
    if issparse(A):
        G = nx.from_scipy_sparse_matrix(A)
    else:
        G = nx.from_numpy_matrix(A)
    # Invert resistances per connected component, then assemble
    # block-diagonally. (`nx.connected_component_subgraphs` was removed in
    # NetworkX 2.4; constructing the subgraphs explicitly works on both old
    # and new versions.)
    subgraphC = []
    for component in nx.connected_components(G):
        subgraph = G.subgraph(component)
        a_sub = nx.adjacency_matrix(subgraph)
        r_sub = resistance_matrix(a_sub)
        m = len(subgraph)
        # Add one to the diagonal, invert, remove one from the diagonal:
        # this avoids dividing by the (zero) diagonal of r_sub.
        c_sub = 1 / (r_sub + np.eye(m)) - np.eye(m)
        subgraphC.append(c_sub)
    C = spla.block_diag(*subgraphC)
    # Resort C so that it matches the original node list; iterating
    # `nx.connected_components` again yields components in the same order.
    component_order = []
    for component in nx.connected_components(G):
        component_order += list(component)
    component_order = list(np.argsort(component_order))
    C = C[component_order, :]
    C = C[:, component_order]
    return C
| |
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import struct
import sys
import requests
import requests.exceptions
import six
import websocket
from . import api
from . import constants
from . import errors
from .auth import auth
from .unixconn import unixconn
from .ssladapter import ssladapter
from .utils import utils, check_resource
from .tls import TLSConfig
class Client(
        requests.Session,
        api.BuildApiMixin,
        api.ContainerApiMixin,
        api.DaemonApiMixin,
        api.ExecApiMixin,
        api.ImageApiMixin,
        api.VolumeApiMixin,
        api.NetworkApiMixin):
    """HTTP client for the Docker Remote API.

    Extends `requests.Session` with Docker-specific transports (UNIX socket,
    TLS) and mixes in the per-resource API methods.
    """

    def __init__(self, base_url=None, version=None,
                 timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False):
        """Create a client.

        Args:
            base_url: Daemon URL, e.g. 'unix://var/run/docker.sock' or
                'https://host:2376'. Must start with 'https://' when `tls`
                is truthy.
            version: API version string, 'auto' to negotiate with the daemon,
                or None for the library default.
            timeout: Default request timeout in seconds.
            tls: Boolean or a `TLSConfig` instance.

        Raises:
            errors.TLSParameterError: If `tls` is set but `base_url` is not
                an https URL.
            errors.DockerException: If `version` is neither None nor a string.
        """
        super(Client, self).__init__()

        if tls and not base_url.startswith('https://'):
            raise errors.TLSParameterError(
                'If using TLS, the base_url argument must begin with '
                '"https://".')

        self.base_url = base_url
        self.timeout = timeout
        # Bug fix: always define the attribute. Previously it was assigned
        # only on the unix-socket / ssl-adapter paths, so get_adapter() could
        # raise AttributeError instead of propagating InvalidSchema.
        self._custom_adapter = None

        self._auth_configs = auth.load_config()

        base_url = utils.parse_host(base_url, sys.platform)
        if base_url.startswith('http+unix://'):
            self._custom_adapter = unixconn.UnixAdapter(base_url, timeout)
            self.mount('http+docker://', self._custom_adapter)
            self.base_url = 'http+docker://localunixsocket'
        else:
            # Use SSLAdapter for the ability to specify SSL version
            if isinstance(tls, TLSConfig):
                tls.configure_client(self)
            elif tls:
                self._custom_adapter = ssladapter.SSLAdapter()
                self.mount('https://', self._custom_adapter)
            self.base_url = base_url

        # version detection needs to be after unix adapter mounting
        if version is None:
            self._version = constants.DEFAULT_DOCKER_API_VERSION
        elif isinstance(version, six.string_types):
            if version.lower() == 'auto':
                self._version = self._retrieve_server_version()
            else:
                self._version = version
        else:
            raise errors.DockerException(
                'Version parameter must be a string or None. Found {0}'.format(
                    type(version).__name__
                )
            )

    def _retrieve_server_version(self):
        """Ask the daemon for its API version; raise DockerException on failure."""
        try:
            return self.version(api_version=False)["ApiVersion"]
        except KeyError:
            raise errors.DockerException(
                'Invalid response from docker daemon: key "ApiVersion"'
                ' is missing.'
            )
        except Exception as e:
            raise errors.DockerException(
                'Error while fetching server API version: {0}'.format(e)
            )

    def _set_request_timeout(self, kwargs):
        """Prepare the kwargs for an HTTP request by inserting the timeout
        parameter, if not already present."""
        kwargs.setdefault('timeout', self.timeout)
        return kwargs

    def _post(self, url, **kwargs):
        """POST with the default timeout applied."""
        return self.post(url, **self._set_request_timeout(kwargs))

    def _get(self, url, **kwargs):
        """GET with the default timeout applied."""
        return self.get(url, **self._set_request_timeout(kwargs))

    def _put(self, url, **kwargs):
        """PUT with the default timeout applied."""
        return self.put(url, **self._set_request_timeout(kwargs))

    def _delete(self, url, **kwargs):
        """DELETE with the default timeout applied."""
        return self.delete(url, **self._set_request_timeout(kwargs))

    def _url(self, pathfmt, *args, **kwargs):
        """Build a full (optionally versioned) API URL from a path format.

        Positional args are quoted and substituted into `pathfmt`; pass
        versioned_api=False to omit the '/vX.Y' prefix.
        """
        for arg in args:
            if not isinstance(arg, six.string_types):
                raise ValueError(
                    'Expected a string but found {0} ({1}) '
                    'instead'.format(arg, type(arg))
                )

        args = map(six.moves.urllib.parse.quote_plus, args)

        if kwargs.get('versioned_api', True):
            return '{0}/v{1}{2}'.format(
                self.base_url, self._version, pathfmt.format(*args)
            )
        else:
            return '{0}{1}'.format(self.base_url, pathfmt.format(*args))

    def _raise_for_status(self, response, explanation=None):
        """Raises stored :class:`APIError`, if one occurred."""
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                raise errors.NotFound(e, response, explanation=explanation)
            raise errors.APIError(e, response, explanation=explanation)

    def _result(self, response, json=False, binary=False):
        """Return the response payload as json, bytes, or text (the default)."""
        assert not (json and binary)
        self._raise_for_status(response)

        if json:
            return response.json()
        if binary:
            return response.content
        return response.text

    def _post_json(self, url, data, **kwargs):
        # Go <1.1 can't unserialize null to a string
        # so we do this disgusting thing here.
        data2 = {}
        if data is not None:
            for k, v in six.iteritems(data):
                if v is not None:
                    data2[k] = v

        if 'headers' not in kwargs:
            kwargs['headers'] = {}
        kwargs['headers']['Content-Type'] = 'application/json'
        return self._post(url, data=json.dumps(data2), **kwargs)

    def _attach_params(self, override=None):
        """Default query params for attach endpoints (all streams enabled)."""
        return override or {
            'stdout': 1,
            'stderr': 1,
            'stream': 1
        }

    @check_resource
    def _attach_websocket(self, container, params=None):
        """Open a websocket connection to a container's attach endpoint."""
        url = self._url("/containers/{0}/attach/ws", container)
        req = requests.Request("POST", url, params=self._attach_params(params))
        full_url = req.prepare().url
        # Rewrite the scheme: ws(s) tunnels over the same host/port.
        full_url = full_url.replace("http://", "ws://", 1)
        full_url = full_url.replace("https://", "wss://", 1)
        return self._create_websocket_connection(full_url)

    def _create_websocket_connection(self, url):
        return websocket.create_connection(url)

    def _get_raw_response_socket(self, response):
        """Extract the underlying socket object from a requests response."""
        self._raise_for_status(response)
        # The attribute path into urllib3 internals differs by Python version.
        if six.PY3:
            sock = response.raw._fp.fp.raw
            if self.base_url.startswith("https://"):
                sock = sock._sock
        else:
            sock = response.raw._fp.fp._sock
        try:
            # Keep a reference to the response to stop it being garbage
            # collected. If the response is garbage collected, it will
            # close TLS sockets.
            sock._response = response
        except AttributeError:
            # UNIX sockets can't have attributes set on them, but that's
            # fine because we won't be doing TLS over them
            pass

        return sock

    def _stream_helper(self, response, decode=False):
        """Generator for data coming from a chunked-encoded HTTP response."""
        if response.raw._fp.chunked:
            reader = response.raw
            while not reader.closed:
                # this read call will block until we get a chunk
                data = reader.read(1)
                if not data:
                    break
                if reader._fp.chunk_left:
                    data += reader.read(reader._fp.chunk_left)
                if decode:
                    if six.PY3:
                        data = data.decode('utf-8')
                    data = json.loads(data)
                yield data
        else:
            # Response isn't chunked, meaning we probably
            # encountered an error immediately
            yield self._result(response)

    def _multiplexed_buffer_helper(self, response):
        """A generator of multiplexed data blocks read from a buffered
        response."""
        buf = self._result(response, binary=True)
        walker = 0
        while True:
            if len(buf[walker:]) < 8:
                break
            # Docker stream frame: 1-byte stream id, 3 pad bytes,
            # 4-byte big-endian payload length.
            _, length = struct.unpack_from('>BxxxL', buf[walker:])
            start = walker + constants.STREAM_HEADER_SIZE_BYTES
            end = start + length
            walker = end
            yield buf[start:end]

    def _multiplexed_response_stream_helper(self, response):
        """A generator of multiplexed data blocks coming from a response
        stream."""

        # Disable timeout on the underlying socket to prevent
        # Read timed out(s) for long running processes
        socket = self._get_raw_response_socket(response)
        self._disable_socket_timeout(socket)

        while True:
            header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
            if not header:
                break
            _, length = struct.unpack('>BxxxL', header)
            if not length:
                continue
            data = response.raw.read(length)
            if not data:
                break
            yield data

    def _stream_raw_result_old(self, response):
        ''' Stream raw output for API versions below 1.6 '''
        self._raise_for_status(response)
        for line in response.iter_lines(chunk_size=1,
                                        decode_unicode=True):
            # filter out keep-alive new lines
            if line:
                yield line

    def _stream_raw_result(self, response):
        ''' Stream result for TTY-enabled container above API 1.6 '''
        self._raise_for_status(response)
        for out in response.iter_content(chunk_size=1, decode_unicode=True):
            yield out

    def _disable_socket_timeout(self, socket):
        """ Depending on the combination of python version and whether we're
        connecting over http or https, we might need to access _sock, which
        may or may not exist; or we may need to just settimeout on socket
        itself, which also may or may not have settimeout on it.

        To avoid missing the correct one, we try both.
        """
        if hasattr(socket, "settimeout"):
            socket.settimeout(None)
        if hasattr(socket, "_sock") and hasattr(socket._sock, "settimeout"):
            socket._sock.settimeout(None)

    def _get_result(self, container, stream, res):
        """Return container output, respecting the container's TTY setting."""
        cont = self.inspect_container(container)
        return self._get_result_tty(stream, res, cont['Config']['Tty'])

    def _get_result_tty(self, stream, res, is_tty):
        # Stream multi-plexing was only introduced in API v1.6. Anything
        # before that needs old-style streaming.
        if utils.compare_version('1.6', self._version) < 0:
            return self._stream_raw_result_old(res)

        # We should also use raw streaming (without keep-alives)
        # if we're dealing with a tty-enabled container.
        if is_tty:
            return self._stream_raw_result(res) if stream else \
                self._result(res, binary=True)

        self._raise_for_status(res)
        sep = six.binary_type()
        if stream:
            return self._multiplexed_response_stream_helper(res)
        else:
            return sep.join(
                [x for x in self._multiplexed_buffer_helper(res)]
            )

    def get_adapter(self, url):
        """Fall back to the custom (unix/ssl) adapter for non-standard schemes."""
        try:
            return super(Client, self).get_adapter(url)
        except requests.exceptions.InvalidSchema as e:
            if self._custom_adapter:
                return self._custom_adapter
            else:
                raise e

    @property
    def api_version(self):
        """The Docker API version string this client talks."""
        return self._version
class AutoVersionClient(Client):
    """A Client whose API version is always auto-negotiated with the daemon."""

    def __init__(self, *args, **kwargs):
        # Passing an explicit version contradicts auto-negotiation.
        if kwargs.get('version'):
            raise errors.DockerException(
                'Can not specify version for AutoVersionClient'
            )
        kwargs['version'] = 'auto'
        super(AutoVersionClient, self).__init__(*args, **kwargs)
| |
# Twitter News Feed Plugin
from twisted.internet import task
from bot.pluginDespatch import Plugin
import re
import os
import twitter
import json
import HTMLParser
from datetime import timedelta, datetime
from dateutil import parser
import pytz
from models import TwitterStatuses, ReportedTweets, TwitterFollows
import logging
from logos.settings import LOGGING
from logos import utils
from logos.roomlib import get_global_option, set_global_option, \
get_room_option, set_room_option
from bot.logos_decorators import irc_network_permission_required, \
irc_room_permission_required
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
# This plugin began life as a copy of the generic plugin template; the
# template's placeholder class name has already been replaced with
# 'TwitterPlugin' below.
class TwitterPlugin(Plugin):
    """IRC plugin that periodically polls stored tweets and relays unseen
    ones from followed accounts into the rooms that follow them."""

    # (plugin key, human-readable name) consumed by the plugin framework
    plugin = ('twitter', 'Twitter News Feed')

    def __init__(self, *args, **kwargs):
        super(TwitterPlugin, self).__init__(*args, **kwargs)
        # (regex, handler, help-text) triples for the command dispatcher
        self.commands = (\
         (r'list\s+follows$',
          self.list_follows, 'list all follows for room'),
         (r'list\s+follows\s+(?P<room>#\S+)$',
          self.list_follows, 'list all follows for room'),
         (r'add\s+follow\s+(?P<room>#\S+)\s+(?P<follow>@\S+)',
          self.add_follow,
          'Add screen name to follow'),
         (r'remove\s+follow\s+(?P<room>#\S+)\s+(?P<follow>@\S+)',
          self.remove_follow,
          'Remove screen name to follow'),
         (r'set\s+(?P<room>#\S+)\s+twitter\s+display\s+limit\s+(?P<count>\d+)',
          self.set_room_limit,
          'Set number of tweets to display each time in room'),
         (r'reset tweets (?P<room>#\S+)', self.reset,
          'Reset reported tweets'),
         (r'set check time (\d+)', self.set_check_time,
          'set the twitter check time'),
         (r'pull tweets',self.pull_tweet, "Pull some tweets without waiting for timer"),
        )
        # periodic poll; started in on_activate with a configurable interval
        self.timer = task.LoopingCall(self.on_timer)
        # used to unescape HTML entities in tweet text before relaying
        self.h = HTMLParser.HTMLParser()

    def on_activate(self):
        """ When this plugin is activated for the network """
        # poll interval in seconds comes from a global option; default 30
        check_time = get_global_option("twitter-check-time")
        if check_time:
            check_time = int(check_time)
        else:
            check_time = 30
        logger.info("Twitter check timer is every {} seconds".format(check_time))
        self.timer.start(check_time, now=False)
        return (True, None)

    def on_deactivate(self):
        """ When this plugin is deactivated for the network """
        self.timer.stop()
        logger.info("Twitter check timer stopped")

    def privmsg(self, user, channel, message):
        # This plugin does not react to free-form room messages.
        pass

    @irc_network_permission_required('bot_admin')
    def set_check_time(self, regex, chan, nick, **kwargs):
        """Set the global twitter poll interval (seconds) and restart the
        timer with the new value."""
        check_time = regex.group(1)
        set_global_option("twitter-check-time", check_time)
        self.timer.stop()
        self.timer.start(int(check_time), now=False)
        self.say(chan,"Twitter check time successfully set")

    @irc_room_permission_required('twitter_op')
    def set_room_limit(self, regex, chan, nick, **kwargs):
        """Set how many tweets may be posted into a room per poll."""
        room = regex.group('room')
        limit = regex.group('count')
        set_room_option(self.network, room, "twitter-post-limit", limit)
        self.say(chan,"Twitter post limit set successfully set")

    @irc_room_permission_required('twitter_op')
    def list_follows(self, regex, chan, nick, **kwargs):
        """List the screen names followed for a room (defaults to the
        current channel when the command gave no room)."""
        try:
            room = regex.group('room')
        except IndexError:
            # command variant without an explicit room argument
            room = chan
        self.say(chan, "List of twitter follows for room "+room)
        follows = TwitterFollows.objects.filter(network=self.network,
                                                room=room.lower())
        for follow in follows:
            self.say(chan, follow.screen_name)
        self.say(chan, "== End of List==")

    @irc_room_permission_required('twitter_op')
    def add_follow(self, regex, chan, nick, **kwargs):
        """Start following a screen name in a room (refused when the
        follow already exists)."""
        room = regex.group('room')
        follow = regex.group('follow')
        if TwitterFollows.objects.filter(network=self.network,
                                         room=room.lower(),
                                         screen_name__iexact=follow).exists():
            self.say(chan,"That follow already exists for this room")
        else:
            twit_follow = TwitterFollows()
            twit_follow.network = self.network
            twit_follow.room = room.lower()
            twit_follow.screen_name = follow
            twit_follow.save()
            self.say(chan,"Twitter follow added successfully")

    @irc_room_permission_required('twitter_op')
    def remove_follow(self, regex, chan, nick, **kwargs):
        """Stop following a screen name in a room."""
        room = regex.group('room')
        follow = regex.group('follow')
        try:
            twitterer = TwitterFollows.objects.get(network=self.network,
                                                   room=room.lower(),
                                                   screen_name__iexact=follow)
        except TwitterFollows.DoesNotExist:
            self.say(chan,"That follow doesn't exist for this room")
        else:
            twitterer.delete()
            self.say(chan,"Twitter follow removed successfully")

    @irc_room_permission_required('twitter_op')
    def reset(self, regex, chan, nick, **kwargs):
        """Forget which tweets were already reported for a room, so they
        may be reported again on the next poll."""
        room = regex.group('room')
        ReportedTweets.objects.filter(network=self.network,
                                      room=room).delete()
        self.say(chan,"Tweets for room {} now reset".format(room))

    def pull_tweet(self, regex, chan, nick, **kwargs):
        """Manually trigger a poll for the current channel only."""
        self.say(chan, "Manually requesting tweet")
        self._process_tweets(channel=chan)

    def on_timer(self):
        # Periodic entry point driven by the LoopingCall.
        dt = datetime.now(pytz.utc)
        logger.debug("on_timer {}".format(str(dt)))
        self._process_tweets()

    def _process_tweets(self, channel=None):
        """Report unseen tweets from followed accounts into each enabled
        room (or only `channel` when given), honouring the per-room post
        limit and recording what was reported."""
        responses = []
        now = datetime.now(pytz.utc)
        # only consider tweets from the last year
        n_days_ago = now - timedelta(days=365)
        statuses = TwitterStatuses.objects.\
            filter(created_at__gt = n_days_ago).\
            order_by('created_at')
        if channel:
            rooms = [ channel ]
        else:
            rooms = self.get_rooms()
        for room in rooms:
            if not self.is_plugin_enabled(room): continue
            # per-room cap on tweets posted per poll; default 2
            limit = get_room_option(self.network, room, "twitter-post-limit")
            if limit:
                limit = int(limit)
            else:
                limit = 2
            count = 0
            for status in statuses:
                # skip tweets already reported in this room
                if status.reportedtweets_set.\
                    filter(network=self.network.lower(),
                           room=room.lower()).exists():
                    continue
                # skip tweets from accounts this room does not follow
                if not TwitterFollows.objects.filter(network=self.network.lower(),\
                                                     room=room.lower(),\
                                                     screen_name__iexact = "@"+status.screen_name).exists():
                    continue
                created_at = status.created_at.strftime("%d-%b-%Y %H:%M")
                chan_text = "@{} {} -- {}".format(status.screen_name,
                                                  created_at,
                                                  status.text.encode("ascii", "replace_spc"))
                responses.append((room, chan_text))
                # remember that this tweet was shown in this room
                reported_twt = ReportedTweets()
                reported_twt.tweet = status
                reported_twt.network = self.network.lower()
                reported_twt.room = room.lower()
                reported_twt.save()
                count += 1
                if count >= limit: break
        for room, msg in responses:
            if not self.is_plugin_enabled(room): continue
            msg = re.sub(r"\n|\r", "", msg)
            # decode HTML entities before sending text to IRC
            msg = self.h.unescape(msg)
            self.say(room, msg)
        if not responses and channel: # manual pull
            self.say(channel, "=== No Tweets Found ===")
| |
import os
# ------------------------------------------------------------------------------
#
def get_experiment_frames(experiments, datadir=None):
    """
    read profiles for all sessions in the given 'experiments' dict. That dict
    is expected to be like this:

      { 'test 1' : [ [ 'rp.session.thinkie.merzky.016609.0007', 'stampede popen sleep 1/1/1/1 (?)'] ],
        'test 6' : [ [ 'rp.session.ip-10-184-31-85.merzky.016611.0013', 'stampede - isolated', ],
                     [ 'rp.session.ip-10-184-31-85.merzky.016612.0012', 'stampede - integrated', ] ]
      }

    i.e. a mapping from experiment name to a list of [session_id, label]
    pairs, where the label will be later used to label (duh) plots.

    we return a similar dict where each [session_id, label] pair is replaced
    by one [data_frame, label] pair per profile file found for that session.

    @param experiments: dict mapping experiment name -> [[sid, label], ...]
    @param datadir:     directory holding the '<sid>-pilot.*.prof' files
                        (defaults to the current working directory)
    @return: dict mapping experiment name -> [[pandas.DataFrame, label], ...]
    """
    # hoisted out of the inner loop (it used to be re-imported per session);
    # the unused 'import pandas as pd' was dropped.
    import glob

    exp_frames = dict()

    if not datadir:
        datadir = os.getcwd()

    print('reading profiles in %s' % datadir)

    for exp in experiments:
        print(' - %s' % exp)
        exp_frames[exp] = list()

        for sid, label in experiments[exp]:
            print(' - %s' % sid)

            for prof in glob.glob("%s/%s-pilot.*.prof" % (datadir, sid)):
                print(' - %s' % prof)
                frame = get_profile_frame(prof)
                exp_frames[exp].append([frame, label])

    return exp_frames
# ------------------------------------------------------------------------------
#
def get_profile_frame(prof):
    """Load one profile file (CSV) into a pandas DataFrame."""
    import pandas
    frame = pandas.read_csv(prof)
    return frame
# ------------------------------------------------------------------------------
#
tmp = None
def add_concurrency(frame, tgt, spec):
    """
    add a column 'tgt' which is a cumulative sum of conditionals of another row.

    The purpose is the following: if a unit enters a component, the tgt row
    counter is increased by 1, if the unit leaves the component, the counter
    is decreased by 1.  For any time, the resulting row contains the number of
    units which is in the component.  Or state.  Or whatever.

    The arguments are:
      'tgt'  : name of the new column
      'spec' : a set of filters to determine if a unit enters or leaves

    'spec' is expected to be a dict of the following format:

      spec = { 'in'  : [{'col1' : 'pat1', 'col2' : 'pat2'}, ...],
               'out' : [{'col3' : 'pat3', 'col4' : 'pat4'}, ...] }

    where:
      'in'  : filter set to determine the unit entering
      'out' : filter set to determine the unit leaving
      'col' : name of column for which filter is defined

    Example:
      spec = {'in'  : [{'state' : 'Executing'}],
              'out' : [{'state' : 'Done'},
                       {'state' : 'Failed'},
                       {'state' : 'Cancelled'}]}
      add_concurrency(df, 'concurrently_running', spec)

    @param frame: pandas.DataFrame -- modified in place (column 'tgt' added)
    """
    import numpy

    # ----- classify one row: +1 on entering, -1 on leaving, 0 otherwise
    def _conc(row, spec):
        # row must match any filter dict in 'spec[in/out]';
        # for any filter dict it must match all col/pat pairs
        for f in spec['in']:
            if all(row[col] == pat for col, pat in f.items()):
                return 1
        for f in spec['out']:
            if all(row[col] == pat for col, pat in f.items()):
                return -1
        return 0

    # we only want to later look at *changes* of the concurrency -- leading or
    # trailing idle times are to be ignored.  We thus set repeating values of
    # the cumsum to NaN, so that they can be filtered out when plotting:
    # df.dropna().plot(...).  That specifically will limit the plotted time
    # range to the area of activity.  The full time range can still be
    # plotted when omitting the dropna() call.
    #
    # FIX: this used to go through a module-global 'tmp', which leaked state
    # between calls to add_concurrency(); a closure-local holder keeps each
    # call independent.  The sentinel compares unequal to any real value,
    # matching the old first-call behavior.
    last = [object()]

    def _time(x):
        if x != last[0]:
            last[0] = x
        else:
            x = numpy.NaN
        return x

    # sanitize concurrency: negative values indicate incorrect event
    # ordering, so we mask the respective values out
    def _abs(x):
        if x < 0:
            return numpy.NaN
        return x

    frame[tgt] = frame.apply(lambda row: _conc(row, spec), axis=1).cumsum()
    frame[tgt] = frame.apply(lambda row: _abs(row[tgt]), axis=1)
    frame[tgt] = frame.apply(lambda row: _time(row[tgt]), axis=1)
# ------------------------------------------------------------------------------
#
t0 = None
def calibrate_frame(frame, spec):
    """
    move the time axis of a profiling frame so that t_0 is at the first event
    matching the given 'spec'.  'spec' has the same format as described in
    'add_concurrency' (list of dicts with col:pat filters).

    @param frame: pandas.DataFrame with a 'time' column -- modified in place
    @param spec:  list of {column: pattern} filter dicts
    @return: None; if no row matches, a warning is printed and the frame is
             left untouched
    """
    # FIX: this used to communicate via a module-global 't0' ('t0 == None'
    # comparison included); a closure-local holder makes repeated calls
    # independent of each other.
    found = [None]

    # ----- remember the time of the first row matching any filter in 'spec'
    def _find_t0(row):
        if found[0] is not None:
            # already found t0
            return
        for f in spec:
            if all(row[col] == pat for col, pat in f.items()):
                found[0] = row['time']
                return

    # first pass: find t0
    frame.apply(_find_t0, axis=1)

    t_zero = found[0]
    if t_zero is None:
        print("Can't recalibrate, no matching timestamp found")
        return

    # second pass: shift the time axis
    frame['time'] = frame.apply(lambda row: row['time'] - t_zero, axis=1)
# ------------------------------------------------------------------------------
#
def create_plot():
    """
    create a plot object and tune its layout to our liking.
    """
    import matplotlib.pyplot as plt

    fig, plot = plt.subplots(figsize=(12, 6))

    # longer tick marks on both axes
    for ax in (plot.xaxis, plot.yaxis):
        ax.set_tick_params(width=1, length=7)

    # push all four spines outward for a detached look
    for side in ('right', 'top', 'bottom', 'left'):
        plot.spines[side].set_position(('outward', 10))

    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)

    fig.tight_layout()
    return fig, plot
# ------------------------------------------------------------------------------
#
def frame_plot (frames, axis, title=None, logx=False, logy=False,
                legend=True, figdir=None):
    """
    plot the given axis from the given data frames. We create a plot, and plot
    all frames given in the list. The list is expected to contain
    [frame, label] pairs.

    frames: list of tuples of dataframes and labels
      frames = [[stampede_df_1, 'stampede - popen'],
                [stampede_df_2, 'stampede - shell'],
                [stampede_df_3, 'stampede - ORTE' ]]

    axis: pair of [column, axis-label] pairs, one for x and one for y
      (the code indexes axis[0][0]/axis[0][1] and axis[1][0]/axis[1][1]), e.g.
      axis = [['time', 'time (s)'], ['concurrency', '#units']]

    @return: (figure, plot) -- the figure is also saved as png and pdf
    """
    # create figure and layout
    fig, plot = create_plot()

    # set plot title
    if title:
        plot.set_title(title, y=1.05, fontsize=18)

    # plot the data frames
    # NOTE: we need to set labels separately, because of
    #       https://github.com/pydata/pandas/issues/9542
    labels = list()
    for frame, label in frames:
        try:
            frame.dropna().plot(ax=plot, logx=logx, logy=logy,
                                x=axis[0][0], y=axis[1][0],
                                drawstyle='steps',
                                label=label, legend=False)
            # FIX: the labels list was never filled, so the legend always
            # came out empty -- collect the label of each plotted frame.
            labels.append(label)
        except Exception as e:
            print("skipping frame '%s': '%s'" % (label, e))

    if legend:
        plot.legend(labels=labels, loc='upper right', fontsize=14,
                    frameon=True)

    # set axis labels
    plot.set_xlabel(axis[0][1], fontsize=14)
    plot.set_ylabel(axis[1][1], fontsize=14)
    plot.set_frame_on(True)

    # save as png and pdf. Use the title as base for names
    if title: base = title
    else    : base = "%s_%s" % (axis[0][1], axis[1][1])

    # clean up base name -- only keep alphanum and such
    import re
    base = re.sub(r'[^a-zA-Z0-9\.\-]', '_', base)
    base = re.sub(r'_+', '_', base)

    if not figdir:
        figdir = os.getcwd()

    print('saving %s/%s.png' % (figdir, base))
    fig.savefig('%s/%s.png' % (figdir, base), bbox_inches='tight')

    print('saving %s/%s.pdf' % (figdir, base))
    fig.savefig('%s/%s.pdf' % (figdir, base), bbox_inches='tight')

    return fig, plot
# ------------------------------------------------------------------------------
#
def create_analytical_frame(idx, kind, args, limits, step):
    """
    create an artificial data frame, ie. a data frame which does not contain
    data gathered from an experiment, but data representing an analytical
    construct of some 'kind'.

    idx:    data frame column index to fill (a time column is always created)
    kind:   construct to use (only 'rate' is supported right now)
    args:   construct specific parameters
    limits: time range for which data are to be created
    step:   time steps for which data are to be created
    """
    import pandas as pd

    if kind != 'rate':
        raise ValueError("No such frame kind '%s'" % kind)

    t_0 = args.get('t_0', 0.0)
    rate = args.get('rate', 1.0)

    # sample the linear rate construct over [limits[0], limits[1]] inclusive
    rows = list()
    t = limits[0]
    while t <= limits[1]:
        rows.append({'time': t + t_0, idx: t * rate})
        t += step

    return pd.DataFrame(rows)
# ------------------------------------------------------------------------------
| |
'''
@author: Volodymyr Vladymyrov smartkiwi@gmail.com
Time Intervals calculation library:
contains methods for merging overlapping intervals and calculating their duration
intervals are defined as a list of list:
[[10,100],[110,150]]
or
[
[datetime.datetime(2010, 6, 15, 14, 21, 1, 190000), datetime.datetime(2010, 6, 15, 14, 21, 10, 190000)],
[datetime.datetime(2010, 6, 15, 14, 21, 5, 190000), datetime.datetime(2010, 6, 15, 14, 21, 13, 190000)]
]
usage:
1) first merge the intervals:       merged_intervals = merge_intervals( [[],[],[]] )
2) then calculate their duration:   duration = calculate_duration_datetime(merged_intervals)
Example:
input = [
[datetime.datetime(2010, 6, 15, 14, 21, 1, 190000), datetime.datetime(2010, 6, 15, 14, 21, 10, 190000)],
[datetime.datetime(2010, 6, 15, 14, 21, 5, 190000), datetime.datetime(2010, 6, 15, 14, 21, 13, 190000)]
]
merged_intervals = merge_intervals( input )
duration = calculate_duration_datetime(merged_intervals)
duration
timedelta(0, 12, 0)
timedelta_to_secods(duration)
12.0
'''
from datetime import timedelta
from interval_calculator.bx.quicksect import IntervalNode
def timedelta_to_secods(d):
    """converts timedelta into seconds
    @param d: datetime.timedelta
    @return: float - seconds
    """
    seconds_per_day = 86400  # 60 * 60 * 24
    return d.days * seconds_per_day + d.seconds + d.microseconds / 1e6
def merge_intervals(intervals, debug=False):
    '''returns merged intervals

    sorts input intervals by their start value and delegates to
    merge_sorted_intervals.

    @param intervals: list of lists with intervals i.e. [(start1,end1),(start2,end2)]
    @param debug: passed through to merge_sorted_intervals
    @return: list of lists with merged, non-overlapping intervals
    '''
    by_start = sorted(intervals, key=lambda interval: interval[0])
    return merge_sorted_intervals(by_start, debug)
def merge_sorted_intervals(intervals, debug=False):
    '''returns merged intervals

    raises ValueError if input intervals are out of order - i.e. start1 > start2
    @param intervals: list of lists with intervals i.e. [(start1,end1),(start2,end2)].
                      Input intervals should be sorted by increasing start
    @param debug: print tracing information when True
    @return: list of lists with merged, non-overlapping intervals
    '''
    # NOTE: print/raise statements were converted to call form so this code
    # is valid under both Python 2 and 3; behavior is unchanged.
    if len(intervals) < 2:
        # case 0 - zero or one interval: nothing to merge
        if debug:
            print("case 0 - only one input interval - return it as is")
        return intervals

    out_intervals = []
    latest_end = None
    previous_interval = None
    for interval in intervals:
        # check for intervals to be in order
        if debug:
            print("previous %s , current %s" % (previous_interval, interval))
        if previous_interval is not None and interval[0] < previous_interval[0]:
            raise ValueError("input intervals should be in ascending order of the start values")

        # define aliases for better readability
        i_start = interval[0]
        i_end = interval[1]

        if len(out_intervals) > 0:
            last_out_interval = out_intervals[len(out_intervals) - 1]
            latest_end = last_out_interval[1]
        else:
            last_out_interval = None

        if debug:
            if last_out_interval is not None:
                print("previous interval: (%s,%s), probing interval (%s,%s)" % (last_out_interval[0], last_out_interval[1], interval[0], interval[1]))
            else:
                print("first interval - probing interval (%s,%s)" % (interval[0], interval[1]))

        # precedence note: 'A or (B and C)'
        if previous_interval is None or latest_end < i_end and latest_end < i_start:
            # case 1 - one or many non overlapping intervals: start a new
            # output interval
            if debug:
                print("\tcase 1: add new out interval (%s,%s)" % (i_start, i_end))
            out_intervals.append(interval)
            latest_end = i_end
        elif latest_end >= i_start and latest_end <= i_end and last_out_interval is not None:
            # case 2 - next interval starts in the middle or right after the
            # last one (1.end >= 2.start): extend the last output interval
            if debug:
                print("\tcase 2: extending last out interval (%s,%s) with (%s,%s)" % (last_out_interval[0], last_out_interval[1], i_start, i_end))
            last_out_interval[1] = i_end
        else:
            # interval is fully contained in the last output interval
            if debug:
                print("do nothing")
        previous_interval = interval
    return out_intervals
def intersect_sorted_intervals(i1, i2, debug=False):
    """Return the intersection of two intervals, or None when they do not
    overlap.

    @param i1: [start, end] interval
    @param i2: [start, end] interval
    @param debug: print which case matched when True
    @return: [start, end] of the overlap, or None
    """
    # NOTE: print statements were converted to call form so this code is
    # valid under both Python 2 and 3; behavior is unchanged.
    out_interval = [None, None]
    if i1[0] > i2[0] and i1[1] < i2[1]:
        # case 1: i1 lies fully inside i2
        if debug: print("case 1")
        out_interval[0] = i1[0]
        out_interval[1] = i1[1]
    if i1[0] < i2[0] and i1[1] > i2[1]:
        # case 2: i2 lies fully inside i1
        if debug: print("case 2")
        out_interval[0] = i2[0]
        out_interval[1] = i2[1]
    if i1[0] < i2[0] and i1[1] < i2[1] and i2[0] < i1[1]:
        # case 3: i1 overlaps the start of i2
        if debug: print("case 3")
        out_interval[0] = i2[0]
        out_interval[1] = i1[1]
    if i1[0] > i2[0] and i1[1] > i2[1] and i1[0] < i2[1]:
        # case 4: i2 overlaps the start of i1
        if debug: print("case 4")
        out_interval[0] = i1[0]
        out_interval[1] = i2[1]
    if i1[0] == i2[0] and i1[1] == i2[1]:
        # case 5: identical intervals
        if debug: print("case 5")
        out_interval[0] = i1[0]
        out_interval[1] = i2[1]
    if out_interval[0] is None and out_interval[1] is None:
        # no case matched: the intervals do not intersect
        if debug: print("case >5")
        return None
    return out_interval
def intersect_intervals(intervals, debug=False):
    """Fold a collection of intervals into their common intersection by
    repeated pairwise intersection; returns None when there is none."""
    result = None
    for position, interval in enumerate(intervals):
        if position == 0:
            result = interval
        else:
            result = intersect_sorted_intervals(result, interval)
    return result
def has_intersection(i1, i2, debug=False):
    """Return True when intervals i1 and i2 overlap, False otherwise.

    @param i1: [start, end] interval
    @param i2: [start, end] interval
    @param debug: print which case matched when True
    """
    # NOTE: print statements were converted to call form so this code is
    # valid under both Python 2 and 3; behavior (including the deliberate
    # fall-through of cases 4 and 5) is unchanged.
    if i2[0] >= i1[0] and i2[1] <= i1[1]:
        # case 1: i2 inside i1 (boundaries may coincide)
        if debug: print("case 1")
        return True
    if i2[0] < i1[0] and i2[1] < i1[1] and i2[1] > i1[0]:
        # case 2: i2 overlaps the start of i1
        if debug: print("case 2")
        return True
    if i1[0] < i2[0] and i2[0] < i1[1] and i2[1] > i1[1]:
        # case 3: i2 overlaps the end of i1
        if debug: print("case 3")
        return True
    if i1[0] <= i2[0] and i2[0] >= i1[1]:
        # case 4: i2 starts at/after the end of i1 -- falls through to False
        if debug: print("case 4")
        #return False
    if i1[0] > i2[1] and i1[1] > i2[1]:
        # case 5: i2 lies entirely before i1 -- falls through to False
        if debug: print("case 5")
        #return False
    if i1[0] == i2[0] and i1[1] == i2[1]:
        # case 6: identical intervals
        if debug: print("case 6")
        return True
    if i1[0] > i2[0] and i1[1] < i2[1]:
        # case 7: i1 strictly inside i2
        if debug: print("case 7")
        return True
    return False
def intersect_one_with_many(interval, intervals):
    """Intersect one interval with each interval in 'intervals' and return
    the list of non-empty overlaps."""
    overlaps = (intersect_sorted_intervals(interval, other)
                for other in intervals)
    return [overlap for overlap in overlaps if overlap is not None]
def substract_intervals(i1s, i2s, debug=False):
    """Subtract the intervals in 'i2s' from the intervals in 'i1s'.

    Both inputs are merged first; the result is again a merged list of
    intervals covering i1s minus i2s.

    @param i1s: list of [start, end] intervals to subtract from
    @param i2s: list of [start, end] intervals to subtract
    @param debug: print tracing information when True
    @return: merged list of remaining [start, end] intervals
    """
    # NOTE: print statements were converted to call form so this code is
    # valid under both Python 2 and 3; behavior is unchanged.
    i1sorted = merge_intervals(i1s)
    i2sorted = merge_intervals(i2s)
    for i2 in i2sorted:
        res = []
        for i1 in i1sorted:
            if has_intersection(i1, i2, debug):
                r = substruct_one(i1, i2, debug)
                res.extend(r)
            else:
                res.append(i1)
        # re-merge after each subtraction pass
        i1sorted = merge_intervals(res)
        if debug: print(str(i2) + " res`:" + str(res))
    if debug: print("i1:" + str(i1s))
    if debug: print("i2:" + str(i2s))
    if debug: print("res:" + str(i1sorted))
    return merge_intervals(i1sorted, debug)
def overlap_find(start, end, tree):
    "Returns a list with the overlapping intervals"
    hits = []
    tree.intersect(start, end, hits.append)
    return [(node.start, node.end) for node in hits]
def substract_intervals_itree(i1s, i2s, debug=False):
    """Subtract intervals 'i2s' from 'i1s', using an interval tree for the
    overlap queries (avoids the quadratic scan of substract_intervals).

    @param i1s: list of [start, end] intervals to subtract from
    @param i2s: list of [start, end] intervals to subtract (must be non-empty)
    @param debug: print tracing information when True
    @return: merged list of remaining [start, end] intervals
    """
    # NOTE: print statements were converted to call form so this code is
    # valid under both Python 2 and 3; behavior is unchanged.
    i1sorted = merge_intervals(i1s)
    i2sorted = merge_intervals(i2s)

    # build an interval tree over the (merged) intervals to subtract
    start, end = i2sorted[0]
    tree = IntervalNode(start, end)
    for start, end in i2sorted[1:]:
        tree = tree.insert(start, end)

    res = []
    for i1 in i1sorted:
        # all subtrahend intervals overlapping this i1, merged
        overlap = overlap_find(i1[0], i1[1], tree)
        overlap = merge_intervals(overlap)
        if debug:
            print("interval out of i1: %s" % i1)
            print("overlap of i2 with this i1 interval: %s" % overlap)
        if len(overlap) > 0:
            r = substract_many_out_of_one(i1, overlap, debug)
            if debug:
                print("substruct_out result: i1 %s - overlap %s = %s" % (i1, overlap, r))
            res.extend(r)
        else:
            res.append(i1)
    i1sorted = merge_intervals(res)
    if debug: print("i1:" + str(i1s))
    if debug: print("i2:" + str(i2s))
    if debug: print("res:" + str(i1sorted))
    return merge_intervals(i1sorted, debug)
def substruct_one(i1, i2, debug=False):
    """Subtract interval i2 from interval i1.

    @param i1: [start, end] interval to subtract from
    @param i2: [start, end] interval to subtract
    @param debug: print which case matched when True
    @return: list of 0, 1 or 2 remaining [start, end] intervals
    """
    # NOTE: print statements were converted to call form so this code is
    # valid under both Python 2 and 3; behavior is unchanged.
    re = []
    if i2[0] > i1[0] and i2[1] < i1[1]:
        # case 1: i2 strictly inside i1 -> two remainders
        if debug: print("case 1")
        re.append([i1[0], i2[0]])
        re.append([i2[1], i1[1]])
        return re
    if i2[0] < i1[0] and i2[1] < i1[1] and i2[1] > i1[0]:
        # case 2: i2 cuts off the front of i1
        if debug: print("case 2")
        re.append([i2[1], i1[1]])
        return re
    if i2[0] <= i1[0] and i2[1] < i1[1] and i2[1] > i1[0]:
        # case 2`: like case 2, with coinciding starts
        if debug: print("case 2`")
        re.append([i2[1], i1[1]])
        return re
    if i1[0] < i2[0] and i2[0] < i1[1] and i2[1] > i1[1]:
        # case 3: i2 cuts off the tail of i1
        if debug: print("case 3")
        re.append([i1[0], i2[0]])
        return re
    if i1[0] < i2[0] and i2[0] < i1[1] and i2[1] >= i1[1]:
        # case 3`: like case 3, with coinciding ends
        if debug: print("case 3`")
        re.append([i1[0], i2[0]])
        return re
    if i1[0] < i2[0] and i2[0] > i1[1]:
        # case 4: i2 lies entirely after i1 -> i1 unchanged
        if debug: print("case 4")
        re.append(i1)
        return re
    if i1[0] > i2[1] and i1[1] > i2[1]:
        # case 5: i2 lies entirely before i1 -> i1 unchanged
        if debug: print("case 5")
        re.append(i1)
        return re
    # no case matched (i2 covers i1): nothing remains
    return re
def substract_many_out_of_one(i1, i2s, debug=False):
    """substracts intervals defined in i2s from one interval i1
    @precondition every interval out of i2s intersects i1
    """
    remainder = [i1[0], i1[1]]   # still-uncovered part of i1
    pieces = []                  # finished result fragments
    for cut in i2s:
        if cut[0] <= remainder[0] and cut[1] >= remainder[0] and cut[1] < remainder[1]:
            # cut chops off the front of the remainder
            remainder[0] = cut[1]
        elif cut[0] > remainder[0] and cut[1] <= remainder[1]:
            # cut lies inside the remainder: emit the part before it
            pieces.append([remainder[0], cut[0]])
            remainder[0] = cut[1]
        elif cut[0] > remainder[0] and cut[1] > remainder[1] and cut[0] < remainder[1]:
            # cut covers the tail of the remainder: emit the head, finished
            pieces.append([remainder[0], cut[0]])
            remainder = []
            break
        elif cut[0] > remainder[0] and cut[0] > remainder[1]:
            # cut lies completely past the remainder: finished
            break
        elif remainder[0] == cut[0] and remainder[1] == cut[1]:
            # cut equals the remainder: nothing is left
            remainder = []
            break
        elif cut[0] < remainder[0] and cut[1] > remainder[1]:
            # cut swallows the remainder completely
            remainder = []
            break
    if len(remainder) == 2 and remainder[0] != remainder[1]:
        pieces.append(remainder)
    return pieces
def calculate_duration_in_period(intervals):
    """method to calculate duration of merged intervals

    please note if intervals are overlapping this function will return an
    incorrect result - use the merge_intervals method first.
    raises ValueError if end time is earlier than start time
    @param intervals: list of lists - !nonoverlapping intervals (numeric ends)
    @return: duration (sum of interval lengths)
    """
    # NOTE: the Python-2-only 'raise X, "msg"' statement was converted to
    # call form; behavior (including the message text) is unchanged.
    duration = 0
    for interval in intervals:
        if interval[1] < interval[0]:
            raise ValueError("End of the interval is earlier then start: (%s,%s)" % (interval[0], interval[1]))
        dur1 = interval[1] - interval[0]
        duration = duration + dur1
    return duration
def calculate_duration_datetime(intervals):
    """method to calculate duration of merged intervals

    please note if intervals are overlapping this function will return an
    incorrect result - use the merge_intervals method first.
    raises ValueError if end time is earlier than start time
    @param intervals: list of lists - !nonoverlapping intervals (start and end are datetime)
    @return: duration as datetime.timedelta
    """
    # NOTE: the Python-2-only 'raise X, "msg"' statement was converted to
    # call form; behavior (including the message text) is unchanged.
    duration = timedelta(0)
    for interval in intervals:
        if interval[1] < interval[0]:
            raise ValueError("End of the interval is earlier then start: (%s,%s)" % (interval[0], interval[1]))
        dur1 = interval[1] - interval[0]
        duration = duration + dur1
    return duration
def iterate_over_interval(start, end):
    """Yield successive days from 'start' up to and including 'end', one
    day apart (empty when start > end)."""
    one_day = timedelta(days=1)
    cursor = start
    while cursor <= end:
        yield cursor
        cursor = cursor + one_day
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConcatOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testHStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
@test_util.run_deprecated_v1
def testVStack(self):
with self.session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32GPU(self):
with test_util.use_gpu():
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat([x1, x2], 0)
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def testRefType(self):
with test_util.use_gpu():
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
v1 = variables.Variable(p1)
v2 = variables.Variable(p2)
c = array_ops.concat([v1, v2], 0)
self.evaluate(variables.global_variables_initializer())
result = self.evaluate(c)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], p1)
self.assertAllEqual(result[4:, :], p2)
  def _testRandom(self, dtype):
    """Concat a random number of randomly-shaped tensors of `dtype` along a
    random axis and verify each input slice survives unchanged."""
    # Random dims of rank 5
    shape = np.random.randint(1, 5, size=5)
    # Random number of tensors, but always > 1.
    num_tensors = np.random.randint(2, 10)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    params = {}
    # bfloat16 inputs are fed as float32 placeholders and cast in-graph
    # (presumably because feeding bfloat16 directly is unsupported here --
    # TODO confirm).
    if dtype == dtypes.bfloat16:
      dtype_feed = dtypes.float32
    else:
      dtype_feed = dtype
    with self.session(use_gpu=True):
      p = []
      for i in np.arange(num_tensors):
        # all inputs share the shape except along concat_dim, which is
        # re-randomized per tensor
        input_shape = shape
        input_shape[concat_dim] = np.random.randint(1, 5)
        placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
        p.append(placeholder)

        t = dtype_feed.as_numpy_dtype
        params[placeholder] = np.random.rand(*input_shape).astype(t)

      # cast the fed values to the dtype under test when they differ
      if dtype != dtype_feed:
        concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
      else:
        concat_inputs = p
      c = array_ops.concat(concat_inputs, concat_dim)
      # cast back so the result is comparable with the fed values
      if dtype != dtype_feed:
        c = math_ops.cast(c, dtype_feed)
      result = c.eval(feed_dict=params)

      self.assertEqual(result.shape, c.get_shape())
      cur_offset = 0

      for i in np.arange(num_tensors):
        # The index into the result is the ':' along all dimensions
        # except the concat_dim. slice(0, size) is used for ':', and
        # a list of slices is used to index into result.
        ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
        ind[concat_dim] = slice(cur_offset,
                                cur_offset + params[p[i]].shape[concat_dim])
        cur_offset += params[p[i]].shape[concat_dim]
        if dtype == dtype_feed:
          self.assertAllEqual(result[ind], params[p[i]])
        else:
          # round-tripped through a cast: only require closeness
          self.assertAllClose(result[ind], params[p[i]], 0.01)
@test_util.run_deprecated_v1
def testRandom(self):
self._testRandom(dtypes.bool)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int16)
self._testRandom(dtypes.int32)
self._testRandom(dtypes.int64)
self._testRandom(dtypes.bfloat16)
self._testRandom(dtypes.complex64)
self._testRandom(dtypes.complex128)
  @test_util.run_deprecated_v1
  def testInvalidConcatDimTypeAndShape(self):
    """Ill-typed/ill-shaped axis arguments and empty lists are rejected."""
    a = variables.Variable(constant_op.constant(1.0, shape=[1]))
    b = variables.Variable(constant_op.constant(2.0, shape=[1]))
    # Each call below either swaps values/axis or passes a wrong type.
    with self.assertRaises(ValueError):
      array_ops.concat(b, a)
    with self.assertRaises(TypeError):
      array_ops.concat(1, 4.2)
    with self.assertRaises(ValueError):
      array_ops.concat(1, a)
    with self.assertRaises(TypeError):
      array_ops.concat([a, b], a)
    with self.assertRaises(ValueError):
      array_ops.concat([a, b], [3])
    with self.assertRaises(ValueError):
      array_ops.concat([], 0)
    # An integer tensor for shape dim should throw no error.
    array_ops.concat(1, constant_op.constant(0, shape=[]))
    # A non-scalar tensor for shape should throw ValueError.
    with self.assertRaises(ValueError):
      array_ops.concat(1, constant_op.constant(0, shape=[1]))
  def _testGradientsSimple(self, dtype):
    """Checks concat gradients along axis 1 (also as -2) for `dtype`."""
    # Test both positive and negative concat axis.
    # -2 and 1 correspond to the same axis for 3-dimensional tensors.
    for axis in [-2, 1]:
      with test_util.use_gpu():
        inp = []
        inp_tensors = []
        for x in [1, 2, 6]:
          shape = [10, x, 2]
          t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
          if dtype.is_complex:
            # Give the imaginary parts non-trivial values as well.
            t += -1j * t
          inp.append(t)
          inp_tensors.append(
              constant_op.constant(
                  t.flatten(),
                  shape=shape,
                  dtype=dtype))
        c = array_ops.concat(inp_tensors, axis)
        output_shape = [10, 9, 2]
        grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
        if dtype.is_complex:
          grad_inp += -1j * grad_inp
        grad_tensor = constant_op.constant(
            grad_inp.flatten(), shape=output_shape)
        grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
        # Re-concatenating the per-input gradients must reproduce the
        # upstream gradient exactly.
        concated_grad = array_ops.concat(grad, axis)
        result = self.evaluate(concated_grad)
        self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsSimple(self):
self._testGradientsSimple(dtypes.float32)
self._testGradientsSimple(dtypes.complex64)
  @test_util.run_deprecated_v1
  def testGradientsFirstDim(self):
    """Checks concat gradients when concatenating along axis 0."""
    with test_util.use_gpu():
      inp = []
      inp_tensors = []
      for x in [1, 2, 6]:
        shape = [x, 10, 2]
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                t.flatten(),
                shape=shape,
                dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, 0)
      output_shape = [9, 10, 2]
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      # Re-concatenating the per-input gradients must reproduce the
      # upstream gradient exactly.
      concated_grad = array_ops.concat(grad, 0)
      result = self.evaluate(concated_grad)
      self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsLastDim(self):
# Test both positive and negative concat axis.
# -1 and 2 correspond to the same axis for 3-dimensional tensors.
for axis in [-1, 2]:
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
  def _RunAndVerifyGradientsRandom(self):
    """Gradient check with random shapes, tensor count, and concat axis."""
    # Random dims of rank 5
    input_shape = np.random.randint(1, 5, size=5)
    # Random number of tensors
    num_tensors = np.random.randint(12, 20)
    # Random dim to concat on
    concat_dim = np.random.randint(5)
    concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
    with test_util.use_gpu():
      inp = []
      inp_tensors = []
      for x in concat_dim_sizes:
        # NOTE(review): `shape` aliases `input_shape` (no copy); the
        # assignment below mutates it in place, which works because only
        # the concat dim differs between inputs.
        shape = input_shape
        shape[concat_dim] = x
        t = np.random.rand(*shape).astype("f")
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(t.flatten(), shape=shape,
                                 dtype=dtypes.float32))
      c = array_ops.concat(inp_tensors, concat_dim)
      # `output_shape` also aliases `input_shape`; its concat dim becomes
      # the total of all per-input concat-dim sizes.
      output_shape = input_shape
      output_shape[concat_dim] = concat_dim_sizes.sum()
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, concat_dim)
      result = self.evaluate(concated_grad)
      self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
  @test_util.run_deprecated_v1
  def testGradientWithUnknownInputDim(self):
    """Gradients must flow through concat of fully-unknown-shape inputs."""
    with self.session(use_gpu=True):
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)
      c = array_ops.concat([x, y], 2)
      output_shape = [10, 2, 9]
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
      concated_grad = array_ops.concat(grad, 2)
      # Shapes become known only at feed time: x and y split axis 2 as 3+6.
      params = {
          x: np.random.rand(10, 2, 3).astype("f"),
          y: np.random.rand(10, 2, 6).astype("f")
      }
      result = concated_grad.eval(feed_dict=params)
      self.assertAllEqual(result, grad_inp)
  @test_util.run_deprecated_v1
  def testShapeError(self):
    """Static shape checking must reject incompatible concat inputs."""
    # Rank doesn't match.
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], 1)
    # Dimensions don't match in a non-concat dim.
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[1, 2, 1]),
           constant_op.constant(20.0, shape=[3, 2, 1])
          ], 1)
    # concat_dim out of range (too large for rank 3).
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], 3)
    # concat_dim out of range (too negative for rank 3).
    with self.assertRaises(ValueError):
      array_ops.concat(
          [constant_op.constant(10.0, shape=[4, 4, 4]),
           constant_op.constant(20.0, shape=[4, 4, 4])
          ], -4)
  @test_util.run_deprecated_v1
  def testShapeWithUnknownConcatDim(self):
    """Shape inference when the concat axis is an unfed placeholder."""
    p1 = array_ops.placeholder(dtypes.float32)
    c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
    p2 = array_ops.placeholder(dtypes.float32)
    c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
    dim = array_ops.placeholder(dtypes.int32)
    concat = array_ops.concat([p1, c1, p2, c2], dim)
    # Rank is known from the constants even though the axis is not.
    self.assertEqual(4, concat.get_shape().ndims)
    # All dimensions unknown.
    concat2 = array_ops.concat([p1, p2], dim)
    self.assertEqual(None, concat2.get_shape())
    # Rank doesn't match.
    c3 = constant_op.constant(30.0, shape=[4, 4, 4])
    with self.assertRaises(ValueError):
      array_ops.concat([p1, c1, p2, c3], dim)
  @test_util.run_deprecated_v1
  def testZeroSize(self):
    """Zero-sized inputs must work in both the forward and gradient pass."""
    # Verify that concat doesn't crash and burn for zero size inputs
    np.random.seed(7)
    with test_util.use_gpu():
      # Sweep surrounding shapes and concat-dim sizes 0/1/2 on both sides.
      for shape0 in (), (2,):
        axis = len(shape0)
        for shape1 in (), (3,):
          for n0 in 0, 1, 2:
            for n1 in 0, 1, 2:
              x0 = np.random.randn(*(shape0 + (n0,) + shape1))
              x1 = np.random.randn(*(shape0 + (n1,) + shape1))
              correct = np.concatenate([x0, x1], axis=axis)
              # TODO(irving): Make tf.concat handle map, then drop list().
              xs = list(map(constant_op.constant, [x0, x1]))
              c = array_ops.concat(xs, axis)
              self.assertAllEqual(self.evaluate(c), correct)
              # Check gradients
              dc = np.random.randn(*c.get_shape().as_list())
              dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
              self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
@test_util.run_deprecated_v1
def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 0)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
@test_util.run_deprecated_v1
def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.cached_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 1)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
  @test_util.run_deprecated_v1
  def testIndexedSlicesConcatDim0Grad(self):
    """Gradient check along axis 0 when gradients arrive via a gather."""
    x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
    output_shape = [4, 7, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.cached_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 0)
      # Gathering rows routes the upstream gradient through gather's
      # backward pass before it reaches concat.
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
      self.assertLess(err, 1e-11)
  @test_util.run_deprecated_v1
  def testIndexedSlicesConcatDim1Grad(self):
    """Gradient check along axis 1 when gradients arrive via a gather."""
    x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
    output_shape = [4, 11, 3]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.cached_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 1)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
      self.assertLess(err, 1e-11)
  @test_util.run_deprecated_v1
  def testIndexedSlicesConcatDim2Grad(self):
    """Gradient check along axis 2 when gradients arrive via a gather."""
    x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
    output_shape = [4, 7, 6]
    x_vals = [
        np.random.random_sample(x_shape).astype(np.float64)
        for x_shape in x_shapes
    ]
    with self.cached_session():
      xs = [constant_op.constant(x_val) for x_val in x_vals]
      x_concat = array_ops.concat(xs, 2)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape)
      self.assertLess(err, 1e-11)
  @test_util.run_deprecated_v1
  def testIndexedSlicesConcatDim1Grad_UnknownInputDim(self):
    """Same as testIndexedSlicesConcatDim1Grad, but with unknown shapes."""
    x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
    output_shape = [4, 11, 3]
    with self.cached_session():
      # Placeholders with no static shape at all; values are fed below.
      x_1 = array_ops.placeholder(dtypes.float64)
      x_2 = array_ops.placeholder(dtypes.float64)
      x_3 = array_ops.placeholder(dtypes.float64)
      xs = [x_1, x_2, x_3]
      x_concat = array_ops.concat(xs, 1)
      output = array_ops.gather(x_concat, [1, 2, 0, 5])
      params = {
          x_1: np.random.random_sample(x_shapes[0]).astype(np.float64),
          x_2: np.random.random_sample(x_shapes[1]).astype(np.float64),
          x_3: np.random.random_sample(x_shapes[2]).astype(np.float64)
      }
      err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
                                                    output_shape,
                                                    extra_feed_dict=params)
      self.assertLess(err, 1e-11)
def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(
self.evaluate(concat_list_t), self.evaluate(concat_tuple_t))
  @test_util.run_deprecated_v1
  def testConcatNoScalars(self):
    """Concatenating scalars must be rejected with a helpful message."""
    scalar = constant_op.constant(7)
    dim = array_ops.placeholder(dtypes.int32)
    # NOTE(review): assertRaisesRegexp is a deprecated alias of
    # assertRaisesRegex on Python 3; presumably kept for Python 2
    # compatibility — confirm before modernizing.
    with self.assertRaisesRegexp(
        ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
      array_ops.concat([scalar, scalar, scalar], dim)
  # Important because the GPU implementation could fail if shared memory
  # is not large enough to hold all the inputs.
  @test_util.run_deprecated_v1
  def testConcatLargeNumberOfTensors(self):
    """Concats hundreds/thousands of tensors and verifies every slice."""
    with self.session(use_gpu=True):
      for concat_dim in range(2):
        params = {}
        p = []
        shape = np.array([7, 13])
        # Use a much larger tensor count when a GPU is available to stress
        # the GPU kernel.
        if test.is_gpu_available():
          num_tensors = 5000
        else:
          num_tensors = 500
        for i in np.arange(num_tensors):
          input_shape = shape
          placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
          p.append(placeholder)
          params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
        concat_inputs = p
        c = array_ops.concat(concat_inputs, concat_dim)
        result = c.eval(feed_dict=params)
        self.assertEqual(result.shape, c.get_shape())
        cur_offset = 0
        # Verify that each input occupies its expected slice of the result.
        for i in np.arange(num_tensors):
          # The index into the result is the ':' along all dimensions
          # except the concat_dim. slice(0, size) is used for ':', and
          # a list of slices is used to index into result.
          index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
          index[concat_dim] = slice(cur_offset,
                                    cur_offset + params[p[i]].shape[concat_dim])
          cur_offset += params[p[i]].shape[concat_dim]
          self.assertAllEqual(result[index], params[p[i]])
def testConcatEmpty(self):
with test_util.use_gpu():
t1 = []
t2 = []
output = gen_array_ops.concat_v2([t1, t2], 0)
self.assertFalse(self.evaluate(output)) # Checks that output is empty
  @test_util.run_deprecated_v1
  def testConcatInvalidAxis(self):
    """An axis outside the inputs' rank must raise ValueError."""
    with self.assertRaises(ValueError):
      with test_util.use_gpu():
        t1 = [1]
        t2 = [2]
        # Rank-1 inputs cannot be concatenated along axis 1.
        gen_array_ops.concat_v2([t1, t2], 1).eval()
  def testConcatNegativeAxis(self):
    """Negative axes count from the end: -2 == 0 and -1 == 1 for rank 2."""
    with test_util.use_gpu():
      t1 = [[1, 2, 3], [4, 5, 6]]
      t2 = [[7, 8, 9], [10, 11, 12]]
      c = gen_array_ops.concat_v2([t1, t2], -2)
      self.assertEqual([4, 3], c.get_shape().as_list())
      output = self.evaluate(c)
      self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
                          output)
      c = gen_array_ops.concat_v2([t1, t2], -1)
      self.assertEqual([2, 6], c.get_shape().as_list())
      output = self.evaluate(c)
      self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
  def _testGradientsForAxis(
      self, inp_tensors, axis, output_shape, feed_dict=None):
    """Asserts that re-concatenated concat gradients equal the upstream
    gradient for the given inputs and axis."""
    with self.cached_session():
      c = array_ops.concat(inp_tensors, axis)
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, axis)
      result = concated_grad.eval(feed_dict=feed_dict)
      self.assertAllEqual(result, grad_inp)
  def _testIndexedSlicesGradientsForAxis(
      self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
    """Like _testGradientsForAxis, but routes the gradient through a
    gather on both the forward output and the gradient comparison."""
    with self.cached_session():
      c = array_ops.gather(
          array_ops.concat(inp_tensors, axis), gather_indexes)
      grad_inp = np.random.rand(*output_shape).astype("f")
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.gather(
          array_ops.concat(grad, axis), gather_indexes)
      result = concated_grad.eval(feed_dict=feed_dict)
      self.assertAllEqual(result, grad_inp)
  @test_util.run_deprecated_v1
  def testGradientsNegativeAxis(self):
    """Concat gradients with negative axes, unknown shapes, and
    IndexedSlices-producing gathers."""
    x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
    inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
                   constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]
    # Test concat gradient with axis == -2
    self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])
    # Test concat gradient with unknown-shape tensors.
    x1_placeholder = array_ops.placeholder(dtypes.float32)
    x2_placeholder = array_ops.placeholder(dtypes.float32)
    inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
    feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
    self._testGradientsForAxis(
        inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)
    # Test IndexedSlices concat gradient.
    self._testIndexedSlicesGradientsForAxis(
        inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])
    # We don't support calculating IndexedSlices concat gradient for
    # negative indexes when rank is not known.
    with self.assertRaises(ValueError):
      self._testIndexedSlicesGradientsForAxis(
          inp_tensors_placeholders, -2, output_shape=[2, 3],
          gather_indexes=[2, 0], feed_dict=feed_dict)
  def testConcatAxisType(self):
    """The axis argument may be an int32 or int64 scalar tensor."""
    for dtype in [dtypes.int32, dtypes.int64]:
      with test_util.use_gpu():
        t1 = [[1, 2, 3], [4, 5, 6]]
        t2 = [[7, 8, 9], [10, 11, 12]]
        c = gen_array_ops.concat_v2([t1, t2],
                                    constant_op.constant(1, dtype=dtype))
        self.assertEqual([2, 6], c.get_shape().as_list())
        output = self.evaluate(c)
        self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
class ConcatOffsetTest(test.TestCase):
  """Tests for the ConcatOffset op, which computes each input's starting
  offset within the concatenated result."""

  def testBasic(self):
    """Offsets accumulate along the concat dim; other dims stay 0."""
    with test_util.use_gpu():
      cdim = constant_op.constant(1, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      s2 = constant_op.constant([2, 20, 5], dtypes.int32)
      off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
      ans = self.evaluate(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])

  @test_util.run_deprecated_v1
  def testNotVector(self):
    """Each shape input must be a rank-1 vector."""
    # NOTE(review): assertRaisesRegexp is a deprecated alias of
    # assertRaisesRegex on Python 3; presumably kept for Python 2
    # compatibility — confirm before modernizing.
    cdim = constant_op.constant(1, dtypes.int32)
    s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
    s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
    off = gen_array_ops.concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 r"should be a vector"):
      self.evaluate(off)

  @test_util.run_deprecated_v1
  def testConcatDimOutOfRange(self):
    """The concat dim must be smaller than the shapes' length."""
    cdim = constant_op.constant(4, dtypes.int32)
    s0 = constant_op.constant([2, 3, 5], dtypes.int32)
    s1 = constant_op.constant([2, 7, 5], dtypes.int32)
    off = gen_array_ops.concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 r"Concat dim is out of range: 4 vs. 3"):
      self.evaluate(off)

  @test_util.run_deprecated_v1
  def testDimMismatch(self):
    """All shape vectors must have the same number of elements."""
    cdim = constant_op.constant(1, dtypes.int32)
    s0 = constant_op.constant([2, 3, 5], dtypes.int32)
    s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
    off = gen_array_ops.concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                 r"should contain 3 elem"):
      self.evaluate(off)

  @test_util.run_deprecated_v1
  @test_util.disable_xla("b/123337890")  # Error messages differ
  def testSizeMismatch(self):
    """Non-concat dimensions must match across all inputs."""
    cdim = constant_op.constant(1, dtypes.int32)
    s0 = constant_op.constant([2, 3, 5], dtypes.int32)
    s1 = constant_op.constant([2, 7, 10], dtypes.int32)
    off = gen_array_ops.concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(
        errors_impl.InvalidArgumentError,
        r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
        r"and doesn't match input 0 with shape \[2 3 5\]."):
      self.evaluate(off)

  def testNegativeDim(self):
    """Negative concat dims count back from the end of the shape vector."""
    with test_util.use_gpu():
      # -2 is equivalent to dim 1 for length-3 shapes.
      cdim = constant_op.constant(-2, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      s2 = constant_op.constant([2, 20, 5], dtypes.int32)
      off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
      ans = self.evaluate(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])

      # -3 is equivalent to dim 0 for length-3 shapes.
      cdim = constant_op.constant(-3, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([1, 3, 5], dtypes.int32)
      s2 = constant_op.constant([3, 3, 5], dtypes.int32)
      off = gen_array_ops.concat_offset(cdim, [s0, s1, s2])
      ans = self.evaluate(off)
      self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])

  def testCreateMemDecBlockedFormat(self):
    """Try to create the mkl concat operation
    when one of the input's memory descriptor is in blocked format
    """
    if test_util.IsMklEnabled():
      # Strided slices / convs below produce inputs whose MKL memory
      # descriptor ends up in blocked format; the test only checks that
      # building and evaluating the graph does not crash.
      s0 = np.ones((1, 8188, 4092, 1), dtype=np.uint8).astype(np.float32)
      s1 = array_ops.strided_slice(
          s0, [0, 1, 1, 0], [0, -1, -1, 0], [1, 1, 1, 1],
          begin_mask=9,
          end_mask=9)
      s2 = array_ops.slice(s1, [0, 0, 0, 0], [-1, -1, -1, 1])
      s3_1 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])
      s3_2 = array_ops.slice(s2, [0, 4, 4, 0], [-1, 8178, 4082, 1])
      filter4_1 = constant_op.constant([[[[1.18, -0.51]]]])
      s4_1 = nn_ops.conv2d(
          s3_1, filter4_1, strides=[1, 1, 1, 1], padding="VALID")
      filter4_2 = constant_op.constant([[[[1.38, -0.11]]]])
      s4_2 = nn_ops.conv2d(
          s3_2, filter4_2, strides=[1, 1, 1, 1], padding="VALID")
      s5_1 = array_ops.slice(s4_1, [0, 6, 6, 0], [-1, 1, 1, -1])
      s5_2 = array_ops.slice(s4_2, [0, 6, 6, 0], [-1, 1, 1, -1])
      x_concat = array_ops.concat([s5_1, s5_2], 3)
      self.evaluate(
          x_concat
      )  # This test is only meant to check the creation is not crashed
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test fixture for tests involving installing/updating Chrome.
Provides an interface to install or update chrome from within a testcase, and
allows users to run tests using installed version of Chrome. User and system
level installations are supported, and either one can be used for running the
tests. Currently the only platform it supports is Windows.
"""
import atexit
import os
import platform
import stat
import sys
import tempfile
import unittest
import urllib
import chrome_installer_win
_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(os.path.dirname(_DIRECTORY), 'pyautolib'))
sys.path.append(os.path.join(_DIRECTORY, os.path.pardir, os.path.pardir,
os.path.pardir, 'third_party', 'webdriver',
'pylib'))
# This import should go after sys.path is set appropriately.
from selenium import webdriver
import selenium.webdriver.chrome.service as service
from selenium.webdriver.chrome.service import WebDriverException
import pyauto_utils
def MakeTempDir(parent_dir=None):
  """Creates a temporary directory and returns an absolute path to it.

  The directory is deleted automatically when the python interpreter exits
  normally.

  Args:
    parent_dir: Directory in which to create the temp dir. The system temp
        dir is used when None.

  Returns:
    The absolute path to the temporary directory.
  """
  temp_path = tempfile.mkdtemp(dir=parent_dir)

  def _Cleanup():
    # shutil.rmtree can't delete read-only files on Windows, so walk the
    # tree bottom-up, force each file writable, and remove it by hand.
    for root, subdirs, filenames in os.walk(temp_path, topdown=False):
      for filename in filenames:
        full_name = os.path.join(root, filename)
        os.chmod(full_name, stat.S_IWRITE)
        os.remove(full_name)
      for subdir in subdirs:
        os.rmdir(os.path.join(root, subdir))
    # The directory itself is empty at this point and can be removed.
    os.rmdir(temp_path)

  atexit.register(_Cleanup)
  return temp_path
class InstallTest(unittest.TestCase):
  """Base updater test class.

  All dependencies, like Chrome installers and ChromeDriver, are downloaded at
  the beginning of the test. Dependencies are downloaded in the temp directory.
  This download occurs only once, before the first test is executed. Each test
  case starts an instance of ChromeDriver and terminates it upon completion.
  All updater tests should derive from this class.

  Example:

  class SampleUpdater(InstallTest):

    def testCanOpenGoogle(self):
      self.Install(self.GetUpdateBuilds()[0])
      self.StartChrome()
      self._driver.get('http://www.google.com/')
      self.Install(self.GetUpdateBuilds()[1])
      self.StartChrome()
      self._driver.get('http://www.google.org/')

  Include the following in your updater test script to make it run standalone.

  from install_test import Main

  if __name__ == '__main__':
    Main()

  To fire off an updater test, use the command below.
    python test_script.py --url=<URL> --update-builds=24.0.1299.0,24.0.1300.0
  """

  # Class-level state shared across all test cases; populated once by
  # InitTestFixture before the first test runs.
  _installer_paths = {}
  _chrome_driver = ''
  _installer_options = []
  _install_type = chrome_installer_win.InstallationType.USER

  def __init__(self, methodName='runTest'):
    unittest.TestCase.__init__(self, methodName)
    self._driver = None
    # Start from a clean machine: uninstall any Chrome already present.
    current_version = chrome_installer_win.ChromeInstallation.GetCurrent()
    if current_version:
      current_version.Uninstall()

  def setUp(self):
    """Called before each unittest to prepare the test fixture."""
    self._StartService()

  def tearDown(self):
    """Called at the end of each unittest to do any test related cleanup."""
    self._driver.quit()
    self._service.stop()
    self._installation.Uninstall()

  def _StartService(self):
    """Starts ChromeDriver service."""
    self._service = service.Service(InstallTest._chrome_driver)
    self._service.start()

  def StartChrome(self, caps=None):
    """Creates a ChromeDriver instance.

    Args:
      caps: Capabilities that will be passed to ChromeDriver; an empty dict
          is used when omitted. (A None default avoids the mutable-default
          pitfall of the previous `caps={}` signature.)
    """
    if caps is None:
      caps = {}
    self._driver = webdriver.Remote(self._service.service_url, caps)

  def Install(self, build, master_pref=None):
    """Helper method that installs the specified Chrome build.

    Args:
      build: Chrome version number that will be used for installation.
      master_pref: Location of the master preferences file.
    """
    # Shut down any driver talking to the Chrome we are about to replace.
    if self._driver:
      try:
        self._driver.quit()
      except WebDriverException:
        pass
    options = []
    options.extend(self._installer_options)
    if self._install_type == chrome_installer_win.InstallationType.SYSTEM:
      options.append('--system-level')
    if master_pref:
      options.append('--installerdata="%s"' % master_pref)
    self._installation = chrome_installer_win.Install(
        self._installer_paths[build],
        self._install_type,
        build,
        options)

  def GetInstallBuild(self):
    """Returns the Chrome build to be used for install test scenarios."""
    return self._install_build

  def GetUpdateBuilds(self):
    """Returns Chrome builds to be used for update test scenarios."""
    return self._update_builds

  @staticmethod
  def _Download(url, path):
    """Downloads a file from the specified URL.

    Args:
      url: URL where the file is located.
      path: Location where file will be downloaded.

    Raises:
      RuntimeError: The URL could not be reached.
    """
    if not pyauto_utils.DoesUrlExist(url):
      raise RuntimeError('Either the URL or the file name is invalid.')
    urllib.urlretrieve(url, path)

  @staticmethod
  def SetInstallType(install_type):
    """Sets Chrome installation type.

    Args:
      install_type: Type of installation(i.e., user or system).
    """
    InstallTest._install_type = install_type

  @staticmethod
  def InitTestFixture(install_build, update_builds, base_url, options):
    """Static method for passing command options to InstallTest.

    We do not instantiate InstallTest. Therefore, command arguments cannot be
    passed to its constructor. Since InstallTest needs to use these options,
    and using globals is not an option, this method can be used by the Main
    class to pass the arguments it parses onto InstallTest.

    Args:
      install_build: A string representing the Chrome build to be used for
                     install testing. Pass this argument only if testing
                     fresh install scenarios.
      update_builds: A list that contains the Chrome builds to be used for
                     testing update scenarios. Pass this argument only if
                     testing upgrade scenarios.
      base_url: Base url of the 'official chrome builds' page.
      options: A list that contains options to be passed to Chrome installer.
    """
    system = ({'Windows': 'win',
               'Darwin': 'mac',
               'Linux': 'linux'}).get(platform.system())
    InstallTest._install_build = install_build
    InstallTest._update_builds = update_builds
    InstallTest._installer_options = options
    tempdir = MakeTempDir()
    builds = []
    if InstallTest._install_build:
      builds.append(InstallTest._install_build)
    if InstallTest._update_builds:
      builds.extend(InstallTest._update_builds)
    # Remove any duplicate build numbers.
    builds = list(frozenset(builds))
    for build in builds:
      url = '%s%s/%s/mini_installer.exe' % (base_url, build, system)
      installer_path = os.path.join(tempdir, 'mini_installer_%s.exe' % build)
      InstallTest._installer_paths[build] = installer_path
      InstallTest._Download(url, installer_path)
    InstallTest._chrome_driver = os.path.join(tempdir, 'chromedriver.exe')
    # NOTE(review): `build` below is the loop variable leaking out of the
    # for-loop above, so ChromeDriver is fetched for whichever build the
    # loop ended on (and this raises NameError when `builds` is empty) —
    # confirm whether a specific build was intended.
    url = '%s%s/%s/%s/chromedriver.exe' % (base_url, build, system,
                                           'chrome-win32.test')
    InstallTest._Download(url, InstallTest._chrome_driver)
| |
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for huawei 18000 storage."""
import json
import mock
import os
import shutil
import tempfile
import time
from xml.dom import minidom
from oslo_log import log as logging
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_driver
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
LOG = logging.getLogger(__name__)
# A fake volume located on a valid backend pool ('OpenStack_Pool').
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
               'size': 2,
               'volume_name': 'vol1',
               'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
               'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
               'provider_auth': None,
               'project_id': 'project',
               'display_name': 'vol1',
               'display_description': 'test volume',
               'volume_type_id': None,
               'host': 'ubuntu001@backend001#OpenStack_Pool',
               'provider_location': '11',
               }

# A fake volume whose host references a nonexistent pool; used to
# exercise error paths.
error_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0637',
                'size': 2,
                'volume_name': 'vol2',
                'id': '21ec7341-9256-497b-97d9-ef48edcf0637',
                'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0637',
                'provider_auth': None,
                'project_id': 'project',
                'display_name': 'vol2',
                'display_description': 'test error_volume',
                'volume_type_id': None,
                'host': 'ubuntu@huawei#OpenStack_Pool_error',
                'provider_location': '12',
                }

# A fake snapshot; deliberately shares its IDs with test_volume.
test_snap = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
             'size': 1,
             'volume_name': 'vol1',
             'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
             'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
             'provider_auth': None,
             'project_id': 'project',
             'display_name': 'vol1',
             'display_description': 'test volume',
             'volume_type_id': None,
             'provider_location': '11',
             }

# A fake connector describing the attaching host's iSCSI IQN and FC
# WWPN/WWNN identities.
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
                 'wwpns': ['10000090fa0d6754'],
                 'wwnns': ['10000090fa0d6755'],
                 'host': 'ubuntuc',
                 }

# SmartTier/SmartPartition/SmartCache options used by smart-feature tests.
smarttier_opts = {'smarttier': 'true',
                  'smartpartition': False,
                  'smartcache': False,
                  'thin_provisioning_support': True,
                  'thick_provisioning_support': False,
                  'policy': '3',
                  'readcachepolicy': '1',
                  'writecachepolicy': None,
                  }
# A fake generic success response from the storage backend.
FAKE_COMMON_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data":{}
}
"""
# A fake response for logging in to the Huawei storage backend.
FAKE_GET_LOGIN_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"username": "admin",
"iBaseToken": "2001031430",
"deviceid": "210235G7J20000000000"
}
}
"""
# A fake response for logging out of the Huawei storage backend.
FAKE_LOGIN_OUT_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11
}
}
"""
# A fake response containing storage pool information.
FAKE_STORAGE_POOL_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"USERFREECAPACITY": "985661440",
"ID": "0",
"NAME": "OpenStack_Pool",
"USERTOTALCAPACITY": "985661440"
}]
}
"""
# A fake response for LUN or LUN-group requests.
FAKE_LUN_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA"
}
}
"""
FAKE_LUN_DELETE_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "11",
"IOCLASSID": "11",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"RUNNINGSTATUS": "2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27",
"LUNLIST": ""
}
}
"""
FAKE_QUERY_ALL_LUN_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": "1",
"NAME": "IexzQZJWSXuX2e9I7c8GNQ"
}]
}
"""
FAKE_LUN_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"11"
}]
}
"""
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """
{
"error": {
"code":0
},
"data":[{
"NAME":"OpenStack_LunGroup_1",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}]
}
"""
FAKE_QUERY_LUN_GROUP_RESPONSE = """
{
"error": {
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_LUN_COUNT_RESPONSE = """
{
"data":{
"COUNT":"7"
},
"error":{
"code":0,
"description":"0"
}
}
"""
# A fake response listing existing snapshots.
FAKE_SNAPSHOT_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": 11,
"NAME": "wr_LMKAjS7O_VtsEIREGYw"
},
{
"ID": 12,
"NAME": "SDFAJSDFLKJ"
}]
}
"""
# A fake response for snapshot creation.
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for querying a snapshot.
FAKE_GET_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response for querying iSCSI target information.
FAKE_GET_ISCSI_INFO_RESPONSE = """
{
"data": [{
"ETHPORTID": "139267",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.101.244",
"TPGT": "8196",
"TYPE": 249
},
{
"ETHPORTID": "139268",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:111.111.102.244",
"TPGT": "8196",
"TYPE": 249
}
],
"error": {
"code": 0,
"description": "0"
}
}
"""
# A fake response of get eth info response
FAKE_GET_ETH_INFO_RESPONSE = """
{
    "error": {
        "code": 0
    },
    "data": [{
        "PARENTTYPE": 209,
        "MACADDRESS": "00:22:a1:0a:79:57",
        "ETHNEGOTIATE": "-1",
        "ERRORPACKETS": "0",
        "IPV4ADDR": "192.168.1.2",
        "IPV6GATEWAY": "",
        "IPV6MASK": "0",
        "OVERFLOWEDPACKETS": "0",
        "ISCSINAME": "P0",
        "HEALTHSTATUS": "1",
        "ETHDUPLEX": "2",
        "ID": "16909568",
        "LOSTPACKETS": "0",
        "TYPE": 213,
        "NAME": "P0",
        "INIORTGT": "4",
        "RUNNINGSTATUS": "10",
        "IPV4GATEWAY": "",
        "BONDNAME": "",
        "STARTTIME": "1371684218",
        "SPEED": "1000",
        "ISCSITCPPORT": "0",
        "IPV4MASK": "255.255.0.0",
        "IPV6ADDR": "",
        "LOGICTYPE": "0",
        "LOCATION": "ENG0.A5.P0",
        "MTU": "1500",
        "PARENTID": "1.5"
    },
    {
        "PARENTTYPE": 209,
        "MACADDRESS": "00:22:a1:0a:79:57",
        "ETHNEGOTIATE": "-1",
        "ERRORPACKETS": "0",
        "IPV4ADDR": "192.168.1.1",
        "IPV6GATEWAY": "",
        "IPV6MASK": "0",
        "OVERFLOWEDPACKETS": "0",
        "ISCSINAME": "P0",
        "HEALTHSTATUS": "1",
        "ETHDUPLEX": "2",
        "ID": "16909568",
        "LOSTPACKETS": "0",
        "TYPE": 213,
        "NAME": "P0",
        "INIORTGT": "4",
        "RUNNINGSTATUS": "10",
        "IPV4GATEWAY": "",
        "BONDNAME": "",
        "STARTTIME": "1371684218",
        "SPEED": "1000",
        "ISCSITCPPORT": "0",
        "IPV4MASK": "255.255.0.0",
        "IPV6ADDR": "",
        "LOGICTYPE": "0",
        "LOCATION": "ENG0.A5.P3",
        "MTU": "1500",
        "PARENTID": "1.5"
    }]
}
"""
# A fake response for ETH ports associated with a port group; the two
# IPV4ADDR values feed the expected target_ips in the iSCSI tests.
FAKE_GET_ETH_ASSOCIATE_RESPONSE = """
{
    "error":{
        "code":0
    },
    "data":[{
        "IPV4ADDR": "192.168.1.1",
        "HEALTHSTATUS": "1",
        "RUNNINGSTATUS": "10"
    },
    {
        "IPV4ADDR": "192.168.1.2",
        "HEALTHSTATUS": "1",
        "RUNNINGSTATUS": "10"
    }
    ]
}
"""
# A fake response of get iscsi device info response
FAKE_GET_ISCSI_DEVICE_RESPONSE = """
{
    "error": {
        "code": 0
    },
    "data": [{
        "CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:"
    }]
}
"""
# A fake response listing all hosts.  (The original comment here was a
# copy-paste of the iSCSI-device one.)
FAKE_GET_ALL_HOST_INFO_RESPONSE = """
{
    "error": {
        "code": 0
    },
    "data": [{
        "PARENTTYPE": 245,
        "NAME": "ubuntuc",
        "DESCRIPTION": "",
        "RUNNINGSTATUS": "1",
        "IP": "",
        "PARENTNAME": "",
        "OPERATIONSYSTEM": "0",
        "LOCATION": "",
        "HEALTHSTATUS": "1",
        "MODEL": "",
        "ID": "1",
        "PARENTID": "",
        "NETWORKNAME": "",
        "TYPE": 21
    },
    {
        "PARENTTYPE": 245,
        "NAME": "ubuntu",
        "DESCRIPTION": "",
        "RUNNINGSTATUS": "1",
        "IP": "",
        "PARENTNAME": "",
        "OPERATIONSYSTEM": "0",
        "LOCATION": "",
        "HEALTHSTATUS": "1",
        "MODEL": "",
        "ID": "2",
        "PARENTID": "",
        "NETWORKNAME": "",
        "TYPE": 21
    }]
}
"""
# A fake response of get host or hostgroup info response
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """
{
    "error": {
        "code": 0
    },
    "data": [{
        "NAME": "OpenStack_HostGroup_1",
        "DESCRIPTION":"",
        "ID":"0",
        "TYPE":14
    }]
}
"""
# A fake response for a single host-group query/creation.
FAKE_GET_HOST_GROUP_INFO_RESPONSE = """
{
    "error": {
        "code": 0
    },
    "data":{
        "NAME":"ubuntuc",
        "DESCRIPTION":"",
        "ID":"0",
        "TYPE":14
    }
}
"""
"""
# A fake response of lun copy info response
FAKE_GET_LUN_COPY_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"COPYSTOPTIME": "-1",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "36",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "0",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "-1"
}
}
"""
# A fake response of lun copy list info response
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"COPYSTOPTIME": "1372209335",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "40",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "100",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "1372209329"
}]
}
"""
# A fake response of mappingview info response
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"IexzQZJWSXuX2e9I7c8GNQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
},
{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"2",
"INBANDLUNWWN":"",
"TYPE":245
}]
}
"""
FAKE_GET_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
}
}
"""
FAKE_FC_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6754",
"OPERATIONSYSTEM":"255",
"TYPE":223
},
{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6755",
"OPERATIONSYSTEM":"255",
"TYPE":223
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
}]
}
"""
FAKE_HOST_LINK_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"PARENTTYPE":21,
"TARGET_ID":"0000000000000000",
"INITIATOR_NODE_WWN":"20000090fa0d6754",
"INITIATOR_TYPE":"223",
"RUNNINGSTATUS":"27",
"PARENTNAME":"ubuntuc",
"INITIATOR_ID":"10000090fa0d6754",
"TARGET_PORT_WWN":"24000022a10a2a39",
"HEALTHSTATUS":"1",
"INITIATOR_PORT_WWN":"10000090fa0d6754",
"ID":"010000090fa0d675-0000000000110400",
"TARGET_NODE_WWN":"21000022a10a2a39",
"PARENTID":"1",
"CTRL_ID":"0",
"TYPE":255,
"TARGET_TYPE":"212"
}]
}
"""
FAKE_PORT_GROUP_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":11,
"NAME": "portgroup-test"
}]
}
"""
# NOTE(review): FAKE_ISCSI_INITIATOR_RESPONSE was originally assigned
# three times in a row with semantically identical payloads; each
# re-definition merely shadowed the previous one.  The two redundant
# re-definitions that lived here are collapsed into this single one,
# whose parsed value is identical to the first definition above.
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
    "error":{
        "code":0
    },
    "data":[{
        "CHAPNAME":"mm-user",
        "HEALTHSTATUS":"1",
        "ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
        "ISFREE":"true",
        "MULTIPATHTYPE":"1",
        "NAME":"",
        "OPERATIONSYSTEM":"255",
        "RUNNINGSTATUS":"28",
        "TYPE":222,
        "USECHAP":"true"
    }]
}
"""
FAKE_ERROR_INFO_RESPONSE = """
{
"error":{
"code":31755596
}
}
"""
FAKE_ERROR_LUN_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"IOCLASSID":"11",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA"
}
}
"""
FAKE_SYSTEM_VERSION_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10"
}
}
"""
FAKE_QOS_INFO_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"ID": "11"
}
}
"""
# Lookup table consumed by Fake18000Client.call(): maps a normalized
# "<url path>/<HTTP method>" command string to the canned JSON response
# the fake REST client should return.  Keys must match the command
# strings built in call() exactly, including query strings.
# mock login info map
MAP_COMMAND_TO_FAKE_RESPONSE = {}
MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = (
    FAKE_GET_LOGIN_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['sessions'] = (
    FAKE_LOGIN_OUT_STORAGE_RESPONSE)
# mock storage info map
MAP_COMMAND_TO_FAKE_RESPONSE['storagepool'] = (
    FAKE_STORAGE_POOL_RESPONSE)
# mock lun info map
MAP_COMMAND_TO_FAKE_RESPONSE['lun'] = (
    FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/11/GET'] = (
    FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/1/GET'] = (
    FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/11/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/1/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun?range=[0-65535]/GET'] = (
    FAKE_QUERY_ALL_LUN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
                             '&ASSOCIATEOBJID=11/GET'] = (
    FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
                             '&ASSOCIATEOBJID=12/GET'] = (
    FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21'
                             '&ASSOCIATEOBJID=0/GET'] = (
    FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21'
                             '&ASSOCIATEOBJID=1/GET'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup?range=[0-8191]/GET'] = (
    FAKE_QUERY_LUN_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup'] = (
    FAKE_QUERY_LUN_GROUP_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate'] = (
    FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
                             '&ASSOCIATEOBJID=1/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
                             '&ASSOCIATEOBJID=11/GET'] = (
    FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
                             '&ASSOCIATEOBJID=1/GET'] = (
    FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
                             '&ASSOCIATEOBJID=11/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/count?TYPE=11&ASSOCIATEOBJTYPE=256'
                             '&ASSOCIATEOBJID=11/GET'] = (
    FAKE_LUN_COUNT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lun/expand/PUT'] = (
    FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11'
                             '&ASSOCIATEOBJID=12/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot'] = (
    FAKE_CREATE_SNAPSHOT_INFO_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/11/GET'] = (
    FAKE_GET_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/activate'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/stop/PUT'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot/11/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['snapshot?range=[0-32767]/GET'] = (
    FAKE_SNAPSHOT_LIST_INFO_RESPONSE)
# mock QoS info map
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/11/GET'] = (
    FAKE_LUN_DELETE_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/11/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/active/11/PUT'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/'] = (
    FAKE_QOS_INFO_RESPONSE)
# mock iscsi info map
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_tgt_port/GET'] = (
    FAKE_GET_ISCSI_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['eth_port/GET'] = (
    FAKE_GET_ETH_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257'
                             '&ASSOCIATEOBJID=11/GET'] = (
    FAKE_GET_ETH_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsidevicename'] = (
    FAKE_GET_ISCSI_DEVICE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator?range=[0-256]/GET'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/POST'] = (
    FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/PUT'] = (
    FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_initiator/'
                             'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = (
    FAKE_ISCSI_INITIATOR_RESPONSE)
# mock host info map
MAP_COMMAND_TO_FAKE_RESPONSE['host?range=[0-65535]/GET'] = (
    FAKE_GET_ALL_HOST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup?range=[0-8191]/GET'] = (
    FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup'] = (
    FAKE_GET_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['host/associate?TYPE=21&ASSOCIATEOBJTYPE=14'
                             '&ASSOCIATEOBJID=0/GET'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['hostgroup/associate'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
# mock copy info map
MAP_COMMAND_TO_FAKE_RESPONSE['luncopy'] = (
    FAKE_GET_LUN_COPY_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY?range=[0-1023]/GET'] = (
    FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY/start/PUT'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['LUNCOPY/0/DELETE'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
# mock mapping view info map
MAP_COMMAND_TO_FAKE_RESPONSE['mappingview?range=[0-8191]/GET'] = (
    FAKE_GET_MAPPING_VIEW_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['mappingview'] = (
    FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
# mock FC info map
MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?ISFREE=true&range=[0-8191]/GET'] = (
    FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator/10000090fa0d6754/PUT'] = (
    FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['host_link?INITIATOR_TYPE=223'
                             '&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = (
    FAKE_HOST_LINK_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['portgroup?range=[0-8191]&TYPE=257/GET'] = (
    FAKE_PORT_GROUP_RESPONSE)
# mock system info map
MAP_COMMAND_TO_FAKE_RESPONSE['system/'] = (
    FAKE_SYSTEM_VERSION_RESPONSE)
def Fake_sleep(time):
    """No-op stand-in for time.sleep() so retry loops finish instantly."""
    return None
class Fake18000Client(rest_client.RestClient):
    """REST-client double that answers from MAP_COMMAND_TO_FAKE_RESPONSE.

    The various ``*Flag`` attributes are toggled/inspected directly by
    tests to simulate array-side state; setting ``test_fail`` makes the
    next call() return an error payload instead of the canned success.
    """

    def __init__(self, configuration):
        rest_client.RestClient.__init__(self, configuration)
        # Per-test state toggles (read/written by the test cases).
        self.delete_flag = False
        self.terminateFlag = False
        self.deviceid = None
        self.test_fail = False
        self.checkFlag = False
        self.remove_chap_flag = False

    def _change_file_mode(self, filepath):
        # No real files need their mode changed in unit tests.
        pass

    def _parse_volume_type(self, volume):
        """Return canned LUN-creation parameters for *volume*."""
        poolinfo = self._find_pool_info()
        volume_size = self._get_volume_size(poolinfo, volume)
        params = {'LUNType': 0,
                  'WriteType': '1',
                  'PrefetchType': '3',
                  'qos_level': 'Qos-high',
                  'StripUnitSize': '64',
                  'PrefetchValue': '0',
                  'PrefetchTimes': '0',
                  'qos': 'OpenStack_Qos_High',
                  'MirrorSwitch': '1',
                  'tier': 'Tier_high',
                  }
        params['volume_size'] = volume_size
        params['pool_id'] = poolinfo['ID']
        return params

    def _get_snapshotid_by_name(self, snapshot_name):
        return "11"

    def _check_snapshot_exist(self, snapshot_id):
        return True

    def get_partition_id_by_name(self, name):
        return "11"

    def add_lun_to_partition(self, lunid, partition_id):
        pass

    def get_cache_id_by_name(self, name):
        return "11"

    def add_lun_to_cache(self, lunid, cache_id):
        pass

    def call(self, url=False, data=None, method=None):
        """Return the canned JSON response for *url* plus *method*.

        The request body (*data*) is deliberately ignored: the fake's
        response depends only on the normalized URL and HTTP method.
        An unknown command leaves the response as None, so json.loads()
        raises — same failure mode as the original implementation.
        """
        url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
        command = url.replace('/210235G7J20000000000/', '')
        if method:
            command = command + "/" + method
        # Direct dict lookup replaces the original O(n) scan over
        # MAP_COMMAND_TO_FAKE_RESPONSE.keys(); behavior is identical.
        data = MAP_COMMAND_TO_FAKE_RESPONSE.get(command)
        if self.test_fail:
            data = FAKE_ERROR_INFO_RESPONSE
            if command == 'lun/11/GET':
                data = FAKE_ERROR_LUN_INFO_RESPONSE
            self.test_fail = False
        return json.loads(data)
class Fake18000ISCSIStorage(huawei_driver.Huawei18000ISCSIDriver):
    """Huawei 18000 iSCSI driver double wired up to Fake18000Client."""

    def __init__(self, configuration):
        self.configuration = configuration
        self.xml_file_path = configuration.cinder_huawei_conf_file

    def do_setup(self):
        # Substitute the fake REST client so no real array is contacted.
        self.restclient = Fake18000Client(configuration=self.configuration)
class Fake18000FCStorage(huawei_driver.Huawei18000FCDriver):
    """Huawei 18000 FC driver double wired up to Fake18000Client."""

    def __init__(self, configuration):
        self.configuration = configuration
        self.xml_file_path = configuration.cinder_huawei_conf_file

    def do_setup(self):
        # Substitute the fake REST client so no real array is contacted.
        self.restclient = Fake18000Client(configuration=self.configuration)
class Huawei18000ISCSIDriverTestCase(test.TestCase):
    """Unit tests for the Huawei 18000 iSCSI driver.

    All REST traffic is served by Fake18000Client, so no real array is
    required.  Fixes over the original:
    * ``tset_get_iscsi_conf`` was misspelled, so the test was never
      collected; renamed to ``test_get_iscsi_conf``.
    * ``test_delete_snapshot_fail`` called ``delete_volume`` instead of
      ``delete_snapshot`` (the FC twin already did this correctly).
    * ``test_creat_smartx`` renamed to ``test_create_smartx``.
    """

    def setUp(self):
        super(Huawei18000ISCSIDriverTestCase, self).setUp()
        # Generate the fake XML config the driver reads, and make sure
        # everything is cleaned up afterwards.
        self.tmp_dir = tempfile.mkdtemp()
        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self.create_fake_conf_file()
        self.addCleanup(os.remove, self.fake_conf_file)
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
        self.xml_file_path = self.configuration.cinder_huawei_conf_file
        # Retry loops in the driver should not actually sleep.
        self.stubs.Set(time, 'sleep', Fake_sleep)
        driver = Fake18000ISCSIStorage(configuration=self.configuration)
        self.driver = driver
        self.driver.do_setup()
        # Expected values mirroring the canned REST payloads above.
        self.portgroup = 'portgroup-test'
        self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:'
                           ':20503:192.168.1.1',
                           'iqn.2006-08.com.huawei:oceanstor:21000022a:'
                           ':20500:192.168.1.2']
        self.target_ips = ['192.168.1.1',
                           '192.168.1.2']
        self.portgroup_id = 11

    def test_login_success(self):
        deviceid = self.driver.restclient.login()
        self.assertEqual('210235G7J20000000000', deviceid)

    def test_create_volume_success(self):
        self.driver.restclient.login()
        lun_info = self.driver.create_volume(test_volume)
        self.assertEqual('1', lun_info['provider_location'])

    def test_delete_volume_success(self):
        self.driver.restclient.login()
        delete_flag = self.driver.delete_volume(test_volume)
        self.assertTrue(delete_flag)

    def test_create_snapshot_success(self):
        self.driver.restclient.login()
        lun_info = self.driver.create_snapshot(test_volume)
        self.assertEqual(11, lun_info['provider_location'])

    def test_delete_snapshot_success(self):
        self.driver.restclient.login()
        delete_flag = self.driver.delete_snapshot(test_snap)
        self.assertTrue(delete_flag)

    def test_create_volume_from_snapsuccess(self):
        self.driver.restclient.login()
        lun_info = self.driver.create_volume_from_snapshot(test_volume,
                                                           test_volume)
        self.assertEqual('1', lun_info['provider_location'])

    def test_initialize_connection_success(self):
        self.driver.restclient.login()
        iscsi_properties = self.driver.initialize_connection(test_volume,
                                                             FakeConnector)
        self.assertEqual(1, iscsi_properties['data']['target_lun'])

    def test_terminate_connection_success(self):
        self.driver.restclient.login()
        self.driver.restclient.terminateFlag = True
        self.driver.terminate_connection(test_volume, FakeConnector)
        self.assertTrue(self.driver.restclient.terminateFlag)

    def test_get_volume_status(self):
        self.driver.restclient.login()
        data = self.driver.get_volume_stats()
        self.assertEqual('1.1.1', data['driver_version'])

    def test_extend_volume(self):
        self.driver.restclient.login()
        lun_info = self.driver.extend_volume(test_volume, 3)
        self.assertEqual('1', lun_info['provider_location'])

    def test_login_fail(self):
        self.driver.restclient.test_fail = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.restclient.login)

    def test_create_snapshot_fail(self):
        self.driver.restclient.login()
        self.driver.restclient.test_fail = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_snapshot, test_volume)

    def test_create_volume_fail(self):
        self.driver.restclient.login()
        self.driver.restclient.test_fail = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, test_volume)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume, error_volume)

    def test_delete_volume_fail(self):
        self.driver.restclient.login()
        self.driver.restclient.test_fail = True
        delete_flag = self.driver.delete_volume(test_volume)
        self.assertTrue(delete_flag)

    def test_delete_snapshot_fail(self):
        self.driver.restclient.login()
        self.driver.restclient.test_fail = True
        # Originally called delete_volume(); fixed to exercise
        # delete_snapshot, matching Huawei18000FCDriverTestCase.
        delete_flag = self.driver.delete_snapshot(test_snap)
        self.assertTrue(delete_flag)

    def test_initialize_connection_fail(self):
        self.driver.restclient.login()
        self.driver.restclient.test_fail = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.initialize_connection,
                          test_volume, FakeConnector)

    def test_get_default_timeout(self):
        result = huawei_utils.get_default_timeout(self.xml_file_path)
        self.assertEqual('43200', result)

    def test_get_wait_interval(self):
        result = huawei_utils.get_wait_interval(self.xml_file_path,
                                                'LUNReadyWaitInterval')
        self.assertEqual(2, result)

    def test_lun_is_associated_to_lungroup(self):
        self.driver.restclient.login()
        self.driver.restclient.associate_lun_to_lungroup('11', '11')
        result = self.driver.restclient._is_lun_associated_to_lungroup('11',
                                                                       '11')
        self.assertTrue(result)

    def test_lun_is_not_associated_to_lun_group(self):
        self.driver.restclient.login()
        self.driver.restclient.associate_lun_to_lungroup('12', '12')
        self.driver.restclient.remove_lun_from_lungroup('12', '12')
        result = self.driver.restclient._is_lun_associated_to_lungroup('12',
                                                                       '12')
        self.assertFalse(result)

    def test_get_tgtip(self):
        self.driver.restclient.login()
        portg_id = self.driver.restclient.find_tgt_port_group(self.portgroup)
        target_ip = self.driver.restclient._get_tgt_ip_from_portgroup(portg_id)
        self.assertEqual(self.target_ips, target_ip)

    def test_get_iscsi_params(self):
        self.driver.restclient.login()
        (iscsi_iqns, target_ips, portgroup_id) = (
            self.driver.restclient.get_iscsi_params(self.xml_file_path,
                                                    FakeConnector))
        self.assertEqual(self.iscsi_iqns, iscsi_iqns)
        self.assertEqual(self.target_ips, target_ips)
        self.assertEqual(self.portgroup_id, portgroup_id)

    def test_get_lun_conf_params(self):
        self.driver.restclient.login()
        luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
        luninfo['pool_id'] = '0'
        luninfo['volume_size'] = 2
        luninfo['volume_description'] = 'test volume'
        luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
                                                   luninfo)
        self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])

    def test_get_iscsi_conf(self):
        # Originally named 'tset_get_iscsi_conf', so it was never run.
        self.driver.restclient.login()
        iscsiinfo = huawei_utils.get_iscsi_conf(self.xml_file_path)
        self.assertEqual('iqn.1993-08.debian:01:ec2bff7ac3a3',
                         iscsiinfo['Initiator'])

    def test_check_conf_file(self):
        self.driver.restclient.login()
        self.driver.restclient.checkFlag = True
        huawei_utils.check_conf_file(self.xml_file_path)
        self.assertTrue(self.driver.restclient.checkFlag)

    def test_get_conf_host_os_type(self):
        self.driver.restclient.login()
        host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
                                                     self.configuration)
        self.assertEqual('0', host_os)

    def test_find_chap_info(self):
        self.driver.restclient.login()
        tmp_dict = {}
        iscsi_info = {}
        tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
        tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage'
        ini_list = [tmp_dict]
        iscsi_info['Initiator'] = ini_list
        initiator_name = FakeConnector['initiator']
        chapinfo = self.driver.restclient.find_chap_info(iscsi_info,
                                                         initiator_name)
        chap_username, chap_password = chapinfo.split(';')
        self.assertEqual('mm-user', chap_username)
        self.assertEqual('mm-user@storage', chap_password)

    def test_find_alua_info(self):
        self.driver.restclient.login()
        tmp_dict = {}
        iscsi_info = {}
        tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
        tmp_dict['ALUA'] = '1'
        ini_list = [tmp_dict]
        iscsi_info['Initiator'] = ini_list
        initiator_name = FakeConnector['initiator']
        # Local renamed from 'type' to avoid shadowing the builtin.
        alua_type = self.driver.restclient._find_alua_info(iscsi_info,
                                                           initiator_name)
        self.assertEqual('1', alua_type)

    def test_find_pool_info(self):
        self.driver.restclient.login()
        pools = {
            "error": {"code": 0},
            "data": [{
                "NAME": "test001",
                "ID": "0",
                "USERFREECAPACITY": "36",
                "USERTOTALCAPACITY": "48",
                "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE},
                {"NAME": "test002",
                 "ID": "1",
                 "USERFREECAPACITY": "37",
                 "USERTOTALCAPACITY": "49",
                 "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE}]}
        # Block-storage pool is found.
        pool_name = 'test001'
        test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'}
        pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
        self.assertEqual(test_info, pool_info)
        # File-system pools must not be returned.
        pool_name = 'test002'
        test_info = {}
        pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
        self.assertEqual(test_info, pool_info)
        # Unknown pool name yields an empty result.
        pool_name = 'test000'
        test_info = {}
        pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
        self.assertEqual(test_info, pool_info)

    def test_get_smartx_specs_opts(self):
        self.driver.restclient.login()
        smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts)
        self.assertEqual('3', smartx_opts['policy'])

    @mock.patch.object(huawei_utils, 'get_volume_qos',
                       return_value={'MAXIOPS': '100',
                                     'IOType': '2'})
    def test_create_smartqos(self, mock_qos_value):
        self.driver.restclient.login()
        lun_info = self.driver.create_volume(test_volume)
        self.assertEqual('1', lun_info['provider_location'])

    @mock.patch.object(huawei_utils, 'get_volume_params',
                       return_value={'smarttier': 'true',
                                     'smartcache': 'true',
                                     'smartpartition': 'true',
                                     'thin_provisioning_support': 'true',
                                     'thick_provisioning_support': False,
                                     'policy': '2',
                                     'cachename': 'cache-test',
                                     'partitionname': 'partition-test'})
    def test_create_smartx(self, mock_volume_types):
        # Originally misspelled 'test_creat_smartx'.
        self.driver.restclient.login()
        lun_info = self.driver.create_volume(test_volume)
        self.assertEqual('1', lun_info['provider_location'])

    def create_fake_conf_file(self):
        """Create a fake config file.

        Huawei storage uses a customized XML configuration file to set
        its driver parameters, so the unit tests must simulate one.
        """
        doc = minidom.Document()
        config = doc.createElement('config')
        doc.appendChild(config)
        storage = doc.createElement('Storage')
        config.appendChild(storage)
        controllerip0 = doc.createElement('ControllerIP0')
        controllerip0_text = doc.createTextNode('10.10.10.1')
        controllerip0.appendChild(controllerip0_text)
        storage.appendChild(controllerip0)
        controllerip1 = doc.createElement('ControllerIP1')
        controllerip1_text = doc.createTextNode('10.10.10.2')
        controllerip1.appendChild(controllerip1_text)
        storage.appendChild(controllerip1)
        username = doc.createElement('UserName')
        username_text = doc.createTextNode('admin')
        username.appendChild(username_text)
        storage.appendChild(username)
        userpassword = doc.createElement('UserPassword')
        userpassword_text = doc.createTextNode('Admin@storage')
        userpassword.appendChild(userpassword_text)
        storage.appendChild(userpassword)
        url = doc.createElement('RestURL')
        url_text = doc.createTextNode('http://100.115.10.69:8082/'
                                      'deviceManager/rest/')
        url.appendChild(url_text)
        storage.appendChild(url)
        storagepool = doc.createElement('StoragePool')
        pool_text = doc.createTextNode('OpenStack_Pool')
        storagepool.appendChild(pool_text)
        storage.appendChild(storagepool)
        lun = doc.createElement('LUN')
        config.appendChild(lun)
        storagepool = doc.createElement('StoragePool')
        pool_text = doc.createTextNode('OpenStack_Pool')
        storagepool.appendChild(pool_text)
        lun.appendChild(storagepool)
        timeout = doc.createElement('Timeout')
        timeout_text = doc.createTextNode('43200')
        timeout.appendChild(timeout_text)
        lun.appendChild(timeout)
        lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
        lun_ready_wait_interval_text = doc.createTextNode('2')
        lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
        lun.appendChild(lun_ready_wait_interval)
        prefetch = doc.createElement('Prefetch')
        prefetch.setAttribute('Type', '1')
        prefetch.setAttribute('Value', '0')
        lun.appendChild(prefetch)
        iscsi = doc.createElement('iSCSI')
        config.appendChild(iscsi)
        defaulttargetip = doc.createElement('DefaultTargetIP')
        defaulttargetip_text = doc.createTextNode('100.115.10.68')
        defaulttargetip.appendChild(defaulttargetip_text)
        iscsi.appendChild(defaulttargetip)
        initiator = doc.createElement('Initiator')
        initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
        initiator.setAttribute('TargetIP', '192.168.1.2')
        initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage')
        initiator.setAttribute('ALUA', '1')
        initiator.setAttribute('TargetPortGroup', 'portgroup-test')
        iscsi.appendChild(initiator)
        host = doc.createElement('Host')
        host.setAttribute('HostIP', '100.97.10.30')
        host.setAttribute('OSType', 'Linux')
        config.appendChild(host)
        fakefile = open(self.fake_conf_file, 'w')
        fakefile.write(doc.toprettyxml(indent=''))
        fakefile.close()
class Huawei18000FCDriverTestCase(test.TestCase):
    """Unit tests for the Huawei 18000 FC driver (fake REST backend)."""

    def setUp(self):
        super(Huawei18000FCDriverTestCase, self).setUp()
        # Generate the fake XML config the driver reads, and register
        # cleanups so nothing leaks between tests.
        self.tmp_dir = tempfile.mkdtemp()
        self.fake_conf_file = self.tmp_dir + '/cinder_huawei_conf.xml'
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self.create_fake_conf_file()
        self.addCleanup(os.remove, self.fake_conf_file)
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.cinder_huawei_conf_file = self.fake_conf_file
        self.xml_file_path = self.configuration.cinder_huawei_conf_file
        # Retry loops in the driver should not actually sleep.
        self.stubs.Set(time, 'sleep', Fake_sleep)
        driver = Fake18000FCStorage(configuration=self.configuration)
        self.driver = driver
        self.driver.do_setup()
def test_login_success(self):
deviceid = self.driver.restclient.login()
self.assertEqual('210235G7J20000000000', deviceid)
def test_create_volume_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_create_snapshot_success(self):
self.driver.restclient.login()
lun_info = self.driver.create_snapshot(test_volume)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.restclient.login()
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_create_volume_from_snapsuccess(self):
self.driver.restclient.login()
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_initialize_connection_success(self):
self.driver.restclient.login()
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.restclient.login()
self.driver.restclient.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.restclient.terminateFlag)
def test_get_volume_status(self):
self.driver.restclient.login()
data = self.driver.get_volume_stats()
self.assertEqual('1.1.1', data['driver_version'])
def test_extend_volume(self):
self.driver.restclient.login()
lun_info = self.driver.extend_volume(test_volume, 3)
self.assertEqual('1', lun_info['provider_location'])
def test_login_fail(self):
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.restclient.login)
def test_create_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_volume)
def test_create_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, error_volume)
def test_delete_volume_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_volume(test_volume)
self.assertTrue(delete_flag)
def test_delete_snapshot_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
delete_flag = self.driver.delete_snapshot(test_snap)
self.assertTrue(delete_flag)
def test_initialize_connection_fail(self):
self.driver.restclient.login()
self.driver.restclient.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_get_default_timeout(self):
result = huawei_utils.get_default_timeout(self.xml_file_path)
self.assertEqual('43200', result)
def test_get_wait_interval(self):
result = huawei_utils.get_wait_interval(self.xml_file_path,
'LUNReadyWaitInterval')
self.assertEqual(2, result)
def test_lun_is_associated_to_lungroup(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('11', '11')
result = self.driver.restclient._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.restclient.login()
self.driver.restclient.associate_lun_to_lungroup('12', '12')
self.driver.restclient.remove_lun_from_lungroup('12', '12')
result = self.driver.restclient._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
def test_get_lun_conf_params(self):
self.driver.restclient.login()
luninfo = huawei_utils.get_lun_conf_params(self.xml_file_path)
luninfo['pool_id'] = '0'
luninfo['volume_size'] = 2
luninfo['volume_description'] = 'test volume'
luninfo = huawei_utils.init_lun_parameters('5mFHcBv4RkCcD+JyrWc0SA',
luninfo)
self.assertEqual('5mFHcBv4RkCcD+JyrWc0SA', luninfo['NAME'])
def test_check_conf_file(self):
self.driver.restclient.login()
self.driver.restclient.checkFlag = True
huawei_utils.check_conf_file(self.xml_file_path)
self.assertTrue(self.driver.restclient.checkFlag)
def test_get_conf_host_os_type(self):
self.driver.restclient.login()
host_os = huawei_utils.get_conf_host_os_type('100.97.10.30',
self.configuration)
self.assertEqual('0', host_os)
def create_fake_conf_file(self):
"""Create a fake Config file
Huawei storage customize a XML configuration file,
the configuration file is used to set the Huawei storage custom
parameters, therefore, in the UT test we need to simulate such a
configuration file
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('ControllerIP0')
controllerip0_text = doc.createTextNode('10.10.10.1')
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
controllerip1 = doc.createElement('ControllerIP1')
controllerip1_text = doc.createTextNode('10.10.10.2')
controllerip1.appendChild(controllerip1_text)
storage.appendChild(controllerip1)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
storage.appendChild(storagepool)
lun = doc.createElement('LUN')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
storagepool.appendChild(pool_text)
lun.appendChild(storagepool)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('100.115.10.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.1.2')
iscsi.appendChild(initiator)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
host = doc.createElement('Host')
host.setAttribute('HostIP', '100.97.10.30')
host.setAttribute('OSType', 'Linux')
config.appendChild(host)
fakefile = open(self.fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
| |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from contextlib import contextmanager
from pants.backend.native.config.environment import (GCCCppToolchain, GCCCToolchain,
LLVMCppToolchain, LLVMCToolchain, Platform)
from pants.backend.native.register import rules as native_backend_rules
from pants.backend.native.subsystems.binaries.gcc import GCC
from pants.backend.native.subsystems.binaries.llvm import LLVM
from pants.backend.native.subsystems.libc_dev import LibcDev
from pants.backend.native.subsystems.native_toolchain import NativeToolchain
from pants.util.contextutil import environment_as, pushd, temporary_dir
from pants.util.dirutil import is_executable, safe_open
from pants.util.process_handler import subprocess
from pants.util.strutil import safe_shlex_join
from pants_test.engine.scheduler_test_base import SchedulerTestBase
from pants_test.subsystem.subsystem_util import global_subsystem_instance, init_subsystems
from pants_test.test_base import TestBase
class TestNativeToolchain(TestBase, SchedulerTestBase):
    """End-to-end tests for the native (C/C++) toolchain subsystems.

    Each test resolves a toolchain product (GCC or LLVM, C or C++) through
    the engine, then shells out to the real compiler/linker binaries to
    check their reported versions and that they can build and run a
    hello-world program.
    """

    def setUp(self):
        super(TestNativeToolchain, self).setUp()
        init_subsystems([LibcDev, NativeToolchain], options={
            'libc': {
                'enable_libc_search': True,
            },
        })
        self.platform = Platform.create()
        self.toolchain = global_subsystem_instance(NativeToolchain)
        self.rules = native_backend_rules()
        # Record the expected compiler versions so the *_version tests can
        # match them in the binaries' --version output.
        gcc_subsystem = global_subsystem_instance(GCC)
        self.gcc_version = gcc_subsystem.version()
        llvm_subsystem = global_subsystem_instance(LLVM)
        self.llvm_version = llvm_subsystem.version()

    def _sched(self, *args, **kwargs):
        """Create a scheduler wired with the native backend rules."""
        return self.mk_scheduler(rules=self.rules, *args, **kwargs)

    def test_gcc_version(self):
        """`gcc --version` should report the version the GCC subsystem expects."""
        scheduler = self._sched()
        gcc_c_toolchain = self.execute_expecting_one_result(
            scheduler, GCCCToolchain, self.toolchain).value
        gcc = gcc_c_toolchain.c_toolchain.c_compiler
        gcc_version_out = self._invoke_capturing_output(
            [gcc.exe_filename, '--version'],
            env=gcc.as_invocation_environment_dict)
        gcc_version_regex = re.compile('^gcc.*{}$'.format(re.escape(self.gcc_version)),
                                       flags=re.MULTILINE)
        self.assertIsNotNone(gcc_version_regex.search(gcc_version_out))

    def test_gpp_version(self):
        """`g++ --version` should report the version the GCC subsystem expects."""
        scheduler = self._sched()
        gcc_cpp_toolchain = self.execute_expecting_one_result(
            scheduler, GCCCppToolchain, self.toolchain).value
        gpp = gcc_cpp_toolchain.cpp_toolchain.cpp_compiler
        gpp_version_out = self._invoke_capturing_output(
            [gpp.exe_filename, '--version'],
            env=gpp.as_invocation_environment_dict)
        gpp_version_regex = re.compile(r'^g\+\+.*{}$'.format(re.escape(self.gcc_version)),
                                       flags=re.MULTILINE)
        self.assertIsNotNone(gpp_version_regex.search(gpp_version_out))

    def test_clang_version(self):
        """`clang --version` should report the version the LLVM subsystem expects."""
        scheduler = self._sched()
        llvm_c_toolchain = self.execute_expecting_one_result(
            scheduler, LLVMCToolchain, self.toolchain).value
        clang = llvm_c_toolchain.c_toolchain.c_compiler
        clang_version_out = self._invoke_capturing_output(
            [clang.exe_filename, '--version'],
            env=clang.as_invocation_environment_dict)
        clang_version_regex = re.compile('^clang version {}'.format(re.escape(self.llvm_version)),
                                         flags=re.MULTILINE)
        self.assertIsNotNone(clang_version_regex.search(clang_version_out))

    def test_clangpp_version(self):
        """`clang++ --version` should report the version the LLVM subsystem expects."""
        scheduler = self._sched()
        clangpp_version_regex = re.compile('^clang version {}'.format(re.escape(self.llvm_version)),
                                           flags=re.MULTILINE)
        llvm_cpp_toolchain = self.execute_expecting_one_result(
            scheduler, LLVMCppToolchain, self.toolchain).value
        clangpp = llvm_cpp_toolchain.cpp_toolchain.cpp_compiler
        clanggpp_version_out = self._invoke_capturing_output(
            [clangpp.exe_filename, '--version'],
            env=clangpp.as_invocation_environment_dict)
        self.assertIsNotNone(clangpp_version_regex.search(clanggpp_version_out))

    @contextmanager
    def _hello_world_source_environment(self, toolchain_type, file_name, contents):
        """Write *contents* to *file_name* in a fresh temp dir, resolve the
        requested toolchain product, and yield it with the temp dir as cwd."""
        with temporary_dir() as tmpdir:
            scheduler = self._sched(work_dir=tmpdir)
            source_file_path = os.path.join(tmpdir, file_name)
            with safe_open(source_file_path, mode='wb') as fp:
                fp.write(contents)
            toolchain = self.execute_expecting_one_result(scheduler, toolchain_type, self.toolchain).value
            with pushd(tmpdir):
                yield toolchain

    def _invoke_compiler(self, compiler, args):
        """Run *compiler* with its own extra args plus *args*; return output."""
        cmd = [compiler.exe_filename] + compiler.extra_args + args
        return self._invoke_capturing_output(
            cmd,
            compiler.as_invocation_environment_dict)

    def _invoke_linker(self, linker, args):
        """Run *linker* with its own extra args plus *args*; return output."""
        cmd = [linker.exe_filename] + linker.extra_args + args
        return self._invoke_capturing_output(
            cmd,
            linker.as_invocation_environment_dict)

    def _invoke_capturing_output(self, cmd, env=None):
        """Run *cmd* under *env* (default: a copy of the current environment),
        returning combined stdout/stderr; raise with full context on failure."""
        if env is None:
            env = os.environ.copy()
        try:
            with environment_as(**env):
                return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            raise Exception(
                "Command failed while invoking the native toolchain "
                "with code '{code}', cwd='{cwd}', cmd='{cmd}', env='{env}'. "
                "Combined stdout and stderr:\n{out}"
                .format(code=e.returncode,
                        cwd=os.getcwd(),
                        # safe_shlex_join() is just for pretty-printing.
                        cmd=safe_shlex_join(cmd),
                        env=env,
                        out=e.output),
                e)

    def _do_compile_link(self, compiler, linker, source_file, outfile, output):
        """Compile *source_file* into an object file, link it into *outfile*,
        execute the result, and assert it prints *output* plus a newline."""
        intermediate_obj_file_name = '{}.o'.format(outfile)
        self._invoke_compiler(
            compiler,
            ['-c', source_file, '-o', intermediate_obj_file_name])
        self.assertTrue(os.path.isfile(intermediate_obj_file_name))
        self._invoke_linker(
            linker,
            [intermediate_obj_file_name, '-o', outfile])
        self.assertTrue(is_executable(outfile))
        program_out = self._invoke_capturing_output([os.path.abspath(outfile)])
        self.assertEqual((output + '\n'), program_out)

    def test_hello_c_gcc(self):
        """The GCC C toolchain can build and run a hello-world C program."""
        with self._hello_world_source_environment(GCCCToolchain, 'hello.c', contents="""
#include "stdio.h"
int main() {
  printf("%s\\n", "I C the world!");
}
""") as gcc_c_toolchain:
            c_toolchain = gcc_c_toolchain.c_toolchain
            compiler = c_toolchain.c_compiler
            linker = c_toolchain.c_linker
            self._do_compile_link(compiler, linker, 'hello.c', 'hello_gcc', "I C the world!")

    def test_hello_c_clang(self):
        """The LLVM C toolchain can build and run a hello-world C program."""
        with self._hello_world_source_environment(LLVMCToolchain, 'hello.c', contents="""
#include "stdio.h"
int main() {
  printf("%s\\n", "I C the world!");
}
""") as llvm_c_toolchain:
            c_toolchain = llvm_c_toolchain.c_toolchain
            compiler = c_toolchain.c_compiler
            linker = c_toolchain.c_linker
            self._do_compile_link(compiler, linker, 'hello.c', 'hello_clang', "I C the world!")

    def test_hello_cpp_gpp(self):
        """The GCC C++ toolchain can build and run a hello-world C++ program."""
        with self._hello_world_source_environment(GCCCppToolchain, 'hello.cpp', contents="""
#include <iostream>
int main() {
  std::cout << "I C the world, ++ more!" << std::endl;
}
""") as gcc_cpp_toolchain:
            cpp_toolchain = gcc_cpp_toolchain.cpp_toolchain
            compiler = cpp_toolchain.cpp_compiler
            linker = cpp_toolchain.cpp_linker
            self._do_compile_link(compiler, linker, 'hello.cpp', 'hello_gpp', "I C the world, ++ more!")

    def test_hello_cpp_clangpp(self):
        """The LLVM C++ toolchain can build and run a hello-world C++ program."""
        with self._hello_world_source_environment(LLVMCppToolchain, 'hello.cpp', contents="""
#include <iostream>
int main() {
  std::cout << "I C the world, ++ more!" << std::endl;
}
""") as llvm_cpp_toolchain:
            cpp_toolchain = llvm_cpp_toolchain.cpp_toolchain
            compiler = cpp_toolchain.cpp_compiler
            linker = cpp_toolchain.cpp_linker
            self._do_compile_link(compiler, linker, 'hello.cpp', 'hello_clangpp',
                                  "I C the world, ++ more!")
| |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from datetime import date
from datetime import datetime
from freezegun import freeze_time
from mock import patch
from ggrc.app import db
from ggrc.models import Notification, Person
from ggrc.notifications import common
from ggrc_workflows.models import Cycle, CycleTaskGroupObjectTask
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc_workflows.generator import WorkflowsGenerator
class TestOneTimeWfEndDateChange(TestCase):
    """ This class contains simple one time workflow tests that are not
    in the gsheet test grid.

    The tests verify which digest notifications (cycle_started, due_in,
    due_today) are pending on given (frozen) dates, and how moving task end
    dates forward/backward reschedules those notifications.
    """

    def setUp(self):
        TestCase.setUp(self)
        self.api = Api()
        self.wf_generator = WorkflowsGenerator()
        self.object_generator = ObjectGenerator()
        # Start each test from an empty notification table.
        Notification.query.delete()
        self.random_objects = self.object_generator.generate_random_objects(2)
        _, self.user = self.object_generator.generate_person(
            user_role="gGRC Admin")
        self.create_test_cases()

    def init_decorator(init):
        """Wrap Notification.__init__ so created_at is set via datetime.now(),
        which freeze_time controls — notifications then appear to be created
        at the frozen test time rather than the real wall-clock time."""
        def new_init(self, *args, **kwargs):
            init(self, *args, **kwargs)
            # Only override when the model actually has the attribute.
            if hasattr(self, "created_at"):
                self.created_at = datetime.now()
        return new_init

    # Monkey-patch applied once at class-definition time; it affects every
    # Notification created while this module's tests run.
    Notification.__init__ = init_decorator(Notification.__init__)

    @patch("ggrc.notifications.common.send_email")
    def test_no_date_change(self, mock_mail):
        """Notifications fire on the original schedule when no date changes."""
        def get_person(person_id):
            # Fetch a fresh Person row (the session may have expired objects).
            return db.session.query(Person).filter(Person.id == person_id).one()

        with freeze_time("2015-04-10 03:21:34"):
            _, workflow = self.wf_generator.generate_workflow(
                self.one_time_workflow_1)
            _, cycle = self.wf_generator.generate_cycle(workflow)
            self.wf_generator.activate_workflow(workflow)
        with freeze_time("2015-04-11 03:21:34"):
            user = get_person(self.user.id)
            _, notif_data = common.get_todays_notifications()
            self.assertIn("cycle_started", notif_data[user.email])
        with freeze_time("2015-05-02 03:21:34"):
            _, notif_data = common.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("cycle_started", notif_data[user.email])
            self.assertNotIn("due_in", notif_data[user.email])
            self.assertNotIn("due_today", notif_data[user.email])
        with freeze_time("2015-05-02 03:21:34"):
            common.send_todays_digest_notifications()
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
            # one email to the owner and one to the assignee
            self.assertEqual(mock_mail.call_count, 2)
        with freeze_time("2015-05-04 03:21:34"):  # one day before due date
            _, notif_data = common.get_todays_notifications()
            user = get_person(self.user.id)
            self.assertIn("due_in", notif_data[user.email])
            self.assertEqual(len(notif_data[user.email]["due_in"]), 2)
        with freeze_time("2015-05-04 03:21:34"):  # one day before due date
            common.send_todays_digest_notifications()
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
            # one more digest email on top of the two sent earlier
            self.assertEqual(mock_mail.call_count, 3)
        with freeze_time("2015-05-05 03:21:34"):  # due date
            _, notif_data = common.get_todays_notifications()
            self.assertIn("due_today", notif_data[user.email])
            self.assertEqual(len(notif_data[user.email]["due_today"]), 2)

    @patch("ggrc.notifications.common.send_email")
    def test_move_end_date_to_future(self, mock_mail):
        """
        test moving the end date to the future, before due_in and due_today
        notifications have been sent
        """
        def get_person(person_id):
            # Fetch a fresh Person row (the session may have expired objects).
            return db.session.query(Person).filter(Person.id == person_id).one()

        with freeze_time("2015-04-10 03:21:34"):
            _, workflow = self.wf_generator.generate_workflow(
                self.one_time_workflow_1)
            _, cycle = self.wf_generator.generate_cycle(workflow)
            self.wf_generator.activate_workflow(workflow)
        with freeze_time("2015-04-11 03:21:34"):
            user = get_person(self.user.id)
            _, notif_data = common.get_todays_notifications()
            self.assertIn("cycle_started", notif_data[user.email])
        with freeze_time("2015-05-02 03:21:34"):
            _, notif_data = common.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("cycle_started", notif_data[user.email])
            self.assertNotIn("due_in", notif_data[user.email])
            self.assertNotIn("due_today", notif_data[user.email])
        with freeze_time("2015-05-02 03:21:34"):
            common.send_todays_digest_notifications()
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
            # one email to the owner and one to the assignee
            self.assertEqual(mock_mail.call_count, 2)
        with freeze_time("2015-05-03 03:21:34"):
            # Push both task end dates from 2015-05-05 out to 2015-05-15.
            cycle = Cycle.query.get(cycle.id)
            task1 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[0].id)
            task2 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[1].id)
            self.wf_generator.modify_object(
                task1, data={"end_date": date(2015, 5, 15)})
            self.wf_generator.modify_object(
                task2, data={"end_date": date(2015, 5, 15)})
        with freeze_time("2015-05-04 03:21:34"):  # one day before the old due date
            _, notif_data = common.get_todays_notifications()
            user = get_person(self.user.id)
            self.assertEqual(notif_data, {})
        with freeze_time("2015-05-05 03:21:34"):  # old due date
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
        with freeze_time("2015-05-14 03:21:34"):  # one day before the new due date
            _, notif_data = common.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("due_in", notif_data[user.email])
            self.assertEqual(len(notif_data[user.email]["due_in"]),
                             len(self.random_objects))
        with freeze_time("2015-05-15 03:21:34"):  # new due date
            _, notif_data = common.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            # yesterday's mail has not been sent, so due_in is still pending
            self.assertIn("due_in", notif_data[user.email])
            self.assertIn("due_today", notif_data[user.email])
            self.assertEqual(len(notif_data[user.email]["due_today"]),
                             len(self.random_objects))

    @patch("ggrc.notifications.common.send_email")
    def test_move_end_date_to_past(self, mock_mail):
        """Moving end dates into the past suppresses due_in/due_today entirely."""
        def get_person(person_id):
            # Fetch a fresh Person row (the session may have expired objects).
            return db.session.query(Person).filter(Person.id == person_id).one()

        with freeze_time("2015-04-10 03:21:34"):
            _, workflow = self.wf_generator.generate_workflow(
                self.one_time_workflow_1)
            _, cycle = self.wf_generator.generate_cycle(workflow)
            self.wf_generator.activate_workflow(workflow)
        with freeze_time("2015-05-02 03:21:34"):
            common.send_todays_digest_notifications()
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
            # one email to the owner and one to the assignee
            self.assertEqual(mock_mail.call_count, 2)
        with freeze_time("2015-05-03 03:21:34"):
            # Move both task end dates back to 2015-05-01 (already past).
            cycle = Cycle.query.get(cycle.id)
            task1 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[0].id)
            task2 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[1].id)
            self.wf_generator.modify_object(
                task1, data={"end_date": date(2015, 5, 1)})
            self.wf_generator.modify_object(
                task2, data={"end_date": date(2015, 5, 1)})
        with freeze_time("2015-05-03 03:21:34"):
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
        with freeze_time("2015-05-04 03:21:34"):  # one day before the old due date
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
        with freeze_time("2015-05-05 03:21:34"):  # old due date
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})

    @patch("ggrc.notifications.common.send_email")
    def test_move_end_date_to_today(self, mock_mail):
        """Moving one task's end date to today yields due_today immediately."""
        def get_person(person_id):
            # Fetch a fresh Person row (the session may have expired objects).
            return db.session.query(Person).filter(Person.id == person_id).one()

        with freeze_time("2015-04-10 03:21:34"):
            _, workflow = self.wf_generator.generate_workflow(
                self.one_time_workflow_1)
            _, cycle = self.wf_generator.generate_cycle(workflow)
            self.wf_generator.activate_workflow(workflow)
        with freeze_time("2015-05-02 03:21:34"):
            common.send_todays_digest_notifications()
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})
            # one email to the owner and one to the assignee
            self.assertEqual(mock_mail.call_count, 2)
        with freeze_time("2015-05-03 03:21:34"):
            # task1 becomes due today (05-03); task2 becomes due tomorrow.
            cycle = Cycle.query.get(cycle.id)
            task1 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[0].id)
            task2 = CycleTaskGroupObjectTask.query.get(
                cycle.cycle_task_group_object_tasks[1].id)
            self.wf_generator.modify_object(
                task1, data={"end_date": date(2015, 5, 3)})
            self.wf_generator.modify_object(
                task2, data={"end_date": date(2015, 5, 4)})
        with freeze_time("2015-05-03 03:21:34"):  # task1 due date
            user = get_person(self.user.id)
            _, notif_data = common.get_todays_notifications()
            self.assertNotEquals(notif_data, {})
            self.assertIn(user.email, notif_data)
            self.assertIn("due_today", notif_data[user.email])
            self.assertIn("due_in", notif_data[user.email])
            self.assertEqual(len(notif_data[user.email]["due_today"]), 1)
            common.send_todays_digest_notifications()
        with freeze_time("2015-05-04 03:21:34"):  # task2 due date
            user = get_person(self.user.id)
            _, notif_data = common.get_todays_notifications()
            self.assertIn(user.email, notif_data)
            self.assertIn("due_today", notif_data[user.email])
            self.assertNotIn("due_in", notif_data[user.email])
            common.send_todays_digest_notifications()
        with freeze_time("2015-05-05 03:21:34"):  # nothing left pending
            _, notif_data = common.get_todays_notifications()
            self.assertEqual(notif_data, {})

    def create_test_cases(self):
        """Build the workflow payload used by every test: one task group with
        two tasks, both running 2015-05-01 through 2015-05-05."""
        def person_dict(person_id):
            """Minimal person stub in the API's reference format."""
            return {
                "href": "/api/people/%d" % person_id,
                "id": person_id,
                "type": "Person"
            }

        self.one_time_workflow_1 = {
            "title": "one time test workflow",
            "notify_on_change": True,
            "description": "some test workflow",
            "owners": [person_dict(self.user.id)],
            "task_groups": [{
                "title": "one time task group",
                "contact": person_dict(self.user.id),
                "task_group_tasks": [{
                    "title": "task 1",
                    "description": "some task",
                    "contact": person_dict(self.user.id),
                    "start_date": date(2015, 5, 1),  # friday
                    "end_date": date(2015, 5, 5),
                }, {
                    "title": "task 2",
                    "description": "some task 2",
                    "contact": person_dict(self.user.id),
                    "start_date": date(2015, 5, 1),  # friday
                    "end_date": date(2015, 5, 5),
                }],
                "task_group_objects": self.random_objects
            }]
        }
| |
"""A wrapper to use IOCTL_SCSI_PASS_THROUGH_DIRECT"""
from comtypes import GUID
from ctypes import *
from ctypes.wintypes import *
import string
import sys
from win32file import *
from . import *
from .. import *
# Device I/O control codes (precomputed CTL_CODE values from the Windows DDK).
IOCTL_SCSI_PASS_THROUGH_DIRECT = 0x4d014
IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x2d1080
# Transfer-direction values for SCSI_PASS_THROUGH_DIRECT.DataIn.
SCSI_IOCTL_DATA_OUT = 0
SCSI_IOCTL_DATA_IN = 1
SCSI_IOCTL_DATA_UNSPECIFIED = 2
class SCSI_PASS_THROUGH_DIRECT(Structure):
    """ctypes mirror of the Win32 SCSI_PASS_THROUGH_DIRECT structure
    (ntddscsi.h), used with IOCTL_SCSI_PASS_THROUGH_DIRECT."""
    _fields_ = [
        ('Length', c_ushort),            # sizeof(SCSI_PASS_THROUGH_DIRECT)
        ('ScsiStatus', c_ubyte),         # SCSI status byte returned by the device
        ('PathId', c_ubyte),
        ('TargetId', c_ubyte),
        ('Lun', c_ubyte),
        ('CdbLength', c_ubyte),          # number of valid bytes in Cdb
        ('SenseInfoLength', c_ubyte),
        ('DataIn', c_ubyte),             # one of the SCSI_IOCTL_DATA_* values
        ('DataTransferLength', c_ulong),
        ('TimeOutValue', c_ulong),       # seconds
        ('DataBuffer', c_void_p),
        ('SenseInfoOffset', c_ulong),    # offset of sense buffer from struct start
        ('Cdb', c_ubyte * 16),
    ]
class SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER(Structure):
    """SCSI_PASS_THROUGH_DIRECT followed by an inline sense-data buffer, the
    layout DeviceIoControl expects for this IOCTL."""
    _fields_ = [
        ('sptd', SCSI_PASS_THROUGH_DIRECT),
        ('Filler', c_ulong),             # padding for alignment
        ('ucSenseBuf', c_ubyte * 32),    # receives SCSI sense data on error
    ]
class SP_DEVICE_INTERFACE_DATA(Structure):
    """ctypes mirror of SetupAPI's SP_DEVICE_INTERFACE_DATA (setupapi.h)."""
    _fields_ = [
        ('cbSize', DWORD),               # must be set to sizeof() before use
        ('InterfaceClassGuid', GUID),
        ('Flags', DWORD),
        ('Reserved', POINTER(ULONG)),
    ]
class SP_DEVICE_INTERFACE_DETAIL_DATA(Structure):
    """ctypes mirror of SP_DEVICE_INTERFACE_DETAIL_DATA_W; DevicePath is a
    variable-length string, so callers resize() the instance to fit."""
    # NOTE(review): the SDK documents pack 8 (64-bit) / 1 (32-bit) for this
    # struct; 4/2 here presumably yields the same field offsets for these
    # member types — confirm on both architectures.
    _pack_ = 4 if sys.maxsize > 2**32 else 2
    _fields_ = [
        ('cbSize', DWORD),               # size of the fixed part only
        ('DevicePath', WCHAR * 1),       # actually variable-length (see resize)
    ]
class SP_DEVINFO_DATA(Structure):
    """ctypes mirror of SetupAPI's SP_DEVINFO_DATA (setupapi.h)."""
    _fields_ = [
        ('cbSize', DWORD),               # must be set to sizeof() before use
        ('ClassGuid', GUID),
        ('DevInst', DWORD),              # devinst handle usable with CM_Get_* APIs
        ('Reserved', POINTER(ULONG)),
    ]
class STORAGE_DEVICE_NUMBER(Structure):
    """Output of IOCTL_STORAGE_GET_DEVICE_NUMBER: identifies a physical
    storage device by type and number."""
    _fields_ = [
        ('DeviceType', DWORD),
        ('DeviceNumber', ULONG),
        ('PartitionNumber', ULONG),
    ]
# --- SetupAPI / CfgMgr32 function prototypes (restype/argtypes declared so
# --- ctypes marshals arguments correctly on both 32- and 64-bit Python).
SetupDiGetClassDevs = windll.setupapi.SetupDiGetClassDevsW
SetupDiGetClassDevs.restype = HANDLE
SetupDiGetClassDevs.argtypes = [POINTER(GUID), LPCWSTR, HWND, DWORD]
SetupDiDestroyDeviceInfoList = windll.setupapi.SetupDiDestroyDeviceInfoList
SetupDiDestroyDeviceInfoList.restype = BOOL
SetupDiDestroyDeviceInfoList.argtypes = [HANDLE]
SetupDiEnumDeviceInterfaces = windll.setupapi.SetupDiEnumDeviceInterfaces
SetupDiEnumDeviceInterfaces.restype = BOOL
SetupDiEnumDeviceInterfaces.argtypes = [HANDLE, c_void_p, POINTER(GUID), DWORD, POINTER(SP_DEVICE_INTERFACE_DATA)]
SetupDiGetDeviceInterfaceDetail = windll.setupapi.SetupDiGetDeviceInterfaceDetailW
SetupDiGetDeviceInterfaceDetail.restype = BOOL
SetupDiGetDeviceInterfaceDetail.argtypes = [HANDLE, POINTER(SP_DEVICE_INTERFACE_DATA), POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA), DWORD, POINTER(DWORD), POINTER(SP_DEVINFO_DATA)]
# Device-tree traversal: CM_Get_Child/CM_Get_Sibling return a CONFIGRET code
# (0 == CR_SUCCESS) and write the next devinst into the first argument.
CM_Get_Child = windll.CfgMgr32.CM_Get_Child
CM_Get_Child.restype = DWORD
CM_Get_Child.argtypes = [POINTER(DWORD), DWORD, ULONG]
CM_Get_Sibling = windll.CfgMgr32.CM_Get_Sibling
CM_Get_Sibling.restype = DWORD
CM_Get_Sibling.argtypes = [POINTER(DWORD), DWORD, ULONG]
# Well-known device interface class GUIDs.
GUID_DEVINTERFACE_USB_DEVICE = GUID('{A5DCBF10-6530-11D2-901F-00C04FB951ED}')
GUID_DEVINTERFACE_DISK = GUID('{53F56307-B6BF-11D0-94F2-00A0C91EFB8B}')
# SetupDiGetClassDevs flags.
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 16
class MscContext(BaseUsbContext):
    """USB context that enumerates and opens mass-storage (MSC) devices on
    Windows via logical drive letters."""

    def __init__(self):
        super(MscContext, self).__init__('Windows-MSC', USB_CLASS_MSC)

    def listDevices(self, vendor):
        """Yield detected MSC devices whose idVendor matches *vendor*."""
        for device in _listDevices():
            if device.idVendor == vendor:
                yield device

    def openDevice(self, device):
        """Return a SCSI driver bound to the device's logical drive."""
        return _MscDriver(device.handle)
def _listDeviceClass(guid):
    """Yield (devinst, devicePath) for every present device interface of the
    interface class *guid*."""
    handle = SetupDiGetClassDevs(byref(guid), None, None, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT)
    if handle == INVALID_HANDLE_VALUE:
        raise Exception('SetupDiGetClassDevs failed')
    i = 0
    interfaceData = SP_DEVICE_INTERFACE_DATA()
    interfaceData.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA)
    # Enumeration ends when SetupDiEnumDeviceInterfaces returns FALSE.
    while SetupDiEnumDeviceInterfaces(handle, None, byref(guid), i, byref(interfaceData)):
        # First call with a NULL output buffer just to learn the required size.
        size = c_ulong(0)
        SetupDiGetDeviceInterfaceDetail(handle, byref(interfaceData), None, 0, byref(size), None)
        interfaceDetailData = SP_DEVICE_INTERFACE_DETAIL_DATA()
        interfaceDetailData.cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA)
        # Grow the instance so the variable-length DevicePath fits.
        resize(interfaceDetailData, size.value)
        devInfoData = SP_DEVINFO_DATA()
        devInfoData.cbSize = sizeof(SP_DEVINFO_DATA)
        if not SetupDiGetDeviceInterfaceDetail(handle, byref(interfaceData), byref(interfaceDetailData), size, None, byref(devInfoData)):
            raise Exception('SetupDiGetDeviceInterfaceDetail failed')
        # Read DevicePath in place; it extends past the declared 1-WCHAR field.
        yield devInfoData.DevInst, wstring_at(byref(interfaceDetailData, SP_DEVICE_INTERFACE_DETAIL_DATA.DevicePath.offset))
        i += 1
    if not SetupDiDestroyDeviceInfoList(handle):
        raise Exception('SetupDiDestroyDeviceInfoList failed')
def _listDeviceChildren(inst):
    """Yield the devinst handles of all direct children of device *inst*."""
    node = DWORD(inst)
    getter = CM_Get_Child
    # CM_Get_Child/CM_Get_Sibling return CR_SUCCESS (0) while nodes remain;
    # after the first child, walk the sibling chain.
    while getter(byref(node), node, 0) == 0:
        yield node.value
        getter = CM_Get_Sibling
def _listLogicalDrives(type=DRIVE_REMOVABLE):
    """Yield device paths ('\\\\.\\X:') of logical drives of the given type
    (removable drives by default)."""
    bitmap = GetLogicalDrives()
    for index, letter in enumerate(string.ascii_uppercase):
        # Bit i of the bitmap is set when drive letter i exists.
        if not (bitmap & (1 << index)):
            continue
        if GetDriveType('%s:\\' % letter) == type:
            yield '\\\\.\\%s:' % letter
def _getStorageNumber(path):
    """Return (DeviceType, DeviceNumber) for the device at *path*, or None
    when the query fails (deliberately best-effort: some volumes do not
    support IOCTL_STORAGE_GET_DEVICE_NUMBER)."""
    handle = CreateFile(path, 0, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None)
    try:
        deviceNumber = STORAGE_DEVICE_NUMBER()
        try:
            DeviceIoControl(handle, IOCTL_STORAGE_GET_DEVICE_NUMBER, None, deviceNumber)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the best-effort None result is kept.
            return None
        return deviceNumber.DeviceType, deviceNumber.DeviceNumber
    finally:
        # Always release the handle, even on an unexpected error.
        CloseHandle(handle)
def _listDevices():
    """Lists all detected mass storage devices.

    Yields UsbDeviceHandle objects whose handle is the logical drive path of
    the device's (first) removable disk.
    """
    # Similar to what calibre does: https://github.com/kovidgoyal/calibre/blob/master/src/calibre/devices/winusb.py
    # Map (DeviceType, DeviceNumber) -> logical drive path for removable drives.
    logicalDrives = dict((_getStorageNumber(l), l) for l in _listLogicalDrives())
    disks = dict(_listDeviceClass(GUID_DEVINTERFACE_DISK))
    usbDevices = dict(_listDeviceClass(GUID_DEVINTERFACE_USB_DEVICE))
    for usbInst, usbPath in usbDevices.items():
        # Walk the USB device's children in the device tree looking for a
        # disk node that maps to one of the removable logical drives.
        for diskInst in _listDeviceChildren(usbInst):
            if diskInst in disks:
                storageNumber = _getStorageNumber(disks[diskInst])
                if storageNumber and storageNumber in logicalDrives:
                    idVendor, idProduct = parseDeviceId(usbPath)
                    yield UsbDeviceHandle(logicalDrives[storageNumber], idVendor, idProduct)
                break  # only return the first disk for every device
class _MscDriver(BaseMscDriver):
    """Communicate with a USB mass storage device"""

    def __init__(self, device):
        # device: logical drive path (e.g. '\\.\X:') the SCSI commands target.
        self.device = device

    def _sendScsiCommand(self, command, direction, data):
        """Issue the raw SCSI CDB *command* via IOCTL_SCSI_PASS_THROUGH_DIRECT.

        direction is one of the SCSI_IOCTL_DATA_* constants; data is a ctypes
        buffer for the transfer (or None). Returns MSC_SENSE_OK on success or
        the parsed sense data on a recoverable SCSI error; raises when the
        device reports an error without usable sense data.
        """
        sptd = SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER(sptd = SCSI_PASS_THROUGH_DIRECT(
            Length = sizeof(SCSI_PASS_THROUGH_DIRECT),
            DataIn = direction,
            DataTransferLength = sizeof(data) if data else 0,
            DataBuffer = cast(data, c_void_p),
            CdbLength = len(command),
            # CDBs shorter than 16 bytes are zero-padded.
            Cdb = (c_ubyte * 16).from_buffer_copy(command.ljust(16, b'\0')),
            TimeOutValue = 5,
            SenseInfoLength = SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER.ucSenseBuf.size,
            SenseInfoOffset = SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER.ucSenseBuf.offset,
        ))
        handle = CreateFile('\\\\.\\%s' % self.device, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None)
        result = DeviceIoControl(handle, IOCTL_SCSI_PASS_THROUGH_DIRECT, sptd, sizeof(SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER))
        CloseHandle(handle)
        # Non-zero ScsiStatus means the device rejected or failed the command.
        if SCSI_PASS_THROUGH_DIRECT.from_buffer_copy(result).ScsiStatus != 0:
            sense = parseMscSense(result[SCSI_PASS_THROUGH_DIRECT_WITH_BUFFER.ucSenseBuf.offset:])
            # Sense data claiming OK despite a bad status is unusable context.
            if sense == MSC_SENSE_OK:
                raise Exception('Mass storage error')
            return sense
        return MSC_SENSE_OK

    def sendCommand(self, command):
        """Send a CDB with no data transfer."""
        return self._sendScsiCommand(command, SCSI_IOCTL_DATA_UNSPECIFIED, None)

    def sendWriteCommand(self, command, data):
        """Send a CDB writing *data* (bytes) to the device."""
        buffer = (c_ubyte * len(data)).from_buffer_copy(data)
        return self._sendScsiCommand(command, SCSI_IOCTL_DATA_OUT, buffer)

    def sendReadCommand(self, command, size):
        """Send a CDB reading *size* bytes; return (sense_status, payload)."""
        buffer = (c_ubyte * size)()
        status = self._sendScsiCommand(command, SCSI_IOCTL_DATA_IN, buffer)
        return status, bytes(bytearray(buffer))
| |
from __future__ import print_function, unicode_literals
import logging
import os
import re
import sys
from rbtools.api.errors import APIError
from rbtools.commands import Command, CommandError, Option, OptionGroup
from rbtools.utils.commands import (AlreadyStampedError,
get_review_request,
stamp_commit_with_review_url)
from rbtools.utils.console import confirm
from rbtools.utils.review_request import (get_draft_or_current_value,
get_revisions,
guess_existing_review_request)
class Post(Command):
"""Create and update review requests."""
name = 'post'
author = 'The Review Board Project'
description = 'Uploads diffs to create and update review requests.'
args = '[revisions]'
GUESS_AUTO = 'auto'
GUESS_YES = 'yes'
GUESS_NO = 'no'
GUESS_YES_INPUT_VALUES = (True, 'yes', 1, '1')
GUESS_NO_INPUT_VALUES = (False, 'no', 0, '0')
GUESS_CHOICES = (GUESS_AUTO, GUESS_YES, GUESS_NO)
option_list = [
OptionGroup(
name='Posting Options',
description='Controls the behavior of a post, including what '
'review request gets posted and how, and what '
'happens after it is posted.',
option_list=[
Option('-u', '--update',
dest='update',
action='store_true',
default=False,
help='Automatically determines the existing review '
'request to update.',
added_in='0.5.3'),
Option('-r', '--review-request-id',
dest='rid',
metavar='ID',
default=None,
help='Specifies the existing review request ID to '
'update.'),
Option('-p', '--publish',
dest='publish',
action='store_true',
default=False,
config_key='PUBLISH',
help='Publishes the review request immediately after '
'posting.'
'\n'
'All required fields must already be filled in '
'on the review request or must be provided when '
'posting.'),
Option('-t', '--trivial-publish',
dest='trivial_publish',
action='store_true',
default=False,
help='Publish the review request immediately after '
'posting, but without sending an e-mail '
'notification.',
added_in='0.8.0'),
Option('-o', '--open',
dest='open_browser',
action='store_true',
config_key='OPEN_BROWSER',
default=False,
help='Opens a web browser to the review request '
'after posting.'),
Option('-s', '--stamp',
dest='stamp_when_posting',
action='store_true',
config_key='STAMP_WHEN_POSTING',
default=False,
help='Stamps the commit message with the review '
'request URL while posting the review.',
added_in='0.7.3'),
Option('--submit-as',
dest='submit_as',
metavar='USERNAME',
config_key='SUBMIT_AS',
default=None,
help='The username to use as the author of the '
'review request, instead of the logged in user.',
extended_help=(
"This is useful when used in a repository's "
"post-commit script to update or create review "
"requests. See :ref:`automating-rbt-post` for "
"more information on this use case."
)),
Option('--change-only',
dest='change_only',
action='store_true',
default=False,
help='Updates fields from the change description, '
'but does not upload a new diff '
'(Perforce/Plastic only).'),
Option('--diff-only',
dest='diff_only',
action='store_true',
default=False,
help='Uploads a new diff, but does not update '
'fields from the change description '
'(Perforce/Plastic only).'),
]
),
Command.server_options,
Command.repository_options,
OptionGroup(
name='Review Request Field Options',
description='Options for setting the contents of fields in the '
'review request.',
option_list=[
Option('-g', '--guess-fields',
dest='guess_fields',
action='store',
config_key='GUESS_FIELDS',
nargs='?',
default=GUESS_AUTO,
const=GUESS_YES,
choices=GUESS_CHOICES,
help='Equivalent to setting both --guess-summary '
'and --guess-description.',
extended_help=(
'This can optionally take a value to control the '
'guessing behavior. See :ref:`guessing-behavior` '
'for more information.'
)),
Option('--guess-summary',
dest='guess_summary',
action='store',
config_key='GUESS_SUMMARY',
nargs='?',
default=GUESS_AUTO,
const=GUESS_YES,
choices=GUESS_CHOICES,
help='Generates the Summary field based on the '
'commit messages (Bazaar/Git/Mercurial only).',
extended_help=(
'This can optionally take a value to control the '
'guessing behavior. See :ref:`guessing-behavior` '
'for more information.'
)),
Option('--guess-description',
dest='guess_description',
action='store',
config_key='GUESS_DESCRIPTION',
nargs='?',
default=GUESS_AUTO,
const=GUESS_YES,
choices=GUESS_CHOICES,
help='Generates the Description field based on the '
'commit messages (Bazaar/Git/Mercurial only).',
extended_help=(
'This can optionally take a value to control the '
'guessing behavior. See :ref:`guessing-behavior` '
'for more information.'
)),
Option('-m', '--change-description',
dest='change_description',
default=None,
metavar='TEXT',
help='A description of what changed in this update '
'of the review request. This is ignored for new '
'review requests.'),
Option('--summary',
dest='summary',
metavar='TEXT',
default=None,
help='The new contents for the Summary field.'),
Option('--description',
dest='description',
metavar='TEXT',
default=None,
help='The new contents for the Description field.'),
Option('--description-file',
dest='description_file',
default=None,
metavar='FILENAME',
help='A text file containing the new contents for the '
'Description field.'),
Option('--testing-done',
dest='testing_done',
metavar='TEXT',
default=None,
help='The new contents for the Testing Done field.'),
Option('--testing-done-file',
dest='testing_file',
default=None,
metavar='FILENAME',
help='A text file containing the new contents for the '
'Testing Done field.'),
Option('--branch',
dest='branch',
config_key='BRANCH',
metavar='BRANCH',
default=None,
help='The branch the change will be committed on or '
'affects. This is a free-form field and does not '
'control any behavior.'),
Option('--bugs-closed',
dest='bugs_closed',
metavar='BUG_ID[,...]',
default=None,
help='The comma-separated list of bug IDs closed.'),
Option('--target-groups',
dest='target_groups',
config_key='TARGET_GROUPS',
metavar='NAME[,...]',
default=None,
help='The names of the groups that should perform the '
'review.'),
Option('--target-people',
dest='target_people',
metavar='USERNAME[,...]',
config_key='TARGET_PEOPLE',
default=None,
help='The usernames of the people who should perform '
'the review.'),
Option('--depends-on',
dest='depends_on',
config_key='DEPENDS_ON',
metavar='ID[,...]',
default=None,
help='A comma-separated list of review request IDs '
'that this review request will depend on.',
added_in='0.6.1'),
Option('--markdown',
dest='markdown',
action='store_true',
config_key='MARKDOWN',
default=False,
help='Specifies if the summary, description, and '
'change description should should be interpreted '
'as Markdown-formatted text.'
'\n'
'This is only supported in Review Board 2.0+.',
added_in='0.6'),
]
),
Command.diff_options,
Command.perforce_options,
Command.subversion_options,
Command.tfs_options,
]
def post_process_options(self):
super(Post, self).post_process_options()
# -g implies --guess-summary and --guess-description
if self.options.guess_fields:
self.options.guess_fields = self.normalize_guess_value(
self.options.guess_fields, '--guess-fields')
self.options.guess_summary = self.options.guess_fields
self.options.guess_description = self.options.guess_fields
if self.options.revision_range:
raise CommandError(
'The --revision-range argument has been removed. To post a '
'diff for one or more specific revisions, pass those '
'revisions as arguments. For more information, see the '
'RBTools 0.6 Release Notes.')
if self.options.svn_changelist:
raise CommandError(
'The --svn-changelist argument has been removed. To use a '
'Subversion changelist, pass the changelist name as an '
'additional argument after the command.')
# Only one of --description and --description-file can be used
if self.options.description and self.options.description_file:
raise CommandError('The --description and --description-file '
'options are mutually exclusive.')
# If --description-file is used, read that file
if self.options.description_file:
if os.path.exists(self.options.description_file):
with open(self.options.description_file, 'r') as fp:
self.options.description = fp.read()
else:
raise CommandError(
'The description file %s does not exist.'
% self.options.description_file)
# Only one of --testing-done and --testing-done-file can be used
if self.options.testing_done and self.options.testing_file:
raise CommandError('The --testing-done and --testing-done-file '
'options are mutually exclusive.')
# If --testing-done-file is used, read that file
if self.options.testing_file:
if os.path.exists(self.options.testing_file):
with open(self.options.testing_file, 'r') as fp:
self.options.testing_done = fp.read()
else:
raise CommandError('The testing file %s does not exist.'
% self.options.testing_file)
# If we have an explicitly specified summary, override
# --guess-summary
if self.options.summary:
self.options.guess_summary = self.GUESS_NO
else:
self.options.guess_summary = self.normalize_guess_value(
self.options.guess_summary, '--guess-summary')
# If we have an explicitly specified description, override
# --guess-description
if self.options.description:
self.options.guess_description = self.GUESS_NO
else:
self.options.guess_description = self.normalize_guess_value(
self.options.guess_description, '--guess-description')
# If the --diff-filename argument is used, we can't do automatic
# updating.
if self.options.diff_filename and self.options.update:
raise CommandError('The --update option cannot be used when '
'using --diff-filename.')
# If we have an explicitly specified review request ID, override
# --update
if self.options.rid and self.options.update:
self.options.update = False
if self.options.trivial_publish:
self.options.publish = True
def normalize_guess_value(self, guess, arg_name):
if guess in self.GUESS_YES_INPUT_VALUES:
return self.GUESS_YES
elif guess in self.GUESS_NO_INPUT_VALUES:
return self.GUESS_NO
elif guess == self.GUESS_AUTO:
return guess
else:
raise CommandError('Invalid value "%s" for argument "%s"'
% (guess, arg_name))
def get_repository_path(self, repository_info, api_root):
"""Get the repository path from the server.
This will compare the paths returned by the SCM client
with those one the server, and return the first match.
"""
if isinstance(repository_info.path, list):
repositories = api_root.get_repositories(only_fields='path',
only_links='')
for repo in repositories.all_items:
if repo['path'] in repository_info.path:
repository_info.path = repo['path']
break
if isinstance(repository_info.path, list):
error_str = [
'There was an error creating this review request.\n',
'\n',
'There was no matching repository path found on the server.\n',
'Unknown repository paths found:\n',
]
for foundpath in repository_info.path:
error_str.append('\t%s\n' % foundpath)
error_str += [
'Ask the administrator to add one of these repositories\n',
'to the Review Board server.\n',
]
raise CommandError(''.join(error_str))
return repository_info.path
    def post_request(self, repository_info, repository, server_url, api_root,
                     review_request_id=None, changenum=None, diff_content=None,
                     parent_diff_content=None, commit_id=None,
                     base_commit_id=None,
                     submit_as=None, retries=3, base_dir=None):
        """Creates or updates a review request, and uploads a diff.

        If review_request_id is given, the existing review request is
        fetched and updated; otherwise a new one is created. The diff is
        then uploaded (unless suppressed by change-only mode), and the
        draft fields are filled in from the command options.

        On success the review request id and url are returned.
        """
        # Whether the server's review_requests resource accepts commit IDs.
        supports_posting_commit_ids = \
            self.tool.capabilities.has_capability('review_requests',
                                                  'commit_ids')

        if review_request_id:
            # Updating: fetch the existing review request.
            review_request = get_review_request(
                review_request_id, api_root,
                only_fields='absolute_url,bugs_closed,id,status,public',
                only_links='diffs,draft')

            # A submitted (closed) review request cannot be updated.
            if review_request.status == 'submitted':
                raise CommandError(
                    'Review request %s is marked as %s. In order to update '
                    'it, please reopen the review request and try again.'
                    % (review_request_id, review_request.status))
        else:
            # No review_request_id, so we will create a new review request.
            try:
                # Until we are Python 2.7+ only, the keys in request_data have
                # to be bytes. See bug 3753 for details.
                request_data = {
                    b'repository': repository
                }

                if changenum:
                    request_data[b'changenum'] = changenum
                elif commit_id and supports_posting_commit_ids:
                    request_data[b'commit_id'] = commit_id

                if submit_as:
                    request_data[b'submit_as'] = submit_as

                review_requests = api_root.get_review_requests(
                    only_fields='',
                    only_links='create')
                review_request = review_requests.create(**request_data)
            except APIError as e:
                if e.error_code == 204 and changenum:
                    # The change number is already in use. Get the review
                    # request for that change and update it instead.
                    rid = e.rsp['review_request']['id']
                    review_request = api_root.get_review_request(
                        review_request_id=rid,
                        only_fields='absolute_url,bugs_closed,id,status',
                        only_links='diffs,draft')

                    if not self.options.diff_only:
                        review_request = review_request.update(
                            changenum=changenum)
                else:
                    raise CommandError('Error creating review request: %s' % e)

        # Upload the diff, unless this is a changeset-based repository and
        # the user asked for a change-description-only update.
        if (not repository_info.supports_changesets or
            not self.options.change_only):
            try:
                diff_kwargs = {
                    'parent_diff': parent_diff_content,
                    'base_dir': base_dir,
                }

                if (base_commit_id and
                    self.tool.capabilities.has_capability('diffs',
                                                          'base_commit_ids')):
                    # Both the Review Board server and SCMClient support
                    # base commit IDs, so pass that along when creating
                    # the diff.
                    diff_kwargs['base_commit_id'] = base_commit_id

                review_request.get_diffs(only_fields='').upload_diff(
                    diff_content, **diff_kwargs)
            except APIError as e:
                error_msg = [
                    u'Error uploading diff\n\n',
                ]

                # Translate the common failure codes into friendlier text.
                if e.error_code == 101 and e.http_status == 403:
                    error_msg.append(
                        u'You do not have permissions to modify '
                        u'this review request\n')
                elif e.error_code == 219:
                    error_msg.append(
                        u'The generated diff file was empty. This '
                        u'usually means no files were\n'
                        u'modified in this change.\n')
                else:
                    error_msg.append(str(e).decode('utf-8') + u'\n')

                error_msg.append(
                    u'Your review request still exists, but the diff is '
                    u'not attached.\n')

                error_msg.append(u'%s\n' % review_request.absolute_url)

                raise CommandError(u'\n'.join(error_msg))

        try:
            draft = review_request.get_draft(only_fields='commit_id')
        except APIError as e:
            raise CommandError('Error retrieving review request draft: %s' % e)

        # Stamp the commit message with the review request URL before posting
        # the review, so that we can use the stamped commit message when
        # guessing the description. This enables the stamped message to be
        # present on the review if the user has chosen to publish immediately
        # upon posting.
        if self.options.stamp_when_posting:
            if not self.tool.can_amend_commit:
                print('Cannot stamp review URL onto the commit message; '
                      'stamping is not supported with %s.' % self.tool.name)
            else:
                try:
                    stamp_commit_with_review_url(self.revisions,
                                                 review_request.absolute_url,
                                                 self.tool)
                    print('Stamped review URL onto the commit message.')
                except AlreadyStampedError:
                    print('Commit message has already been stamped')
                except Exception as e:
                    # Stamping is best-effort: log the failure and continue
                    # with the post.
                    logging.debug('Caught exception while stamping the '
                                  'commit message. Proceeding to post '
                                  'without stamping.', exc_info=True)
                    print('Could not stamp review URL onto the commit '
                          'message.')

        # If the user has requested to guess the summary or description,
        # get the commit message and override the summary and description
        # options. The guessing takes place after stamping so that the
        # guessed description matches the commit when rbt exits.
        if not self.options.diff_filename:
            self.check_guess_fields()

        # Update the review request draft fields based on options set
        # by the user, or configuration.
        update_fields = {}

        if self.options.target_groups:
            update_fields['target_groups'] = self.options.target_groups

        if self.options.target_people:
            update_fields['target_people'] = self.options.target_people

        if self.options.depends_on:
            update_fields['depends_on'] = self.options.depends_on

        if self.options.summary:
            update_fields['summary'] = self.options.summary

        if self.options.branch:
            update_fields['branch'] = self.options.branch

        if self.options.bugs_closed:
            # Append to the existing list of bugs.
            self.options.bugs_closed = self.options.bugs_closed.strip(', ')
            bug_set = (set(re.split('[, ]+', self.options.bugs_closed)) |
                       set(review_request.bugs_closed))
            self.options.bugs_closed = ','.join(bug_set)
            update_fields['bugs_closed'] = self.options.bugs_closed

        if self.options.description:
            update_fields['description'] = self.options.description

        if self.options.testing_done:
            update_fields['testing_done'] = self.options.testing_done

        if ((self.options.description or self.options.testing_done) and
            self.options.markdown and
            self.tool.capabilities.has_capability('text', 'markdown')):
            # The user specified that their Description/Testing Done are
            # valid Markdown, so tell the server so it won't escape the text.
            update_fields['text_type'] = 'markdown'

        if self.options.publish:
            update_fields['public'] = True

            if (self.options.trivial_publish and
                self.tool.capabilities.has_capability('review_requests',
                                                      'trivial_publish')):
                update_fields['trivial'] = True

        if self.options.change_description is not None:
            # Change descriptions only make sense on an already-public
            # review request.
            if review_request.public:
                update_fields['changedescription'] = \
                    self.options.change_description

                if (self.options.markdown and
                    self.tool.capabilities.has_capability('text', 'markdown')):
                    update_fields['changedescription_text_type'] = 'markdown'
                else:
                    update_fields['changedescription_text_type'] = 'plain'
            else:
                logging.error(
                    'The change description field can only be set when '
                    'publishing an update. Use --description instead.')

        # Keep the draft's commit ID in sync with what we computed locally.
        if supports_posting_commit_ids and commit_id != draft.commit_id:
            update_fields['commit_id'] = commit_id or ''

        if update_fields:
            try:
                draft = draft.update(**update_fields)
            except APIError as e:
                raise CommandError(
                    'Error updating review request draft: %s\n\n'
                    'Your review request still exists, but the diff is not '
                    'attached.\n\n'
                    '%s\n'
                    % (e, review_request.absolute_url))

        return review_request.id, review_request.absolute_url
def check_guess_fields(self):
"""Checks and handles field guesses for the review request.
This will attempt to guess the values for the summary and
description fields, based on the contents of the commit message
at the provided revisions, if requested by the caller.
If the backend doesn't support guessing, or if guessing isn't
requested, or if explicit values were set in the options, nothing
will be set for the fields.
"""
is_new_review_request = (not self.options.rid and
not self.options.update)
guess_summary = (
self.options.guess_summary == self.GUESS_YES or
(self.options.guess_summary == self.GUESS_AUTO and
is_new_review_request))
guess_description = (
self.options.guess_description == self.GUESS_YES or
(self.options.guess_description == self.GUESS_AUTO and
is_new_review_request))
if self.revisions and (guess_summary or guess_description):
try:
commit_message = self.tool.get_commit_message(self.revisions)
if commit_message:
if guess_summary:
self.options.summary = commit_message['summary']
if guess_description:
self.options.description = \
commit_message['description']
except NotImplementedError:
# The SCMClient doesn't support getting commit messages,
# so we can't provide the guessed versions.
pass
def _ask_review_request_match(self, review_request):
question = ("Update Review Request #%s: '%s'? "
% (review_request.id,
get_draft_or_current_value(
'summary', review_request)))
return confirm(question)
def main(self, *args):
"""Create and update review requests."""
# The 'args' tuple must be made into a list for some of the
# SCM Clients code. The way arguments were structured in
# post-review meant this was a list, and certain parts of
# the code base try and concatenate args to the end of
# other lists. Until the client code is restructured and
# cleaned up we will satisfy the assumption here.
self.cmd_args = list(args)
self.post_process_options()
origcwd = os.path.abspath(os.getcwd())
repository_info, self.tool = self.initialize_scm_tool(
client_name=self.options.repository_type)
server_url = self.get_server_url(repository_info, self.tool)
api_client, api_root = self.get_api(server_url)
self.setup_tool(self.tool, api_root=api_root)
if (self.options.exclude_patterns and
not self.tool.supports_diff_exclude_patterns):
raise CommandError(
'The %s backend does not support excluding files via the '
'-X/--exclude commandline options or the EXCLUDE_PATTERNS '
'.reviewboardrc option.' % self.tool.name)
# Check if repository info on reviewboard server match local ones.
repository_info = repository_info.find_server_repository_info(api_root)
if self.options.diff_filename:
self.revisions = None
parent_diff = None
base_commit_id = None
commit_id = None
if self.options.diff_filename == '-':
if hasattr(sys.stdin, 'buffer'):
# Make sure we get bytes on Python 3.x
diff = sys.stdin.buffer.read()
else:
diff = sys.stdin.read()
else:
try:
diff_path = os.path.join(origcwd,
self.options.diff_filename)
with open(diff_path, 'rb') as fp:
diff = fp.read()
except IOError as e:
raise CommandError('Unable to open diff filename: %s' % e)
else:
self.revisions = get_revisions(self.tool, self.cmd_args)
if self.revisions:
extra_args = None
else:
extra_args = self.cmd_args
# Generate a diff against the revisions or arguments, filtering
# by the requested files if provided.
diff_info = self.tool.diff(
revisions=self.revisions,
include_files=self.options.include_files or [],
exclude_patterns=self.options.exclude_patterns or [],
extra_args=extra_args)
diff = diff_info['diff']
parent_diff = diff_info.get('parent_diff')
base_commit_id = diff_info.get('base_commit_id')
commit_id = diff_info.get('commit_id')
repository = (
self.options.repository_name or
self.options.repository_url or
self.get_repository_path(repository_info, api_root))
base_dir = self.options.basedir or repository_info.base_path
if repository is None:
raise CommandError('Could not find the repository on the Review '
'Board server.')
if len(diff) == 0:
raise CommandError("There don't seem to be any diffs!")
# Validate the diffs to ensure that they can be parsed and that
# all referenced files can be found.
#
# Review Board 2.0.14+ (with the diffs.validation.base_commit_ids
# capability) is required to successfully validate against hosting
# services that need a base_commit_id. This is basically due to
# the limitations of a couple Git-specific hosting services
# (Beanstalk, Bitbucket, and Unfuddle).
#
# In order to validate, we need to either not be dealing with a
# base commit ID (--diff-filename), or be on a new enough version
# of Review Board, or be using a non-Git repository.
can_validate_base_commit_ids = \
self.tool.capabilities.has_capability('diffs', 'validation',
'base_commit_ids')
if (not base_commit_id or
can_validate_base_commit_ids or
self.tool.name != 'Git'):
# We can safely validate this diff before posting it, but we
# need to ensure we only pass base_commit_id if the capability
# is set.
validate_kwargs = {}
if can_validate_base_commit_ids:
validate_kwargs['base_commit_id'] = base_commit_id
try:
diff_validator = api_root.get_diff_validation()
diff_validator.validate_diff(
repository,
diff,
parent_diff=parent_diff,
base_dir=base_dir,
**validate_kwargs)
except APIError as e:
msg_prefix = ''
if e.error_code == 207:
msg_prefix = '%s: ' % e.rsp['file']
raise CommandError('Error validating diff\n\n%s%s' %
(msg_prefix, e))
except AttributeError:
# The server doesn't have a diff validation resource. Post as
# normal.
pass
if (repository_info.supports_changesets and
not self.options.diff_filename and
'changenum' in diff_info):
changenum = diff_info['changenum']
else:
changenum = self.tool.get_changenum(self.revisions)
# Not all scm clients support get_changenum, so if get_changenum
# returns None (the default for clients that don't have changenums),
# we'll prefer the existing commit_id.
commit_id = changenum or commit_id
if self.options.update and self.revisions:
review_request = guess_existing_review_request(
repository_info, self.options.repository_name, api_root,
api_client, self.tool, self.revisions,
guess_summary=False, guess_description=False,
is_fuzzy_match_func=self._ask_review_request_match,
submit_as=self.options.submit_as)
if not review_request or not review_request.id:
raise CommandError('Could not determine the existing review '
'request to update.')
self.options.rid = review_request.id
# If only certain files within a commit are being submitted for review,
# do not include the commit id. This prevents conflicts if multiple
# files from the same commit are posted for review separately.
if self.options.include_files or self.options.exclude_patterns:
commit_id = None
request_id, review_url = self.post_request(
repository_info,
repository,
server_url,
api_root,
self.options.rid,
changenum=changenum,
diff_content=diff,
parent_diff_content=parent_diff,
commit_id=commit_id,
base_commit_id=base_commit_id,
submit_as=self.options.submit_as,
base_dir=base_dir)
diff_review_url = review_url + 'diff/'
print('Review request #%s posted.' % request_id)
print()
print(review_url)
print(diff_review_url)
# Load the review up in the browser if requested to.
if self.options.open_browser:
try:
import webbrowser
if 'open_new_tab' in dir(webbrowser):
# open_new_tab is only in python 2.5+
webbrowser.open_new_tab(review_url)
elif 'open_new' in dir(webbrowser):
webbrowser.open_new(review_url)
else:
os.system('start %s' % review_url)
except:
logging.error('Error opening review URL: %s' % review_url)
| |
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import mock
import os
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
from ConfigParser import ConfigParser, NoSectionError
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
from nose import SkipTest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from swift.common.middleware.memcache import MemcacheMiddleware
from swift.common.storage_policy import parse_storage_policies, PolicyError
from test import get_config
from test.functional.swift_test_client import Account, Connection, \
ResponseError
# This has the side effect of mocking out the xattr module so that unit tests
# (and in this case, when in-process functional tests are called for) can run
# on file systems that don't support extended attributes.
from test.unit import debug_logger, FakeMemcache
from swift.common import constraints, utils, ring, storage_policy
from swift.common.ring import Ring
from swift.common.wsgi import monkey_patch_mimetools, loadapp
from swift.common.utils import config_true_value
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
# Raise httplib's header-count cap to swift's configured maximum so test
# requests with many headers aren't rejected client-side.
httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT

# When True, _debug() messages are emitted to stderr.
DEBUG = True

# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)

# Imported after the monkey patch so swiftclient binds the patched socket.
from swiftclient import get_auth, http_connection

has_insecure = False
try:
    from swiftclient import __version__ as client_version
    # Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
    client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
    # Pre-PBR we had version, not __version__. Anyhow...
    client_version = '1.2'
from distutils.version import StrictVersion
# swiftclient 2.0+ supports the 'insecure' option.
if StrictVersion(client_version) >= StrictVersion('2.0'):
    has_insecure = True

# Test framework configuration, populated by setup from conf files or the
# in-process servers' effective constraints.
config = {}
web_front_end = None
normalized_urls = None

# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '', '']
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '', '']
swift_test_tenant = ['', '', '', '', '']
swift_test_perm = ['', '', '', '', '']
swift_test_domain = ['', '', '', '', '']
swift_test_user_id = ['', '', '', '', '']
swift_test_tenant_id = ['', '', '', '', '']

# Flags controlling which test accounts/features are skipped; set during
# setup elsewhere in this module.
skip, skip2, skip3, skip_service_tokens = False, False, False, False

orig_collate = ''
insecure = False

# Saved process-wide state so teardown can restore it after in-process runs.
orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None

# In-process server state: test dir, server instances, and greenthreads.
in_process = False
_testdir = _test_servers = _test_coros = None
class FakeMemcacheMiddleware(MemcacheMiddleware):
    """
    Caching middleware variant backed by an in-memory fake, so swift's
    caching works in functional tests even when memcached isn't running.
    """

    def __init__(self, app, conf):
        MemcacheMiddleware.__init__(self, app, conf)
        # Swap the real memcache client for the in-memory fake.
        self.memcache = FakeMemcache()
class InProcessException(BaseException):
    # Raised when the in-process functional test environment cannot be set
    # up (e.g. missing config/ring files or a bad storage policy).
    pass
def _info(msg):
print >> sys.stderr, msg
def _debug(msg):
    """Emit msg via _info() with a 'DEBUG: ' prefix when DEBUG is on."""
    if not DEBUG:
        return
    _info('DEBUG: ' + msg)
def _in_process_setup_swift_conf(swift_conf_src, testdir):
    """Write a swift.conf tailored for in-process functional test runs.

    Reads swift_conf_src, overrides the hash path prefix/suffix and the
    max_file_size constraint, and writes the result to testdir.

    :param swift_conf_src: path of the swift.conf to use as a template
    :param testdir: directory in which to write the test swift.conf
    :returns: path of the generated conf file
    :raises InProcessException: if a required section is missing
    """
    conf = ConfigParser()
    conf.read(swift_conf_src)

    try:
        section = 'swift-hash'
        conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
        conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')

        section = 'swift-constraints'
        # 8 MB + 2
        conf.set(section, 'max_file_size', (8 * 1024 * 1024) + 2)
    except NoSectionError:
        raise InProcessException('Conf file %s is missing section %s'
                                 % (swift_conf_src, section))

    test_conf_file = os.path.join(testdir, 'swift.conf')
    with open(test_conf_file, 'w') as fp:
        conf.write(fp)

    return test_conf_file
def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
"""
Look for a file first in conf_src_dir, if it exists, otherwise optionally
look in the source tree sample 'etc' dir.
:param conf_src_dir: Directory in which to search first for conf file. May
be None
:param conf_file_name: Name of conf file
:param use_sample: If True and the conf_file_name is not found, then return
any sample conf file found in the source tree sample
'etc' dir by appending '-sample' to conf_file_name
:returns: Path to conf file
:raises InProcessException: If no conf file is found
"""
dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir, os.pardir, os.pardir,
'etc'))
conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
conf_file_path = os.path.join(conf_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
if use_sample:
# fall back to using the corresponding sample conf file
conf_file_name += '-sample'
conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
msg = 'Failed to find config file %s' % conf_file_name
raise InProcessException(msg)
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.

    :param swift_conf: path to the test swift.conf (rewritten in place)
    :param conf_src_dir: optional directory to search for a source ring file
    :param testdir: directory in which the test ring file is written
    :returns: list of listening sockets, one per object server device
    :raises InProcessException: on bad policy config or a missing ring file
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    policy_specified = os.environ.get('SWIFT_TEST_POLICY')
    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, v)
    conf.set(sp_zero_section, 'default', True)

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException:
        if policy_specified:
            # A ring file is mandatory for an explicitly requested policy.
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)

    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            # Bug fix: previously the 'sda1' directory was created for every
            # device, leaving devices beyond the second without data dirs.
            # Create the directories for *this* device instead.
            utils.mkdirs(os.path.join(_testdir, device))
            utils.mkdirs(os.path.join(_testdir, device, 'tmp'))
            obj_socket = eventlet.listen(('localhost', 0))
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 2 replicas, 4 partitions, 2 devices
        _info('No source object ring file, creating 2rep/4part/2dev ring')
        obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
        ring_data = ring.RingData(
            [[0, 1, 0, 1], [1, 0, 1, 0]],
            [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
              'port': obj_sockets[0].getsockname()[1]},
             {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
              'port': obj_sockets[1].getsockname()[1]}],
            30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
def in_process_setup(the_object_server=object_server):
    """Spin up a complete in-process swift cluster for functional tests.

    Builds a temp data dir and test config, loads the proxy pipeline with a
    fake memcache, starts proxy/account/container/object WSGI servers on
    eventlet greenthreads, and creates the 'test' and 'test2' accounts.
    Sets the module globals _testdir, config, orig_swift_conf_name,
    orig_hash_path_suff_pref and _test_coros as side effects.
    """
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    # Build a fresh, empty temp dir with the two default device dirs.
    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    # Point the process-wide swift config at the generated test conf,
    # remembering the original so teardown can restore it.
    global orig_swift_conf_name
    orig_swift_conf_name = utils.SWIFT_CONF_FILE
    utils.SWIFT_CONF_FILE = swift_conf
    constraints.reload_constraints()
    storage_policy.SWIFT_CONF_FILE = swift_conf
    storage_policy.reload_storage_policies()
    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict((k, str(v))
                  for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded, somethings wrong
        raise SkipTest
    global orig_hash_path_suff_pref
    orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
    utils.validate_hash_conf()

    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = eventlet.listen(('localhost', 0))

    # The following set of configuration values is used both for the
    # functional test frame work and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': 'tester',
        'password': 'testing',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service'
    })

    # Listening sockets for the two account and two container servers.
    acc1lis = eventlet.listen(('localhost', 0))
    acc2lis = eventlet.listen(('localhost', 0))
    con1lis = eventlet.listen(('localhost', 0))
    con2lis = eventlet.listen(('localhost', 0))

    # Write account/container rings pointing at the listeners above
    # (2 replicas, 4 partitions, 2 devices each).
    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': acc1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': acc2lis.getsockname()[1]}], 30),
                    f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': con1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': con2lis.getsockname()[1]}], 30),
                    f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging other messages by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)

    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    # Instantiate the backend server apps, each with its own debug logger.
    acc1srv = account_server.AccountController(
        config, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        config, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=debug_logger('cont2'))

    objsrvs = [
        (obj_sockets[index],
         the_object_server.ObjectController(
             config, logger=debug_logger('obj%d' % (index + 1))))
        for index in range(len(obj_sockets))
    ]

    logger = debug_logger('proxy')

    def get_logger(name, *args, **kwargs):
        # Force every component loaded by the proxy pipeline to share the
        # proxy debug logger.
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    # Launch every server on its own greenthread.
    nl = utils.NullLogger()
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)

    objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
              for objsrv in objsrvs]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
            resp = conn.getresponse()
            assert(resp.status == 201)

    create_account('AUTH_test')
    create_account('AUTH_test2')
# Cluster capability/constraint info keyed by middleware name; populated
# (lazily) by get_cluster_info() below, consumed by load_constraint() etc.
cluster_info = {}
def get_cluster_info():
    """Populate the module-level ``cluster_info`` dict.

    Effective constraints are resolved in increasing priority: the current
    in-process effective constraints, then whatever the cluster's /info API
    reports, then any overrides from the swift-constraints section of
    test.conf.  The merged result is stored under ``cluster_info['swift']``
    so later code can treat it as if it came from a /info call.
    """
    # The fallback constraints used for testing will come from the current
    # effective constraints.
    eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
    # We'll update those constraints based on what the /info API provides, if
    # anything.
    global cluster_info
    try:
        conn = Connection(config)
        conn.authenticate()
        cluster_info.update(conn.cluster_info())
    except (ResponseError, socket.error):
        # Failed to get cluster_information via /info API, so fall back on
        # test.conf data
        pass
    else:
        try:
            eff_constraints.update(cluster_info['swift'])
        except KeyError:
            # Most likely the swift cluster has "expose_info = false" set
            # in its proxy-server.conf file, so we'll just do the best we
            # can.
            print >>sys.stderr, "** Swift Cluster not exposing /info **"
    # Finally, we'll allow any constraint present in the swift-constraints
    # section of test.conf to override everything. Note that only those
    # constraints defined in the constraints module are converted to integers.
    test_constraints = get_config('swift-constraints')
    for k in constraints.DEFAULT_CONSTRAINTS:
        try:
            test_constraints[k] = int(test_constraints[k])
        except KeyError:
            # constraint not present in test.conf; keep the fallback value
            pass
        except ValueError:
            print >>sys.stderr, "Invalid constraint value: %s = %s" % (
                k, test_constraints[k])
    eff_constraints.update(test_constraints)
    # Just make it look like these constraints were loaded from a /info call,
    # even if the /info call failed, or when they are overridden by values
    # from the swift-constraints section of test.conf
    cluster_info['swift'] = eff_constraints
def setup_package():
    """Prepare the functional-test environment for the whole package.

    Decides between in-process and external-cluster testing (driven by the
    SWIFT_TEST_IN_PROCESS environment variable and/or the presence of a
    usable test.conf), then fills in the module-level auth/user/key/tenant
    globals and the various ``skip*`` flags that individual tests consult.
    Finishes by caching cluster capabilities via get_cluster_info().
    """
    in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
    if in_process_env is not None:
        use_in_process = utils.config_true_value(in_process_env)
    else:
        # Environment variable absent: defer the decision to test.conf below.
        use_in_process = None
    global in_process
    if use_in_process:
        # Explicitly set to True, so barrel on ahead with in-process
        # functional test setup.
        in_process = True
        # NOTE: No attempt is made to read a local test.conf file.
    else:
        if use_in_process is None:
            # Not explicitly set, default to using in-process functional tests
            # if the test.conf file is not found, or does not provide a usable
            # configuration.
            config.update(get_config('func_test'))
            if config:
                in_process = False
            else:
                in_process = True
        else:
            # Explicitly set to False, do not attempt to use in-process
            # functional tests, be sure we attempt to read from local
            # test.conf file.
            in_process = False
            config.update(get_config('func_test'))
    if in_process:
        # SWIFT_TEST_IN_MEMORY_OBJ selects the in-memory object server
        # instead of the disk-backed one.
        in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
        in_mem_obj = utils.config_true_value(in_mem_obj_env)
        try:
            in_process_setup(the_object_server=(
                mem_object_server if in_mem_obj else object_server))
        except InProcessException as exc:
            print >> sys.stderr, ('Exception during in-process setup: %s'
                                  % str(exc))
            raise
    global web_front_end
    web_front_end = config.get('web_front_end', 'integral')
    global normalized_urls
    normalized_urls = config.get('normalized_urls', False)
    # Remember the current collation so teardown_package() can restore it.
    global orig_collate
    orig_collate = locale.setlocale(locale.LC_COLLATE)
    locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
    global insecure
    insecure = config_true_value(config.get('insecure', False))
    global swift_test_auth_version
    global swift_test_auth
    global swift_test_user
    global swift_test_key
    global swift_test_tenant
    global swift_test_perm
    global swift_test_domain
    global swift_test_service_prefix
    swift_test_service_prefix = None
    if config:
        swift_test_auth_version = str(config.get('auth_version', '1'))
        swift_test_auth = 'http'
        if config_true_value(config.get('auth_ssl', 'no')):
            swift_test_auth = 'https'
        if 'auth_prefix' not in config:
            config['auth_prefix'] = '/'
        try:
            suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config
            swift_test_auth += suffix
        except KeyError:
            pass  # skip
        if 'service_prefix' in config:
            swift_test_service_prefix = utils.append_underscore(
                config['service_prefix'])
        if swift_test_auth_version == "1":
            # v1 (tempauth-style) credentials: "account:username" strings.
            swift_test_auth += 'v1.0'
            try:
                if 'account' in config:
                    swift_test_user[0] = '%(account)s:%(username)s' % config
                else:
                    swift_test_user[0] = '%(username)s' % config
                swift_test_key[0] = config['password']
            except KeyError:
                # bad config, no account/username configured, tests cannot be
                # run
                pass
            try:
                swift_test_user[1] = '%s%s' % (
                    '%s:' % config['account2'] if 'account2' in config else '',
                    config['username2'])
                swift_test_key[1] = config['password2']
            except KeyError:
                pass  # old config, no second account tests can be run
            try:
                swift_test_user[2] = '%s%s' % (
                    '%s:' % config['account'] if 'account'
                    in config else '', config['username3'])
                swift_test_key[2] = config['password3']
            except KeyError:
                pass  # old config, no third account tests can be run
            try:
                swift_test_user[4] = '%s%s' % (
                    '%s:' % config['account5'], config['username5'])
                swift_test_key[4] = config['password5']
                swift_test_tenant[4] = config['account5']
            except KeyError:
                pass  # no service token tests can be run
            for _ in range(3):
                swift_test_perm[_] = swift_test_user[_]
        else:
            # Keystone-style credentials: separate tenant and username.
            swift_test_user[0] = config['username']
            swift_test_tenant[0] = config['account']
            swift_test_key[0] = config['password']
            swift_test_user[1] = config['username2']
            swift_test_tenant[1] = config['account2']
            swift_test_key[1] = config['password2']
            swift_test_user[2] = config['username3']
            # NOTE(review): user #3 reuses 'account' (not 'account3') --
            # appears deliberate (same-tenant third user); confirm.
            swift_test_tenant[2] = config['account']
            swift_test_key[2] = config['password3']
            if 'username4' in config:
                swift_test_user[3] = config['username4']
                swift_test_tenant[3] = config['account4']
                swift_test_key[3] = config['password4']
                swift_test_domain[3] = config['domain4']
            if 'username5' in config:
                swift_test_user[4] = config['username5']
                swift_test_tenant[4] = config['account5']
                swift_test_key[4] = config['password5']
            for _ in range(5):
                swift_test_perm[_] = swift_test_tenant[_] + ':' \
                    + swift_test_user[_]
    # Compute the skip flags; each later flag also requires the earlier
    # configuration to be present.
    global skip
    skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
    if skip:
        print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'
    global skip2
    skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
    if not skip and skip2:
        print >>sys.stderr, \
            'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \
            ' DUE TO NO CONFIG FOR THEM'
    global skip3
    skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
    if not skip and skip3:
        print >>sys.stderr, \
            'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'
    global skip_if_not_v3
    skip_if_not_v3 = (swift_test_auth_version != '3'
                      or not all([not skip,
                                  swift_test_user[3],
                                  swift_test_key[3]]))
    if not skip and skip_if_not_v3:
        print >>sys.stderr, \
            'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3'
    global skip_service_tokens
    skip_service_tokens = not all([not skip, swift_test_user[4],
                                   swift_test_key[4], swift_test_tenant[4],
                                   swift_test_service_prefix])
    if not skip and skip_service_tokens:
        print >>sys.stderr, \
            'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS'
    get_cluster_info()
def teardown_package():
    """Undo setup_package(): restore locale, delete test data, and, for
    in-process runs, kill the spawned servers and remove the temp dir."""
    global orig_collate
    locale.setlocale(locale.LC_COLLATE, orig_collate)
    # clean up containers and objects left behind after running tests
    conn = Connection(config)
    conn.authenticate()
    account = Account(conn, config.get('account', config['username']))
    account.delete_containers()
    global in_process
    if in_process:
        # Best-effort shutdown: a partially-failed setup may have left some
        # of these unset, so swallow any error and keep tearing down.
        try:
            for server in _test_coros:
                server.kill()
        except Exception:
            pass
        try:
            rmtree(os.path.dirname(_testdir))
        except Exception:
            pass
        # Restore globals that were presumably patched during in-process
        # setup (orig_* values are saved elsewhere in this module -- confirm).
        utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = \
            orig_hash_path_suff_pref
        utils.SWIFT_CONF_FILE = orig_swift_conf_name
        constraints.reload_constraints()
class AuthError(Exception):
    """Raised when the cluster answers a request with 401 Unauthorized."""
class InternalServerError(Exception):
    """Raised when the cluster answers a request with a 5xx status."""
# Per-account caches, one slot for each of the five configured test users:
# storage URL, auth token, service token, parsed URL and HTTP connection.
url = [None] * 5
token = [None] * 5
service_token = [None] * 5
parsed = [None] * 5
conn = [None] * 5
def connection(url):
    """Open an http_connection to *url*, forwarding the module's insecure
    flag only when the client library supports it (has_insecure)."""
    extra = {'insecure': insecure} if has_insecure else {}
    return http_connection(url, **extra)
def get_url_token(user_index, os_options):
    """Authenticate test user *user_index* and return (storage_url, token)."""
    return get_auth(
        swift_test_auth,
        swift_test_user[user_index],
        swift_test_key[user_index],
        snet=False,
        tenant_name=swift_test_tenant[user_index],
        auth_version=swift_test_auth_version,
        os_options=os_options,
        insecure=insecure)
def retry(func, *args, **kwargs):
    """
    Call ``func(url, token, parsed, conn, *args, **kwargs)`` with retries.

    You can use the kwargs to override:
      'retries' (default: 5)
      'use_account' (default: 1) - which user's token to pass
      'url_account' (default: matches 'use_account') - which user's storage URL
      'resource' (default: url[url_account]) - URL to connect to; retry()
                 will interpolate the variable :storage_url: if present
      'service_user' - add a service token from this user (1 indexed)

    Raises Exception when no attempt produced a result.
    """
    global url, token, service_token, parsed, conn
    retries = kwargs.get('retries', 5)
    attempts, backoff = 0, 1
    # use account #1 by default; turn user's 1-indexed account into 0-indexed
    use_account = kwargs.pop('use_account', 1) - 1
    service_user = kwargs.pop('service_user', None)
    if service_user:
        service_user -= 1  # 0-index
    # access our own account by default
    url_account = kwargs.pop('url_account', use_account + 1) - 1
    os_options = {'user_domain_name': swift_test_domain[use_account],
                  'project_domain_name': swift_test_domain[use_account]}
    # BUGFIX: pop 'resource' once, OUTSIDE the retry loop.  Popping it inside
    # the loop meant a caller-supplied resource was consumed on the first
    # attempt and silently replaced by the default account URL on every
    # subsequent attempt.
    resource = kwargs.pop('resource', '%(storage_url)s')
    while attempts <= retries:
        auth_failure = False
        attempts += 1
        try:
            if not url[use_account] or not token[use_account]:
                # (Re-)authenticate and invalidate any cached connection.
                url[use_account], token[use_account] = get_url_token(
                    use_account, os_options)
                parsed[use_account] = conn[use_account] = None
            if not parsed[use_account] or not conn[use_account]:
                parsed[use_account], conn[use_account] = \
                    connection(url[use_account])
            # default resource is the account url[url_account]
            template_vars = {'storage_url': url[url_account]}
            parsed_result = urlparse(resource % template_vars)
            if isinstance(service_user, int):
                if not service_token[service_user]:
                    dummy, service_token[service_user] = get_url_token(
                        service_user, os_options)
                kwargs['service_token'] = service_token[service_user]
            # NOTE(review): the token is use_account's but the connection is
            # conn[url_account]; cross-account calls appear to assume that
            # connection is already established -- confirm before relying on
            # url_account != use_account.
            return func(url[url_account], token[use_account],
                        parsed_result, conn[url_account],
                        *args, **kwargs)
        except (socket.error, HTTPException):
            if attempts > retries:
                raise
            # Drop the cached connection so the next attempt reconnects.
            parsed[use_account] = conn[use_account] = None
            if service_user:
                service_token[service_user] = None
        except AuthError:
            # Force re-authentication on the next attempt.
            auth_failure = True
            url[use_account] = token[use_account] = None
            if service_user:
                service_token[service_user] = None
        except InternalServerError:
            pass
        if attempts <= retries:
            # Exponential backoff, but retry immediately after auth failures.
            if not auth_failure:
                sleep(backoff)
            backoff *= 2
    raise Exception('No result after %s retries.' % retries)
def check_response(conn):
    """Read the pending response on *conn*, mapping auth (401) and server
    (5xx) failures onto exceptions; return the response otherwise."""
    resp = conn.getresponse()
    status = resp.status
    if status == 401:
        resp.read()
        raise AuthError()
    if status // 100 == 5:
        resp.read()
        raise InternalServerError()
    return resp
def load_constraint(name):
    """Return the integer cluster constraint *name*, or skip the test when
    it is missing or not an int."""
    global cluster_info
    try:
        value = cluster_info['swift'][name]
    except KeyError:
        raise SkipTest("Missing constraint: %s" % name)
    if not isinstance(value, int):
        raise SkipTest("Bad value, %r, for constraint: %s" % (value, name))
    return value
def get_storage_policy_from_cluster_info(info):
    """Split the cluster's storage policies into two lists and return
    (default_policies, non_default_policies)."""
    policies = info['swift'].get('policies', {})
    defaults = []
    others = []
    for policy in policies:
        target = defaults if policy.get('default', {}) else others
        target.append(policy)
    return defaults, others
def reset_acl():
    """Reset account #1's ACLs by POSTing an empty account ACL header."""
    def post(url, token, parsed, conn):
        headers = {
            'X-Auth-Token': token,
            'X-Account-Access-Control': '{}'
        }
        conn.request('POST', parsed.path, '', headers)
        return check_response(conn)
    # Drain the body so the connection can be reused.
    retry(post, use_account=1).read()
def requires_acls(f):
    """Decorator: skip the wrapped test unless the cluster supports
    tempauth account ACLs; reset ACLs around the test run."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        global skip, cluster_info
        if skip or not cluster_info:
            raise SkipTest('Requires account ACLs')
        # Determine whether this cluster has account ACLs; if not, skip test
        if not cluster_info.get('tempauth', {}).get('account_acls'):
            raise SkipTest('Requires account ACLs')
        if swift_test_auth_version != '1':
            # remove when keystoneauth supports account acls
            raise SkipTest('Requires account ACLs')
        reset_acl()
        try:
            return f(*args, **kwargs)
        finally:
            # Always restore a clean ACL state for the next test.
            reset_acl()
    return wrapper
class FunctionalStoragePolicyCollection(object):
    """A queryable collection of storage-policy dicts that tracks the
    (single) default policy."""

    def __init__(self, policies):
        self._all = policies
        self.default = None
        for policy in self:
            if not policy.get('default', False):
                continue
            assert self.default is None, 'Found multiple default ' \
                'policies %r and %r' % (self.default, policy)
            self.default = policy

    @classmethod
    def from_info(cls, info=None):
        """Build a collection from *info*, falling back to (and lazily
        fetching) the cached cluster_info."""
        if not (info or cluster_info):
            get_cluster_info()
        info = info or cluster_info
        try:
            policy_info = info['swift']['policies']
        except KeyError:
            raise AssertionError('Did not find any policy info in %r' % info)
        policies = cls(policy_info)
        assert policies.default, \
            'Did not find default policy in %r' % policy_info
        return policies

    def __len__(self):
        return len(self._all)

    def __iter__(self):
        return iter(self._all)

    def __getitem__(self, index):
        return self._all[index]

    def filter(self, **kwargs):
        """Sub-collection of policies matching every given attribute."""
        matching = [p for p in self
                    if all(p.get(k) == v for k, v in kwargs.items())]
        return self.__class__(matching)

    def exclude(self, **kwargs):
        """Sub-collection of policies differing on every given attribute."""
        differing = [p for p in self
                     if all(p.get(k) != v for k, v in kwargs.items())]
        return self.__class__(differing)

    def select(self):
        """Pick one policy at random."""
        return random.choice(self)
def requires_policies(f):
    """Decorator: skip the wrapped test unless at least two storage
    policies are available on the cluster."""
    @functools.wraps(f)
    def _skip_unless_multiple_policies(self, *args, **kwargs):
        if skip:
            raise SkipTest
        try:
            policies = FunctionalStoragePolicyCollection.from_info()
        except AssertionError:
            raise SkipTest("Unable to determine available policies")
        self.policies = policies
        if len(self.policies) < 2:
            raise SkipTest("Multiple policies not enabled")
        return f(self, *args, **kwargs)
    return _skip_unless_multiple_policies
| |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import os
import unittest
from hashlib import sha1
from base64 import b64encode
from unittest.mock import Mock
from autobahn.websocket.protocol import WebSocketServerProtocol
from autobahn.websocket.protocol import WebSocketServerFactory
from autobahn.websocket.protocol import WebSocketClientProtocol
from autobahn.websocket.protocol import WebSocketClientFactory
from autobahn.websocket.protocol import WebSocketProtocol
from autobahn.websocket.types import ConnectingRequest
from autobahn.testutil import FakeTransport
import txaio
class WebSocketClientProtocolTests(unittest.TestCase):
    """
    Tests for WebSocketClientProtocol, driven against a FakeTransport so no
    real network I/O takes place.
    """
    def setUp(self):
        # Wire a client protocol to a fake transport; handshake bytes are
        # captured by the transport rather than actually sent.
        t = FakeTransport()
        f = WebSocketClientFactory()
        p = WebSocketClientProtocol()
        p.factory = f
        p.transport = t
        p._create_transport_details = Mock()
        p._connectionMade()
        p.state = p.STATE_OPEN
        p.websocket_version = 18
        self.protocol = p
        self.transport = t

    def tearDown(self):
        # Cancel any timers the protocol scheduled so nothing fires after
        # the test finishes.
        for call in [
            self.protocol.autoPingPendingCall,
            self.protocol.autoPingTimeoutCall,
            self.protocol.openHandshakeTimeoutCall,
            self.protocol.closeHandshakeTimeoutCall,
        ]:
            if call is not None:
                call.cancel()

    def test_auto_ping(self):
        """Completing the client handshake with autoPingInterval set must
        schedule an automatic ping (autoPingPendingCall)."""
        self.protocol.autoPingInterval = 1
        self.protocol.websocket_protocols = [Mock()]
        self.protocol.websocket_extensions = []
        self.protocol._onOpen = lambda: None
        self.protocol._wskey = '0' * 24
        self.protocol.peer = Mock()
        # usually provided by the Twisted or asyncio specific
        # subclass, but we're testing the parent here...
        self.protocol._onConnect = Mock()
        self.protocol._closeConnection = Mock()
        # set up a connection
        self.protocol._actuallyStartHandshake(
            ConnectingRequest(
                host="example.com",
                port=80,
                resource="/ws",
            )
        )
        key = self.protocol.websocket_key + WebSocketProtocol._WS_MAGIC
        # Feed back a valid 101 response whose Sec-Websocket-Accept matches
        # the key the protocol just generated.
        self.protocol.data = (
            b"HTTP/1.1 101 Switching Protocols\x0d\x0a"
            b"Upgrade: websocket\x0d\x0a"
            b"Connection: upgrade\x0d\x0a"
            b"Sec-Websocket-Accept: " + b64encode(sha1(key).digest()) + b"\x0d\x0a\x0d\x0a"
        )
        self.protocol.processHandshake()
        self.assertTrue(self.protocol.autoPingPendingCall is not None)
class WebSocketServerProtocolTests(unittest.TestCase):
    """
    Tests for autobahn.websocket.protocol.WebSocketProtocol.
    """
    def setUp(self):
        # Wire a server protocol to a fake transport; written frames are
        # captured in transport._written for inspection.
        t = FakeTransport()
        f = WebSocketServerFactory()
        p = WebSocketServerProtocol()
        p.factory = f
        p.transport = t
        p._connectionMade()
        p.state = p.STATE_OPEN
        p.websocket_version = 18
        self.protocol = p
        self.transport = t

    def tearDown(self):
        # Cancel any timers the protocol scheduled so nothing fires after
        # the test finishes.
        for call in [
            self.protocol.autoPingPendingCall,
            self.protocol.autoPingTimeoutCall,
            self.protocol.openHandshakeTimeoutCall,
            self.protocol.closeHandshakeTimeoutCall,
        ]:
            if call is not None:
                call.cancel()

    def test_auto_ping(self):
        """Succeeding the server handshake with autoPingInterval set must
        schedule an automatic ping (autoPingPendingCall)."""
        proto = Mock()
        proto._get_seconds = Mock(return_value=1)
        self.protocol.autoPingInterval = 1
        self.protocol.websocket_protocols = [proto]
        self.protocol.websocket_extensions = []
        self.protocol._onOpen = lambda: None
        self.protocol._wskey = '0' * 24
        self.protocol.succeedHandshake(proto)
        self.assertTrue(self.protocol.autoPingPendingCall is not None)

    def test_sendClose_none(self):
        """
        sendClose with no code or reason works.
        """
        self.protocol.sendClose()
        # We closed properly (0x88 = FIN + close opcode, zero-length payload)
        self.assertEqual(self.transport._written, b"\x88\x00")
        self.assertEqual(self.protocol.state, self.protocol.STATE_CLOSING)

    def test_sendClose_str_reason(self):
        """
        sendClose with a str reason works.
        """
        self.protocol.sendClose(code=1000, reason="oh no")
        # We closed properly (payload after the 2-byte header is the code
        # 1000 == 0x03e8 followed by the reason text)
        self.assertEqual(self.transport._written[2:], b"\x03\xe8oh no")
        self.assertEqual(self.protocol.state, self.protocol.STATE_CLOSING)

    def test_sendClose_unicode_reason(self):
        """
        sendClose with a unicode reason works.
        """
        self.protocol.sendClose(code=1000, reason="oh no")
        # We closed properly
        self.assertEqual(self.transport._written[2:], b"\x03\xe8oh no")
        self.assertEqual(self.protocol.state, self.protocol.STATE_CLOSING)

    def test_sendClose_toolong(self):
        """
        sendClose with a too-long reason will truncate it.
        """
        self.protocol.sendClose(code=1000, reason="abc" * 1000)
        # We closed properly; the reason is truncated (close payloads are
        # limited to 125 bytes: 2 for the code + 123 of reason)
        self.assertEqual(self.transport._written[2:],
                         b"\x03\xe8" + (b"abc" * 41))
        self.assertEqual(self.protocol.state, self.protocol.STATE_CLOSING)

    def test_sendClose_reason_with_no_code(self):
        """
        Trying to sendClose with a reason but no code will raise an Exception.
        """
        with self.assertRaises(Exception) as e:
            self.protocol.sendClose(reason="abc")
        self.assertIn("close reason without close code", str(e.exception))
        # We shouldn't have closed
        self.assertEqual(self.transport._written, b"")
        self.assertEqual(self.protocol.state, self.protocol.STATE_OPEN)

    def test_sendClose_invalid_code_type(self):
        """
        Trying to sendClose with a non-int code will raise an Exception.
        """
        with self.assertRaises(Exception) as e:
            self.protocol.sendClose(code="134")
        self.assertIn("invalid type", str(e.exception))
        # We shouldn't have closed
        self.assertEqual(self.transport._written, b"")
        self.assertEqual(self.protocol.state, self.protocol.STATE_OPEN)

    def test_sendClose_invalid_code_value(self):
        """
        Trying to sendClose with a non-valid int code will raise an Exception.
        """
        with self.assertRaises(Exception) as e:
            self.protocol.sendClose(code=10)
        self.assertIn("invalid close code 10", str(e.exception))
        # We shouldn't have closed
        self.assertEqual(self.transport._written, b"")
        self.assertEqual(self.protocol.state, self.protocol.STATE_OPEN)
# Only defined (and therefore collected) when the USE_TWISTED environment
# variable is set, since these tests need the Twisted-specific subclass.
if os.environ.get('USE_TWISTED', False):
    class TwistedProtocolTests(unittest.TestCase):
        """
        Tests which require a specific framework's protocol class to work
        (in this case, using Twisted)
        """
        def setUp(self):
            # Imported here so the module still loads when Twisted is absent.
            from autobahn.twisted.websocket import WebSocketServerProtocol
            from autobahn.twisted.websocket import WebSocketServerFactory
            t = FakeTransport()
            f = WebSocketServerFactory()
            p = WebSocketServerProtocol()
            p.factory = f
            p.transport = t
            p._connectionMade()
            p.state = p.STATE_OPEN
            p.websocket_version = 18
            self.protocol = p
            self.transport = t

        def tearDown(self):
            # Cancel any timers the protocol scheduled during the test.
            for call in [
                self.protocol.autoPingPendingCall,
                self.protocol.autoPingTimeoutCall,
                self.protocol.openHandshakeTimeoutCall,
                self.protocol.closeHandshakeTimeoutCall,
            ]:
                if call is not None:
                    call.cancel()

        def test_loseConnection(self):
            """
            If we lose our connection before openHandshakeTimeout fires, it is
            cleaned up
            """
            # so, I guess a little cheezy, but we depend on the asyncio or
            # twisted class to call _connectionLost at some point; faking
            # that here
            self.protocol._connectionLost(txaio.create_failure(RuntimeError("testing")))
            self.assertTrue(self.protocol.openHandshakeTimeoutCall is None)
| |
from django.db import models
from django.test import TestCase
from django_override_db_tables import (
LockingOverrideDatabaseTables,
ReplaceDatabaseTable,
OverrideDatabaseTables,
SwappableDbTableModel,
)
import threading
import time
class TestModel(models.Model):
    """Minimal concrete model used to inspect generated SQL table names."""
    name = models.CharField(max_length=20)

    class Meta:
        # The default table name the override context managers swap out.
        db_table = 'pigeon'
        app_label = 'test'
class LockingOverrideTests(TestCase):
    """Test LockingOverrideDatabaseTables."""

    def test_success(self):
        """The override applies only to querysets built inside the context."""
        qset = TestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )
        with LockingOverrideDatabaseTables(TestModel, 'skyrat'):
            # existing queryset should be unaffected
            self.assertEqual(
                """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
                """WHERE "pigeon"."name" = James""",
                str(qset.query).strip(),
            )
            # but new ones should use the override
            qset = TestModel.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset.query).strip(),
            )
        # qset was created inside the context manager, and will have
        # resolved tables already
        self.assertEqual(
            """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
            """WHERE "skyrat"."name" = Katia""",
            str(qset.query).strip(),
        )
        # however a new one will be back to normal
        qset = TestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )

    def test_exception(self):
        """The override is removed even when the managed block raises."""
        try:
            with LockingOverrideDatabaseTables(TestModel, 'skyrat'):
                raise ValueError
            # only reached if the context manager wrongly swallowed the error
            self.fail("Should have raised a ValueError.")
        except ValueError:
            pass
        # table configuration should have been restored
        qset = TestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )

    def test_nesting(self):
        """Nested overrides stack, unwind in order, and finally reset."""
        with LockingOverrideDatabaseTables(TestModel, 'skyrat'):
            qset = TestModel.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset.query).strip(),
            )
            with LockingOverrideDatabaseTables(TestModel, 'columbidae'):
                qset2 = TestModel.objects.filter(name='Nick')
                self.assertEqual(
                    """SELECT "columbidae"."id", "columbidae"."name" """
                    """FROM "columbidae" """
                    """WHERE "columbidae"."name" = Nick""",
                    str(qset2.query).strip(),
                )
            # back to the outer override after the inner context exits
            qset3 = TestModel.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset3.query).strip(),
            )
        # and resets correctly at the end
        qset = TestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )
class LockingOverrideConcurrency(TestCase):
    """Test that overrides in multiple threads won't conflict."""

    def test_two_threads(self):
        # Both are moved gradually through their sequence by
        # having this (the main) thread repeatedly release
        # a semaphore that prevents them moving further forward.
        #
        # This both served to demonstrate the problem of interleaved
        # context processors, *and* still works once each thread's
        # stack of context processors uses a lock to prevent them
        # running concurrently.
        sequence = []

        def log_position(thread, position):
            # Record e.g. 'fI' / 'sII' so the interleaving can be asserted.
            where = "%s%s" % (thread, position)
            sequence.append(where)
            # print(where)

        def first(semaphore):
            first.as_expected = False
            # Wait until the main thread unblocks us.
            semaphore.acquire(True)
            with LockingOverrideDatabaseTables(TestModel, 'columbidae'):
                log_position('f', 'I')
                qset = TestModel.objects.filter(name='Nick')
                if (
                    """SELECT "columbidae"."id", "columbidae"."name" """
                    """FROM "columbidae" """
                    """WHERE "columbidae"."name" = Nick""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('f', 'II')
            # Outside the override: new querysets must use the real table.
            qset = TestModel.objects.filter(name='James')
            if (
                """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
                """WHERE "pigeon"."name" = James""" !=
                str(qset.query).strip()
            ):
                return
            first.as_expected = True

        def second(semaphore):
            second.as_expected = False
            with LockingOverrideDatabaseTables(TestModel, 'skyrat'):
                log_position('s', 'I')
                # Hold the override (and its global lock) until released.
                semaphore.acquire(True)
                qset = TestModel.objects.filter(name='Katia')
                if (
                    """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                    """WHERE "skyrat"."name" = Katia""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('s', 'II')
            # Outside the override: new querysets must use the real table.
            qset = TestModel.objects.filter(name='James')
            if (
                """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
                """WHERE "pigeon"."name" = James""" !=
                str(qset.query).strip()
            ):
                return
            second.as_expected = True

        sem1 = threading.Semaphore()
        sem2 = threading.Semaphore()
        sem1.acquire(True)
        sem2.acquire(True)
        first_thread = threading.Thread(target=first, args=[sem1])
        second_thread = threading.Thread(target=second, args=[sem2])
        first_thread.daemon = True
        second_thread.daemon = True
        first_thread.start()
        second_thread.start()
        # The second thread will run until it is inside its
        # context processor. Let's make sure we give it time
        # to get there before unblocking the first thread.
        time.sleep(1)
        # Let the first thread run through everything. It won't,
        # because by then the second thread will have obtained the
        # global lock on overrides.
        sem1.release()
        # And let the second thread run to completion.
        sem2.release()
        # At this point, it releases the global lock and the first
        # thread can run to completion.
        # and wait for both to complete
        first_thread.join()
        second_thread.join()
        self.assertEqual(True, first.as_expected)
        self.assertEqual(True, second.as_expected)
        # Check that the operations were carried out in the correct
        # order
        #
        # This is what it should look like without locking:
        #
        # self.assertEqual(
        #     [ 'sI', 'fI', 'fII', 'sII' ],
        #     sequence
        # )
        #
        # This is what it looks like with locking:
        #
        self.assertEqual(
            ['sI', 'sII', 'fI', 'fII'],
            sequence
        )
        # then check that everything has been reset correctly
        qset = TestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )
class AbstractTestModel(models.Model):
    """Abstract twin of TestModel, for ReplaceDatabaseTable which builds a
    fresh concrete model rather than mutating an existing one."""
    name = models.CharField(max_length=20)

    class Meta:
        abstract = True
        db_table = 'pigeon'
        app_label = 'test'
class ReplaceTests(TestCase):
    """Test ReplaceDatabaseTable."""
    # almost identical to LockingOverrideTests, but needs the return from
    # the context manager, and has to be based on the AbstractTestModel
    # (so we don't care about resetting the db table of anything we
    # replace the db table of).

    def test_success(self):
        """The model returned by the context manager uses the new table."""
        with ReplaceDatabaseTable(AbstractTestModel, 'skyrat') as TM:
            qset = TM.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset.query).strip(),
            )
        # qset was created inside the context manager, and will have
        # resolved tables already
        self.assertEqual(
            """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
            """WHERE "skyrat"."name" = Katia""",
            str(qset.query).strip(),
        )

    def test_exception(self):
        """Exceptions raised in the managed block propagate out."""
        try:
            with ReplaceDatabaseTable(AbstractTestModel, 'skyrat'):
                raise ValueError
            # only reached if the context manager wrongly swallowed the error
            self.fail("Should have raised a ValueError.")
        except ValueError:
            pass

    def test_nesting(self):
        """Nested replacements yield independent models."""
        with ReplaceDatabaseTable(AbstractTestModel, 'skyrat') as TM:
            qset = TM.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset.query).strip(),
            )
            with ReplaceDatabaseTable(AbstractTestModel, 'columbidae') as TM2:
                qset2 = TM2.objects.filter(name='Nick')
                self.assertEqual(
                    """SELECT "columbidae"."id", "columbidae"."name" """
                    """FROM "columbidae" """
                    """WHERE "columbidae"."name" = Nick""",
                    str(qset2.query).strip(),
                )
            # the outer model is unaffected by the inner replacement
            qset3 = TM.objects.filter(name='Katia')
            self.assertEqual(
                """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                """WHERE "skyrat"."name" = Katia""",
                str(qset3.query).strip(),
            )
class ReplaceConcurrency(TestCase):
    """Test that replace in multiple threads won't conflict."""

    def test_two_threads(self):
        # Both are moved gradually through their sequence by
        # having this (the main) thread repeatedly release
        # a semaphore that prevents them moving further forward.
        #
        # Could probably be done more simply, but this works and
        # matches LockingOverrideConcurrency, above.
        sequence = []

        def log_position(thread, position):
            # Record e.g. 'fI' / 'sII' so the interleaving can be asserted.
            where = "%s%s" % (thread, position)
            sequence.append(where)
            # print(where)

        def first(sem1, sem2):
            first.as_expected = False
            # Wait until the second thread is inside its replacement.
            sem1.acquire(True)
            with ReplaceDatabaseTable(AbstractTestModel, 'columbidae') as TM:
                log_position('f', 'I')
                qset = TM.objects.filter(name='Nick')
                if (
                    """SELECT "columbidae"."id", "columbidae"."name" """
                    """FROM "columbidae" """
                    """WHERE "columbidae"."name" = Nick""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('f', 'II')
                # Hand control back to the second thread.
                sem2.release()
            first.as_expected = True

        def second(sem1, sem2):
            second.as_expected = False
            with ReplaceDatabaseTable(AbstractTestModel, 'skyrat') as TM:
                log_position('s', 'I')
                # Unblock the first thread, then wait for it to finish its
                # own replacement while ours is still active.
                sem1.release()
                sem2.acquire(True)
                qset = TM.objects.filter(name='Katia')
                if (
                    """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                    """WHERE "skyrat"."name" = Katia""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('s', 'II')
            second.as_expected = True

        sem1 = threading.Semaphore()
        sem2 = threading.Semaphore()
        sem1.acquire(True)
        sem2.acquire(True)
        first_thread = threading.Thread(target=first, args=[sem1, sem2])
        second_thread = threading.Thread(target=second, args=[sem1, sem2])
        first_thread.daemon = True
        second_thread.daemon = True
        first_thread.start()
        second_thread.start()
        # and wait for both to complete
        first_thread.join()
        second_thread.join()
        self.assertEqual(True, first.as_expected)
        self.assertEqual(True, second.as_expected)
        # Check that the operations were carried out in the correct
        # order
        self.assertEqual(
            ['sI', 'fI', 'fII', 'sII'],
            sequence
        )
class OverridableTestModel(SwappableDbTableModel):
    """Minimal swappable-table model used by the override tests below.

    A single 'name' column, reading from the 'pigeon' table unless an
    OverrideDatabaseTables context substitutes another table.
    """
    name = models.CharField(max_length=20)
    class Meta:
        # Default table name; the override tests swap this at runtime.
        db_table = 'pigeon'
        app_label = 'test'
class OverrideTests(TestCase):
    """Test OverrideDatabaseTables."""
    # Near-duplicate of LockingOverrideTests, except that the model under
    # test has to be a SwappableDbTableModel.

    # Expected SQL for each table the model can be pointed at.
    _PIGEON_SQL = (
        'SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" '
        'WHERE "pigeon"."name" = James'
    )
    _SKYRAT_SQL = (
        'SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" '
        'WHERE "skyrat"."name" = Katia'
    )
    _COLUMBIDAE_SQL = (
        'SELECT "columbidae"."id", "columbidae"."name" '
        'FROM "columbidae" '
        'WHERE "columbidae"."name" = Nick'
    )

    def test_success(self):
        # The override applies only to querysets created while it is
        # active, and resets when the context exits.
        before = OverridableTestModel.objects.filter(name='James')
        self.assertEqual(self._PIGEON_SQL, str(before.query).strip())
        with OverrideDatabaseTables(OverridableTestModel, 'skyrat'):
            # A queryset built before the override keeps its table.
            self.assertEqual(self._PIGEON_SQL, str(before.query).strip())
            # New querysets pick up the override.
            during = OverridableTestModel.objects.filter(name='Katia')
            self.assertEqual(self._SKYRAT_SQL, str(during.query).strip())
        # 'during' resolved its table while the override was active, so
        # it still points at the override table after exit...
        self.assertEqual(self._SKYRAT_SQL, str(during.query).strip())
        # ...while a fresh queryset reverts to the default table.
        after = OverridableTestModel.objects.filter(name='James')
        self.assertEqual(self._PIGEON_SQL, str(after.query).strip())

    def test_exception(self):
        # An exception inside the context must still restore the tables,
        # and must propagate out of the context manager.
        try:
            with OverrideDatabaseTables(OverridableTestModel, 'skyrat'):
                raise ValueError
            self.fail("Should have raised a ValueError.")
        except ValueError:
            pass
        # table configuration should have been restored
        restored = OverridableTestModel.objects.filter(name='James')
        self.assertEqual(self._PIGEON_SQL, str(restored.query).strip())

    def test_nesting(self):
        # Nested overrides stack, and unwind one level at a time.
        with OverrideDatabaseTables(OverridableTestModel, 'skyrat'):
            outer = OverridableTestModel.objects.filter(name='Katia')
            self.assertEqual(self._SKYRAT_SQL, str(outer.query).strip())
            with OverrideDatabaseTables(OverridableTestModel, 'columbidae'):
                inner = OverridableTestModel.objects.filter(name='Nick')
                self.assertEqual(
                    self._COLUMBIDAE_SQL, str(inner.query).strip())
            # Leaving the inner context re-exposes the outer override.
            unwound = OverridableTestModel.objects.filter(name='Katia')
            self.assertEqual(self._SKYRAT_SQL, str(unwound.query).strip())
        # and resets correctly at the end
        final = OverridableTestModel.objects.filter(name='James')
        self.assertEqual(self._PIGEON_SQL, str(final.query).strip())
class OverrideConcurrency(TestCase):
    """Test that replace in multiple threads won't conflict."""
    def test_two_threads(self):
        # Both are moved gradually through their sequence by
        # having this (the main) thread repeatedly release
        # a semaphore that prevents them moving further forward.
        #
        # Could probably be done more simply, but this works and
        # matches ReplaceConcurrency, above.
        sequence = []
        def log_position(thread, position):
            # Record a "<thread><position>" marker (e.g. 'fI') so the
            # interleaving can be asserted once both threads finish.
            where = "%s%s" % (thread, position)
            sequence.append(where)
            # print(where)
        def first(sem1, sem2):
            # Thread body: overrides the table with 'columbidae', then
            # verifies the default is restored after its context exits.
            # Success is reported via the first.as_expected attribute.
            first.as_expected = False
            sem1.acquire(True)
            with OverrideDatabaseTables(OverridableTestModel, 'columbidae'):
                log_position('f', 'I')
                qset = OverridableTestModel.objects.filter(name='Nick')
                # Leave as_expected False if the override is not visible
                # in this thread's generated SQL.
                if (
                    """SELECT "columbidae"."id", "columbidae"."name" """
                    """FROM "columbidae" """
                    """WHERE "columbidae"."name" = Nick""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('f', 'II')
                sem2.release()
            # Outside the context the default table must be back, even
            # though 'second' still holds its own override open.
            qset = OverridableTestModel.objects.filter(name='James')
            if (
                """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
                """WHERE "pigeon"."name" = James""" !=
                str(qset.query).strip()
            ):
                # NOTE(review): stray Python 2 debug print; the other
                # failure paths return silently — consider removing.
                print str(qset.query)
                return
            first.as_expected = True
        def second(sem1, sem2):
            # Thread body: overrides the table with 'skyrat' and holds
            # the override open while 'first' runs inside it.
            second.as_expected = False
            with OverrideDatabaseTables(OverridableTestModel, 'skyrat'):
                log_position('s', 'I')
                sem1.release()
                sem2.acquire(True)
                qset = OverridableTestModel.objects.filter(name='Katia')
                if (
                    """SELECT "skyrat"."id", "skyrat"."name" FROM "skyrat" """
                    """WHERE "skyrat"."name" = Katia""" !=
                    str(qset.query).strip()
                ):
                    return
                log_position('s', 'II')
            qset = OverridableTestModel.objects.filter(name='James')
            if (
                """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
                """WHERE "pigeon"."name" = James""" !=
                str(qset.query).strip()
            ):
                return
            second.as_expected = True
        # Both semaphores start acquired so neither thread can run ahead
        # of the scripted sequence.
        sem1 = threading.Semaphore()
        sem2 = threading.Semaphore()
        sem1.acquire(True)
        sem2.acquire(True)
        first_thread = threading.Thread(target=first, args=[sem1, sem2])
        second_thread = threading.Thread(target=second, args=[sem1, sem2])
        # Daemonize so a deadlocked thread cannot hang interpreter exit.
        first_thread.daemon = True
        second_thread.daemon = True
        first_thread.start()
        second_thread.start()
        # and wait for both to complete
        first_thread.join()
        second_thread.join()
        self.assertEqual(True, first.as_expected)
        self.assertEqual(True, second.as_expected)
        # Check that the operations were carried out in the correct
        # order
        self.assertEqual(
            ['sI', 'fI', 'fII', 'sII'],
            sequence
        )
        # then check that everything has been reset correctly
        qset = OverridableTestModel.objects.filter(name='James')
        self.assertEqual(
            """SELECT "pigeon"."id", "pigeon"."name" FROM "pigeon" """
            """WHERE "pigeon"."name" = James""",
            str(qset.query).strip(),
        )
| |
from .._abstract.abstract import BaseAGSServer
from datetime import datetime
import csv, json
########################################################################
class Log(BaseAGSServer):
""" Log of a server """
_url = None
_securityHandler = None
_operations = None
_resources = None
_proxy_port = None
_proxy_url = None
_json = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler,
proxy_url=None, proxy_port=None,
initialize=False):
"""Constructor
Inputs:
url - admin url
securityHandler - Handler that handles site security
username - admin username
password - admin password
"""
self._proxy_port = proxy_port
self._proxy_url = proxy_url
self._url = url
self._securityHandler = securityHandler
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" populates server admin information """
params = {
"f" : "json"
}
json_dict = self._do_get(url=self._url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json = json.dumps(json_dict)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in Logs."
del k
del v
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as a string"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
@property
def operations(self):
""" returns the operations """
if self._operations is None:
self.__init()
return self._operations
#----------------------------------------------------------------------
@property
def resources(self):
""" returns the log resources """
if self._resources is None:
self.__init()
return self._resources
#----------------------------------------------------------------------
def countErrorReports(self, machine="*"):
""" This operation counts the number of error reports (crash
reports) that have been generated on each machine.
Input:
machine - name of the machine in the cluster. * means all
machines. This is default
Output:
dictionary with report count and machine name
"""
params = {
"f": "json",
"machine" : machine
}
return self._do_post(url=self._url + "/countErrorReports",
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def clean(self):
""" Deletes all the log files on all server machines in the site. """
params = {
"f" : "json",
}
return self._do_post(url=self._url + "/clean",
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
@property
def logSettings(self):
""" returns the current log settings """
params = {
"f" : "json"
}
sURL = self._url + "/settings"
return self._do_get(url=sURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)['settings']
#----------------------------------------------------------------------
def editLogSettings(self,
logLevel="WARNING",
logDir=None,
maxLogFileAge=90,
maxErrorReportsCount=10):
"""
The log settings are for the entire site.
Inputs:
logLevel - Can be one of [OFF, SEVERE, WARNING, INFO, FINE,
VERBOSE, DEBUG].
logDir - File path to the root of the log directory
maxLogFileAge - number of days that a server should save a log
file.
maxErrorReportsCount - maximum number of error report files
per machine
"""
lURL = self._url + "/settings/edit"
allowed_levels = ("OFF", "SEVERE", "WARNING", "INFO", "FINE", "VERBOSE", "DEBUG")
currentSettings= self.logSettings
currentSettings["f"] ="json"
if logLevel.upper() in allowed_levels:
currentSettings['logLevel'] = logLevel.upper()
if logDir is not None:
currentSettings['logDir'] = logDir
if maxLogFileAge is not None and \
isinstance(maxLogFileAge, int):
currentSettings['maxLogFileAge'] = maxLogFileAge
if maxErrorReportsCount is not None and \
isinstance(maxErrorReportsCount, int) and\
maxErrorReportsCount > 0:
currentSettings['maxErrorReportsCount'] = maxErrorReportsCount
return self._do_post(url=lURL, param_dict=currentSettings,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def query(self,
startTime=None,
endTime=None,
sinceServerStart=False,
level="WARNING",
services="*",
machines="*",
server="*",
codes=[],
processIds=[],
export=False,
exportType="CSV", #CSV or TAB
out_path=None
):
"""
The query operation on the logs resource provides a way to
aggregate, filter, and page through logs across the entire site.
Inputs:
"""
allowed_levels = ("SEVERE", "WARNING", "INFO",
"FINE", "VERBOSE", "DEBUG")
qFilter = {
"services": "*",
"machines": "*",
"server" : "*"
}
if len(processIds) > 0:
qFilter['processIds'] = processIds
if len(codes) > 0:
qFilter['codes'] = codes
params = {
"f" : "json",
"sinceServerStart" : sinceServerStart,
"pageSize" : 10000
}
if startTime is not None and \
isinstance(startTime, datetime):
params['startTime'] = startTime.strftime("%Y-%m-%dT%H:%M:%S")
if endTime is not None and \
isinstance(endTime, datetime):
params['endTime'] = endTime.strftime("%Y-%m-%dT%H:%M:%S")
if level.upper() in allowed_levels:
params['level'] = level
if server != "*":
qFilter['server'] = server.split(',')
if services != "*":
qFilter['services'] = services.split(',')
if machines != "*":
qFilter['machines'] = machines.split(",")
params['filter'] = qFilter
if export == True and \
out_path is not None:
messages = self._do_post(self._url + "/query", params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
with open(name=out_path, mode='wb') as f:
hasKeys = False
if exportType == "TAB":
csvwriter = csv.writer(f, delimiter='\t')
else:
csvwriter = csv.writer(f)
for message in messages['logMessages']:
if hasKeys == False:
csvwriter.writerow(message.keys())
hasKeys = True
csvwriter.writerow(message.values())
del message
del messages
return out_path
else:
return self._do_post(self._url + "/query", params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for geometric operations in tensorflow."""
import math
import numpy as np
import tensorflow as tf
# ldif is an internal package, should be imported last.
# pylint: disable=g-bad-import-order
from ldif.util import np_util
from ldif.util import camera_util
from ldif.util import tf_util
# pylint: enable=g-bad-import-order
def ray_sphere_intersect(ray_start, ray_direction, sphere_center, sphere_radius,
                         max_t):
  """Intersect rays with each of a set of spheres.

  Args:
    ray_start: Tensor with shape [batch_size, ray_count, 3]. The end point of
      the rays. In the same coordinate space as the spheres.
    ray_direction: Tensor with shape [batch_size, ray_count, 3]. The extant ray
      direction. Assumed unit-length (a == 1 below relies on this).
    sphere_center: Tensor with shape [batch_size, sphere_count, 3]. The center
      of the spheres.
    sphere_radius: Tensor with shape [batch_size, sphere_count, 1]. The radius
      of the spheres.
    max_t: The maximum intersection distance.

  Returns:
    intersections: Tensor with shape [batch_size, ray_count, sphere_count]. If
      no intersection is found between [0, max_t), then the value will be max_t.
  """
  # Algebraic solution of |o + t*d - c|^2 = r^2 with unit direction d:
  #   a*t^2 + b*t + c_term = 0, a = 1, b = 2*d.(o - c),
  #   c_term = |o - c|^2 - r^2.
  batch_size, ray_count = ray_start.get_shape().as_list()[:2]
  sphere_count = sphere_center.get_shape().as_list()[1]
  ray_direction = tf.reshape(ray_direction, [batch_size, ray_count, 1, 3])
  ray_start = tf.reshape(ray_start, [batch_size, ray_count, 1, 3])
  sphere_center = tf.reshape(sphere_center, [batch_size, 1, sphere_count, 3])
  sphere_radius = tf.reshape(sphere_radius, [batch_size, 1, sphere_count])
  a = 1.0
  center_to_start = ray_start - sphere_center
  # Fix: b is the scalar dot product 2*d.(o - c). The previous elementwise
  # product never reduced over xyz, leaving a trailing dimension that broke
  # the documented [batch_size, ray_count, sphere_count] output shape.
  b = 2.0 * tf.reduce_sum(ray_direction * center_to_start, axis=-1)
  ray_sphere_distance = tf.reduce_sum(tf.square(center_to_start), axis=-1)
  c = ray_sphere_distance - tf.square(sphere_radius)
  discriminant = tf.square(b) - 4 * a * c
  # Assume the discriminant is positive, then invalidate bad hits later:
  ta = tf.divide((-b + tf.sqrt(discriminant)), 2 * a)
  tb = tf.divide((-b - tf.sqrt(discriminant)), 2 * a)
  t0 = tf.minimum(ta, tb)
  t1 = tf.maximum(ta, tb)
  # Prefer the near hit; fall back to the far hit when the near one is
  # behind the ray origin.
  t = tf.where(t0 > 0, t0, t1)
  intersection_invalid = tf.logical_or(
      tf.logical_or(discriminant < 0, t < 0), t > max_t)
  t = tf.where(intersection_invalid, max_t * tf.ones_like(t), t)
  return t
def to_homogeneous(t, is_point):
  """Appends a homogeneous coordinate to the last axis of a tensor.

  Args:
    t: Tensor with shape [..., K], a tensor of K-dimensional coordinates.
    is_point: Boolean. True for points, false for directions.

  Returns:
    Tensor with shape [..., K+1]: t with a trailing 1 (points) or 0
    (directions) appended along the final axis.
  """
  fill = 1 if is_point else 0
  ndims = len(t.get_shape().as_list())
  # Pad only the last axis, by a single trailing element.
  pad_spec = [[0, 0]] * (ndims - 1) + [[0, 1]]
  return tf.pad(
      t, tf.constant(pad_spec), mode='CONSTANT', constant_values=fill)
def transform_points_with_normals(points, tx, normals=None):
  """Transforms a pointcloud (optionally with normals) into a new frame.

  Args:
    points: Tensor with shape [batch_size, point_count, 3 or 6]. A trailing
      dim of 6 means normals are packed into channels 3:6.
    tx: Tensor with shape [batch_size, 4, 4]. Maps column-vectors from the
      current frame to the new frame as T*x.
    normals: Optional tensor with shape [batch_size, point_count, 3]. Must
      be None when the points tensor already packs normals.

  Returns:
    A single [..., 6] tensor when the input was packed; otherwise one or
    two [..., 3] tensors depending on whether 'normals' was given.
  """
  if len(points.shape) != 3:
    raise ValueError(f'Invalid points shape: {points.get_shape().as_list()}')
  if len(tx.shape) != 3:
    raise ValueError(f'Invalid tx shape: {tx.get_shape().as_list()}')
  has_packed_normals = points.shape[-1] == 6
  if has_packed_normals:
    points, normals = tf.split(points, [3, 3], axis=-1)
  moved_points = apply_4x4(
      points, tx, are_points=True, batch_rank=1, sample_rank=1)
  if normals is None:
    return moved_points
  # Normals transform with the inverse-transpose of the point transform,
  # then get re-normalized (epsilon guards against zero-length vectors).
  normal_tx = tf.linalg.inv(tf.transpose(tx, perm=[0, 2, 1]))
  moved_normals = apply_4x4(
      normals, normal_tx, are_points=False, batch_rank=1, sample_rank=1)
  moved_normals = moved_normals / (
      tf.linalg.norm(moved_normals, axis=-1, keepdims=True) + 1e-8)
  if has_packed_normals:
    return tf.concat([moved_points, moved_normals], axis=-1)
  return moved_points, moved_normals
def transform_featured_points(points, tx):
  """Transforms the xyz channels of a featured pointcloud, keeping features.

  Args:
    points: Tensor with shape [batch_size, point_count, 3+feature_count].
    tx: Tensor with shape [batch_size, 4, 4].

  Returns:
    Tensor with shape [batch_size, point_count, 3+feature_count].
  """
  extra_channels = points.get_shape().as_list()[-1] - 3
  if not extra_channels:
    # Pure xyz cloud: transform directly.
    return apply_4x4(points, tx, are_points=True, batch_rank=1, sample_rank=1)
  # Split off the features, move the coordinates, and re-attach.
  xyz, features = tf.split(points, [3, extra_channels], axis=2)
  xyz = apply_4x4(xyz, tx, are_points=True, batch_rank=1, sample_rank=1)
  return tf.concat([xyz, features], axis=2)
def rotation_to_tx(rot):
  """Embeds a 3x3 rotation into a homogeneous 4x4 matrix.

  Args:
    rot: Tensor with shape [..., 3, 3].

  Returns:
    Tensor with shape [..., 4, 4]: [[R, 0], [0, 1]].
  """
  leading_dims = rot.get_shape().as_list()[:-2]
  # Zero translation column -> [..., 3, 4].
  translation_col = tf.zeros(leading_dims + [3, 1], dtype=tf.float32)
  upper = tf.concat([rot, translation_col], axis=-1)
  # Bottom homogeneous row [0, 0, 0, 1] -> [..., 1, 4].
  bottom = tf.eye(4, batch_shape=leading_dims)[..., 3:4, :]
  return tf.concat([upper, bottom], axis=-2)
def extract_points_near_origin(points, count, features=None):
  """Selects the `count` points closest to the origin in a pointcloud.

  Args:
    points: Tensor with shape [batch_size, point_count, 3 or more]. More
      than 3 channels means features are packed after xyz.
    count: The number of points to extract.
    features: Optional tensor with shape [batch_size, point_count,
      feature_count]; ignored when features are packed into 'points'.

  Returns:
    One tensor of size [batch_size, count, 3 or more], or a
    (points, features) pair of [batch_size, count, *] tensors, depending
    on the input layout.
  """
  channel_count = points.get_shape().as_list()[-1]
  has_packed_features = channel_count > 3
  if has_packed_features:
    packed = points
    points, features = tf.split(points, [3, channel_count - 3], axis=-1)
  else:
    assert channel_count == 3
  # top_k of the negated norms == the `count` smallest distances.
  dists = tf.linalg.norm(points, axis=-1)
  _, nearest = tf.math.top_k(-dists, k=count, sorted=False)
  if has_packed_features:
    return tf.gather(packed, nearest, batch_dims=1)
  chosen = tf.gather(points, nearest, batch_dims=1)
  if features is None:
    return chosen
  return chosen, tf.gather(features, nearest, batch_dims=1)
def local_views_of_shape(global_points,
                         world2local,
                         local_point_count,
                         global_normals=None,
                         global_features=None,
                         zeros_invalid=False,
                         zero_threshold=1e-6,
                         expand_region=True,
                         threshold=4.0):
  """Computes a set of local point cloud observations from a global observation.

  It is assumed for optimization purposes that
  global_point_count >> local_point_count.

  Args:
    global_points: Tensor with shape [batch_size, global_point_count, 3]. The
      input observation point cloud in world space.
    world2local: Tensor with shape [batch_size, frame_count, 4, 4]. Each 4x4
      matrix maps from points in world space to points in a local frame.
    local_point_count: Integer. The number of points to output in each local
      frame. Whatever this value, the local_point_count closest points to each
      local frame origin will be returned.
    global_normals: Tensor with shape [batch_size, global_point_count, 3]. The
      input observation point cloud's normals in world space. Optional.
    global_features: Tensor with shape [batch_size, global_point_count,
      feature_count]. The input observation point cloud features, in any space.
      Optional.
    zeros_invalid: Whether to consider the vector [0, 0, 0] to be invalid.
    zero_threshold: Values less than this in magnitude are considered to be 0.
    expand_region: Whether to expand outward from the threshold region. If
      false, fill with zeros.
    threshold: The distance threshold.

  Returns:
    local_points: Tensor with shape [batch_size, frame_count,
      local_point_count, 3].
    local_normals: Tensor with shape [batch_size, frame_count,
      local_point_count, 3]. None if global_normals not provided.
    local_features: Tensor with shape [batch_size, frame_count,
      local_point_count, feature_count]. Unlike the local normals and points,
      these are not transformed because there may or may not be a good
      transformation to apply, depending on what the features are. But they will
      be the features associated with the local points that were chosen. None
      if global_features not provided.
    points_valid: Tensor with shape [batch_size, frame_count,
      local_point_count, 1]. Boolean; True where the selected point was
      inside the distance threshold.
  """
  # Example use case: batch_size = 64, global_point_count = 100000
  # local_point_count = 1000, frame_count = 25. Then:
  # global_points has size 64*100000*3*4 = 73mb
  # local_points has size 64*1000*25*3*4 = 18mb
  # If we made an intermediate tensor with shape [batch_size, frame_count,
  # global_point_count, 3] -> 64 * 25 * 100000 * 3 * 4 = 1.8 Gb -> bad.
  batch_size, _, _ = global_points.get_shape().as_list()
  if zeros_invalid:
    # If we just set the global points to be very far away, they won't be a
    # nearest neighbor
    abs_zero = False
    if abs_zero:
      is_zero = tf.reduce_all(
          tf.equal(global_points, 0.0), axis=-1, keepdims=True)
    else:
      is_zero = tf.reduce_all(
          tf.abs(global_points) < zero_threshold, axis=-1, keepdims=True)
    # Send invalid points to (100, 100, 100), far outside the threshold.
    global_points = tf.where_v2(is_zero, 100.0, global_points)
  _, frame_count, _, _ = world2local.get_shape().as_list()
  local2world = tf.matrix_inverse(world2local)
  # *sigh* oh well, guess we have to do the transform:
  tiled_global = tf.tile(
      tf.expand_dims(to_homogeneous(global_points, is_point=True), axis=1),
      [1, frame_count, 1, 1])
  all_local_points = tf.matmul(tiled_global, world2local, transpose_b=True)
  distances = tf.norm(all_local_points, axis=-1)
  # thresh = 4.0
  # TODO(kgenova) This is potentially a problem because it could introduce
  # randomness into the pipeline at inference time.
  # In-threshold points get a random key, so top_k draws a uniform sample
  # without replacement; out-of-threshold points rank strictly below them,
  # nearest first (hence the negated distance).
  probabilities = tf.random.uniform(distances.get_shape().as_list())
  is_valid = distances < threshold
  sample_order = tf.where(is_valid, probabilities, -distances)
  _, top_indices = tf.math.top_k(
      sample_order, k=local_point_count, sorted=False)
  local_points = tf.gather(all_local_points, top_indices, batch_dims=2, axis=-2)
  # Drop the homogeneous coordinate from the gathered points.
  local_points = tf.ensure_shape(
      local_points[..., :3], [batch_size, frame_count, local_point_count, 3])
  is_valid = tf.expand_dims(is_valid, axis=-1)
  # log.info('is_valid shape: ', is_valid.get_shape().as_list())
  # log.info('top_indices shape: ', top_indices.get_shape().as_list())
  # log.info('all_local_points shape: ', all_local_points.get_shape().as_list())
  points_valid = tf.gather(is_valid, top_indices, batch_dims=2, axis=-2)
  # points_valid = tf.expand_dims(points_valid, axis=-1)
  points_valid = tf.ensure_shape(
      points_valid, [batch_size, frame_count, local_point_count, 1])
  if not expand_region:
    # Zero-fill instead of padding with nearest out-of-region points.
    local_points = tf.where_v2(points_valid, local_points, 0.0)
  # valid_feature = tf.cast(points_valid, dtype=tf.float32)
  if global_normals is not None:
    tiled_global_normals = tf.tile(
        tf.expand_dims(to_homogeneous(global_normals, is_point=False), axis=1),
        [1, frame_count, 1, 1])
    # Normals get transformed by the inverse-transpose matrix:
    all_local_normals = tf.matmul(
        tiled_global_normals, local2world, transpose_b=False)
    local_normals = tf.gather(
        all_local_normals, top_indices, batch_dims=2, axis=-2)
    # Remove the homogeneous coordinate now. It isn't a bug to normalize with
    # it since it's zero, but it's confusing.
    local_normals = tf.math.l2_normalize(local_normals[..., :3], axis=-1)
    local_normals = tf.ensure_shape(
        local_normals, [batch_size, frame_count, local_point_count, 3])
  else:
    local_normals = None
  if global_features is not None:
    feature_count = global_features.get_shape().as_list()[-1]
    # Features are selected but deliberately left untransformed (see
    # docstring).
    local_features = tf.gather(
        global_features, top_indices, batch_dims=1, axis=-2)
    local_features = tf.ensure_shape(
        local_features,
        [batch_size, frame_count, local_point_count, feature_count])
  else:
    local_features = None
  return local_points, local_normals, local_features, points_valid
def chamfer_distance(pred, target):
  """Computes the chamfer distance between two point sets, in both directions.

  Args:
    pred: Tensor with shape [..., pred_point_count, n_dims].
    target: Tensor with shape [..., target_point_count, n_dims].

  Returns:
    pred_to_target, target_to_pred.
    pred_to_target: Tensor with shape [..., pred_point_count, 1]. The distance
      from each point in pred to the closest point in the target.
    target_to_pred: Tensor with shape [..., target_point_count, 1]. The
      distance from each point in target to the closest point in the
      prediction.
  """
  with tf.name_scope('chamfer_distance'):
    # All pairwise displacements: [..., pred_count, target_count, n_dims].
    deltas = tf.expand_dims(pred, axis=-2) - tf.expand_dims(target, axis=-3)
    sq_dists = tf.reduce_sum(deltas * deltas, axis=-1)
    # Min over targets for each pred point, and vice versa.
    nearest_for_pred = tf.reduce_min(sq_dists, axis=-1)
    nearest_for_target = tf.reduce_min(sq_dists, axis=-2)
    pred_to_target = tf.sqrt(tf.expand_dims(nearest_for_pred, axis=-1))
    target_to_pred = tf.sqrt(tf.expand_dims(nearest_for_target, axis=-1))
    return pred_to_target, target_to_pred
def dodeca_parameters(dodeca_idx):
  """Computes the viewpoint, centroid, and up vectors for the dodecahedron."""
  # Dodecahedron vertices are (+-1, +-1, +-1) plus cyclic permutations of
  # (0, +-phi, +-1/phi), where phi is the golden ratio.
  phi = (1.0 + math.sqrt(5.0)) / 2.0
  inv_phi = 1.0 / phi
  vertices = [[1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1], [-1, 1, 1],
              [-1, 1, -1], [-1, -1, 1], [-1, -1, -1], [0, phi, inv_phi],
              [0, phi, -inv_phi], [0, -phi, inv_phi], [0, -phi, -inv_phi],
              [inv_phi, 0, phi], [inv_phi, 0, -phi], [-inv_phi, 0, phi],
              [-inv_phi, 0, -phi], [phi, inv_phi, 0], [phi, -inv_phi, 0],
              [-phi, inv_phi, 0], [-phi, -inv_phi, 0]]
  # Cameras sit at 0.6x the vertex position, looking at the origin with a
  # world-space +y up vector.
  viewpoint = 0.6 * np.array(vertices[dodeca_idx], dtype=np.float32)
  centroid = np.zeros(3, dtype=np.float32)
  world_up = np.array([0.0, 1.0, 0.0], dtype=np.float32)
  return viewpoint, centroid, world_up
def get_camera_to_world(viewpoint, center, world_up):
  """Computes a 4x4 mapping from camera space to world space."""
  def _unit(v):
    # Normalize a 3-vector to unit length.
    return v / np.linalg.norm(v)
  # Build an orthonormal look-at frame: forward, right, camera-up.
  forward = _unit(center - viewpoint)
  right = _unit(np.cross(forward, world_up))
  cam_up = _unit(np.cross(right, forward))
  # Columns of the rotation are (right, up, -forward); translation is the
  # camera position.
  camera_to_world = np.eye(4)
  camera_to_world[:3, :3] = np.stack([right, cam_up, -forward], axis=1)
  camera_to_world[:3, 3] = viewpoint
  return camera_to_world
def get_dodeca_camera_to_worlds():
  """Stacks the 20 dodecahedron camera-to-world matrices into one array."""
  # One look-at matrix per dodecahedron vertex camera -> shape [20, 4, 4].
  return np.stack(
      [get_camera_to_world(*dodeca_parameters(i)) for i in range(20)],
      axis=0)
def gaps_depth_render_to_xyz(model_config, depth_image, camera_parameters):
  """Transforms a depth image to camera space assuming its dodeca parameters."""
  # TODO(kgenova) Extract viewpoint, width, height from camera parameters.
  del camera_parameters
  depth_image_height, depth_image_width = depth_image.get_shape().as_list()[1:3]
  # Hard-coded pose for dodecahedron camera 0; other views are not
  # implemented (the assert below fires for any other didx).
  if model_config.hparams.didx == 0:
    viewpoint = np.array([1.03276, 0.757946, -0.564739])
    towards = np.array([-0.737684, -0.54139, 0.403385])  # = v/-1.4
    up = np.array([-0.47501, 0.840771, 0.259748])
  else:
    assert False
  # Orthonormalize the look-at frame; rotation columns are
  # (right, up, -towards).
  towards = towards / np.linalg.norm(towards)
  right = np.cross(towards, up)
  right = right / np.linalg.norm(right)
  up = np.cross(right, towards)
  up = up / np.linalg.norm(up)
  rotation = np.stack([right, up, -towards], axis=1)
  rotation_4x4 = np.eye(4)
  rotation_4x4[:3, :3] = rotation
  camera_to_world = rotation_4x4.copy()
  camera_to_world[:3, 3] = viewpoint
  # Invert to world->camera and broadcast over the batch.
  camera_to_world = tf.constant(camera_to_world.astype(np.float32))
  world_to_camera = tf.reshape(tf.matrix_inverse(camera_to_world), [1, 4, 4])
  world_to_camera = tf.tile(world_to_camera, [model_config.hparams.bs, 1, 1])
  xyz_image, _, _ = depth_image_to_xyz_image(
      depth_image, world_to_camera, xfov=0.5)
  xyz_image = tf.reshape(
      xyz_image,
      [model_config.hparams.bs, depth_image_height, depth_image_width, 3])
  return xyz_image
def angle_of_rotation_to_2d_rotation_matrix(angle_of_rotation):
  """Given a batch of rotations, create a batch of 2d rotation matrices.

  Args:
    angle_of_rotation: Tensor with shape [batch_size].

  Returns:
    Tensor with shape [batch_size, 2, 2]: [[cos, -sin], [sin, cos]].
  """
  cos_t = tf.cos(angle_of_rotation)
  sin_t = tf.sin(angle_of_rotation)
  rows = [
      tf.stack([cos_t, -sin_t], axis=1),
      tf.stack([sin_t, cos_t], axis=1),
  ]
  return tf.stack(rows, axis=1)
def fractional_vector_projection(e0, e1, p, falloff=2.0):
  """Returns a fraction describing whether p projects inside the segment e0 e1.

  If p projects inside the segment, the result is 1. If it projects outside,
  the result is a fraction that is always greater than 0 but monotonically
  decreasing as the distance to the inside of the segment increase.

  Args:
    e0: Tensor with two elements containing the first endpoint XY locations.
    e1: Tensor with two elements containing the second endpoint XY locations.
    p: Tensor with shape [batch_size, 2] containing the query points.
    falloff: Float or Scalar Tensor specifying the softness of the falloff of
      the projection. Larger means a longer falloff. If None, a hard binary
      inside/outside value is returned instead.

  Returns:
    Tensor with shape [batch_size].
  """
  with tf.name_scope('fractional-vector-projection'):
    batch_size = p.shape[0].value
    p = tf.reshape(p, [batch_size, 2])
    e0 = tf.reshape(e0, [1, 2])
    e1 = tf.reshape(e1, [1, 2])
    e01 = e1 - e0
    # Normalize for vector projection:
    e01_norm = tf.sqrt(e01[0, 0] * e01[0, 0] + e01[0, 1] * e01[0, 1])
    e01_normalized = e01 / tf.reshape(e01_norm, [1, 1])
    e0p = p - e0
    e0p_dot_e01_normalized = tf.matmul(
        tf.reshape(e0p, [1, batch_size, 2]),
        tf.reshape(e01_normalized, [1, 1, 2]),
        transpose_b=True)
    # Dividing by e01_norm again expresses the projection as a fraction of
    # the segment length: 0 at e0, 1 at e1.
    e0p_dot_e01_normalized = tf.reshape(e0p_dot_e01_normalized,
                                        [batch_size]) / e01_norm
    if falloff is None:
      # Hard membership: 1 when the projection lies within [0, 1], else 0.
      left_sided_inside = tf.cast(
          tf.logical_and(e0p_dot_e01_normalized >= 0,
                         e0p_dot_e01_normalized <= 1),
          dtype=tf.float32)
      return left_sided_inside
    # Now that we have done the left side, do the right side:
    e10_normalized = -e01_normalized
    e1p = p - e1
    e1p_dot_e10_normalized = tf.matmul(
        tf.reshape(e1p, [1, batch_size, 2]),
        tf.reshape(e10_normalized, [1, 1, 2]),
        transpose_b=True)
    e1p_dot_e10_normalized = tf.reshape(e1p_dot_e10_normalized,
                                        [batch_size]) / e01_norm
    # Take the maximum of the two projections so we face it from the positive
    # direction:
    proj = tf.maximum(e0p_dot_e01_normalized, e1p_dot_e10_normalized)
    proj = tf.maximum(proj, 1.0)
    # A projection value of 1 means at the border exactly.
    # Take the max with 1, to throw out all cases besides 'left' overhang.
    # NOTE(review): falloff_is_relative is a hard-coded switch, so the
    # pixel-based else branch below is currently dead code — presumably
    # kept for experimentation; confirm before removing.
    falloff_is_relative = True
    if falloff_is_relative:
      fractional_falloff = 1.0 / (tf.pow(falloff * (proj - 1), 2.0) + 1.0)
      return fractional_falloff
    else:
      # Currently the proj value is given as a distance that is the fraction of
      # the length of the line. Instead, multiply by the length of the line
      # to get the distance in pixels. Then, set a target '0' distance, (i.e.
      # 10 pixels). Divide by that distance so we express distance in multiples
      # of the max distance that gets seen.
      # threshold at 1, and return 1 - that to get linear falloff from 0 to
      # the target distance.
      line_length = tf.reshape(e01_norm, [1])
      pixel_dist = tf.reshape(proj - 1, [-1]) * line_length
      zero_thresh_in_pixels = tf.reshape(
          tf.constant([8.0], dtype=tf.float32), [1])
      relative_dist = pixel_dist / zero_thresh_in_pixels
      return 1.0 / (tf.pow(relative_dist, 3.0) + 1.0)
def rotate_about_point(angle_of_rotation, point, to_rotate):
  """Rotates a single input 2d point by a specified angle around a point."""
  with tf.name_scope('rotate-2d'):
    c = tf.cos(angle_of_rotation)
    s = tf.sin(angle_of_rotation)
    # 2x2 rotation matrix with a leading batch dimension of 1.
    rotation_matrix = tf.reshape(
        tf.stack([tf.stack([c, -s], axis=0),
                  tf.stack([s, c], axis=0)], axis=0), [1, 2, 2])
    pivot = tf.reshape(point, [1, 1, 2])
    centered = tf.reshape(to_rotate, [1, 1, 2]) - pivot
    # Rotate about the origin, then translate back to the pivot.
    rotated = tf.matmul(rotation_matrix, centered, transpose_b=True)
    return tf.reshape(rotated, [1, 1, 2]) + pivot
def interpolate_from_grid(samples, grid):
  # Map samples from the centered [-0.5, 0.5] cube onto grid-index
  # coordinates in [0, 63] before doing the trilinear lookup.
  return interpolate_from_grid_coordinates((samples + 0.5) * 63.0, grid)
def reflect(samples, reflect_x=False, reflect_y=False, reflect_z=False):
  """Reflects the sample locations across the planes specified in xyz.

  Args:
    samples: Tensor with shape [..., 3].
    reflect_x: Bool.
    reflect_y: Bool.
    reflect_z: Bool.

  Returns:
    Tensor with shape [..., 3]. The reflected samples.
  """
  assert isinstance(reflect_x, bool)
  assert isinstance(reflect_y, bool)
  assert isinstance(reflect_z, bool)
  # Per-axis sign flips: -1 for reflected axes, +1 otherwise.
  flips = [reflect_x, reflect_y, reflect_z]
  mult = np.array([-1.0 if flip else 1.0 for flip in flips], dtype=np.float32)
  shape = samples.get_shape().as_list()
  assert shape[-1] == 3
  # Broadcast the sign vector over all leading dimensions.
  mult = mult.reshape([1] * len(shape[:-1]) + [3])
  return tf.constant(mult, dtype=tf.float32) * samples
def z_reflect(samples):
  """Reflects the sample locations across the XY plane (negates z).

  Thin convenience wrapper around reflect() with only the z-axis flipped.

  Args:
    samples: Tensor with shape [..., 3]

  Returns:
    reflected: Tensor with shape [..., 3]. The reflected samples.
  """
  return reflect(samples, reflect_z=True)
def get_world_to_camera(idx):
  """Returns the world-to-camera matrix for the single hard-coded viewpoint."""
  # Only one camera index is supported.
  assert idx == 1
  camera_position = tf.constant([[0.671273, 0.757946, -0.966907]],
                                dtype=tf.float32)
  center_of_attention = tf.zeros_like(camera_position)
  up_direction = tf.constant([[0., 1., 0.]], dtype=tf.float32)
  return camera_util.look_at(camera_position, center_of_attention,
                             up_direction)
def transform_depth_dodeca_to_xyz_dodeca(depth_dodeca):
  """Lifts a dodecahedron of depth images to world space.

  Args:
    depth_dodeca: Tensor of depth images with 20 views stacked on axis 1
      (assumed [batch_size, 20, height, width, 1] — confirm with callers).

  Returns:
    Tensor with the same shape layout holding world-space XYZ images;
    pixels whose input depth is <= 0 are zeroed out.
  """
  batch_size = depth_dodeca.get_shape().as_list()[0]
  cam2world = get_dodeca_camera_to_worlds()
  cam2world = np.reshape(cam2world, [1, 20, 4, 4]).astype(np.float32)
  # depth_image_to_xyz_image expects world-to-camera matrices, so invert.
  world2cams = np.linalg.inv(cam2world)
  world2cams = np.tile(world2cams, [batch_size, 1, 1, 1])
  world2cams = tf.unstack(tf.constant(world2cams, dtype=tf.float32), axis=1)
  depth_im_stack = tf.unstack(depth_dodeca, axis=1)
  assert len(depth_im_stack) == 20
  assert len(world2cams) == 20
  # Lift each of the 20 views independently, then restack on the view axis.
  xyz_images = []
  for i in range(20):
    world2cam = world2cams[i]
    depth_im = depth_im_stack[i]
    xyz_image = depth_image_to_xyz_image(depth_im, world2cam, xfov=0.5)[0]
    xyz_images.append(xyz_image)
  xyz_images = tf.stack(xyz_images, axis=1)
  # Zero out background pixels (depth <= 0 marks no geometry).
  xyz_images = tf.where_v2(depth_dodeca > 0.0, xyz_images, 0.0)
  return xyz_images
def transform_depth_dodeca_to_xyz_dodeca_np(depth_dodeca):
  """Numpy-in, numpy-out wrapper for transform_depth_dodeca_to_xyz_dodeca.

  Builds a throwaway TF1 graph, evaluates the transform in a fresh
  session, and returns the result as a numpy array. Intended for offline
  use; creating a graph+session per call is slow in a loop.
  """
  graph = tf.Graph()
  with graph.as_default():
    depth_in = tf.constant(depth_dodeca)
    xyz_out = transform_depth_dodeca_to_xyz_dodeca(depth_in)
    with tf.Session() as session:
      out_np = session.run(xyz_out)
  return out_np
def _unbatch(arr):
if arr.shape[0] == 1:
return arr.reshape(arr.shape[1:])
return arr
def to_homogenous_np(arr, is_point=True):
  """Appends a homogeneous coordinate: 1 for points, 0 for vectors."""
  assert arr.shape[-1] in [2, 3]
  # Points get w=1 (affected by translation); vectors get w=0.
  fill = np.ones if is_point else np.zeros
  coord = fill(list(arr.shape[:-1]) + [1], dtype=np.float32)
  return np.concatenate([arr, coord], axis=-1)
def depth_to_cam_np(im, xfov=0.5):
  """Converts a gaps depth image to camera space.

  Args:
    im: Numpy depth image with shape [height, width, 1]; a leading batch
      dimension of size 1 is stripped if present.
    xfov: Float. Horizontal field-of-view parameter; passed through
      math.tan, so presumably the half-angle in radians — TODO confirm.

  Returns:
    Numpy array with shape [height, width, 3] of camera-space XYZ.
  """
  im = _unbatch(im)
  height, width, _ = im.shape
  pixel_coords = np_util.make_coordinate_grid(
      height, width, is_screen_space=False, is_homogeneous=False)
  nic_x = np.reshape(pixel_coords[:, :, 0], [height, width])
  nic_y = np.reshape(pixel_coords[:, :, 1], [height, width])
  # GAPS nic coordinates have an origin at the center of the image, not
  # in the corner:
  nic_x = 2 * nic_x - 1.0
  nic_y = 2 * nic_y - 1.0
  # Depth is negated; the camera presumably looks down -z — confirm
  # against the GAPS convention.
  nic_d = -np.reshape(im, [height, width])
  aspect = height / float(width)
  # Derive the vertical FOV from the aspect ratio, then pinhole intrinsics.
  yfov = math.atan(aspect * math.tan(xfov))
  intrinsics_00 = 1.0 / math.tan(xfov)
  intrinsics_11 = 1.0 / math.tan(yfov)
  # Unproject normalized image coordinates through the intrinsics. Note the
  # extra sign flip on x relative to y.
  cam_x = nic_x * -nic_d / intrinsics_00
  cam_y = nic_y * nic_d / intrinsics_11
  cam_z = nic_d
  cam_xyz = np.stack([cam_x, cam_y, cam_z], axis=2)
  return cam_xyz
def apply_tx_np(samples, tx, is_point=True):
  """Applies a 4x4 transform to 3D samples, preserving their shape."""
  original_shape = samples.shape
  # Flatten to [-1, 3], lift to homogeneous coordinates, transform, and
  # drop the w component again.
  flat = to_homogenous_np(np.reshape(samples, [-1, 3]), is_point=is_point)
  transformed = np.matmul(flat, tx.T)[:, :3]
  return np.reshape(transformed, original_shape)
def depth_image_to_sdf_constraints(im, cam2world, xfov=0.5):
  """Estimates inside/outside constraints from a gaps depth image.

  For each foreground pixel, generates one sample slightly in front of the
  observed surface (SDF +delta) and one slightly behind it (SDF -delta).

  Args:
    im: Depth image; a leading batch dimension of size 1 is stripped.
    cam2world: 4x4 camera-to-world matrix (batch dim of 1 allowed).
    xfov: Float. Field-of-view parameter forwarded to depth_to_cam_np.

  Returns:
    samples: [2*k, 3] array of sample positions, k = #foreground pixels.
    constraints: [2*k, 1] array of SDF values (+delta rows then -delta).
  """
  im = _unbatch(im)
  cam2world = _unbatch(cam2world)
  height, width, _ = im.shape
  cam_xyz = depth_to_cam_np(im, xfov)
  world_xyz = apply_tx_np(cam_xyz, cam2world, is_point=True)
  # View rays transform as vectors (w=0); renormalize after the transform.
  ray_xyz = apply_tx_np(cam_xyz, cam2world, is_point=False)
  ray_xyz = ray_xyz / np.linalg.norm(ray_xyz, axis=-1, keepdims=True)
  delta = 0.005  # Offset along the ray, in world units.
  pos_constraint = world_xyz - delta * ray_xyz
  neg_constraint = world_xyz + delta * ray_xyz
  sample_shape = [height * width, 3]
  pos_constraint = np.reshape(pos_constraint, sample_shape)
  neg_constraint = np.reshape(neg_constraint, sample_shape)
  sdf_shape = [height * width, 1]
  zero = np.zeros(sdf_shape, dtype=np.float32)
  # Filter out the background (depth exactly 0 marks background pixels).
  is_valid = np.reshape(im, [-1]) != 0.0
  pos_constraint = pos_constraint[is_valid, :]
  neg_constraint = neg_constraint[is_valid, :]
  zero = zero[is_valid, :]
  samples = np.concatenate([pos_constraint, neg_constraint], axis=0)
  constraints = np.concatenate([zero + delta, zero - delta], axis=0)
  return samples, constraints
def depth_dodeca_to_sdf_constraints(depth_ims):
  """Estimates inside/outside constraints from a depth dodecahedron."""
  cam2world = np.split(get_dodeca_camera_to_worlds(), 20)
  depth_ims = np.split(_unbatch(depth_ims), 20)
  # Build per-view (samples, constraints) pairs, then merge them.
  per_view = [
      depth_image_to_sdf_constraints(im, c2w)
      for im, c2w in zip(depth_ims, cam2world)
  ]
  samps = np.concatenate([s for s, _ in per_view])
  constraints = np.concatenate([c for _, c in per_view])
  return samps, constraints
def depth_dodeca_to_samples(dodeca):
  """Returns dodecahedron samples with the SDF value as the last channel."""
  samples, sdf_constraints = depth_dodeca_to_sdf_constraints(dodeca)
  return np.concatenate([samples, sdf_constraints], axis=-1)
def depth_image_to_class_constraints(im, cam2world, xfov=0.5):
  """Like depth_image_to_sdf_constraints, but with boolean outside labels."""
  samples, sdf_constraints = depth_image_to_sdf_constraints(
      im, cam2world, xfov)
  # Positive SDF means the sample lies outside the surface.
  return samples, sdf_constraints > 0
def depth_image_to_samples(im, cam2world, xfov=0.5):
  """A wrapper for depth_image_to_sdf_constraints to return samples.

  Args:
    im: Depth image array.
    cam2world: 4x4 camera-to-world matrix.
    xfov: Float. Field-of-view parameter, forwarded to
      depth_image_to_sdf_constraints.

  Returns:
    Array with the SDF constraint concatenated as the last channel of the
    sample positions.
  """
  # Fix: xfov was previously accepted but silently ignored (the call always
  # used the default of 0.5); forward it so non-default FOVs take effect.
  # Behavior is unchanged for callers relying on the default.
  samples, sdf_constraints = depth_image_to_sdf_constraints(
      im, cam2world, xfov=xfov)
  all_samples = np.concatenate([samples, sdf_constraints], axis=-1)
  return all_samples
def apply_4x4(tensor, tx, are_points=True, batch_rank=None, sample_rank=None):
  """Applies a 4x4 matrix to 3D points/vectors.

  Args:
    tensor: Tensor with shape [batching_dims] + [sample_dims] + [3].
    tx: Tensor with shape [batching_dims] + [4, 4].
    are_points: Boolean. Whether to treat the samples as points or vectors.
    batch_rank: The number of leading batch dimensions. Optional, just used to
      enforce the shapes are as expected.
    sample_rank: The number of sample dimensions. Optional, just used to enforce
      the shapes are as expected.

  Returns:
    Tensor with shape [..., sample_count, 3].
  """
  expected_batch_rank = batch_rank
  expected_sample_rank = sample_rank
  # The actual batch rank is inferred from tx; the arguments only validate.
  batching_dims = tx.get_shape().as_list()[:-2]
  batch_rank = len(batching_dims)
  if expected_batch_rank is not None:
    assert batch_rank == expected_batch_rank
  # flat_batch_count = int(np.prod(batching_dims))
  sample_dims = tensor.get_shape().as_list()[batch_rank:-1]
  sample_rank = len(sample_dims)
  if expected_sample_rank is not None:
    assert sample_rank == expected_sample_rank
  flat_sample_count = int(np.prod(sample_dims))
  tensor = tf.ensure_shape(tensor, batching_dims + sample_dims + [3])
  tx = tf.ensure_shape(tx, batching_dims + [4, 4])
  assert sample_rank >= 1
  assert batch_rank >= 0
  # Collapse all sample dimensions into one so a single matmul suffices.
  if sample_rank > 1:
    tensor = tf.reshape(tensor, batching_dims + [flat_sample_count, 3])
  # Homogeneous coordinate: w=1 for points (translated), w=0 for vectors.
  initializer = tf.ones if are_points else tf.zeros
  w = initializer(batching_dims + [flat_sample_count, 1], dtype=tf.float32)
  tensor = tf.concat([tensor, w], axis=-1)
  # Row-vector convention: samples @ tx^T.
  tensor = tf.matmul(tensor, tx, transpose_b=True)
  tensor = tensor[..., :3]
  # Restore the original sample dimensions.
  if sample_rank > 1:
    tensor = tf.reshape(tensor, batching_dims + sample_dims + [3])
  return tensor
def depth_image_to_xyz_image(depth_images, world_to_camera, xfov=0.5):
  """Converts GAPS depth images to world space.

  Args:
    depth_images: Tensor with shape [batch_size, height, width, 1].
    world_to_camera: World-to-camera matrix tensor; inverted internally.
    xfov: Float. Field-of-view parameter (run through math.tan).

  Returns:
    world_xyz: [batch_size, height, width, 3] world-space coordinates.
    flat_camera_xyz: [batch_size, height*width, 3] camera-space coords.
    flat_nic_xyz: [batch_size, height*width, 3] normalized image coords.
  """
  batch_size, height, width, channel_count = depth_images.get_shape().as_list()
  assert channel_count == 1
  camera_to_world_mat = tf.matrix_inverse(world_to_camera)
  pixel_coords = np_util.make_coordinate_grid(
      height, width, is_screen_space=False, is_homogeneous=False)
  nic_x = np.tile(
      np.reshape(pixel_coords[:, :, 0], [1, height, width]), [batch_size, 1, 1])
  nic_y = np.tile(
      np.reshape(pixel_coords[:, :, 1], [1, height, width]), [batch_size, 1, 1])
  # Recenter from [0, 1] to [-1, 1] (GAPS image origin is at the center).
  nic_x = 2 * nic_x - 1.0
  nic_y = 2 * nic_y - 1.0
  # Depth negated; mirrors the convention in depth_to_cam_np.
  nic_d = -tf.reshape(depth_images, [batch_size, height, width])
  aspect = height / float(width)
  yfov = math.atan(aspect * math.tan(xfov))
  intrinsics_00 = 1.0 / math.tan(xfov)
  intrinsics_11 = 1.0 / math.tan(yfov)
  nic_xyz = tf.stack([nic_x, nic_y, nic_d], axis=3)
  flat_nic_xyz = tf.reshape(nic_xyz, [batch_size, height * width, 3])
  # Unproject through the pinhole intrinsics (note the sign flip on x only).
  camera_x = (nic_x) * -nic_d / intrinsics_00
  camera_y = (nic_y) * nic_d / intrinsics_11
  camera_z = nic_d
  homogeneous_coord = tf.ones_like(camera_z)
  camera_xyz = tf.stack([camera_x, camera_y, camera_z, homogeneous_coord],
                        axis=3)
  flat_camera_xyzw = tf.reshape(camera_xyz, [batch_size, height * width, 4])
  flat_world_xyz = tf.matmul(
      flat_camera_xyzw, camera_to_world_mat, transpose_b=True)
  world_xyz = tf.reshape(flat_world_xyz, [batch_size, height, width, 4])
  world_xyz = world_xyz[:, :, :, :3]
  return world_xyz, flat_camera_xyzw[:, :, :3], flat_nic_xyz
def interpolate_from_grid_coordinates(samples, grid):
  """Performs trilinear interpolation to estimate the value of a grid function.

  This function makes several assumptions to do the lookup:
  1) The grid is LHW and has evenly spaced samples in the range (0, 1), which
    is really the screen space range [0.5, {L, H, W}-0.5].

  Args:
    samples: Tensor with shape [batch_size, sample_count, 3].
    grid: Tensor with shape [batch_size, length, height, width, 1].

  Returns:
    sample: Tensor with shape [batch_size, sample_count, 1] and type float32.
    mask: Tensor with shape [batch_size, sample_count, 1] and type float32
  """
  batch_size, length, height, width = grid.get_shape().as_list()[:4]
  # These asserts aren't required by the algorithm, but they are currently
  # true for the pipeline:
  assert length == height
  assert length == width
  sample_count = samples.get_shape().as_list()[1]
  tf_util.assert_shape(samples, [batch_size, sample_count, 3],
                       'interpolate_from_grid:samples')
  tf_util.assert_shape(grid, [batch_size, length, height, width, 1],
                       'interpolate_from_grid:grid')
  offset_samples = samples  # Used to subtract 0.5
  lower_coords = tf.cast(tf.math.floor(offset_samples), dtype=tf.int32)
  upper_coords = lower_coords + 1
  # Fractional position between the lower and upper integer coordinates.
  alphas = tf.floormod(offset_samples, 1.0)
  maximum_value = grid.get_shape().as_list()[1:4]
  size_per_channel = tf.tile(
      tf.reshape(tf.constant(maximum_value, dtype=tf.int32), [1, 1, 3]),
      [batch_size, sample_count, 1])
  # We only need to check that the floor is at least zero and the ceil is
  # no greater than the max index, because floor round negative numbers to
  # be more negative:
  is_valid = tf.logical_and(lower_coords >= 0, upper_coords < size_per_channel)
  # Validity mask has shape [batch_size, sample_count] and is 1.0 where all of
  # x,y,z are within the [0,1] range of the grid.
  validity_mask = tf.reduce_min(
      tf.cast(is_valid, dtype=tf.float32), axis=2, keep_dims=True)
  # Gather the grid values at the 8 corners of each sample's cell. The
  # corner coordinates are clamped to the grid bounds; out-of-range samples
  # are reported via validity_mask instead of an invalid gather.
  # Fix: removed a leftover debug tf.print + control_dependencies block
  # (and its no-op `1.0 *` multiply) that spammed logs and serialized the
  # gathers; also dropped the unused lookup_coords accumulator.
  corners = [[[], []], [[], []]]
  flattened_grid = tf.reshape(grid, [batch_size, length * height * width])
  for xi, x_coord in enumerate([lower_coords[:, :, 0], upper_coords[:, :, 0]]):
    x_coord = tf.clip_by_value(x_coord, 0, width - 1)
    for yi, y_coord in enumerate([lower_coords[:, :, 1], upper_coords[:, :,
                                                                      1]]):
      y_coord = tf.clip_by_value(y_coord, 0, height - 1)
      for z_coord in [lower_coords[:, :, 2], upper_coords[:, :, 2]]:
        z_coord = tf.clip_by_value(z_coord, 0, length - 1)
        flat_lookup = z_coord * height * width + y_coord * width + x_coord
        lookup_result = tf.batch_gather(flattened_grid, flat_lookup)
        tf_util.assert_shape(lookup_result, [batch_size, sample_count],
                             'interpolate_from_grid:lookup_result x/8')
        corners[xi][yi].append(lookup_result)
  alpha_x, alpha_y, alpha_z = tf.unstack(alphas, axis=2)
  one_minus_alpha_x = 1.0 - alpha_x
  one_minus_alpha_y = 1.0 - alpha_y
  # First interpolate a face along x:
  f00 = corners[0][0][0] * one_minus_alpha_x + corners[1][0][0] * alpha_x
  f01 = corners[0][0][1] * one_minus_alpha_x + corners[1][0][1] * alpha_x
  f10 = corners[0][1][0] * one_minus_alpha_x + corners[1][1][0] * alpha_x
  f11 = corners[0][1][1] * one_minus_alpha_x + corners[1][1][1] * alpha_x
  # Next interpolate a line along y:
  l0 = f00 * one_minus_alpha_y + f10 * alpha_y
  l1 = f01 * one_minus_alpha_y + f11 * alpha_y
  # Finally interpolate a point along z:
  p = l0 * (1.0 - alpha_z) + l1 * alpha_z
  tf_util.assert_shape(p, [batch_size, sample_count], 'interpolate_from_grid:p')
  p = tf.reshape(p, [batch_size, sample_count, 1])
  validity_mask = tf.reshape(validity_mask, [batch_size, sample_count, 1])
  return p, validity_mask
| |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of fastboot_utils.py
"""
# pylint: disable=protected-access,unused-argument
import collections
import io
import logging
import unittest
from devil import devil_env
from devil.android import device_errors
from devil.android import device_utils
from devil.android import fastboot_utils
from devil.android.sdk import fastboot
from devil.utils import mock_calls
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
# Fixture data shared by the tests below.
_BOARD = 'board_type'
_SERIAL = '0123456789abcdef'
# Default partition set; order matters — the flash tests expect bootloader
# and radio first, each followed by a bootloader reboot.
_PARTITIONS = [
    'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata', 'cache']
# Ordered partition -> image-file mapping mirroring _PARTITIONS.
_IMAGES = collections.OrderedDict([
    ('bootloader', 'bootloader.img'),
    ('radio', 'radio.img'),
    ('boot', 'boot.img'),
    ('recovery', 'recovery.img'),
    ('system', 'system.img'),
    ('userdata', 'userdata.img'),
    ('cache', 'cache.img')
])
# Directory listings used to drive the _VerifyBoard tests.
_VALID_FILES = [_BOARD + '.zip', 'android-info.txt']
_INVALID_FILES = ['test.zip', 'android-info.txt']
class MockFile(object):
  """Context-manager stand-in for an open file object.

  Wraps a MagicMock so tests can hand it to code that does
  `with open(...) as f:`.
  """

  def __init__(self, name='/tmp/some/file'):
    # `file` is the Python 2 built-in file type; this module targets Py2.
    self.file = mock.MagicMock(spec=file)
    self.file.name = name

  def __enter__(self):
    # Entering the context yields the underlying mock, mirroring open().
    return self.file

  def __exit__(self, exc_type, exc_val, exc_tb):
    pass

  @property
  def name(self):
    return self.file.name
def _FastbootWrapperMock(test_serial):
  """Builds a fastboot.Fastboot mock that reports the given serial."""
  wrapper = mock.Mock(spec=fastboot.Fastboot)
  wrapper.Devices.return_value = [test_serial]
  wrapper.__str__ = mock.Mock(return_value=test_serial)
  return wrapper
def _DeviceUtilsMock(test_serial):
  """Builds a DeviceUtils mock with the given serial and fixture board."""
  mock_device = mock.Mock(spec=device_utils.DeviceUtils)
  mock_device.adb = mock.Mock()
  mock_device.product_board = mock.Mock(return_value=_BOARD)
  mock_device.__str__ = mock.Mock(return_value=test_serial)
  return mock_device
class FastbootUtilsTest(mock_calls.TestCase):
  """Base fixture: a FastbootUtils wired to mocked device/fastboot objects."""

  def setUp(self):
    self.device_utils_mock = _DeviceUtilsMock(_SERIAL)
    self.fastboot_wrapper = _FastbootWrapperMock(_SERIAL)
    # Short timeout / no retries keeps failing tests fast.
    self.fastboot = fastboot_utils.FastbootUtils(
        self.device_utils_mock, fastbooter=self.fastboot_wrapper,
        default_timeout=2, default_retries=0)
    # Pre-populate the board so tests don't query the (mock) device for it.
    self.fastboot._board = _BOARD
class FastbootUtilsInitTest(FastbootUtilsTest):
  """Constructor and module-constant sanity checks."""

  def testInitWithDeviceUtil(self):
    f = fastboot_utils.FastbootUtils(self.device_utils_mock)
    self.assertEqual(str(self.device_utils_mock), str(f._device))

  def testInitWithMissing_fails(self):
    # Both a missing and an empty device argument must be rejected.
    for bad_device in (None, ''):
      with self.assertRaises(AttributeError):
        fastboot_utils.FastbootUtils(bad_device)

  def testPartitionOrdering(self):
    expected = ['bootloader', 'radio', 'boot', 'recovery', 'system',
                'userdata', 'cache', 'vendor']
    self.assertListEqual(fastboot_utils.ALL_PARTITIONS, expected)
class FastbootUtilsWaitForFastbootMode(FastbootUtilsTest):

  # With time.sleep mocked out, the only way this test can fail is by
  # timing out (the original comment here appears truncated — it read
  # "If this test fails by timing out after 1 second.").
  @mock.patch('time.sleep', mock.Mock())
  def testWaitForFastbootMode(self):
    self.fastboot.WaitForFastbootMode()
class FastbootUtilsEnableFastbootMode(FastbootUtilsTest):

  def testEnableFastbootMode(self):
    # Expected sequence: root the device, reboot into the bootloader, then
    # wait for fastboot to come up. The order is part of the contract.
    with self.assertCalls(
        self.call.fastboot._device.EnableRoot(),
        self.call.fastboot._device.adb.Reboot(to_bootloader=True),
        self.call.fastboot.WaitForFastbootMode()):
      self.fastboot.EnableFastbootMode()
class FastbootUtilsReboot(FastbootUtilsTest):
  """Reboot() either stays in the bootloader or fully boots the device."""

  def testReboot_bootloader(self):
    with self.assertCalls(
        self.call.fastboot.fastboot.RebootBootloader(),
        self.call.fastboot.WaitForFastbootMode()):
      self.fastboot.Reboot(bootloader=True)

  def testReboot_normal(self):
    with self.assertCalls(
        self.call.fastboot.fastboot.Reboot(),
        self.call.fastboot._device.WaitUntilFullyBooted(timeout=mock.ANY)):
      self.fastboot.Reboot()
class FastbootUtilsFlashPartitions(FastbootUtilsTest):
  """_FlashPartitions verifies the board, then flashes in order."""

  def testFlashPartitions_wipe(self):
    # wipe=True flashes every partition, including userdata and cache.
    # bootloader and radio are each followed by a bootloader reboot.
    with self.assertCalls(
        (self.call.fastboot._VerifyBoard('test'), True),
        (mock.call.devil.android.fastboot_utils.
         _FindAndVerifyPartitionsAndImages(_PARTITIONS, 'test'), _IMAGES),
        (self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
        (self.call.fastboot.Reboot(bootloader=True)),
        (self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
        (self.call.fastboot.Reboot(bootloader=True)),
        (self.call.fastboot.fastboot.Flash('boot', 'boot.img')),
        (self.call.fastboot.fastboot.Flash('recovery', 'recovery.img')),
        (self.call.fastboot.fastboot.Flash('system', 'system.img')),
        (self.call.fastboot.fastboot.Flash('userdata', 'userdata.img')),
        (self.call.fastboot.fastboot.Flash('cache', 'cache.img'))):
      self.fastboot._FlashPartitions(_PARTITIONS, 'test', wipe=True)

  def testFlashPartitions_noWipe(self):
    # Without wipe, userdata and cache are expected to be skipped.
    with self.assertCalls(
        (self.call.fastboot._VerifyBoard('test'), True),
        (mock.call.devil.android.fastboot_utils.
         _FindAndVerifyPartitionsAndImages(_PARTITIONS, 'test'), _IMAGES),
        (self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
        (self.call.fastboot.Reboot(bootloader=True)),
        (self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
        (self.call.fastboot.Reboot(bootloader=True)),
        (self.call.fastboot.fastboot.Flash('boot', 'boot.img')),
        (self.call.fastboot.fastboot.Flash('recovery', 'recovery.img')),
        (self.call.fastboot.fastboot.Flash('system', 'system.img'))):
      self.fastboot._FlashPartitions(_PARTITIONS, 'test')
class FastbootUtilsFastbootMode(FastbootUtilsTest):
  """FastbootMode() context manager: enter fastboot, restore on exit."""

  def testFastbootMode_goodWait(self):
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=True)):
      with self.fastboot.FastbootMode() as fbm:
        self.assertEqual(self.fastboot, fbm)

  def testFastbootMode_goodNoWait(self):
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=False)):
      with self.fastboot.FastbootMode(wait_for_reboot=False) as fbm:
        self.assertEqual(self.fastboot, fbm)

  def testFastbootMode_exception(self):
    # Even when the body raises, charge mode is restored and the device is
    # rebooted; the exception still propagates to the caller.
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=True)):
      with self.assertRaises(NotImplementedError):
        with self.fastboot.FastbootMode() as fbm:
          self.assertEqual(self.fastboot, fbm)
          raise NotImplementedError

  def testFastbootMode_exceptionInEnableFastboot(self):
    # If entering fastboot itself fails, no cleanup calls are expected.
    self.fastboot.EnableFastbootMode = mock.Mock()
    self.fastboot.EnableFastbootMode.side_effect = NotImplementedError
    with self.assertRaises(NotImplementedError):
      with self.fastboot.FastbootMode():
        pass
class FastbootUtilsVerifyBoard(FastbootUtilsTest):
  """_VerifyBoard checks android-info.txt and/or the <board>.zip name."""

  def testVerifyBoard_bothValid(self):
    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_VALID_FILES):
        self.assertTrue(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_BothNotValid(self):
    mock_file = io.StringIO(u'abc')
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_INVALID_FILES):
        # Fix: this assertion was double-wrapped as
        # assertFalse(assertFalse(...)); assertFalse returns None, so the
        # outer call always passed vacuously. A single assertFalse is the
        # intended check.
        self.assertFalse(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_FileNotFoundZipValid(self):
    with mock.patch('os.listdir', return_value=[_BOARD + '.zip']):
      self.assertTrue(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_ZipNotFoundFileValid(self):
    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=['android-info.txt']):
        self.assertTrue(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_zipNotValidFileIs(self):
    mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_INVALID_FILES):
        self.assertTrue(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_fileNotValidZipIs(self):
    mock_file = io.StringIO(u'require board=WrongBoard')
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_VALID_FILES):
        self.assertFalse(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_noBoardInFileValidZip(self):
    mock_file = io.StringIO(u'Regex wont match')
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_VALID_FILES):
        self.assertTrue(self.fastboot._VerifyBoard('test'))

  def testVerifyBoard_noBoardInFileInvalidZip(self):
    mock_file = io.StringIO(u'Regex wont match')
    with mock.patch('__builtin__.open', return_value=mock_file, create=True):
      with mock.patch('os.listdir', return_value=_INVALID_FILES):
        self.assertFalse(self.fastboot._VerifyBoard('test'))
class FastbootUtilsFindAndVerifyPartitionsAndImages(FastbootUtilsTest):
  """_FindAndVerifyPartitionsAndImages maps partitions to image files."""

  def testFindAndVerifyPartitionsAndImages_validNoVendor(self):
    PARTITIONS = [
        'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata',
        'cache', 'vendor'
    ]
    files = [
        'bootloader-test-.img',
        'radio123.img',
        'boot.img',
        'recovery.img',
        'system.img',
        'userdata.img',
        'cache.img'
    ]
    img_check = collections.OrderedDict([
        ('bootloader', 'test/bootloader-test-.img'),
        ('radio', 'test/radio123.img'),
        ('boot', 'test/boot.img'),
        ('recovery', 'test/recovery.img'),
        ('system', 'test/system.img'),
        ('userdata', 'test/userdata.img'),
        ('cache', 'test/cache.img'),
    ])
    # 'vendor' is requested but has no image on disk, so it must be
    # silently dropped from the result.
    parts_check = [
        'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata',
        'cache'
    ]
    with mock.patch('os.listdir', return_value=files):
      imgs = fastboot_utils._FindAndVerifyPartitionsAndImages(
          PARTITIONS, 'test')
      # NOTE(review): relies on Python 2 dict.keys() returning a list;
      # under Python 3 this would need list(imgs.keys()).
      parts = imgs.keys()
      self.assertDictEqual(imgs, img_check)
      # Order must match the requested partition order (OrderedDict).
      self.assertListEqual(parts, parts_check)

  def testFindAndVerifyPartitionsAndImages_validVendor(self):
    PARTITIONS = [
        'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata',
        'cache', 'vendor'
    ]
    files = [
        'bootloader-test-.img',
        'radio123.img',
        'boot.img',
        'recovery.img',
        'system.img',
        'userdata.img',
        'cache.img',
        'vendor.img'
    ]
    img_check = {
        'bootloader': 'test/bootloader-test-.img',
        'radio': 'test/radio123.img',
        'boot': 'test/boot.img',
        'recovery': 'test/recovery.img',
        'system': 'test/system.img',
        'userdata': 'test/userdata.img',
        'cache': 'test/cache.img',
        'vendor': 'test/vendor.img',
    }
    parts_check = [
        'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata',
        'cache', 'vendor'
    ]
    with mock.patch('os.listdir', return_value=files):
      imgs = fastboot_utils._FindAndVerifyPartitionsAndImages(
          PARTITIONS, 'test')
      parts = imgs.keys()
      self.assertDictEqual(imgs, img_check)
      self.assertListEqual(parts, parts_check)

  def testFindAndVerifyPartitionsAndImages_badPartition(self):
    # Unknown partition names raise KeyError.
    with mock.patch('os.listdir', return_value=['test']):
      with self.assertRaises(KeyError):
        fastboot_utils._FindAndVerifyPartitionsAndImages(['test'], 'test')

  def testFindAndVerifyPartitionsAndImages_noFile(self):
    # A required (non-optional) partition with no image fails hard.
    with mock.patch('os.listdir', return_value=['test']):
      with self.assertRaises(device_errors.FastbootCommandFailedError):
        fastboot_utils._FindAndVerifyPartitionsAndImages(['cache'], 'test')
class FastbootUtilsFlashDevice(FastbootUtilsTest):
  """FlashDevice: full flash flow with charge-mode toggling around it.

  Note: with wipe=True the final Reboot uses wait_for_reboot=False, since
  a wiped device cannot be waited on for full boot.
  """

  def testFlashDevice_wipe(self):
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot._FlashPartitions(mock.ANY, 'test', wipe=True),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=False)):
      self.fastboot.FlashDevice('test', wipe=True)

  def testFlashDevice_noWipe(self):
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot._FlashPartitions(mock.ANY, 'test', wipe=False),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=True)):
      self.fastboot.FlashDevice('test', wipe=False)

  def testFlashDevice_partitions(self):
    with self.assertCalls(
        self.call.fastboot.EnableFastbootMode(),
        self.call.fastboot.fastboot.SetOemOffModeCharge(False),
        self.call.fastboot._FlashPartitions(['boot'], 'test', wipe=False),
        self.call.fastboot.fastboot.SetOemOffModeCharge(True),
        self.call.fastboot.Reboot(wait_for_reboot=True)):
      self.fastboot.FlashDevice('test', partitions=['boot'], wipe=False)
if __name__ == '__main__':
  # Verbose logging + verbose test output when run directly.
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main(verbosity=2)
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import atexit
# The legacy core needs to be removed before "import core", in case users
# installed paddlepaddle without the -U option.
core_suffix = 'pyd' if os.name == 'nt' else 'so'
legacy_core = os.path.abspath(os.path.dirname(
    __file__)) + os.sep + 'core.' + core_suffix
if os.path.exists(legacy_core):
    sys.stderr.write('Deleting legacy file ' + legacy_core + '\n')
    # Let a removal failure propagate with its original traceback; the
    # previous `except Exception as e: raise e` wrapper added nothing.
    os.remove(legacy_core)
# import all class inside framework into fluid module
from . import framework
from .framework import *
# import all class inside executor into fluid module
from . import executor
from .executor import *
from . import data_feed_desc
from .data_feed_desc import *
from . import dataset
from .dataset import *
from .data import *
from . import trainer_desc
from . import io
from . import evaluator
from . import initializer
from .initializer import set_global_initializer
from . import layers
from . import dygraph
from . import contrib
from . import nets
from . import optimizer
from . import backward
from .backward import gradients
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import incubate
from .input import embedding, one_hot
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, Scope, _Scope
from .core import CPUPlace, XPUPlace, CUDAPlace, CUDAPinnedPlace, NPUPlace, IPUPlace, MLUPlace, CustomPlace
from .incubate import fleet
from .transpiler import DistributeTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import parallel_executor
from .parallel_executor import *
from . import compiler
from .compiler import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
from . import install_check
from .dygraph.nn import *
from .dygraph.layers import *
from .dygraph.base import enable_dygraph, disable_dygraph
from .io import save, load, load_program_state, set_program_state
from .dygraph.checkpoint import save_dygraph, load_dygraph
from .dygraph.varbase_patch_methods import monkey_patch_varbase
from . import generator
from .core import _cuda_synchronize
from .generator import Generator
from .trainer_desc import TrainerDesc, DistMultiTrainer, PipelineTrainer, HeterPipelineTrainer, MultiTrainer, HeterXpuTrainer
from .transpiler import HashName, RoundRobin
from .backward import append_backward
# Convenience aliases kept for backward compatibility.
Tensor = LoDTensor
enable_imperative = enable_dygraph
disable_imperative = disable_dygraph
# Public API of paddle.fluid: the re-exported submodule __all__ lists plus
# the explicit names below. (Submodule names like `lod_tensor` resolve here
# because importing a submodule binds it as an attribute of this package.)
__all__ = framework.__all__ + executor.__all__ + \
    trainer_desc.__all__ + transpiler.__all__ + \
    parallel_executor.__all__ + lod_tensor.__all__ + \
    data_feed_desc.__all__ + compiler.__all__ + backward.__all__ + generator.__all__ + [
        'io',
        'initializer',
        'embedding',
        'one_hot',
        'layers',
        'contrib',
        'data',
        'dygraph',
        'enable_dygraph',
        'disable_dygraph',
        'enable_imperative',
        'disable_imperative',
        'transpiler',
        'nets',
        'optimizer',
        'backward',
        'regularizer',
        'LoDTensor',
        'LoDTensorArray',
        'CPUPlace',
        'XPUPlace',
        'CUDAPlace',
        'CUDAPinnedPlace',
        'NPUPlace',
        'IPUPlace',
        'MLUPlace',
        'Tensor',
        'ParamAttr',
        'WeightNormParamAttr',
        'DataFeeder',
        'clip',
        'profiler',
        'unique_name',
        'Scope',
        'install_check',
        'save',
        'load',
        '_cuda_synchronize'
    ]
def __bootstrap__():
    """
    Enable reading gflags from environment variables.

    Returns:
        None
    """
    import sys
    import os
    import platform
    from . import core

    # NOTE(zhiqiu): When (1)numpy < 1.19; (2) python < 3.7,
    # unittest is always imported in numpy (maybe some versions not).
    # so is_test is True and p2p is not inited.
    # NOTE(review): in_test is computed but never read below — confirm
    # whether init_devices() was meant to receive it.
    in_test = 'unittest' in sys.modules
    # OMP_NUM_THREADS falls back to 1 when unset or unparsable.
    try:
        num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
    except ValueError:
        num_threads = 1
    if num_threads > 1:
        print(
            'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
            'speed will not be optimized if you use data parallel. It will '
            'fail if this PaddlePaddle binary is compiled with OpenBlas since'
            ' OpenBlas does not support multi-threads.'.format(num_threads),
            file=sys.stderr)
        print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
    os.environ['OMP_NUM_THREADS'] = str(num_threads)
    # Collect all FLAGS_* gflags known to the core and allow them to be
    # overridden from the environment.
    flag_prefix = "FLAGS_"
    read_env_flags = [
        key[len(flag_prefix):] for key in core.globals().keys()
        if key.startswith(flag_prefix)
    ]

    def remove_flag_if_exists(name):
        if name in read_env_flags:
            read_env_flags.remove(name)

    # Some flags are not applicable on certain platforms.
    sysstr = platform.system()
    if 'Darwin' in sysstr:
        remove_flag_if_exists('use_pinned_memory')
    if os.name == 'nt':
        remove_flag_if_exists('cpu_deterministic')
    if core.is_compiled_with_ipu():
        # Currently we request all ipu available for training and testing
        # finer control of pod of IPUs will be added later
        read_env_flags += []
    core.init_gflags(["--tryfromenv=" + ",".join(read_env_flags)])
    # Note(zhouwei25): sys may not have argv in some cases,
    # Such as: use Python/C API to call Python from C++
    try:
        core.init_glog(sys.argv[0])
    except Exception:
        sys.argv = [""]
        core.init_glog(sys.argv[0])
    # don't init_p2p when in unittest to save time.
    core.init_devices()
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
# Module import side effects: patch Variable/VarBase and run bootstrap once.
monkey_patch_variable()
__bootstrap__()
monkey_patch_varbase()

# NOTE(zhiqiu): register npu_finalize on the exit of Python,
# do some clean up manually.
if core.is_compiled_with_npu():
    atexit.register(core.npu_finalize)
# NOTE(Aurelius84): clean up ExecutorCacheInfo in advance manually.
atexit.register(core.clear_executor_cache)
| |
import os,sys
import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import MySQLdb
import random
from numpy import genfromtxt
from math import pi
from matplotlib.patches import Ellipse
def p4_overplot(image_id):
    """Download a Planet Four (P4) HiRISE image and overplot its crowd-sourced
    fan and blotch markings from the local MySQL database, saving a 2x2-panel
    figure as <image_id>.png.

    Panel 1: image with fan markings (two rays per fan).
    Panels 2 and 4: unannotated image copies for visual comparison.
    Panel 3: image with blotch markings drawn as ellipses.

    NOTE(review): SQL statements are built by string concatenation -- only safe
    while image_id comes from trusted local data; parameterized queries would
    be safer if input ever becomes external.
    """
    db=MySQLdb.connect(db='mysql',host='localhost', user='root')
    cursor=db.cursor()
    cursor.execute("""use P4""")
    # Look up the image's AWS URL and fetch it; ImageMagick converts jpg->png
    # so matplotlib can read it.
    cmd='select image_location from HiRISE_images where zooniverse_id='+'"'+image_id+'"'
    cursor.execute(cmd)
    fetch=cursor.fetchall()
    awslocation=fetch[0][0]
    os.system('wget '+awslocation+' -O tmp.jpg')
    os.system('convert ./tmp.jpg tmp.png')
    print awslocation
    img=mpimg.imread('tmp.png')
    os.remove('tmp.png')
    os.remove('tmp.jpg')
    # Image extent in pixels (rows = y, columns = x).
    maxy=len(img[:,0])
    maxx=len(img[0,:])
    #print maxx
    f=plt.figure()
    plt.subplots_adjust(top=0.85)
    # Panel 1: fans.
    f.add_subplot(2, 2, 1)
    imgplot = plt.imshow(img)
    # One entry per distinct classification (i.e. per user session).
    cmd='select distinct classification_id from annotations where image_id='+'"'+image_id+'"'
    cursor.execute(cmd)
    users=cursor.fetchall()
    print len(users)
    j=0
    fans=np.zeros(len(users),dtype=np.int)
    plt.title(image_id)
    for i in users:
        #print i[0]
        # Count of fan markings for this classification (kept in `fans`).
        cmd='select count(marking) from annotations where image_id='+'"'+image_id+'"'+' and classification_id="'+i[0]+'" and marking="fan"'
        cursor.execute(cmd)
        fetch=cursor.fetchall()
        fans[j]=fetch[0][0]
        j=j+1
        cmd='select x,y,distance, angle, spread from annotations where image_id='+'"'+image_id+'"'+' and classification_id="'+i[0]+'" and X IS NOT NULL and marking="fan"'
        cursor.execute(cmd)
        fetch=cursor.fetchall()
        #print fetch
        if (len(fetch) ==0):
            continue
        index=np.asarray(fetch)
        fetch=0
        # Column extraction by deleting all the *other* columns; the result
        # order is (x, y, distance, angle, spread).
        # NOTE(review): equivalent to slicing, e.g. index[:, 0:1] for x.
        x=np.copy(index)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.asarray(x, dtype=np.float)
        y=np.copy(index)
        y=np.delete(y, 0,1)
        y=np.delete(y, 1,1)
        y=np.delete(y, 1,1)
        y=np.delete(y, 1,1)
        y=np.asarray(y, dtype=np.float)
        distance=np.copy(index)
        distance=np.delete(distance, 0,1)
        distance=np.delete(distance, 0,1)
        distance=np.delete(distance, 1,1)
        distance=np.delete(distance, 1,1)
        print distance
        distance=np.asarray(distance, dtype=np.float)
        angle=np.copy(index)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 1,1)
        print angle
        angle=np.asarray(angle, dtype=np.float)
        spread=np.copy(index)
        spread=np.delete(spread, 0,1)
        spread=np.delete(spread, 0,1)
        spread=np.delete(spread, 0,1)
        spread=np.delete(spread, 0,1)
        print spread
        spread=np.asarray(spread, dtype=np.float)
        # since it's an isosceles triangle we actually want to use half of spread
        # so let's just do that now
        spread=spread/2.0
        x21=np.zeros(len(x), dtype=np.float)
        y21=np.zeros(len(y), dtype=np.float)
        x22=np.zeros(len(x), dtype=np.float)
        y22=np.zeros(len(y), dtype=np.float)
        # The two fan edges point along angle +/- spread/2; each endpoint is
        # `distance` away from the fan apex (all angles in degrees).
        x21=np.cos(angle*pi/180.0)-(np.tan(spread*pi/180.0)*np.sin(angle*pi/180.0))
        x21=x21*distance
        y21=np.sin(angle*pi/180.0)+(np.tan(spread*pi/180.0)*np.cos(angle*pi/180.0))
        y21=distance*y21
        x22=np.cos(angle*pi/180.0)+(np.tan(spread*pi/180.0)*np.sin(angle*pi/180.0))
        x22=x22*distance
        y22=np.sin(angle*pi/180.0)-(np.tan(spread*pi/180.0)*np.cos(angle*pi/180.0))
        y22=y22*distance
        # Translate from the fan-local origin (0,0) to the marking position
        # (x,y) in image coordinates.
        x21=x+x21
        y21=y+y21
        x22=x+x22
        y22=y+y22
        #print y
        # NOTE(review): inner loop variable `i` shadows the outer `for i in
        # users` variable; harmless here because `i` is re-bound each outer
        # iteration, but confusing.
        for i in np.arange(len(x)):
            plt.plot([x[i], x21[i]], [y[i], y21[i]], color='blue')
            plt.plot([x[i], x22[i]], [y[i], y22[i]], color='blue')
    # Flip y so the image origin is top-left, matching pixel coordinates.
    plt.ylim([maxy,0])
    plt.xlim([0,maxx])
    # Panel 2: plain image; panel 3: blotch overlays.
    f.add_subplot(2, 2, 2)
    imgplot = plt.imshow(img)
    f.add_subplot(2, 2, 3)
    imgplot = plt.imshow(img)
    blotches=np.zeros(len(users),dtype=np.int)
    j=0
    for i in users:
        #print i[0]
        cmd='select count(marking) from annotations where image_id='+'"'+image_id+'"'+' and classification_id="'+i[0]+'" and marking="blotch"'
        cursor.execute(cmd)
        fetch=cursor.fetchall()
        blotches[j]=fetch[0][0]
        j=j+1
        cmd='select x,y,radius_1,radius_2, angle from annotations where image_id='+'"'+image_id+'"'+' and classification_id="'+i[0]+'" and X IS NOT NULL and marking="blotch"'
        cursor.execute(cmd)
        fetch=cursor.fetchall()
        #print fetch
        if (len(fetch) ==0):
            continue
        index=np.asarray(fetch)
        fetch=0
        # Columns here are (x, y, radius_1, radius_2, angle).
        x=np.copy(index)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.delete(x, 1,1)
        x=np.asarray(x, dtype=np.float)
        y=np.copy(index)
        y=np.delete(y, 0,1)
        y=np.delete(y, 1,1)
        y=np.delete(y, 1,1)
        y=np.delete(y, 1,1)
        y=np.asarray(y, dtype=np.float)
        radius_1=np.copy(index)
        radius_1=np.delete(radius_1, 0,1)
        radius_1=np.delete(radius_1, 0,1)
        radius_1=np.delete(radius_1, 1,1)
        radius_1=np.delete(radius_1, 1,1)
        print radius_1
        radius_1=np.asarray(radius_1, dtype=np.float)
        radius_2=np.copy(index)
        radius_2=np.delete(radius_2, 0,1)
        radius_2=np.delete(radius_2, 0,1)
        radius_2=np.delete(radius_2, 0,1)
        radius_2=np.delete(radius_2, 1,1)
        radius_2=np.asarray(radius_2, dtype=np.float)
        angle=np.copy(index)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 0,1)
        angle=np.delete(angle, 0,1)
        angle=np.asarray(angle, dtype=np.float)
        # NOTE(review): this whole-array Ellipse is never added to the axes and
        # is immediately overwritten in the loop below -- appears to be dead.
        e1 = Ellipse((x, y), radius_1, radius_2, angle=angle, linewidth=2, fill=False, zorder=2)
        ax = plt.gca()
        for i in np.arange(len(x)):
            e1 = Ellipse((x[i], y[i]), radius_1[i], radius_2[i], angle=angle[i], linewidth=0.5, fill=False, zorder=2, color='red')
            ax.add_patch(e1)
    plt.ylim([maxy,0])
    plt.xlim([0,maxx])
    # Panel 4: plain image.
    f.add_subplot(2, 2, 4)
    imgplot = plt.imshow(img)
    #plt.show()
    plt.savefig(image_id+'.png', bbox_inches='tight', pad_inches=0.01)
    cursor.close()
    db.close()
#fs= genfromtxt('done_with_markings.csv', delimiter=',', dtype=np.str)
#print fs[0]
#counter=len(fs)
#index=random.sample(xrange(counter), 2000)
# Hard-coded single image for this run; the commented-out lines above show how
# a random sample of image ids would be drawn from the CSV instead.
index=[0]
fs=['APF0000our']
print index
for i in index:
    print fs[i]
    # start of the main
    #image_id='APF0000006'
    image_id=fs[i]
    print image_id
    print fs[i]
    p4_overplot(image_id)
| |
# coding: utf8
"""
Functionality to read and write the Newick serialization format for trees.
.. seealso:: https://en.wikipedia.org/wiki/Newick_format
"""
from __future__ import unicode_literals
import io
import re
# Characters with syntactic meaning in Newick; disallowed in node names and
# branch-length strings.
RESERVED_PUNCTUATION = ':;,()'
# Matches one bracketed Newick comment, e.g. "[a comment]". Raw string avoids
# the invalid escape sequences \[ and \] that the original non-raw literal
# produced (a DeprecationWarning/SyntaxWarning on modern Python).
COMMENT = re.compile(r'\[[^\]]*\]')
def length_parser(x):
    """Default branch-length parser: None/empty becomes 0.0, else float(x)."""
    if not x:
        return 0.0
    return float(x)
def length_formatter(x):
    """Default branch-length formatter: plain string conversion."""
    return str(x)
class Node(object):
    """
    A Node may be a tree, a subtree or a leaf.

    A Node has optional name and length (from parent) and a (possibly empty) list of
    descendants. It further has an ancestor, which is *None* if the node is the
    root node of a tree.
    """
    def __init__(self, name=None, length=None, **kw):
        """
        :param name: Node label.
        :param length: Branch length from the new node to its parent, as a string.
        :param kw: Recognized keyword arguments:\
            `length_parser`: Custom parser for the `length` attribute of a Node.\
            `length_formatter`: Custom formatter for the branch length when formatting a\
            Node as Newick string.
        """
        # Names and (string-typed) lengths must not contain Newick syntax chars.
        for char in RESERVED_PUNCTUATION:
            if (name and char in name) or (length and char in length):
                raise ValueError(
                    'Node names or branch lengths must not contain "%s"' % char)
        self.name = name
        # The branch length is stored in raw string form and parsed lazily.
        self._length = length
        self.descendants = []
        self.ancestor = None
        self._length_parser = kw.pop('length_parser', length_parser)
        self._length_formatter = kw.pop('length_formatter', length_formatter)

    def __repr__(self):
        return 'Node("%s")' % self.name

    @property
    def length(self):
        """The branch length as parsed by `length_parser` (0.0 when unset)."""
        return self._length_parser(self._length)

    @length.setter
    def length(self, l):
        # Store None as-is; everything else goes through the formatter so the
        # raw `_length` string stays consistent with the Newick output.
        if l is None:
            self._length = l
        else:
            self._length = self._length_formatter(l)

    @classmethod
    def create(cls, name=None, length=None, descendants=None, **kw):
        """
        Create a new `Node` object.

        :param name: Node label.
        :param length: Branch length from the new node to its parent.
        :param descendants: list of descendants or `None`.
        :param kw: Additional keyword arguments are passed through to `Node.__init__`.
        :return: `Node` instance.
        """
        node = cls(name=name, length=length, **kw)
        for descendant in descendants or []:
            node.add_descendant(descendant)
        return node

    def add_descendant(self, node):
        """Attach *node* as a child of self (setting its ancestor link)."""
        node.ancestor = self
        self.descendants.append(node)

    @property
    def newick(self):
        """The representation of the Node in Newick format."""
        label = self.name or ''
        if self._length:
            label += ':' + self._length
        descendants = ','.join([n.newick for n in self.descendants])
        if descendants:
            descendants = '(' + descendants + ')'
        return descendants + label

    @property
    def is_leaf(self):
        """True if this node has no descendants."""
        return not bool(self.descendants)

    @property
    def is_binary(self):
        """True if every node in the subtree has either 0 or 2 descendants."""
        return all([len(n.descendants) in (0, 2) for n in self.walk()])

    def walk(self, mode=None):
        """
        Traverses the (sub)tree rooted at self, yielding each visited Node.

        .. seealso:: https://en.wikipedia.org/wiki/Tree_traversal

        :param mode: Specifies the algorithm to use when traversing the subtree rooted \
        at self. `None` for the default traversal, `'postorder'` for post-order \
        depth-first search.
        :return: Generator of the visited Nodes.

        NOTE(review): the default branch below is a *pre-order depth-first*
        traversal (yield self, then fully recurse into each child), not a
        breadth-first search as earlier docs suggested.
        """
        if mode == 'postorder':
            for n in self._postorder():
                yield n
        else:  # default: pre-order depth-first traversal
            yield self
            for node in self.descendants:
                for n in node.walk():
                    yield n

    def visit(self, visitor, predicate=None, **kw):
        """
        Apply a function to matching nodes in the (sub)tree rooted at self.

        :param visitor: A callable accepting a Node object as single argument.
        :param predicate: A callable accepting a Node object as single argument and \
        returning a boolean signaling whether Node matches; if `None` all nodes match.
        :param kw: Additional keyword arguments are passed through to self.walk.
        """
        predicate = predicate or bool
        for n in self.walk(**kw):
            if predicate(n):
                visitor(n)

    def _postorder(self):
        # Iterative post-order traversal: a node is yielded only after all of
        # its descendants (tracked per-node in descendant_map) are done.
        stack = [self]
        descendant_map = {id(node): [n for n in node.descendants] for node in self.walk()}
        while stack:
            node = stack[-1]
            descendants = descendant_map[id(node)]

            # if we are at a leaf node (no pending children), pop and yield it
            if not descendants:
                stack.pop()
                yield node
                if stack:
                    # mark this node as processed in its parent's pending list
                    descendant_map[id(stack[-1])].pop(0)
            else:
                stack.append(descendants[0])

    def get_leaves(self):
        """
        Get all the leaf nodes of the subtree descending from this node.

        :return: List of Nodes with no descendants.
        """
        return [n for n in self.walk() if n.is_leaf]

    def get_node(self, label):
        """
        Gets the specified node by name.

        :return: Node, or None if the name does not exist in the tree.
        """
        for n in self.walk():
            if n.name == label:
                return n

    def get_leaf_names(self):
        """
        Get the names of all the leaf nodes of the subtree descending from
        this node.

        :return: List of names of Nodes with no descendants.
        """
        return [n.name for n in self.get_leaves()]

    def prune(self, leaves, inverse=False):
        """
        Remove all those nodes in the specified list, or if inverse=True,
        remove all those nodes not in the specified list. The specified nodes
        must be leaves and distinct from the root node.

        :param leaves: A list of Node objects.
        :param inverse: Specifies whether to remove nodes in the list or not\
        in the list.
        """
        self.visit(
            lambda n: n.ancestor.descendants.remove(n),
            # We won't prune the root node, even if it is a leaf and requested
            # to be pruned! (`and n.ancestor` guards against the root.)
            lambda n: ((not inverse and n in leaves) or
                       (inverse and n.is_leaf and n not in leaves)) and n.ancestor,
            mode="postorder")

    def prune_by_names(self, leaf_names, inverse=False):
        """
        Perform an (inverse) prune, with leaves specified by name.

        :param leaf_names: A list of leaf Node names (strings).
        :param inverse: Specifies whether to remove nodes in the list or not\
        in the list.
        """
        self.prune([l for l in self.walk() if l.name in leaf_names], inverse)

    def remove_redundant_nodes(self, preserve_lengths=True):
        """
        Remove all nodes which have only a single child, and attach their
        grandchildren to their parent. The resulting tree has the minimum
        number of internal nodes required for the number of leaves.

        :param preserve_lengths: If true, branch lengths of removed nodes are \
        added to those of their children.
        """
        for n in self.walk(mode='postorder'):
            while n.ancestor and len(n.ancestor.descendants) == 1:
                grandfather = n.ancestor.ancestor
                father = n.ancestor

                if preserve_lengths:
                    n.length += father.length

                if grandfather:
                    # Splice n into grandfather's children in father's place.
                    for i, child in enumerate(grandfather.descendants):
                        if child is father:
                            del grandfather.descendants[i]
                    grandfather.add_descendant(n)
                    father.ancestor = None
                else:
                    # father is the root: n's subtree replaces the root's contents.
                    self.descendants = n.descendants
                    if preserve_lengths:
                        self.length = n.length

    def resolve_polytomies(self):
        """
        Insert additional nodes with length=0 into the subtree in such a way
        that all non-leaf nodes have only 2 descendants, i.e. the tree becomes
        a fully resolved binary tree.
        """
        def _resolve_polytomies(n):
            # The new inner node gets a zero length expressed through this
            # tree's formatter/parser pair, so custom formats round-trip.
            new = Node(length=self._length_formatter(self._length_parser('0')))
            while len(n.descendants) > 1:
                new.add_descendant(n.descendants.pop())
            n.descendants.append(new)

        self.visit(_resolve_polytomies, lambda n: len(n.descendants) > 2)

    def remove_names(self):
        """
        Set the name of all nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None))

    def remove_internal_names(self):
        """
        Set the name of all non-leaf nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)

    def remove_leaf_names(self):
        """
        Set the name of all leaf nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'name', None), lambda n: n.is_leaf)

    def remove_lengths(self):
        """
        Set the length of all nodes in the subtree to None.
        """
        self.visit(lambda n: setattr(n, 'length', None))
def loads(s, strip_comments=False, **kw):
    """
    Load a list of trees from a Newick formatted string.

    :param s: Newick formatted string.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    trees = []
    # Trees are separated by ';'; blank fragments (e.g. after a trailing
    # semicolon) are skipped.
    for fragment in s.split(';'):
        fragment = fragment.strip()
        if fragment:
            trees.append(parse_node(fragment, **kw))
    return trees
def dumps(trees):
    """
    Serialize a list of trees in Newick format.

    :param trees: List of Node objects or a single Node object.
    :return: Newick formatted string.
    """
    # Accept a single tree for convenience.
    if isinstance(trees, Node):
        trees = [trees]
    serialized = [tree.newick for tree in trees]
    return '%s;' % ';\n'.join(serialized)
def load(fp, strip_comments=False, **kw):
    """
    Load a list of trees from an open Newick formatted file.

    :param fp: open file handle.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    content = fp.read()
    return loads(content, **kw)
def dump(tree, fp):
    """Serialize *tree* (a Node or list of Nodes) in Newick format to the open
    file handle *fp*."""
    fp.write(dumps(tree))
def read(fname, encoding='utf8', strip_comments=False, **kw):
    """
    Load a list of trees from a Newick formatted file.

    :param fname: file path.
    :param encoding: text encoding of the file.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: List of Node objects.
    """
    kw['strip_comments'] = strip_comments
    fp = io.open(fname, encoding=encoding)
    try:
        return load(fp, **kw)
    finally:
        fp.close()
def write(tree, fname, encoding='utf8'):
    """Serialize *tree* in Newick format to the file at *fname*."""
    with io.open(fname, encoding=encoding, mode='w') as fp:
        dump(tree, fp)
def _parse_name_and_length(s):
l = None
if ':' in s:
s, l = s.split(':', 1)
return s or None, l or None
def _parse_siblings(s, **kw):
    """
    Yield a Node for each comma-separated sibling at bracket depth 0.

    Based on http://stackoverflow.com/a/26809037
    """
    depth = 0
    buf = []
    # Appending a sentinel comma flushes the final sibling without a
    # special case after the loop.
    for ch in s + ",":
        if ch == "," and depth == 0:
            yield parse_node("".join(buf), **kw)
            buf = []
            continue
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
        buf.append(ch)
def parse_node(s, strip_comments=False, **kw):
    """
    Parse a Newick formatted string into a `Node` object.

    :param s: Newick formatted string to parse.
    :param strip_comments: Flag signaling whether to strip comments enclosed in square \
    brackets.
    :param kw: Keyword arguments are passed through to `Node.create`.
    :return: `Node` instance.
    """
    if strip_comments:
        s = COMMENT.sub('', s)
    s = s.strip()
    parts = s.split(')')
    if len(parts) == 1:
        # No closing bracket at all: the whole string is a bare leaf label.
        descendants = []
        label = s
    else:
        if not parts[0].startswith('('):
            raise ValueError('unmatched braces %s' % parts[0][:100])
        # Everything before the last ')' (minus the leading '(') holds the
        # comma-separated children; the rest is this node's own label.
        inner = ')'.join(parts[:-1])[1:]
        descendants = list(_parse_siblings(inner, **kw))
        label = parts[-1]
    name, length = _parse_name_and_length(label)
    return Node.create(name=name, length=length, descendants=descendants, **kw)
| |
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
# pylint: disable=W
import math
class GlobalMercator(object):
    """
    TMS Global Mercator Profile
    ---------------------------

    Functions necessary for generation of tiles in Spherical Mercator projection,
    EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.

    Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
    UK Ordnance Survey OpenSpace API, ...
    and you can overlay them on top of base maps of those web mapping applications.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Mercator tiles::

         LatLon      <->       Meters      <->     Pixels    <->       Tile

     WGS84 coordinates   Spherical Mercator  Pixels in pyramid  Tiles in pyramid
         lat/lon            XY in metres     XY pixels Z zoom      XYZ from TMS
        EPSG:4326           EPSG:900913
         .----.              ---------               --                TMS
        /      \     <->     |       |     <->     /----/    <->      Google
        \      /             |       |           /--------/          QuadTree
         -----               ---------         /------------/
       KML, public         WebMapService         Web Clients      TileMapService

    What is the coordinate extent of Earth in EPSG:900913?

      [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
      Constant 20037508.342789244 comes from the circumference of the Earth in meters,
      which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
      In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
      $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
      Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.

    What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?

      whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
      every lower zoom level resolution is always divided by two
      initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062

    What is the difference between TMS and Google Maps/QuadTree tile name convention?

      The tile raster itself is the same (equal extent, projection, pixel size),
      there is just different identification of the same raster tile.
      Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
      Google placed the origin [0,0] to the top-left corner, reference is XYZ.
      Microsoft is referencing tiles by a QuadTree name, defined on the website:
      http://msdn2.microsoft.com/en-us/library/bb259689.aspx

    The lat/lon coordinates are using WGS84 datum, yes?

      Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
      Well, the web clients like Google Maps are projecting those coordinates by
      Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
      they were on the WGS84 ellipsoid.

      From MSDN documentation:
      To simplify the calculations, we use the spherical form of projection, not
      the ellipsoidal form. Since the projection is used only for map display,
      and not for displaying numeric coordinates, we don't need the extra precision
      of an ellipsoidal projection. The spherical projection causes approximately
      0.33 percent scale distortion in the Y direction, which is not visually
      noticeable.

    How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?

      You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
      All of the tools supports -t_srs 'epsg:900913'.

      For other GIS programs check the exact definition of the projection:
      More info at http://spatialreference.org/ref/user/google-projection/
      The same projection is defined as EPSG:3785. WKT definition is in the official
      EPSG database.

      Proj4 Text:
        +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
        +k=1.0 +units=m +nadgrids=@null +no_defs

      Human readable WKT format of EPGS:900913:
         PROJCS["Google Maps Global Mercator",
             GEOGCS["WGS 84",
                 DATUM["WGS_1984",
                     SPHEROID["WGS 84",6378137,298.2572235630016,
                         AUTHORITY["EPSG","7030"]],
                     AUTHORITY["EPSG","6326"]],
                 PRIMEM["Greenwich",0],
                 UNIT["degree",0.0174532925199433],
                 AUTHORITY["EPSG","4326"]],
             PROJECTION["Mercator_1SP"],
             PARAMETER["central_meridian",0],
             PARAMETER["scale_factor",1],
             PARAMETER["false_easting",0],
             PARAMETER["false_northing",0],
             UNIT["metre",1,
                 AUTHORITY["EPSG","9001"]]]
    """

    def __init__(self, tileSize=256):
        "Initialize the TMS Global Mercator pyramid"
        self.tileSize = tileSize
        self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
        # 156543.03392804062 for tileSize 256 pixels
        self.originShift = 2 * math.pi * 6378137 / 2.0
        # 20037508.342789244

    def LatLonToMeters(self, lat, lon ):
        "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
        mx = lon * self.originShift / 180.0
        my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
        my = my * self.originShift / 180.0
        return mx, my

    def MetersToLatLon(self, mx, my ):
        "Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
        lon = (mx / self.originShift) * 180.0
        lat = (my / self.originShift) * 180.0
        # Inverse of the Mercator latitude stretch.
        lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
        return lat, lon

    def PixelsToMeters(self, px, py, zoom):
        "Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
        res = self.Resolution( zoom )
        mx = px * res - self.originShift
        my = py * res - self.originShift
        return mx, my

    def MetersToPixels(self, mx, my, zoom):
        "Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
        res = self.Resolution( zoom )
        px = (mx + self.originShift) / res
        py = (my + self.originShift) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns a tile covering region in given pixel coordinates"
        tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
        ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
        return tx, ty

    def PixelsToRaster(self, px, py, zoom):
        "Move the origin of pixel coordinates to top-left corner"
        mapSize = self.tileSize << zoom
        return px, mapSize - py

    def MetersToTile(self, mx, my, zoom):
        "Returns tile for given mercator coordinates"
        px, py = self.MetersToPixels( mx, my, zoom)
        return self.PixelsToTile( px, py)

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in EPSG:900913 coordinates"
        minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
        maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
        return ( minx, miny, maxx, maxy )

    def TileLatLonBounds(self, tx, ty, zoom ):
        "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
        bounds = self.TileBounds( tx, ty, zoom)
        minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
        maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
        return ( minLat, minLon, maxLat, maxLon )

    def Resolution(self, zoom ):
        "Resolution (meters/pixel) for given zoom level (measured at Equator)"
        # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
        return self.initialResolution / (2**zoom)

    def ZoomForPixelSize(self, pixelSize ):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
        # NOTE(review): if pixelSize is smaller than the zoom-29 resolution the
        # loop falls through and the method implicitly returns None.
        for i in range(30):
            if pixelSize > self.Resolution(i):
                return i-1 if i!=0 else 0  # We don't want to scale up

    def GoogleTile(self, tx, ty, zoom):
        "Converts TMS tile coordinates to Google Tile coordinates"
        # coordinate origin is moved from bottom-left to top-left corner of the extent
        return tx, (2**zoom - 1) - ty

    def QuadTree(self, tx, ty, zoom ):
        "Converts TMS tile coordinates to Microsoft QuadTree"
        quadKey = ""
        # Microsoft counts from the top-left, so flip the TMS y first.
        ty = (2**zoom - 1) - ty
        for i in range(zoom, 0, -1):
            digit = 0
            mask = 1 << (i-1)
            if (tx & mask) != 0:
                digit += 1
            if (ty & mask) != 0:
                digit += 2
            quadKey += str(digit)
        return quadKey
#---------------------
class GlobalGeodetic(object):
    """
    TMS Global Geodetic Profile
    ---------------------------

    Functions necessary for generation of global tiles in Plate Carre projection,
    EPSG:4326, "unprojected profile".

    Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
    and you can overlay the tiles on top of OpenLayers base map.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Geodetic tiles?

      Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
      directly as planar coordinates XY (it is also called Unprojected or Plate
      Carre). We need only scaling to pixel pyramid and cutting to tiles.
      Pyramid has on top level two tiles, so it is not square but rectangle.
      Area [-180,-90,180,90] is scaled to 512x256 pixels.
      TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
      Rasters are in EPSG:4326 and therefore are compatible with Google Earth.

         LatLon      <->      Pixels      <->     Tiles

     WGS84 coordinates   Pixels in pyramid  Tiles in pyramid
         lat/lon         XY pixels Z zoom      XYZ from TMS
        EPSG:4326
         .----.                ----
        /      \     <->    /--------/    <->      TMS
        \      /         /--------------/
         -----        /--------------------/
       WMS, KML    Web Clients, Google Earth  TileMapService
    """

    def __init__(self, tileSize = 256):
        self.tileSize = tileSize

    def LatLonToPixels(self, lat, lon, zoom):
        "Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
        res = self.Resolution(zoom)
        # BUGFIX: x follows longitude (range +/-180) and y follows latitude
        # (range +/-90); the original implementation had them swapped.
        px = (180 + lon) / res
        py = (90 + lat) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns coordinates of the tile covering region in pixel coordinates"
        tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
        ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
        return tx, ty

    def Resolution(self, zoom ):
        "Resolution (arc/pixel) for given zoom level (measured at Equator)"
        # Use the configured tile size instead of a hard-coded 256, so
        # non-default tile sizes are honored (identical result for 256).
        return 180.0 / self.tileSize / 2**zoom
        #return 180 / float( 1 << (8+zoom) )

    @staticmethod
    def TileBounds(tx, ty, zoom):
        "Returns bounds of the given tile"
        # NOTE: static method, so it assumes the default 256-pixel tile size.
        res = 180 / 256.0 / 2**zoom
        return (
            tx*256*res - 180,
            ty*256*res - 90,
            (tx+1)*256*res - 180,
            (ty+1)*256*res - 90
        )
if __name__ == "__main__":
    import sys

    def Usage(s = ""):
        # Print the help text (plus an optional error message) and exit(1).
        print "Usage: globalmaptiles.py [-profile 'mercator'|'geodetic'] zoomlevel lat lon [latmax lonmax]"
        print
        if s:
            print s
            print
        print "This utility prints for given WGS84 lat/lon coordinates (or bounding box) the list of tiles"
        print "covering specified area. Tiles are in the given 'profile' (default is Google Maps 'mercator')"
        print "and in the given pyramid 'zoomlevel'."
        print "For each tile several information is printed including bonding box in EPSG:900913 and WGS84."
        sys.exit(1)

    profile = 'mercator'
    zoomlevel = None
    lat, lon, latmax, lonmax = None, None, None, None
    boundingbox = False

    # Minimal hand-rolled argument parsing: positional order is
    # zoomlevel lat lon [latmax lonmax], with an optional leading -profile.
    argv = sys.argv
    i = 1
    while i < len(argv):
        arg = argv[i]

        if arg == '-profile':
            i = i + 1
            profile = argv[i]

        # NOTE(review): this is a plain `if`, not `elif` -- after consuming
        # '-profile <name>' the profile value also reaches the positional
        # chain below; reconstructed from the original's flat layout, confirm.
        if zoomlevel is None:
            zoomlevel = int(argv[i])
        elif lat is None:
            lat = float(argv[i])
        elif lon is None:
            lon = float(argv[i])
        elif latmax is None:
            latmax = float(argv[i])
        elif lonmax is None:
            lonmax = float(argv[i])
        else:
            Usage("ERROR: Too many parameters")

        i = i + 1

    if profile != 'mercator':
        Usage("ERROR: Sorry, given profile is not implemented yet.")

    if zoomlevel == None or lat == None or lon == None:
        Usage("ERROR: Specify at least 'zoomlevel', 'lat' and 'lon'.")

    if latmax is not None and lonmax is None:
        Usage("ERROR: Both 'latmax' and 'lonmax' must be given.")

    if latmax != None and lonmax != None:
        if latmax < lat:
            Usage("ERROR: 'latmax' must be bigger then 'lat'")
        if lonmax < lon:
            Usage("ERROR: 'lonmax' must be bigger then 'lon'")
        boundingbox = (lon, lat, lonmax, latmax)

    tz = zoomlevel
    mercator = GlobalMercator()

    mx, my = mercator.LatLonToMeters( lat, lon )
    print "Spherical Mercator (ESPG:900913) coordinates for lat/lon: "
    print (mx, my)
    tminx, tminy = mercator.MetersToTile( mx, my, tz )

    if boundingbox:
        mx, my = mercator.LatLonToMeters( latmax, lonmax )
        print "Spherical Mercator (ESPG:900913) cooridnate for maxlat/maxlon: "
        print (mx, my)
        tmaxx, tmaxy = mercator.MetersToTile( mx, my, tz )
    else:
        # Single point: the bounding box degenerates to one tile.
        tmaxx, tmaxy = tminx, tminy

    # Print every tile in the requested range with its TMS/Google/QuadTree ids
    # and its extent in both EPSG:900913 and WGS84.
    for ty in range(tminy, tmaxy+1):
        for tx in range(tminx, tmaxx+1):
            tilefilename = "%s/%s/%s" % (tz, tx, ty)
            print tilefilename, "( TileMapService: z / x / y )"

            gx, gy = mercator.GoogleTile(tx, ty, tz)
            print "\tGoogle:", gx, gy
            quadkey = mercator.QuadTree(tx, ty, tz)
            print "\tQuadkey:", quadkey, '(',int(quadkey, 4),')'
            bounds = mercator.TileBounds( tx, ty, tz)
            print
            print "\tEPSG:900913 Extent: ", bounds
            wgsbounds = mercator.TileLatLonBounds( tx, ty, tz)
            print "\tWGS84 Extent:", wgsbounds
            print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % (
                bounds[0], bounds[1], bounds[2], bounds[3], "<your-raster-file-in-epsg900913.ext>", tz, tx, ty)
            print
| |
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# This file is a part of DynDNSUpdate
#
# Copyright (c) 2014-2018 Pierre GINDRAUD
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""DynDNSUpdate program
A simple DynDNS client
"""
# System imports
import argparse
from base64 import b64encode
import http.client
import logging
import re
import socket
import ssl
import sys
import urllib
import urllib.parse
# Global project declarations
__version__ = '2.0.0'
class DynDNSUpdate(object):
  """An instance of a dyn client

  This class represents an instance of a dyn dns client up until it makes
  its http query to update a remote dns server entry.
  """
  # define the http protocol string
  REG_E_PROTO = 'https?'
  # match a exact ipv4 address
  REG_E_IPV4 = '(?:(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])'
  # according to RFC 1123 define an hostname
  REG_E_HOST = '(?:(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*(?:[A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])'
  # match the exact value of a port number
  REG_E_PORT = '(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])'
  # match a resource's path
  REG_E_PATH = '/(?:(?:[a-zA-Z0-9-_~.%]+/?)*)?'
  # match some http parameters
  # NOTE(review): appears unused by REG_E_URL below — kept for reference
  REG_E_QUERY = '\?(?:&?[a-zA-Z0-9-_~.%]+=?[a-zA-Z0-9-_~.%]*)+'
  # an URL is defined by :
  # PROTO+AUTH+IP|HOST+PORT+PATH+QUERY
  REG_E_URL = ('^(?P<url>(?:(?P<proto>' + REG_E_PROTO + ')://)?' +  # HTTP
               '(?P<host>' + REG_E_IPV4 + '|' + REG_E_HOST + ')' +  # HOST or IP
               '(?P<port>:' + REG_E_PORT + ')?' +  # PORT
               '(?P<path>' + REG_E_PATH + ')?' +  # PATH
               ')$')
  # an ip address is version 4
  REG_E_IP = '^(?P<ipv4>' + REG_E_IPV4 + ')$'  # IP matching
  # re match object
  RE_URL = re.compile(REG_E_URL)
  RE_IP = re.compile(REG_E_IP)

  def __init__(self):
    """Constructor : Build an launcher for dynupdate"""
    # Network required
    # parsed url parts (groupdict of RE_URL), filled by configure()
    self.__server_url = None
    self.__server_api_url = '/nic/update'
    self.__server_username = None
    self.__server_password = None
    # when True, TLS certificate verification is skipped in __query()
    self.__tls_insecure = False
    # The HTTP timeout
    self.__timeout = 5
    # init logger
    self.__logger = logging.getLogger('dynupdate')
    self.__logger.setLevel(logging.DEBUG)
    # remove all previously defined handlers
    for handler in self.__logger.handlers:
      self.__logger.removeHandler(handler)
    # default format for all handlers
    out_formatter = logging.Formatter("%(levelname)s [%(name)s] : %(message)s")
    # register stdout handler
    self.__logger_stdout = logging.StreamHandler(sys.stdout)
    self.__logger_stdout.setFormatter(out_formatter)
    self.__logger_stdout.setLevel(logging.INFO)
    self.__logger.addHandler(self.__logger_stdout)
    # register stderr handler (muted by default: threshold above CRITICAL)
    self.__logger_stderr = logging.StreamHandler(sys.stderr)
    self.__logger_stderr.setFormatter(out_formatter)
    self.__logger_stderr.setLevel(logging.CRITICAL+1)
    self.__logger.addHandler(self.__logger_stderr)
    # DYNDNS protocol
    # for detail see https://help.dyn.com/remote-access-api/perform-update/
    self.__fields = dict()
    # Identify update type
    # "dyndns", "statdns"
    self.__fields['system'] = 'dyndns'
    # A comma separated list of host to update (max 20)
    self.__fields['hostname'] = ''
    # The IP address to set.
    # If not set or incorrect the server will choose himself an IP
    self.__fields['myip'] = ''
    # Parameter enables or disables wildcards for this host.
    # Values : "ON","NOCHG","OFF"
    self.__fields['wildcard'] = 'NOCHG'
    # Specify an eMail eXchanger
    self.__fields['mx'] = ''
    # Requests the MX in the previous parameter to be set up as a backup MX
    # by listing the host itself as an MX with a lower preference value.
    # Values : "ON","NOCHG","OFF"
    self.__fields['backmx'] = 'NOCHG'
    # Set the hostname to offline mode
    # "YES" turn on offline redirect for host
    # "NOCHG" no make change
    self.__fields['offline'] = 'NOCHG'
    # No already use
    self.__fields['url'] = ''

  def configure(self, **options):
    """Parse input main program options (restrict to program strict execution)

    @param[dict] options : array of option key => value
    @return[bool] : True when the configuration is valid, False otherwise
    """
    if 'verbose' in options:
      if options['verbose'] < 0:
        # negative verbosity fully silences stdout
        self.__logger_stdout.setLevel(logging.CRITICAL + 1)
      else:
        # each -v step lowers the stdout threshold by one logging level (10)
        self.__logger_stdout.setLevel(logging.INFO - options['verbose']*10)
    self.__logger.debug('configured with args %s', options)
    if 'errors_to_stderr' in options and options['errors_to_stderr']:
      self.__logger_stderr.setLevel(logging.ERROR)
    # disable SSL certificate verification
    if 'tls_insecure' in options and options['tls_insecure']:
      self.__tls_insecure = True
    # http timeout
    if 'timeout' in options and options['timeout']:
      self.__timeout = options['timeout']
    # http settings
    if 'server_url' in options and options['server_url']:
      match = DynDNSUpdate.RE_URL.match(options['server_url'])
      if match:
        # keep the url split into named parts: url, proto, host, port, path
        self.__server_url = match.groupdict()
      else:
        self.__logger.error('given server url "%s" is incorrect', options['server_url'])
        return False
    if 'server_api_url' in options and options['server_api_url']:
      self.__server_api_url = options['server_api_url']
    if 'server_username' in options and options['server_username']:
      self.__server_username = options['server_username']
    if 'server_password' in options and options['server_password']:
      self.__server_password = options['server_password']
    # dyn dns parsing
    if 'dyndns_myip' in options and options['dyndns_myip']:
      match = DynDNSUpdate.RE_IP.match(options['dyndns_myip'])
      if match:
        self.__fields['myip'] = match.group('ipv4')
      else:
        self.__logger.error('given ip address "%s" is incorrect', options['dyndns_myip'])
        return False
    if 'dyndns_hostname' in options and options['dyndns_hostname']:
      hostnames = options['dyndns_hostname']
      if isinstance(hostnames, list):
        # protocol expects a single comma separated list of hostnames
        hostnames = ','.join(hostnames)
      self.__fields['hostname'] = hostnames
    if 'dyndns_wildcard' in options and options['dyndns_wildcard']:
      if options['dyndns_wildcard'] in ['ON', 'OFF', 'NOCHG']:
        self.__logger.warning('Deprecated: Flag wildcard can be currently ignored')
        self.__fields['wildcard'] = options['dyndns_wildcard']
      else:
        self.__logger.error('Incorrect value for dyndns_wildcard option')
        return False
    return True
    #
    # if opt[0] == '--backmx':
    #   self.__fields['backmx'] = 'YES'
    # if opt[0] == '--no-backmx':
    #   self.__fields['backmx'] = 'NO'
    # if opt[0] == '--offline':
    #   self.__fields['offline'] = 'YES'
    # if opt[0] == '--static':
    #   self.__fields['system'] = 'statdns'
    # if opt[0] == '--url':
    #   self.__fields['url'] = opt[1]

  def main(self):
    """Entry point of the program

    @return[int] : 0 on success, 1 on query failure, 3 on missing setting
    """
    if not self.__server_url:
      self.__logger.error('Missing required setting "server_url" in configure()')
      return 3
    for required_field in ['myip', 'hostname']:
      if not self.__fields[required_field]:
        self.__logger.error('Missing required setting "%s" in configure()', required_field)
        return 3
    self.__logger.debug('debug: config fields ' + str(self.__fields))
    # __query() returns True on success -> exit code 0
    return int(not self.__query())

  def __query(self):
    """Forge and send the HTTP GET query

    @return[integer] : True if query success
                       False otherwise
    """
    url_parts = self.__server_url
    host = url_parts['host']
    port = None
    if url_parts['port']:
      port = url_parts['port']
    # PROTOCOL
    # choose the connection class from the parsed url scheme
    if not url_parts['proto'] or url_parts['proto'] == 'http':
      self.__logger.debug('-> protocol HTTP')
      if port is None:
        port = http.client.HTTP_PORT
      conn = http.client.HTTPConnection(host, port, timeout=self.__timeout)
    elif url_parts['proto'] == 'https':
      self.__logger.debug('-> protocol HTTPs')
      if port is None:
        port = http.client.HTTPS_PORT
      if self.__tls_insecure:
        # deliberately unverified context when --insecure was requested
        context = ssl._create_unverified_context()
        self.__logger.debug('-> SSL certificate verification is DISABLED')
      else:
        context = None
      conn = http.client.HTTPSConnection(host, port,
                                         timeout=self.__timeout,
                                         context=context)
    else:
      self.__logger.error('Found unmanaged url protocol : "%s" ignoring url', url_parts['proto'])
      return False
    # /PROTOCOL
    # HEADER
    # build the header dict
    headers = {'User-Agent': 'dyndns-update/' + __version__}
    # authentification
    if self.__server_username and self.__server_password:
      # build the auth string
      auth_str = self.__server_username + ':' + self.__server_password
      # encode it as a base64 string to put in http header
      auth = b64encode(auth_str.encode()).decode("ascii")
      # fill the header
      headers['Authorization'] = 'Basic ' + auth
      self.__logger.debug('-> authentication enabled')
    else:
      self.__logger.debug('-> authentication disabled')
    # /HEADER
    # URL
    # fields are sorted so the query string is deterministic
    dyndns_params = urllib.parse.urlencode(sorted(self.__fields.items()))
    url = '{base_url}{api_path}?{params}'.format(base_url=url_parts['url'].rstrip('/'),
                                                 api_path=self.__server_api_url,
                                                 params=dyndns_params)
    self.__logger.debug('set final url to "%s"', url)
    # /URL
    try:
      conn.request('GET', url, headers=headers)
      res = conn.getresponse()
      data = res.read().decode()
    except socket.gaierror as e:
      self.__logger.debug('=> unable to resolve hostname %s', str(e))
      return False
    except ssl.SSLError as e:
      self.__logger.debug('=> unable to validate the host\'s certifcate.' +
                          ' You can override this by using --insecure')
      return False
    except socket.error as e:
      self.__logger.debug('=> unable to connect to host %s', str(e))
      return False
    except http.client.HTTPException:
      self.__logger.debug('=> error with HTTP query')
      return False
    except Exception as e:
      self.__logger.error('Unhandled python exception please inform the developper %s', str(e))
      return False
    finally:
      # always release the connection, success or not
      conn.close()
    self.__logger.debug('get HTTP status code : %d %s', res.status, res.reason)
    self.__logger.debug('get HTTP data : "%s"', data)
    # authentication missing error
    if res.status == 401:
      self.__logger.debug('=> the server may require an authentification')
      self.__logger.error('The server at url "%s" may require an authentification', url_parts['url'])
      return False
    elif res.status in [200]:
      self.__logger.info('Successfully updated')
      return True
    return False
##
# Run launcher as the main program
if __name__ == '__main__':
  parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                   argument_default=argparse.SUPPRESS,
                                   description='DynDNS version v' + __version__ + """ --
Use DYNDNS protocol to update a dynhost with a new ip address""")
  # required arguments
  parser.add_argument('--dyn-address', action='store', dest='dyndns_myip',
                      help='set the IP address to use for update')
  parser.add_argument('--dyn-hostname', action='append', dest='dyndns_hostname',
                      help='set the hostname of the dyn host to update. This is the DNS domain name that point to the ip address')
  parser.add_argument('--dyn-server', action='store', dest='server_url',
                      help='set the dyndns server address that contains the zone to update')
  # optional arguments
  parser.add_argument('-u', '--username', action='store', dest='server_username',
                      help='username to use for http authentication')
  parser.add_argument('-p', '--password', action='store', dest='server_password',
                      help='password to use for http authentication')
  parser.add_argument('--api-url', action='store', dest='server_api_url', default='/nic/update',
                      help='url endpoint to which send http query parameters')
  # dyn dns protocol
  backmx_group = parser.add_mutually_exclusive_group()
  backmx_group.add_argument('--backmx', action='store_true', dest='dyndns_backmx',
                            help='set backupmx option to YES')
  backmx_group.add_argument('--no-backmx', action='store_false', dest='dyndns_backmx',
                            help='set backupmx option to NO')
  wildcard_group = parser.add_mutually_exclusive_group()
  wildcard_group.add_argument('--wildcard', action='store_const', const='ON', dest='dyndns_wildcard',
                              help='set wildcard option to ON')
  wildcard_group.add_argument('--no-wildcard', action='store_const', const='OFF', dest='dyndns_wildcard',
                              help='set wildcard option to OFF')
  # BUGFIX: help text was copy-pasted from --api-url; it describes the
  # dyndns protocol 'url' field, not the api endpoint
  parser.add_argument('--url', action='store', dest='dyndns_url',
                      help='set the url field of the dyndns protocol')
  # DynDNS protocol features not yet exposed on the command line:
  # --offline set dyndns to offline mode
  # --static set static dns system
  # BUGFIX: type=int so the timeout reaches http.client as a number;
  # without it '-t 5' produced the string '5' and socket.settimeout raised
  parser.add_argument('-t', '--timeout', action='store', dest='timeout', type=int, default=5,
                      help='The HTTP timeout in seconds for all requests')
  # BUGFIX: store_true makes --insecure a real flag; with action='store' it
  # required an explicit value and any non-empty string (even 'false')
  # enabled insecure mode
  parser.add_argument('--insecure', action='store_true', dest='tls_insecure', default=False,
                      help='Disable TLS certificate verification for secure connexions')
  logging_group = parser.add_mutually_exclusive_group()
  logging_group.add_argument('--no-output', action='store_const', dest='verbose', const=-1,
                             help='Disable all output message to stdout. (cron mode)')
  logging_group.add_argument('-v', '--verbose', action='count', dest='verbose',
                             help='Show more running messages')
  parser.add_argument('--errors-to-stderr', action='store_true', dest='errors_to_stderr',
                      help='Copy errors to stderr')
  parser.add_argument('-V', '--version', action='store_true', dest='show_version', default=False,
                      help='Print the version and exit')
  args = parser.parse_args()
  if args.show_version:
    print("DynDNS client version v" + __version__)
    sys.exit(0)
  program = DynDNSUpdate()
  if not program.configure(**vars(args)):
    sys.exit(2)
  sys.exit(program.main())
# Return code :
#   0 Success
#   1 Other errors during running
#   2 Bad argument
#   3 Missing required argument
#   10 Error during HTTP query
#   11 Authentification needed
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes to implement caching."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import datetime
import logging
import sys
import threading
import unittest
import appengine_config
from models.counters import PerfCounter
def iter_all(query, batch_size=100):
    """Yields query results iterator. Proven method for large datasets."""
    cursor = None
    fetched_any = True
    while fetched_any:
        fetched_any = False
        page = query.with_cursor(cursor)
        for record in page.run(batch_size=batch_size):
            fetched_any = True
            yield record
        cursor = page.cursor()
        query = page
class AbstractScopedSingleton(object):
    """A singleton object bound to and managed by a container.

    This singleton stores its instance inside the container. When container is
    wiped, the singleton instance is garbage collected and destroyed. You can
    use a dict as a container and then wipe it yourself. You can use
    threading.local as a container and it will be wiped automatically when
    thread exits.
    """

    # subclasses must point this at a dict-like container for the instances
    CONTAINER = None

    @classmethod
    def _instances(cls):
        # lazily create the shared {class: instance} map inside CONTAINER
        assert cls.CONTAINER is not None
        if 'instances' not in cls.CONTAINER:
            cls.CONTAINER['instances'] = {}
        return cls.CONTAINER['instances']

    @classmethod
    def instance(cls, *args, **kwargs):
        """Creates new or returns existing instance of the object."""
        # pylint: disable=protected-access
        _instance = cls._instances().get(cls)
        if not _instance:
            try:
                _instance = cls(*args, **kwargs)
            except:
                logging.exception(
                    'Failed to instantiate %s: %s, %s', cls, args, kwargs)
                raise
            appengine_config.log_appstats_event('%s.create' % cls.__name__, {})
            # remember the constructor args so a later instance() call with
            # different args can be rejected below
            _instance._init_args = (args, kwargs)
            cls._instances()[cls] = _instance
        else:
            _before = _instance._init_args
            _now = (args, kwargs)
            if _now != _before:
                raise AssertionError(
                    'Singleton initiated with %s already exists. '
                    'Failed to re-initialize it with %s.' % (_before, _now))
        return _instance

    @classmethod
    def clear_all(cls):
        """Clear all active instances."""
        if cls._instances():
            # clear() mutates the same map, so iterate over a copy
            for _instance in list(cls._instances().values()):
                _instance.clear()
            del cls.CONTAINER['instances']

    @classmethod
    def clear_instance(cls):
        """Destroys the instance of this cls."""
        appengine_config.log_appstats_event(
            '%s.destroy' % cls.__name__, {})
        _instance = cls._instances().get(cls)
        if _instance:
            del cls._instances()[cls]

    def clear(self):
        """Destroys this object and its content."""
        appengine_config.log_appstats_event(
            '%s.destroy' % self.__class__.__name__, {})
        _instance = self._instances().get(self.__class__)
        if _instance:
            del self._instances()[self.__class__]
# container living for the whole process lifetime
_process_scoped_singleton = {}
# thread-local container; wiped automatically when the serving thread exits
_request_scoped_singleton = threading.local()
class ProcessScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the process."""

    CONTAINER = _process_scoped_singleton
class RequestScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the request scope."""

    # the __dict__ of a threading.local gives a per-thread dict container
    CONTAINER = _request_scoped_singleton.__dict__
class LRUCache(object):
    """A dict that supports capped size and LRU eviction of items.

    Capacity may be bounded by item count, by total byte size, or both; at
    least one bound is required. get() refreshes an item's recency, so
    eviction removes the least recently used entries first.
    """

    def __init__(
        self, max_item_count=None,
        max_size_bytes=None, max_item_size_bytes=None):
        # at least one capacity bound must be provided
        assert max_item_count or max_size_bytes
        if max_item_count:
            assert max_item_count > 0
        if max_size_bytes:
            assert max_size_bytes > 0
        # running byte total, maintained incrementally by _allocate_space()
        self.total_size = 0
        self.max_item_count = max_item_count
        self.max_size_bytes = max_size_bytes
        self.max_item_size_bytes = max_item_size_bytes
        # ordered by insertion/access: front of the dict == least recently used
        self.items = collections.OrderedDict([])

    def get_entry_size(self, key, value):
        """Computes item size. Override and compute properly for your items."""
        return sys.getsizeof(key) + sys.getsizeof(value)

    def _compute_current_size(self):
        """Recomputes the total byte size of all entries from scratch.

        BUGFIX: the original called the non-existent self.get_item_size()
        (AttributeError on first use) and used Python 2-only iteritems();
        it now reuses get_entry_size(), matching _allocate_space accounting.
        """
        total = 0
        for key, value in self.items.items():
            total += self.get_entry_size(key, value)
        return total

    def _allocate_space(self, key, value):
        """Remove items in FIFO order until size constraints are met."""
        entry_size = self.get_entry_size(key, value)
        # an entry larger than the per-item cap can never be stored
        if self.max_item_size_bytes and entry_size > self.max_item_size_bytes:
            return False
        while True:
            over_count = False
            over_size = False
            if self.max_item_count:
                over_count = len(self.items) >= self.max_item_count
            if self.max_size_bytes:
                over_size = self.total_size + entry_size >= self.max_size_bytes
            if not (over_count or over_size):
                if self.max_size_bytes:
                    self.total_size += entry_size
                    assert self.total_size < self.max_size_bytes
                return True
            if self.items:
                # evict the least recently used entry and release its bytes
                _key, _value = self.items.popitem(last=False)
                if self.max_size_bytes:
                    self.total_size -= self.get_entry_size(_key, _value)
                    assert self.total_size >= 0
            else:
                # nothing left to evict; the new entry simply does not fit
                break
        return False

    def _record_access(self, key):
        """Pop and re-add the item so it becomes the most recently used."""
        item = self.items.pop(key)
        self.items[key] = item

    def contains(self, key):
        """Checks if item is contained without accessing it."""
        assert key
        return key in self.items

    def put(self, key, value):
        """Stores value under key; returns False when it cannot fit."""
        assert key
        if self._allocate_space(key, value):
            self.items[key] = value
            return True
        return False

    def get(self, key):
        """Accessing item makes it less likely to be evicted.

        Returns:
            (found, value) tuple; value is None when not found.
        """
        assert key
        if key in self.items:
            self._record_access(key)
            return True, self.items[key]
        return False, None

    def delete(self, key):
        """Removes key if present; returns whether it was there."""
        assert key
        if key in self.items:
            del self.items[key]
            return True
        return False
class NoopCacheConnection(object):
    """Connection to no-op cache that provides no caching."""

    def get(self, *unused_args, **unused_kwargs):
        # every lookup is a miss
        return False, None

    def put(self, *unused_args, **unused_kwargs):
        # values are silently dropped
        return None

    def delete(self, *unused_args, **unused_kwargs):
        # nothing is ever stored, so nothing to delete
        return None
class AbstractCacheEntry(object):
    """Object representation of an item while it lives in the cache."""

    # deletions are not tracked; a deleted item may linger this long
    CACHE_ENTRY_TTL_SEC = 5 * 60

    @classmethod
    def internalize(cls, unused_key, *args, **kwargs):
        """Converts incoming objects into cache entry object."""
        return (args, kwargs)

    @classmethod
    def externalize(cls, unused_key, *args, **kwargs):
        """Converts cache entry into external object."""
        return (args, kwargs)

    def has_expired(self):
        """Whether this entry is older than the TTL."""
        lifetime = datetime.datetime.utcnow() - self.created_on
        return lifetime.total_seconds() > self.CACHE_ENTRY_TTL_SEC

    def is_up_to_date(self, unused_key, unused_update):
        """Compare entry and the update object to decide if entry is fresh."""
        raise NotImplementedError()

    def updated_on(self):
        """Return last update time for entity."""
        raise NotImplementedError()
class AbstractCacheConnection(object):
    """Base class for a namespaced cache kept fresh by datastore polling.

    Subclasses must define PERSISTENT_ENTITY (the datastore model polled for
    changes) and CACHE_ENTRY (an AbstractCacheEntry subclass), instantiate
    self.cache (an LRUCache) in __init__, and implement is_enabled().

    Portability fix: Python 2-only dict.iteritems() calls were replaced by
    .items(), which behaves identically for iteration on both Python 2 and 3.
    """

    PERSISTENT_ENTITY = None
    CACHE_ENTRY = None

    @classmethod
    def init_counters(cls):
        """Creates the performance counters for this connection class."""
        name = cls.__name__
        cls.CACHE_RESYNC = PerfCounter(
            'gcb-models-%s-cache-resync' % name,
            'A number of times an vfs cache was updated.')
        cls.CACHE_PUT = PerfCounter(
            'gcb-models-%s-cache-put' % name,
            'A number of times an object was put into cache.')
        cls.CACHE_GET = PerfCounter(
            'gcb-models-%s-cache-get' % name,
            'A number of times an object was pulled from cache.')
        cls.CACHE_DELETE = PerfCounter(
            'gcb-models-%s-cache-delete' % name,
            'A number of times an object was deleted from cache.')
        cls.CACHE_HIT = PerfCounter(
            'gcb-models-%s-cache-hit' % name,
            'A number of times an object was found cache.')
        cls.CACHE_HIT_NONE = PerfCounter(
            'gcb-models-%s-cache-hit-none' % name,
            'A number of times an object was found cache, but it was None.')
        cls.CACHE_MISS = PerfCounter(
            'gcb-models-%s-cache-miss' % name,
            'A number of times an object was not found in the cache.')
        cls.CACHE_NOT_FOUND = PerfCounter(
            'gcb-models-%s-cache-not-found' % name,
            'A number of times an object was requested, but was not found in '
            'the cache or underlying provider.')
        cls.CACHE_UPDATE_COUNT = PerfCounter(
            'gcb-models-%s-cache-update-count' % name,
            'A number of update objects received.')
        cls.CACHE_EVICT = PerfCounter(
            'gcb-models-%s-cache-evict' % name,
            'A number of times an object was evicted from cache because it was '
            'changed.')
        cls.CACHE_EXPIRE = PerfCounter(
            'gcb-models-%s-cache-expire' % name,
            'A number of times an object has expired from cache because it was '
            'too old.')

    @classmethod
    def make_key_prefix(cls, ns):
        """Key prefix scoping all entries of this class in namespace ns."""
        return '%s:%s' % (cls.__name__, ns)

    @classmethod
    def make_key(cls, ns, entry_key):
        """Full cache key for entry_key within namespace ns."""
        return '%s:%s' % (cls.make_key_prefix(ns), entry_key)

    @classmethod
    def is_enabled(cls):
        """Whether caching is turned on; must be overridden."""
        raise NotImplementedError()

    @classmethod
    def new_connection(cls, *args, **kwargs):
        """Returns a live connection, or a no-op stand-in when disabled."""
        if not cls.is_enabled():
            return NoopCacheConnection()
        conn = cls(*args, **kwargs)
        # pylint: disable=protected-access
        conn.apply_updates(conn._get_incremental_updates())
        return conn

    def __init__(self, namespace):
        """Override this method and properly instantiate self.cache."""
        self.namespace = namespace
        self.cache = None
        appengine_config.log_appstats_event(
            '%s.connect' % self.__class__.__name__, {'namespace': namespace})

    def apply_updates(self, updates):
        """Applies a list of global changes to the local cache."""
        self.CACHE_RESYNC.inc()
        for key, update in updates.items():
            _key = self.make_key(self.namespace, key)
            found, entry = self.cache.get(_key)
            if not found:
                # not cached locally; nothing to invalidate
                continue
            if entry is None:
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if not entry.is_up_to_date(key, update):
                # a newer version exists in the datastore
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if entry.has_expired():
                self.CACHE_EXPIRE.inc()
                self.cache.delete(_key)
                continue

    def _get_most_recent_updated_on(self):
        """Get the most recent item cached. Datastore deletions are missed..."""
        has_items = False
        max_updated_on = datetime.datetime.fromtimestamp(0)
        prefix = self.make_key_prefix(self.namespace)
        for key, entry in self.cache.items.items():
            if not key.startswith(prefix):
                # entry belongs to another namespace/class
                continue
            has_items = True
            if not entry:
                continue
            updated_on = entry.updated_on()
            if not updated_on:  # old entities may be missing this field
                updated_on = datetime.datetime.fromtimestamp(0)
            if updated_on > max_updated_on:
                max_updated_on = updated_on
        return has_items, max_updated_on

    def get_updates_when_empty(self):
        """Override this method to pre-load cache when it's completely empty."""
        return {}

    def _get_incremental_updates(self):
        """Gets a list of global changes older than the most recent item cached.

        WARNING!!! We fetch the updates since the timestamp of the oldest item
        we have cached so far. This will bring all objects that have changed or
        were created since that time.

        This will NOT bring the notifications about object deletions. Thus cache
        will continue to serve deleted objects until they expire.

        Returns:
            a dict of {key: update} objects that represent recent updates
        """
        has_items, updated_on = self._get_most_recent_updated_on()
        if not has_items:
            return self.get_updates_when_empty()
        q = self.PERSISTENT_ENTITY.all()
        if updated_on:
            q.filter('updated_on > ', updated_on)
        result = {
            entity.key().name(): entity for entity in iter_all(q)}
        # len(result) replaces len(result.keys()): same count, no extra list
        self.CACHE_UPDATE_COUNT.inc(len(result))
        return result

    def put(self, key, *args):
        """Internalizes *args via CACHE_ENTRY and stores them under key."""
        self.CACHE_PUT.inc()
        self.cache.put(
            self.make_key(self.namespace, key),
            self.CACHE_ENTRY.internalize(key, *args))

    def get(self, key):
        """Returns (found, value); expired entries are dropped on access."""
        self.CACHE_GET.inc()
        _key = self.make_key(self.namespace, key)
        found, entry = self.cache.get(_key)
        if not found:
            self.CACHE_MISS.inc()
            return False, None
        if not entry:
            self.CACHE_HIT_NONE.inc()
            return True, None
        if entry.has_expired():
            self.CACHE_EXPIRE.inc()
            self.cache.delete(_key)
            return False, None
        self.CACHE_HIT.inc()
        return True, self.CACHE_ENTRY.externalize(key, entry)

    def delete(self, key):
        """Removes key from the local cache (not from the datastore)."""
        self.CACHE_DELETE.inc()
        self.cache.delete(self.make_key(self.namespace, key))
class LRUCacheTests(unittest.TestCase):
def test_ordereddict_works(self):
_dict = collections.OrderedDict([])
_dict['a'] = '1'
_dict['b'] = '2'
_dict['c'] = '3'
self.assertEqual(('a', '1'), _dict.popitem(last=False))
self.assertEqual(('c', '3'), _dict.popitem(last=True))
def test_initialization(self):
with self.assertRaises(AssertionError):
LRUCache()
with self.assertRaises(AssertionError):
LRUCache(max_item_count=-1)
with self.assertRaises(AssertionError):
LRUCache(max_size_bytes=-1)
LRUCache(max_item_count=1)
LRUCache(max_size_bytes=1)
def test_evict_by_count(self):
cache = LRUCache(max_item_count=3)
self.assertTrue(cache.put('a', '1'))
self.assertTrue(cache.put('b', '2'))
self.assertTrue(cache.put('c', '3'))
self.assertTrue(cache.contains('a'))
self.assertTrue(cache.put('d', '4'))
self.assertFalse(cache.contains('a'))
self.assertEquals(cache.get('a'), (False, None))
def test_evict_by_count_lru(self):
cache = LRUCache(max_item_count=3)
self.assertTrue(cache.put('a', '1'))
self.assertTrue(cache.put('b', '2'))
self.assertTrue(cache.put('c', '3'))
self.assertEquals(cache.get('a'), (True, '1'))
self.assertTrue(cache.put('d', '4'))
self.assertTrue(cache.contains('a'))
self.assertFalse(cache.contains('b'))
def test_evict_by_size(self):
min_size = sys.getsizeof(LRUCache(max_item_count=1).items)
item_size = sys.getsizeof('a1')
cache = LRUCache(max_size_bytes=min_size + 3 * item_size)
self.assertTrue(cache.put('a', '1'))
self.assertTrue(cache.put('b', '2'))
self.assertTrue(cache.put('c', '3'))
self.assertFalse(cache.put('d', bytearray(1000)))
def test_evict_by_size_lru(self):
cache = LRUCache(max_size_bytes=5000)
self.assertTrue(cache.put('a', bytearray(4500)))
self.assertTrue(cache.put('b', '2'))
self.assertTrue(cache.put('c', '3'))
self.assertTrue(cache.contains('a'))
self.assertTrue(cache.put('d', bytearray(1000)))
self.assertFalse(cache.contains('a'))
self.assertTrue(cache.contains('b'))
def test_max_item_size(self):
cache = LRUCache(max_size_bytes=5000, max_item_size_bytes=1000)
self.assertFalse(cache.put('a', bytearray(4500)))
self.assertEquals(cache.get('a'), (False, None))
self.assertTrue(cache.put('a', bytearray(500)))
found, _ = cache.get('a')
self.assertTrue(found)
class SingletonTests(unittest.TestCase):
    """Tests for the scoped singleton machinery."""

    def test_singleton(self):
        # two distinct singleton classes sharing the request-scoped container
        class A(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        class B(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        # TODO(psimakov): prevent direct instantiation
        A('aaa')
        B('bbb')
        # using instance() creates and returns the same instance
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = A.instance('bar')
        assert a.data == 'bar'
        assert b.data == 'bar'
        assert a is b
        # re-initialization fails if arguments differ
        RequestScopedSingleton.clear_all()
        a = A.instance('dog')
        try:
            b = A.instance('cat')
            raise Exception('Expected to fail.')
        except AssertionError:
            pass
        # clearing one keep others
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        a.clear()
        c = B.instance('cat')
        assert c is b
        # clearing all clears all
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        RequestScopedSingleton.clear_all()
        c = A.instance('bar')
        d = B.instance('cat')
        assert a is not c
        assert b is not d
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    loader = unittest.TestLoader()
    combined = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case_class)
         for case_class in [LRUCacheTests, SingletonTests]])
    unittest.TextTestRunner().run(combined)


if __name__ == '__main__':
    run_all_unit_tests()
| |
#!/bin/py
#
# interpolate over data field with 2d polynomial fit
#
# fit a 2D, 3rd order polynomial to data
# estimate the 16 coefficients using all of your data points.
#
# http://stackoverflow.com/questions/18832763/drawing-directions-fields
#
#
from __future__ import print_function

import numpy as np
import matplotlib
matplotlib.use('Agg')
import itertools
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.integrate import ode
# x-position shared by the ellipse family (used as h in load_ell/arr)
hprime = -12
# circle radius R: data generation boundary and the circle drawn in main()
Rprime = 3.0
def load_ell():
    """Generate (x, y, slope) samples along a family of ellipses.

    Starting points lie on the circle of radius Rprime; each ellipse is
    anchored at x = hprime and constructed so its slope at the circle matches
    the deflection angle thetafy.

    Returns:
        x, y, z: 1-D numpy arrays of sample coordinates and slope dy/dx.

    Fixed for Python 2/3 compatibility: print statements became print()
    calls (with the module-level print_function import) and xrange became
    range; the unused local 'fudge' was removed.
    """
    h = hprime
    thetaf = 5*np.pi/180.  # total vane turning angle in radians
    R = Rprime
    a = -(h-R)*1  # ellipse semi-axis along x: distance from h to the circle
    miny = 0.4
    maxy = Rprime
    # create data: seed points marching down the circle
    space = 0.02
    y0 = np.arange(maxy, miny, -space)
    x0 = np.sqrt(R*R - y0*y0)
    theta0 = np.arctan2(y0, x0)
    # deflection decreases linearly toward the top of the circle
    thetafy = thetaf*(R-y0)/R
    #thetafy = thetaf*np.arccos(y0/R)/2.
    thetam = theta0 - np.pi/2 - thetafy
    m = np.tan(thetam)
    # solve for the ellipse center k and semi-axis b matching slope m at (x0, y0)
    k = (y0 + a*a*m/(x0-h) - m*(x0-h))
    bs = -a*a*m*(y0-k)/(x0-h)
    b = np.sqrt(bs)
    xl = []
    yl = []
    zl = []
    print('y0 ', y0)
    print('x0 ', x0)
    print('b/a: ', b/a)
    dx_space = 0.2
    for i in range(len(k)):
        dx = np.arange(h, x0[i], dx_space)
        xl = xl + dx.tolist()
        dy = (b[i]*np.sqrt(1-((dx-h)/(a))**2))+k[i]
        yl = yl + dy.tolist()
        #zl.append(np.arctan(dy/dx))
        if i == 0:
            # first ellipse is degenerate (b == 0): slope is identically zero
            m = np.zeros(len(dy))
        else:
            m = b[i]*b[i]*(dx-h)/((dy-k[i])*(a*a))
        zl = zl + m.tolist()
    # convert to numpy array
    x = np.asarray(xl)
    y = np.asarray(yl)
    z = np.asarray(zl)
    # steady as she goes
    return x, y, z
def vf(t, x, m):
    """Vector field for the ODE solver: unit step along the local slope.

    The slope at position x comes from the fitted 2D polynomial m; the
    returned velocity has unit magnitude with the y component negated.
    """
    slope = polyval2d(x[0], x[1], m)
    theta = np.arctan(slope)
    velocity = np.zeros(2)
    velocity[0] = np.cos(theta)
    velocity[1] = -np.sin(theta)
    return velocity
def arr(m):
    # Solution curves
    # Integrates the vector field vf(t, x, m) from three starting points on
    # the left edge (x = hprime) and plots the trajectories in red on the
    # current matplotlib axes.
    h = hprime
    ic = [[h, 6], [h, 8.5], [h, 12]]  # initial (x, y) positions
    end = [0.5, 2, 4.3]  # extra arc length appended per trajectory
    t0 = 0; dt = 0.1;
    r = ode(vf).set_integrator('vode', method='bdf', max_step=dt)
    for k in range(len(ic)):
        # integrate roughly the distance from the origin plus the fudge term
        tEnd = np.sqrt(ic[k][0]**2 + ic[k][1]**2) + end[k]
        Y = []; T = []; S = [];
        r.set_initial_value(ic[k], t0).set_f_params(m)
        while r.successful() and r.t + dt < tEnd:
            r.integrate(r.t + dt)
            Y.append(r.y)
        S = np.array(np.real(Y))
        plt.plot(S[:, 0], S[:, 1], color='red', lw=4.25)
def polyfit2d(x, y, z, order=5):
    """Least-squares fit of a 2D polynomial z ~ sum_k m[k] * x**i * y**j.

    Args:
        x, y, z: 1-D arrays of sample coordinates and sampled values.
        order: maximum exponent per axis; (order+1)**2 coefficients are fit.

    Returns:
        Coefficient vector m ordered by
        itertools.product(range(order+1), range(order+1)).
    """
    ncols = (order + 1)**2
    # G is the Vandermonde-style design matrix, one monomial per column
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order+1), range(order+1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    # http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.linalg.lstsq.html
    # rcond=None selects numpy's modern machine-precision cutoff and avoids
    # the FutureWarning emitted when rcond is left unspecified.
    m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
    return m
def polyval2d(x, y, m):
    """Evaluate the 2D polynomial with coefficient vector m at (x, y).

    Coefficients are paired with exponents in the same
    product(range(order+1), range(order+1)) order used by polyfit2d.
    """
    order = int(np.sqrt(len(m))) - 1
    exponents = itertools.product(range(order + 1), range(order + 1))
    result = np.zeros_like(x)
    for coeff, (i, j) in zip(m, exponents):
        result += coeff * x**i * y**j
    return result
def polyval2d_disp(x, y, m):
    """Same as polyval2d but prints every term while accumulating (debug aid).

    Converted the Python 2 print statement to a print() call (portable with
    the module-level print_function import).
    """
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    z = np.zeros_like(x)
    for a, (i, j) in zip(m, ij):
        tmp = a * x**i * y**j
        z += tmp
        print(a, i, j, tmp, z)
    return z
#
#
#
def poly_disp_fparse(m):
    """Print the fitted polynomial as an fparse 'top_slope_func' expression.

    Converted Python 2 print statements to print() calls (portable with the
    module-level print_function import). Always returns 0.
    """
    print("#")
    print("# TOP VANES: Polynomial Interpolation Function")
    print("#")
    print("top_slope_func = '")
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i, j) in zip(m, ij):
        # the last term closes the quoted expression instead of adding '+'
        if (i+1)*(j+1) != len(m):
            print(' %.15f * x^%i * y^%i +' % (a, i, j))
        else:
            print(" %.15f * x^%i * y^%i'" % (a, i, j))
    print()
    return 0
#
#
#
def poly_disp_py(m):
    """Print the fitted polynomial as a single python 'return' expression.

    Converted Python 2 print statements to print() calls; the original
    trailing-comma prints (newline suppression) are reproduced with end=' '.
    Always returns 0.
    """
    print("#")
    print("# Polynomial Interpolation Function")
    print("# For python")
    print("return ", end=' ')
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i, j) in zip(m, ij):
        # the last term ends the expression instead of adding '+'
        if (i+1)*(j+1) != len(m):
            print('%.15f * x**%i * y**%i +' % (a, i, j), end=' ')
        else:
            print("%.15f * x**%i * y**%i" % (a, i, j), end=' ')
    print()
    return 0
#
#
#
def poly_disp_py_line(m):
    """Print the fitted polynomial as per-term python statements.

    The emitted text (' tmp += ...' / ' print tmp') is generated Python 2
    code and is left byte-identical; only this function's own Python 2 print
    statements were converted to print() calls. Always returns 0.
    """
    print("#")
    print("# Polynomial Interpolation Function")
    print("# For python")
    order = int(np.sqrt(len(m))) - 1
    ij = itertools.product(range(order+1), range(order+1))
    for a, (i, j) in zip(m, ij):
        if (i+1)*(j+1) != len(m):
            print(' tmp += %.15f * x**%i * y**%i' % (a, i, j))
            print(' print tmp')
        else:
            print(" tmp += %.15f * x**%i * y**%i" % (a, i, j))
            print(' print tmp')
    print()
    return 0
def load_ex():
    """Generate a random example data set of 100 (x, y, z) points.

    x and y are uniform in [0, 1); z is a deliberately silly analytic
    surface plus uniform [0, 1) noise. Returns the tuple (x, y, z).
    """
    n_pts = 100
    xs = np.random.random(n_pts)
    ys = np.random.random(n_pts)
    # Fake surface: polynomial in x and y plus additive noise.
    zs = xs ** 2 + ys ** 2 + 3 * xs ** 3 + ys + np.random.random(n_pts)
    return xs, ys, zs
#
# main function: execute
#
def main():
    """Fit a 2-D polynomial to vane data, plot it, and print the expression.

    Relies on names defined elsewhere in the file: load_ell(), arr(),
    Rprime, plt, np — presumably a data loader, an array printer, a
    radius constant, matplotlib and numpy; TODO confirm against the
    rest of the file.
    """
    #
    # load data in
    #
    x,y,z=load_ell()
    #x,y,z=load_ex()
    #
    # Fit polynomial
    #
    m = polyfit2d(x,y,z)
    #
    # Evaluate it on a grid...
    #
    nx, ny = 200, 200
    xx, yy = np.meshgrid(np.linspace(x.min(), x.max(), nx),
                         np.linspace(y.min(), y.max(), ny))
    zz = polyval2d(xx, yy, m)
    #
    # m is a matrix of polynomial values...
    # e.g.
    #
    # Plot!
    #
    # NOTE(review): arr() is not defined in this chunk — presumably an
    # array display helper defined earlier in the file; verify.
    arr(m)
    #
    # extent: [ None | (x0,x1,y0,y1) ]
    #
    plt.imshow(zz, extent=(x.min(), x.max(), y.min(), y.max()))
    plt.colorbar()
    plt.scatter(x, y, c=z)
    plt.title("Elliptic Vane Interpolation")
    plt.xlim([-6,3])
    plt.ylim([0,6])
    plt.xlabel('Streamwise (x)')
    plt.ylabel('Spanwise (y)')
    # add circle
    # Rprime is defined elsewhere in the file — presumably the duct/vane
    # radius; TODO confirm.
    R = Rprime
    circle=plt.Circle((0,0),R,color='black',fill=False,linewidth=4)
    fig = plt.gcf()
    fig.gca().add_artist(circle)
    plt.savefig('interp_top.png')
    #
    # output polynomial for input
    #
    poly_disp_fparse(m)
    #poly_disp_py_line(m)
    #print
    #print polyval2d_disp(-5.5, -3.5, m)
#
# EXECUTE
#
#main()
#
# nick
# 1/30/16
#
# http://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
#
| |
# Lots of this code is based off of the sprite animation answer from https://stackoverflow.com/questions/14044147/animated-sprite-from-few-images
#todo bitedown animation. Score display, health bar-display, link to val of health, create game gui- newgame,exit,
import os
import pygame
import random
# Module-level game setup: initialize pygame, open the window, and create
# the shared mutable state used by the sprite classes and main loop below.
pygame.init()
SIZE = WIDTH, HEIGHT = 1200, 800
FPS = 60
screen = pygame.display.set_mode(SIZE)
clock = pygame.time.Clock()
# Global list of enemy sprites; mutated by main() and read by
# AnimatedSprite methods.
soldierList = []
# Soldiers spawned per wave; incremented over time by main().
difficulty = 1
def load_images(path):
    """
    Loads all images in directory. The directory must only contain images.
    Args:
        path: The relative or absolute path to the directory to load images from.
    Returns:
        List of images, in sorted file-name order.
    """
    images = []
    # os.listdir returns entries in arbitrary order, but these images are
    # animation frames whose order matters — sort for a deterministic,
    # correctly-sequenced animation on every platform.
    for file_name in sorted(os.listdir(path)):
        image = pygame.image.load(os.path.join(path, file_name)).convert()
        images.append(image)
    return images
class AnimatedSprite(pygame.sprite.Sprite):
    # Sprite that cycles through a list of frames and flips them to face
    # left/right based on movement. Also reads/writes the module-level
    # globals soldierList (enemy sprites).
    def __init__(self, position, images):
        """
        Animated sprite object.
        Args:
            position: x, y coordinate on the screen to place the AnimatedSprite.
            images: Images to use in the animation.
        """
        super(AnimatedSprite, self).__init__()
        size = (225, 133) # This should match the size of the images.
        self.rect = pygame.Rect(position, size)
        self.images = images
        self.images_right = images
        self.images_left = [pygame.transform.flip(image, True, False) for image in images] # Flipping every image.
        self.index = 0
        self.image = images[self.index].convert_alpha() # 'image' is the current image of the animation.
        self.velocity = pygame.math.Vector2(0, 0)
        self.animation_time = 0.1
        self.current_time = 0
        # NOTE(review): animation_frames/current_frame are set here but not
        # used by any method visible in this chunk.
        self.animation_frames = 6
        self.current_frame = 0
    def update_time_dependent(self, dt, player):
        """
        Updates the image of Sprite approximately every 0.1 second.
        Args:
            dt: Time elapsed between each frame.
            player: The player sprite; soldiers are flipped to face it.
        """
        if self.velocity.x > 0: # Use the right images if sprite is moving right.
            self.images = self.images_right
        elif self.velocity.x < 0:
            self.images = self.images_left
        self.current_time += dt
        if self.current_time >= self.animation_time:
            self.current_time = 0
            self.index = (self.index + 1) % len(self.images)
            self.image = self.images[self.index]
        # Apply velocity each frame regardless of animation timing.
        self.rect.move_ip(*self.velocity)
        # Turn every soldier toward the player's current x position.
        for i in soldierList:
            if player.rect.x >= i.rect.x:
                i.images = i.images_right
            elif player.rect.x < i.rect.x:
                i.images = i.images_left
    def update(self, dt, player):
        """This is the method that's being called when 'all_sprites.update(dt)' is called."""
        self.update_time_dependent(dt, player)
    def eat(self, player):
        """Return the soldiers within bite range of `player`.

        Note: operates on the passed-in `player`, not `self` (it is called
        as player.eat(player) in main()). The -225 offset when facing right
        presumably accounts for the sprite width so range is measured from
        the mouth side — TODO confirm.
        """
        deadSoldiers=[]
        playerX = player.rect.x
        global soldierList
        for i in soldierList:
            if player.images == player.images_right:
                print(player.images == player.images_right)
                if abs(i.rect.x-225 - playerX) < 50 :
                    deadSoldiers.append(i)
            else:
                print(player.images == player.images_right)
                if abs(i.rect.x - playerX) < 50 :
                    deadSoldiers.append(i)
        return deadSoldiers
def main():
    """Run the game loop: spawn soldiers, handle input, animate, and draw.

    Mutates the module-level globals soldierList and difficulty.
    """
    global soldierList
    global difficulty
    images = load_images(path='img/dino') # Make sure to provide the relative or full path to the images directory.
    player = AnimatedSprite(position=(100, 575), images=images)
    playerEatImg = load_images(path='img/eat')
    bgImg = load_images(path='img/bg')
    bg = AnimatedSprite(position=(0, 0), images=bgImg)
    soldierImg = load_images(path='img/soldier')
    def addSoldier():
        # Spawn `difficulty` soldiers at random x positions on the ground row.
        for i in range(difficulty):
            soldierX = random.randint(50, 1150)
            soldier = AnimatedSprite(position=(soldierX, 650), images=soldierImg)
            soldierList.append(soldier)
    addSoldier()
    all_sprites = pygame.sprite.Group(bg, player,soldierList) # Creates a sprite group and adds 'player' to it.
    def dinoEatAnim(player):
        # Build a temporary eat-animation sprite facing the same way as the player.
        playerEat = AnimatedSprite(position=(player.rect.x, 575), images=playerEatImg)
        if player.images == player.images_right:
            playerEat.images = playerEat.images_right
        else:
            playerEat.images = playerEat.images_left
        return playerEat
    running = True
    SCORE = 0
    # NOTE(review): health is initialized but never updated or displayed yet
    # (see the TODO at the top of the file).
    health = 100
    myCounter = 0
    eatTimer = 50
    eating = False
    while running:
        # print('x=', player.rect.x)
        # Clamp the player inside the horizontal play area.
        if player.rect.x<=0 or player.rect.x>=975:
            player.velocity.x =0
        dt = clock.tick(FPS) / 1000 # Amount of seconds between each loop.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RIGHT:
                    player.velocity.x = 4
                elif event.key == pygame.K_LEFT:
                    player.velocity.x = -4
                elif event.key == pygame.K_SPACE:
                    # Bite: remove soldiers in range and swap in the eat animation.
                    deadSoldiers = player.eat(player)
                    newList = []
                    for e in soldierList:
                        if e not in deadSoldiers:
                            newList.append(e)
                    playerEat = dinoEatAnim(player)
                    soldierList = newList
                    all_sprites = pygame.sprite.Group(bg, playerEat,soldierList)
                    eating = True
                    SCORE += len(deadSoldiers)
                    #print(SCORE)
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
                    player.velocity.x = 0
        # Count down the eat animation, then restore the normal player sprite.
        if eating == True:
            eatTimer -= 1
            print('eatin = true: ', eatTimer)
            if eatTimer == 0:
                all_sprites = pygame.sprite.Group(bg, player,soldierList)
                eatTimer =50
                eating = False
        myCounter += 1
        # Spawn a new wave every 100 frames.
        if (myCounter % 100 == 0):
            addSoldier()
            #shoot
            #all_sprites = pygame.sprite.Group(bg, player,soldierList)
        # Ramp up difficulty every 2000 frames.
        if myCounter == 2000:
            difficulty += 1
            myCounter = 0
        all_sprites.update(dt, player) # Calls the 'update' method on all sprites in the list (currently just the player).
        all_sprites.draw(screen)
        pygame.display.update()
# Script entry point.
if __name__ == '__main__':
    main()
| |
from coco.core.auth.checks import login_allowed
from coco.core.models import BackendGroup, Notification
from coco.web.api_client_proxy import get_httpclient_instance
from coco.web.views._messages import api_error_message
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group, User
from django.db import IntegrityError
from django.shortcuts import redirect, render
from slumber.exceptions import HttpClientError
@user_passes_test(login_allowed)
def index(request):
    """Render the collaboration-groups listing page."""
    api = get_httpclient_instance(request)
    all_users = api.users.get()
    groups = api.collaborationgroups.get()
    unread = len(api.notificationlogs.unread.get())
    # Flatten each group's member objects into a plain id list for the template.
    for grp in groups:
        grp["member_ids"] = [m.id for m in grp.members]
    context = {
        'title': "Groups",
        'groups': groups,
        'users': all_users,
        'new_notifications_count': unread
    }
    return render(request, 'web/collaborationgroups/index.html', context)
@user_passes_test(login_allowed)
def manage(request, group_id):
    """Render the management page for a single collaboration group."""
    api = get_httpclient_instance(request)
    grp = api.collaborationgroups(group_id).get()
    grp_members = grp.members
    all_users = api.users.get()
    # Template expects a plain id list alongside the member objects.
    grp["member_ids"] = [m.id for m in grp_members]
    unread = len(api.notificationlogs.unread.get())
    context = {
        'title': "Group",
        'group': grp,
        'members': grp_members,
        'users': all_users,
        'new_notifications_count': unread
    }
    return render(request, 'web/collaborationgroups/manage.html', context)
@user_passes_test(login_allowed)
def create(request):
    """Create a new collaboration group from POSTed form data.

    Requires a POST with a `name` field; `public` is an optional flag.
    Always redirects back to the groups listing.
    """
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('groups')
    if 'name' not in request.POST:
        # Bail out early: previously this fell through and POSTed an
        # incomplete payload (no name) to the API after showing the error.
        messages.error(request, "Invalid POST request.")
        return redirect('groups')
    params = {}
    params["name"] = request.POST.get('name')
    params["is_public"] = 'public' in request.POST
    client = get_httpclient_instance(request)
    try:
        client.collaborationgroups.post(params)
        messages.success(request, "Group `{}` created sucessfully.".format(params.get("name")))
    except HttpClientError:
        messages.error(request, "Bad Request. A group with this name already exists.")
    except Exception as e:
        messages.error(request, api_error_message(e, params))
    return redirect('groups')
@user_passes_test(login_allowed)
def delete(request):
    """Delete a collaboration group identified by POSTed `group_id`.

    Always redirects back to the groups listing.
    """
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('groups')
    if 'group_id' not in request.POST or not request.POST.get('group_id').isdigit():
        messages.error(request, "Invalid POST request.")
        # Fixed: previously redirected to 'shares' (copy-paste from the
        # shares views); every other path in this view uses 'groups'.
        return redirect('groups')
    group_id = int(request.POST.get('group_id'))
    client = get_httpclient_instance(request)
    group = client.collaborationgroups(group_id).get()
    if group:
        try:
            client.collaborationgroups(group_id).delete()
            messages.success(request, "Group `{}` deleted.".format(group.name))
        except Exception as e:
            messages.error(request, api_error_message(e, ""))
    else:
        messages.error(request, "Group does not exist.")
    return redirect('groups')
@user_passes_test(login_allowed)
def add_admin(request):
    """Promote a user to admin of a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('shares')
    gid_raw = request.POST.get('group_id')
    uid_raw = request.POST.get('user_id')
    if gid_raw is None or not gid_raw.isdigit() or uid_raw is None or not uid_raw.isdigit():
        messages.error(request, "Invalid POST request.")
        return redirect('shares')
    gid = int(gid_raw)
    uid = int(uid_raw)
    api = get_httpclient_instance(request)
    grp = api.collaborationgroups(gid).get()
    target = api.users(uid).get()
    payload = {"users": [uid]}
    if not grp:
        messages.error(request, "Group does not exist.")
        return redirect('group_manage', grp.id)
    # Only superusers, the group creator, or existing admins may promote.
    allowed = (request.user.is_superuser
               or request.user.backend_user.id == grp.creator.id
               or request.user.backend_user.id in grp.admins)
    if not allowed:
        messages.error(request, "Not enough permissions to do this.")
        return redirect('group_manage', grp.id)
    try:
        api.collaborationgroups(gid).add_admins.post(payload)
        messages.success(request, "{} is now a admin of {}.".format(target.username, grp.name))
    except Exception as e:
        messages.error(request, api_error_message(e, payload))
    request.method = "GET"
    return redirect('group_manage', grp.id)
@user_passes_test(login_allowed)
def remove_admin(request):
    """Demote a user from admin of a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('shares')
    gid_raw = request.POST.get('group_id')
    uid_raw = request.POST.get('user_id')
    if gid_raw is None or not gid_raw.isdigit() or uid_raw is None or not uid_raw.isdigit():
        messages.error(request, "Invalid POST request.")
        return redirect('shares')
    gid = int(gid_raw)
    uid = int(uid_raw)
    api = get_httpclient_instance(request)
    grp = api.collaborationgroups(gid).get()
    target = api.users(uid).get()
    payload = {"users": [uid]}
    if not grp:
        messages.error(request, "Group does not exist.")
        return redirect('group_manage', grp.id)
    # Only superusers, the group creator, or existing admins may demote.
    allowed = (request.user.is_superuser
               or request.user.backend_user.id == grp.creator.id
               or request.user.backend_user.id in grp.admins)
    if not allowed:
        messages.error(request, "Not enough permissions to do this.")
        return redirect('group_manage', grp.id)
    try:
        api.collaborationgroups(gid).remove_admins.post(payload)
        messages.success(request, "{} is not a admin of {} anymore.".format(target.username, grp.name))
    except Exception as e:
        messages.error(request, api_error_message(e, payload))
    request.method = "GET"
    return redirect('group_manage', grp.id)
@user_passes_test(login_allowed)
def add_members(request):
    """Add a batch of users to a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('shares')
    requested = request.POST.getlist('users')
    gid = request.POST.get('group_id')
    api = get_httpclient_instance(request)
    # Keep only ids that resolve to an existing user before calling the API.
    valid_ids = [uid for uid in requested if api.users(uid).get()]
    payload = {"users": valid_ids}
    try:
        api.collaborationgroups(gid).add_members.post(payload)
        messages.success(request, "Users successfully added to the group.")
    except Exception as e:
        messages.error(request, api_error_message(e, payload))
    return redirect('group_manage', gid)
@user_passes_test(login_allowed)
def remove_member(request):
    """Remove a single member from a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('groups')
    gid_raw = request.POST.get('group_id')
    uid_raw = request.POST.get('user_id')
    if gid_raw is None or not gid_raw.isdigit() or uid_raw is None or not uid_raw.isdigit():
        messages.error(request, "Invalid POST request.")
        return redirect('groups')
    gid = int(gid_raw)
    uid = int(uid_raw)
    api = get_httpclient_instance(request)
    member = api.users(uid).get()
    grp = api.collaborationgroups(gid).get()
    if not grp:
        messages.error(request, "Group does not exist.")
        return redirect('group_manage', gid)
    if not member:
        messages.error(request, "User does not exist.")
        return redirect('group_manage', grp.id)
    payload = {"users": [uid]}
    # API call
    try:
        api.collaborationgroups(gid).remove_members.post(payload)
        messages.success(request, "Sucessfully removed {} from the group.".format(member.username))
    except Exception as e:
        messages.error(request, api_error_message(e, payload))
    request.method = "GET"
    return redirect('group_manage', grp.id)
@user_passes_test(login_allowed)
def leave(request):
    """Let the current user leave a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        return redirect('groups')
    raw_id = request.POST.get('group_id')
    if raw_id is None or not raw_id.isdigit():
        messages.error(request, "Invalid POST request.")
        return redirect('groups')
    gid = int(raw_id)
    api = get_httpclient_instance(request)
    grp = api.collaborationgroups(gid).get()
    if not grp:
        messages.error(request, "Group does not exist.")
        return redirect('groups')
    try:
        api.collaborationgroups(gid).leave.post()
        messages.success(request, "You are no longer a member of group {}.".format(grp.name))
    except Exception as e:
        messages.error(request, api_error_message(e, ""))
    request.method = "GET"
    return redirect('groups')
@user_passes_test(login_allowed)
def join(request):
    """Join a collaboration group (POST only)."""
    if request.method != "POST":
        messages.error(request, "Invalid request method.")
        # Fixed: previously redirected to 'shares', inconsistent with the
        # 'groups' redirect used by every other path in this view.
        return redirect('groups')
    if 'group_id' not in request.POST or not request.POST.get('group_id').isdigit():
        messages.error(request, "Invalid POST request.")
        return redirect('groups')
    group_id = request.POST.get('group_id')
    client = get_httpclient_instance(request)
    group = client.collaborationgroups(group_id).get()
    if not group:
        # Guard: without this, `group.name` below raises for a nonexistent
        # group (mirrors the existence check in `leave`).
        messages.error(request, "Group does not exist.")
        return redirect('groups')
    try:
        client.collaborationgroups(group_id).join.post()
        messages.success(request, "You are now a member of {}.".format(group.name))
    except Exception as e:
        messages.error(request, api_error_message(e, ""))
    return redirect('groups')
| |
# ETRM - Evapotranspiration and Recharge Model, Point version, AMF Comparison
# For use with multiple Ameriflux stations
# David Ketchum, February 2016
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from dateutil import rrule
import os
import numpy as np
# Set start datetime object
start, end = datetime.datetime(2007, 1, 1), datetime.datetime(2013, 12, 31)
# Define winter and summer for SNOW algorithm
# sWin, eWin = datetime.datetime(start.year, 11, 1), datetime.datetime(end.year, 3, 30)
# Define monsoon for Ksat, presumed storm intensity
sMon, eMon = datetime.datetime(start.year, 6, 1), datetime.datetime(start.year, 10, 1)
# Ameriflux station lookup: code -> UTM coordinates and site name.
amfdict = { '1': {'Coords': '361716 3972654', 'Name': 'Valles_conifer'},
            '2': {'Coords': '355774 3969864', 'Name': 'Valles_ponderosa'},
            '3': {'Coords': '339552 3800667', 'Name': 'Sev_shrub'},
            '4': {'Coords': '343495 3803640', 'Name': 'Sev_grass'},
            '5': {'Coords': '386288 3811461', 'Name': 'Heritage_pinyon_juniper'},
            '6': {'Coords': '420840 3809672', 'Name': 'Tablelands_juniper_savanna'}}
info = amfdict.items()
codes = [int(x[0]) for x in info]
info = [str(x[1]) for x in info]
# NOTE(review): the two slices below carve coords/names out of the string
# repr of each inner dict — this presumably relied on a fixed dict repr
# layout in the Python 2 interpreter used; fragile, verify before reuse.
coords = [str(x[12:26]) for x in info]
names = [str(x[38:-2]) for x in info]
years = [x for x in range(start.year, end.year + 1)]
# Load amf data, all dates, be sure the points are at the same place!
# In this version, use AMF precipitation, temperature, and energy fluxes
#
# Actual ET
# Per-site accumulators filled by the ingest loop below: site names,
# record counts, cleaned daily DataFrames, and the codes actually processed.
amf_names = []
amf_length = []
amf_array = []
select_codes = []
# Ingest loop: for each station, read the first raw half-hourly CSV, filter
# out -9999 sentinel records, aggregate 48 half-hour steps into daily energy
# totals, and stash a cleaned daily DataFrame per site.
for code in codes:
    print code
    amf_name = amfdict['{a}'.format(a=code)]['Name']
    print amf_name
    folder = "C:\\Users\\David\Documents\\Recharge\\aET\\AMF_Data\\" + '{a}'.format(a=amf_name)
    os.chdir(folder)
    csvList = os.listdir(folder)
    # Only the first file in the directory listing is parsed.
    for item in csvList:
        if item == csvList[0]:
            # if item == "C:\\Users\\David\Documents\\Recharge\\aET\\AMF_Data\\" + '{a}_extract.csv'.format(a=amf_name):
            # break
            amf_recs = []
            fid = open(item)
            # print "opening file: " + '{a}'.format(a=fid)
            # Skip the 3 header lines of the raw Ameriflux CSV.
            lines = fid.readlines()[3:]
            fid.close()
            rows = [line.split(',') for line in lines]
            for line in rows:
                amf_recs.append([line[0], float(line[2]), float(line[3]),
                                 float(line[12]), float(line[14]), float(line[16]),
                                 float(line[28]), float(line[30]), float(line[33]), float(line[34]), float(line[35]),
                                 float(line[8]), float(line[7]), float(line[22]), float(line[6]), float(line[25])])
    amf_data = np.array(amf_recs)
    # All columns are strings at this point (mixed-type np.array).
    amf_data = pd.DataFrame(amf_data, columns=['year', 'dtime', 'doy', 'H', 'LE', 'FG', 'RN',
                                               'RG', 'RGout', 'RGL', 'RGLout', 'WS', 'WD', 'RH', 'TA', 'VPD'])
    norm = [x for x in range(1, 366)]
    leap = [x for x in range(1, 367)]
    leaps = ['2004', '2008', '2012']
    print 'You have {a} rows of --RAW-- data from {b}'.format(a=len(amf_data), b=amf_name)
    # (year, dtime, H, LE, FG, RN, RG, RGin, RGout)
    # H = sensible heat flux
    # LE = latent heat flux
    # FG = soil heat flux
    # RN = net radiation
    # RG = incoming shortwave
    # RGout = outgoing shortwave
    # RGL = incoming longwave
    # RGLout = outgoing longwave
    # Find data where there is both RN and LE available, and thus all energy data, as RN is calculated
    amf_data_RN = amf_data[amf_data.RN != '-9999.0']
    amf_data_RN_LE = amf_data_RN[amf_data_RN.LE != '-9999.0']
    amf_data_H = amf_data[amf_data.H != '-9999.0']
    amf_data_H_LE = amf_data_H[amf_data_H.LE != '-9999.0']
    amf_data_H_LE_RG = amf_data_H_LE[amf_data_H_LE.RG != '-9999.0']
    amf_data_H_LE_RG_RGout = amf_data_H_LE_RG[amf_data_H_LE_RG.RGout != '-9999.0']
    amf_data_H_LE_RG_RGout_RGL = amf_data_H_LE_RG_RGout[amf_data_H_LE_RG_RGout.RGL != '-9999.0']
    amf_data_H_LE_RG_RGout_RGL_RGLout = amf_data_H_LE_RG_RGout_RGL[amf_data_H_LE_RG_RGout_RGL.RGLout != '-9999.0']
    amf_data_H_LE_RG_RGout_RGL_RGLout_RN = amf_data_H_LE_RG_RGout_RGL_RGLout[amf_data_H_LE_RG_RGout_RGL_RGLout.RN != '-9999.0']
    del amf_data_H, amf_data_H_LE, amf_data_H_LE_RG, amf_data_H_LE_RG_RGout, amf_data_H_LE_RG_RGout_RGL, amf_data_H_LE_RG_RGout_RGL_RGLout
    amf_data_WS = amf_data_H_LE_RG_RGout_RGL_RGLout_RN[amf_data_H_LE_RG_RGout_RGL_RGLout_RN.WS != '-9999.0']
    amf_data_WS_WD = amf_data_WS[amf_data_WS.WD != '-9999.0']
    amf_data_WS_WD_RH = amf_data_WS_WD[amf_data_WS_WD.RH != '-9999.0']
    amf_data_WS_WD_RH_TA = amf_data_WS_WD_RH[amf_data_WS_WD_RH.TA != '-9999.0']
    # NOTE(review): filters TA twice; presumably the second filter was meant
    # to be on VPD — verify.
    amf_data_WS_WD_RH_TA_VPD = amf_data_WS_WD_RH_TA[amf_data_WS_WD_RH_TA.TA != '-9999.0']
    del amf_data_WS, amf_data_WS_WD, amf_data_WS_WD_RH, amf_data_WS_WD_RH_TA
    print 'You have {a} rows of FLUX data from {b}'.format(a=len(amf_data_H_LE_RG_RGout_RGL_RGLout_RN), b=amf_name)
    # Find all complete days (48) records with no NULL values, makes lists, merge, stack, put in dataframe object
    rn = []
    le = []
    h = []
    bal = []
    rg = []
    rgOut = []
    rl = []
    rlOut = []
    err_rn_le_h = []
    err_rn_rg_grOut_rl_rlOut = []
    date_bal = []
    doy_bal = []
    year_doy_bal = []
    aEt = []
    # x counts rows seen; y marks the row index at the last day boundary.
    x = -1
    y = -1
    rn_set = []
    le_set = []
    h_set = []
    rg_set = []
    rgOut_set = []
    rl_set = []
    rlOut_set = []
    sets = []
    # NOTE(review): duplicate initialization of `sets` (harmless).
    sets = []
    sets_set = []
    amf_test = amf_data_H_LE_RG_RGout_RGL_RGLout_RN.iloc[6900:7300, :]
    for row in amf_data_H_LE_RG_RGout_RGL_RGLout_RN.itertuples():
        # row[2] == row[3] (dtime == doy) marks the first record of a day.
        if row[2] == row[3]:
            # Keep a day only if exactly 48 consecutive records accumulated.
            if x == y + 48:
                if len(rn_set) == 48 and len(le_set) == 48:
                    # print (int(float(row[3])))
                    rn.append(sum(rn_set))
                    le.append(sum(le_set))
                    h.append(sum(h_set))
                    rg.append(sum(rg_set))
                    rgOut.append(sum(rgOut_set))
                    rl.append(sum(rl_set))
                    rlOut.append(sum(rlOut_set))
                    err_rn_rg_grOut_rl_rlOut.append(abs((sum(rn_set) - (sum(rg_set) - sum(rgOut_set) +
                                                        sum(rl_set) - sum(rlOut_set))) / sum(rn_set)))
                    err_rn_le_h.append(abs((sum(rn_set) - (sum(le_set) + sum(h_set))) / sum(rn_set)))
                    date_bal.append(datetime.datetime(int(float(row[1])), 1, 1) + datetime.timedelta(days=(int(float((row[3])) - 2))))
                    doy_bal.append(int(float(row[3])) - 1)
                    year_doy_bal.append((int(float(row[3])) - 1, row[1]))
                    # if int(float(row[3])) == 190:
                    # print row
                    bal.append(sum(rn_set) - (sum(le_set) + sum(h_set)))
                    aEt.append(sum(le_set) / 2.45) # convert from MJ/(step * m**2) to mm water
                    sets.append(sets_set)
            rn_set = []
            le_set = []
            h_set = []
            rg_set = []
            rgOut_set = []
            rl_set = []
            rlOut_set = []
            sets_set = []
            y = x
        x += 1
        sets_set.append(row)
        # convert energies to MJ
        rn_set.append(float(row[7]) * (0.0864 / 48))
        le_set.append(float(row[5]) * (0.0864 / 48))
        h_set.append(float(row[4]) * (0.0864 / 48))
        rg_set.append(float(row[8]) * (0.0864 / 48))
        rgOut_set.append(float(row[9]) * (0.0864 / 48))
        rl_set.append(float(row[10]) * (0.0864 / 48))
        rlOut_set.append(float(row[11]) * (0.0864 / 48))
    print 'You have {a} DAYS of CLEAN RN/LE/H/RAD data from {b}'.format(a=len(bal), b=amf_name)
    bal_data = zip(date_bal, doy_bal, year_doy_bal, rn, le, h, rg, rgOut, rl, rlOut, aEt, err_rn_le_h, err_rn_rg_grOut_rl_rlOut)
    bal_data = np.array(bal_data)
    print 'The mean energy balance closure error is: {a}'.format(a=np.mean(bal_data[:, 11]))
    bal_data = np.column_stack(bal_data)
    bal_data = np.transpose(bal_data)
    bal_data = pd.DataFrame(bal_data, columns=['date', 'doy', 'year_doy_bal', 'rn', 'le', 'h', 'rg',
                                               'rgOut', 'rl', 'rlOut', 'aEt', 'err_rn_le_h', 'err_rn_rg_grOut_rl_rlOut'])
    # NOTE(review): threshold below is 0.20 but the message printed next
    # says 0.10 — one of the two is stale; verify intended cutoff.
    bal_data_lowErr = bal_data[bal_data.err_rn_le_h <= 0.20]
    print 'You have {a} DAYS of [0.0 < CLOSURE ERROR < 0.10] data from {b}'.format(a=len(bal_data_lowErr), b=amf_name)
    amf_array.append(bal_data)
    amf_length.append(len(bal_data))
    filepath = 'C:\\Users\\David\\Documents\\Recharge\\aET\\AMF_Cleaned_Data'
    # bal_data.to_csv('{a}\\{b}_cleaned_all.csv'.format(a=filepath, b=amf_name))
    # bal_data_lowErr.to_csv('{a}\\{b}_cleaned_lowErr.csv'.format(a=filepath, b=amf_name))
    amf_names.append(amf_name)
    select_codes.append(code)
# del bal_data, bal_data_lowErr
# Find all complete records with no NULL values, makes lists, merge,
# stack, put in dataframe object, regardless of having full days
# rn = []
# le = []
# h = []
# bal = []
# rg = []
# rgOut = []
# rl = []
# rlOut = []
# err_rn_le_h = []
# date_bal = []
# dtime_bal = []
# aEt = []
# wd = []
# ws = []
# rh = []
# ta = []
# vpd = []
# # amf_test = amf_data_WS_WD_RH_TA_VPD.iloc[-500:, :]
# for row in amf_data_WS_WD_RH_TA_VPD.itertuples():
# rn.append(float(row[7]))
# le.append(float(row[5]))
# h.append(float(row[4]))
# rg.append(float(row[8]))
# rgOut.append(float(row[9]))
# rl.append(float(row[10]))
# rlOut.append(float(row[11]))
# err_rn_le_h.append(abs((float(row[7]) - (float(row[5]) + float(row[4]))) / float(row[7])))
# date_bal.append(datetime.datetime(int(float(row[1])), 1, 1) + datetime.timedelta(days=(int(float((row[3])) - 2))))
# dtime_bal.append(float(row[2]))
# aEt.append(float(row[5]) * 0.03525)
# ws.append(float(row[12]))
# wd.append(float(row[13]))
# rh.append(float(row[14]))
# ta.append(float(row[15]))
# vpd.append(float(row[16]))
#
# print 'You have {a} RECORDS of CLEAN RN/LE/H/RAD/METEOROLOGY data from {b}'.format(a=len(rn), b=amf_name)
# bal_data = zip(date_bal, dtime_bal, rn, le, h, rg, rgOut, rl, rlOut, aEt, err_rn_le_h, ws, wd, rh, ta, vpd)
# bal_data = np.array(bal_data)
# print 'The mean energy balance closure error is: {a}'.format(a=np.mean(bal_data[:, 10]))
# bal_data = np.column_stack(bal_data)
# bal_data = np.transpose(bal_data)
# bal_data = pd.DataFrame(bal_data, columns=['date', 'doy', 'rn', 'le', 'h', 'rg', 'rgOut', 'rl', 'rlOut',
# 'aEt', 'err_rn_le_h', 'WS', 'WD', 'RH', 'TA', 'VPD'])
# bal_data_lowErr = bal_data[bal_data.err_rn_le_h <= 0.10]
# print 'You have {a} RECORDS of [0.0 < CLOSURE ERROR < 0.10] data from {b}'.format(a=len(bal_data_lowErr),
# b=amf_name)
# amf_array.append(bal_data)
# amf_length.append(len(bal_data))
# filepath = 'C:\\Users\\David\\Documents\\Recharge\\aET\\AMF_Cleaned_Data'
# # bal_data.to_csv('{a}\\{b}_cleaned_all.csv'.format(a=filepath, b=amf_name))
# # bal_data_lowErr.to_csv('{a}\\{b}_cleaned_lowErr.csv'.format(a=filepath, b=amf_name))
# amf_names.append(amf_name)
# Bundle the per-site results (not consumed in this section of the script —
# presumably used further down; verify) and print a progress banner.
meta_amf = zip(select_codes, amf_names, amf_array, amf_length)
print codes
print amf_names
print ''
print ''
print ''
print 'Moving on to EXTRACT PARAMETERS.................................................................'
print ''
print ''
print ''
# Load up all data needed for ETRM from extract .csv
# EXTRACT PARAMETERS
for site in select_codes:
extract_name = amf_names[select_codes.index(site)]
name = 'C:\\Users\\David\\Documents\\Recharge\\aET\\AMF_extracts\\AMF{a}_extract.csv'.format(a=site)
print 'Processing site {} code AMF{}'.format(extract_name, site)
# Get a numpy object of all raster-extracted data out of the csv it is held in
recs = []
try:
fid = open(name)
# print "file: " + str(fid)
lines = fid.readlines()[:]
fid.close()
except IOError:
print "couldn't find " + '{a}'.format(a=fid)
# break
rows = [line.split(',') for line in lines]
for line in rows:
try:
recs.append([datetime.datetime.strptime(line[0], '%m/%d/%Y'), # date
float(line[1]), float(line[2]), float(line[3]), float(line[4]),
float(line[5]), float(line[6]), float(line[7]), float(line[8]),
float(line[9]), float(line[10]), float(line[11]), float(line[12]),
float(line[13]), float(line[14]), float(line[15]), float(line[16])])
except ValueError:
recs.append([datetime.datetime.strptime(line[0], '%Y-%m-%d %H:%M:%S'), # date
float(line[1]), float(line[2]), float(line[3]), float(line[4]),
float(line[5]), float(line[6]), float(line[7]), float(line[8]),
float(line[9]), float(line[10]), float(line[11]), float(line[12]),
float(line[13]), float(line[14]), float(line[15]), float(line[16])])
# ['date', 'ksat', 'soil_ksat', 'kcb', 'rlin', 'rg', 'etrs_Pm', 'plant height', 'min temp',
# 'max temp', 'temp', 'precip', 'fc', 'wp', 'taw', 'aws', 'root_z']
data = np.array(recs)
# print len(data)
# Data format now in [date, ksat, soil_ksat, kcb, rlin, rg, etrs_Pm, plant height, min temp,
# max temp, temp, prcip, fc, wp, taw, aws, root_z]
# Loop daily time step over chosen interval, computing all madel variables each day
# fit select start and end dates for each panel (site)
panel = select_codes.index(site)
df_amf = amf_array[panel]
extract_start, extract_end = data[0, 0], data[-1, 0]
df_amf = df_amf[(df_amf.iloc[:, 0] >= extract_start) & (df_amf.iloc[:, 0] <= extract_end)]
amf_start_obj, amf_end_obj = df_amf.iloc[0, 0], df_amf.iloc[-1, 0]
# Use the coincident data to only run the model during the period AMF data exists
coin_data = data[data[:, 0] >= amf_start_obj]
coin_data = coin_data[coin_data[:, 0] <= amf_end_obj]
print 'Site {a} at {d} runs from {b} to {c}'.format(a=amf_names[panel],
b=amf_start_obj, c=amf_end_obj, d=select_codes[panel])
# Create indices to plot point time series, these are empty lists that will
# be filled as the simulation progresses
pltRain = []
pltEta = []
pltEvap = []
pltSnow_fall = []
pltRo = []
pltDr = []
pltPdr = []
pltDe = []
pltDrew = []
pltTemp = []
pltTempM = []
pltDp_r = []
pltKs = []
pltEtrs = []
pltKcb = []
pltKe = []
pltMlt = []
pltSwe = []
pltDay = []
pltFs1 = []
pltPpt = []
pltKr = []
pltMass = []
pltPdrew = []
# Define user-controlled constants, these are constants to start with day one, replace
# with spin-up data when multiple years are covered
ze = 40
p = 0.4
kc_min = 0.15
infil = 0.0
precip = 0.0
et = 0.0
runoff = 0.0
ppt_tom = 0.0
fb = 0.25
swe = 0.0
ke_max = 1.0
cum_mass = 0.0
tot_transp = 0.0
tot_evap = 0.0
a_min = 0.45
a_max = 0.90
pA = a_min
print 'Starting {a}...........'.format(a=amf_names[panel])
for dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
if dday == start:
day = 0
print '......................at day zero'
else:
day += 1
day_of_year = dday.timetuple().tm_yday
loopTime = datetime.datetime.now()
# 'date', 'ksat', 'kcb', 'rlin', 'etrs_Pm', 'plant height', 'min temp',
# 'max temp', 'temp', 'precip', 'fc', 'wp', 'taw', 'aws'
if dday == start:
fc = data[0, 12] / 100.
wp = data[0, 13] / 100.
etrs = float(data[day, 6])
tew = (fc - 0.5 * wp) * ze # don't use time-dependent etrs for long-term simulations
taw = data[0, 14]
aws = data[0, 15] * 100.
print 'TAW is {a} and AWS is {b}'.format(a=taw, b=aws)
rew = min((2+(tew/3.)), 0.8 * tew)
pDr = taw
pDe = tew
pDrew = rew
dr = taw
de = tew
drew = rew
ksat_init = data[0, 2] * 86.4 / 10. # from micrometer/sec to mm/day
old_ksat = data[0, 1] * 1000 / 3.281 # from ft/dat to mm/day
print 'SSURGO Ksat is {a} and bedrock Ksat is {b}'.format(a=ksat_init, b=old_ksat)
if sMon < dday < eMon:
ksat = ksat_init / 12.
else:
ksat = ksat_init / 4.
# print 'Ksat for this day is {a} mm/day'.format(a=ksat)
# Find et and evap
kcb = data[day, 3]
etrs = data[day, 6]
kc_max_1 = kcb + 0.0001
kc_max = max(0.0001, kc_max_1)
# compute coverage/exposure of soil
plnt_hgt = data[day, 7]
plnt_term = plnt_hgt * 0.5 + 1
numr = max(kcb - kc_min, 0.01)
denom = max((kc_max - kc_min), 0.01)
fcov_ref = (numr / denom) ** plnt_term
fcov_min = min(fcov_ref, 1.00)
fcov = max(fcov_min, 0.1) # vegetation-covered ground
few = max(1 - fcov, 0.01) # exposed ground
# root zone stress coefficient
ks_ref = (taw - dr) / (0.6 * taw)
ks_min = max(ks_ref, 0.001)
ks = min(ks_min, 1.0)
# total evaporation layer reduction coefficient (aka stage 2)
kr = min((tew - de) / (tew - rew), 1.0)
# check if stage 1 evaporation is occurring
# and calculate STRESS
# remember to apply a condition for bare ground
# if NDVI < 0.05:
# ke = (fs1 + (1 - fs1) * kr) * ke_max
fsa = (rew - drew) / (ke_max * etrs)
fsb = min(fsa, 1.0)
fs1 = max(fsb, 0.0)
ke = min((fs1 + (1 - fs1) * kr) * (kc_max - (ks * kcb)), few * kc_max)
eta = (ks * kcb + ke) * etrs
eta = max(eta, 0.0)
transp = (ks * kcb) * etrs
evap_init = ke * etrs
evap = max(evap_init, 0.0)
if dday == end:
temp = data[day, 10]
max_temp = data[day, 9]
min_temp = data[day, 8]
ppt_tot = data[day, 11]
ppt_tom = 0.0
else:
temp = data[day, 10]
max_temp = data[day, 9]
min_temp = data[day, 8]
ppt_tot = data[day, 11]
ppt_tom = data[day + 1, 11]
if temp < 0.0:
snow_fall = ppt_tot
if snow_fall > 3.0:
a = a_max
else:
k = 0.12
a = a_min + (pA - a_min) * np.exp(-k)
a = min(a, a_max)
a = max(a, a_min)
rain = 0.0
rain_tom = 0.0
else:
snow_fall = 0.0
rain = ppt_tot
rain_tom = ppt_tom
k = 0.05
a = a_min + (pA - a_min) * np.exp(-k)
a = min(a, a_max)
a = max(a, a_min)
pA = a
swe = snow_fall + swe
rg = data[day, 5]
mlt_init = max(((1 - a) * rg * 0.15) + (temp - 1.5) * 3.956, 0.0)
mlt = min(swe, mlt_init)
swe = swe - mlt
# Find depletions
pDr = dr
pDe = de
pDrew = drew
watr = rain + mlt
deps = dr + de + drew
if watr <= deps:
ro = 0.0
dp_r = 0.0
elif ksat + deps > watr > deps:
ro = 0.0
dp_r = watr - deps
elif watr > ksat + deps:
ro = watr - ksat - deps
dp_r = ksat
else:
ro = 0.0
dp_r = 0.0
pass
drew_1 = min((pDrew + ((ro + (fs1 * evap)) - (rain + mlt))), rew)
drew = max(drew_1, 0.0)
diff = max(pDrew - drew, 0.0)
de_1 = min(pDe + ((1 - fs1) * evap) - (rain + mlt - diff), tew)
de = max(de_1, 0.0)
diff2 = max((diff + (pDe - de)), 0.0)
dr_1 = min((pDr + ((transp + dp_r) - (rain + mlt - diff2))), taw)
dr = max(dr_1, 0.0)
infil += dp_r
tot_transp += transp
tot_evap += evap
et += transp + evap
precip += rain + snow_fall
runoff += ro
snow_ras = swe + snow_fall - mlt
# Check MASS BALANCE for the love of WATER!!!
mass = rain + mlt - (ro + transp + evap + dp_r + ((pDr - dr) + (pDe - de) + (pDrew - drew)))
cum_mass += abs(mass)
if dday == end:
end_mass = precip - infil - runoff - snow_ras - et - ((taw - dr) + (tew - de) + (rew - drew))
# Append everything to its index plotting object (list) daily
pltDay.append(dday)
pltRain.append(rain)
pltEta.append(eta)
pltEvap.append(evap)
pltSnow_fall.append(snow_fall)
pltRo.append(ro)
pltDr.append(dr)
pltDe.append(de)
pltDrew.append(drew)
pltPdrew.append(pDrew)
pltTemp.append(temp)
pltDp_r.append(dp_r)
pltKs.append(ks)
pltPdr.append(pDr)
pltEtrs.append(etrs)
pltKcb.append(kcb)
pltPpt.append(ppt_tot)
pltKe.append(ke)
pltKr.append(kr)
pltMlt.append(mlt)
pltSwe.append(swe)
pltTempM.append(max_temp)
pltFs1.append(fs1)
pltMass.append(mass)
amf_eta_mean = np.mean(df_amf.iloc[:, 10].values)
etrm_eta_mean = np.mean(np.array(pltEta))
print ''
print 'AMF ET mean: {}mm/day ETRM ET mean: {}mm/day'.format(amf_eta_mean, etrm_eta_mean)
print 'Cumulative mass balance error: {}'.format(cum_mass)
print ''
if dday == end:
fdata = np.column_stack((pltSnow_fall, pltRain, pltMlt, pltEta, pltRo, pltDp_r, pltDr, pltDe, pltDrew, pltMass))
np.savetxt('C:\\Users\\David\\Documents\\Recharge\\aET\extra_data\\calibration_1APR.csv',
fdata, fmt=['%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f'],
delimiter=',')
# plt.plot(pltDay, pltEtrs, 'orange', label='ETRS (mm)')
# plt.plot(pltDay, pltEta, 'r', label='Evapotranspiration (mm)')
# plt.plot(df_amf.iloc[:, 0].values, df_amf.iloc[:, 10].values, 'k+', label='Measured ET from Flux Tower')
# plt.legend()
# plt.title('ETRM vs AMF Data at {}... AMF ET mean: {}mm/day ETRM ET mean: {}mm/day... Cumulative mass balance error: {}mm'.format(extract_name, amf_eta_mean, etrm_eta_mean, cum_mass))
# plt.xlabel('Date')
# plt.ylabel('(mm)')
# plt.show()
plt.subplot(5, 1, 1)
plt.plot(pltDay, pltEtrs, 'orange', label='ETRS (mm)')
plt.plot(pltDay, pltEta, 'r', label='Evapotranspiration (mm)')
plt.plot(df_amf.iloc[:, 0].values, df_amf.iloc[:, 10].values, 'k+', label='Measured ET from Flux Tower')
plt.legend()
plt.title('ETRM vs AMF Data at {}... AMF ET mean: {}mm/day ETRM ET mean:'
' {}mm/day... Cumulative mass balance error: {}mm'.format(extract_name,
amf_eta_mean, etrm_eta_mean, cum_mass))
plt.xlabel('Date')
plt.ylabel('(mm)')
plt.subplot(5, 1, 2)
plt.plot(pltDay, pltSwe, 'b', label='ETRM Snow Water Equivalent (mm)')
plt.plot(pltDay, pltRain, 'red', label='Rain (mm)')
plt.plot(pltDay, pltSnow_fall, 'b', label='Snow Fall Water Equivalent (mm)')
plt.legend()
plt.subplot(5, 1, 3)
plt.plot(pltDay, pltDp_r, 'g', label='Recharge (mm)')
plt.plot(pltDay, pltRo, 'p', label='Runoff (mm)')
plt.legend()
plt.subplot(5, 1, 4)
plt.plot(pltDay, pltMass, 'r')
plt.legend()
plt.subplot(5, 1, 5)
plt.plot(pltDay, pltKs, 'r', label='Stress Coefficient (-)')
plt.plot(pltDay, pltFs1, 'g', label='Stage 1 Evaporation Coefficient (-)')
plt.plot(pltDay, pltKr, 'b', label='Evaporation Reduction Coefficient (-)')
plt.legend()
# plt.subplot(5, 1, 1)
# plt.plot(pltDay, pltKs, 'r', label='Stress Coefficient (-)')
# plt.plot(pltDay, pltFs1, 'g', label='Stage 1 Evaporation Coefficient (-)')
# plt.plot(pltDay, pltKr, 'b', label='Evaporation Reduction Coefficient (-)')
# plt.legend()
# plt.subplot(5, 1, 2)
# plt.plot(pltDay, pltSwe, 'b', label='ETRM Snow Water Equivalent (mm)')
# plt.plot(pltDay, pltRain, 'purple', label='Rain (mm)')
# plt.plot(pltDay, pltSnow_fall, 'b', label='Snow Fall Water Equivalent (mm)')
# plt.legend()
# plt.subplot(5, 1, 3)
# plt.plot(pltDay, pltDr, 'p', label='Root Zone Depletion (mm)')
# plt.xlabel('Days')
# plt.legend()
# plt.subplot(5, 1, 4)
# plt.plot(pltDay, pltDrew, 'p', label='Skin Layer Depletion (mm)')
# plt.xlabel('Days')
# plt.subplot(5, 1, 5)
# plt.plot(pltDay, pltDe, 'p', label='Evaporation Layer Depletion (mm)')
# plt.xlabel('Days')
# plt.legend()
# plt.show()
| |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import struct
import unittest
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import time
import random
import rospy.rostime
try:
import cPickle as pickle
except ImportError:
import pickle
# currently tests rospy.rostime, rospy.simtime, and some parts of rospy.client
class TestRospyTime(unittest.TestCase):
    """Unit tests for rospy.rostime: Time/Duration arithmetic and pickling,
    wallclock vs. simulated clock switching, and rospy.sleep() behavior."""

    def setUp(self):
        # Mark the ROS clock as initialized so rostime accessors work
        # without a running master.
        rospy.rostime.set_rostime_initialized(True)

    def test_import_simtime(self):
        # trip wire test, make sure the module is loading
        import rospy.impl.simtime
        # can't actually do unit tests of simtime, requires rosunit

    def test_switch_to_wallclock(self):
        rospy.rostime.switch_to_wallclock()
        self.assertAlmostEqual(time.time(), rospy.get_time(), 1)

    def test_Time_get_setstate(self):
        """Time must survive deepcopy and pickle round-trips."""
        # use deepcopy to test getstate/setstate
        import copy, random
        a = rospy.Time(random.randint(0, 10000), random.randint(0, 10000))
        b = copy.deepcopy(a)
        self.assertEquals(a.secs, b.secs)
        self.assertEquals(a.nsecs, b.nsecs)
        # pickle round-trip must preserve both fields as well
        buff = StringIO()
        pickle.dump(a, buff)
        buff.seek(0)
        c = pickle.load(buff)
        self.assertEquals(a.secs, c.secs)
        self.assertEquals(a.nsecs, c.nsecs)

    def test_Duration_get_setstate(self):
        """Duration must survive deepcopy and pickle round-trips."""
        # use deepcopy to test getstate/setstate
        import copy, random
        a = rospy.Duration(random.randint(0, 10000), random.randint(0, 10000))
        b = copy.deepcopy(a)
        self.assertEquals(a.secs, b.secs)
        self.assertEquals(a.nsecs, b.nsecs)
        buff = StringIO()
        pickle.dump(a, buff)
        buff.seek(0)
        c = pickle.load(buff)
        self.assertEquals(a.secs, c.secs)
        self.assertEquals(a.nsecs, c.nsecs)

    def test_Time(self):
        """Exercise Time construction, conversion, and arithmetic."""
        # This is a copy of test_roslib_rostime
        from rospy import Time, Duration
        # #1600 Duration > Time should fail
        failed = False
        try:
            v = Duration.from_sec(0.1) > Time.from_sec(0.5)
            failed = True
        except: pass
        self.failIf(failed, "should have failed to compare")
        try:
            v = Time.from_sec(0.4) > Duration.from_sec(0.1)
            failed = True
        except: pass
        self.failIf(failed, "should have failed to compare")

        try: # neg time fails
            Time(-1)
            failed = True
        except: pass
        self.failIf(failed, "negative time not allowed")
        try:
            Time(1, -1000000001)
            failed = True
        except: pass
        self.failIf(failed, "negative time not allowed")

        # test Time.now() is within 10 seconds of actual time (really generous)
        import time
        t = time.time()
        v = Time.from_sec(t)
        self.assertEquals(v.to_sec(), t)
        # test from_sec()
        self.assertEquals(Time.from_sec(0), Time())
        self.assertEquals(Time.from_sec(1.), Time(1))
        self.assertEquals(Time.from_sec(v.to_sec()), v)
        self.assertEquals(v.from_sec(v.to_sec()), v)
        # test to_time()
        self.assertEquals(v.to_sec(), v.to_time())

        # test addition
        # - time + time fails
        try:
            v = Time(1,0) + Time(1, 0)
            failed = True
        except: pass
        self.failIf(failed, "Time + Time must fail")
        # - time + duration
        v = Time(1,0) + Duration(1, 0)
        self.assertEquals(Time(2, 0), v)
        v = Duration(1, 0) + Time(1,0)
        self.assertEquals(Time(2, 0), v)
        v = Time(1,1) + Duration(1, 1)
        self.assertEquals(Time(2, 2), v)
        v = Duration(1, 1) + Time(1,1)
        self.assertEquals(Time(2, 2), v)
        # nanosecond overflow must carry into seconds
        v = Time(1) + Duration(0, 1000000000)
        self.assertEquals(Time(2), v)
        v = Duration(1) + Time(0, 1000000000)
        self.assertEquals(Time(2), v)
        v = Time(100, 100) + Duration(300)
        self.assertEquals(Time(400, 100), v)
        v = Duration(300) + Time(100, 100)
        self.assertEquals(Time(400, 100), v)
        v = Time(100, 100) + Duration(300, 300)
        self.assertEquals(Time(400, 400), v)
        v = Duration(300, 300) + Time(100, 100)
        self.assertEquals(Time(400, 400), v)
        # negative nanoseconds must borrow from seconds
        v = Time(100, 100) + Duration(300, -101)
        self.assertEquals(Time(399, 999999999), v)
        v = Duration(300, -101) + Time(100, 100)
        self.assertEquals(Time(399, 999999999), v)

        # test subtraction
        try:
            v = Time(1,0) - 1
            failed = True
        except: pass
        self.failIf(failed, "Time - non Duration must fail")
        class Foob(object): pass
        try:
            v = Time(1,0) - Foob()
            failed = True
        except: pass
        self.failIf(failed, "Time - non TVal must fail")
        # - Time - Duration
        v = Time(1,0) - Duration(1, 0)
        self.assertEquals(Time(), v)
        v = Time(1,1) - Duration(-1, -1)
        self.assertEquals(Time(2, 2), v)
        v = Time(1) - Duration(0, 1000000000)
        self.assertEquals(Time(), v)
        v = Time(2) - Duration(0, 1000000000)
        self.assertEquals(Time(1), v)
        v = Time(400, 100) - Duration(300)
        self.assertEquals(Time(100, 100), v)
        v = Time(100, 100) - Duration(0, 101)
        self.assertEquals(Time(99, 999999999), v)
        # - Time - Time = Duration
        v = Time(100, 100) - Time(100, 100)
        self.assertEquals(Duration(), v)
        v = Time(100, 100) - Time(100)
        self.assertEquals(Duration(0,100), v)
        v = Time(100) - Time(200)
        self.assertEquals(Duration(-100), v)

    def test_Duration(self):
        """Exercise Duration construction, negation, and arithmetic."""
        Duration = rospy.Duration
        # test from_sec
        v = Duration(1000)
        self.assertEquals(v, Duration.from_sec(v.to_sec()))
        self.assertEquals(v, v.from_sec(v.to_sec()))
        v = Duration(0,1000)
        self.assertEquals(v, Duration.from_sec(v.to_sec()))
        self.assertEquals(v, v.from_sec(v.to_sec()))
        # test neg -- nanoseconds are normalized into [0, 1e9)
        v = -Duration(1, -1)
        self.assertEquals(-1, v.secs)
        self.assertEquals(1, v.nsecs)
        v = -Duration(-1, -1)
        self.assertEquals(1, v.secs)
        self.assertEquals(1, v.nsecs)
        v = -Duration(-1, 1)
        self.assertEquals(0, v.secs)
        self.assertEquals(999999999, v.nsecs)

        # test addition
        failed = False
        try:
            # NOTE(review): `Time` is not bound in this method's scope, so this
            # line raises NameError (swallowed by the bare except) rather than
            # actually exercising Duration + Time -- and test_Time above shows
            # Duration + Time *succeeds*. Confirm intent before tightening.
            v = Duration(1,0) + Time(1, 0)
            failed = True
        except: pass
        self.failIf(failed, "Duration + Time must fail")
        try:
            v = Duration(1,0) + 1
            failed = True
        except: pass
        self.failIf(failed, "Duration + int must fail")

        v = Duration(1,0) + Duration(1, 0)
        self.assertEquals(2, v.secs)
        self.assertEquals(0, v.nsecs)
        self.assertEquals(Duration(2, 0), v)
        v = Duration(-1,-1) + Duration(1, 1)
        self.assertEquals(0, v.secs)
        self.assertEquals(0, v.nsecs)
        self.assertEquals(Duration(), v)
        v = Duration(1) + Duration(0, 1000000000)
        self.assertEquals(2, v.secs)
        self.assertEquals(0, v.nsecs)
        self.assertEquals(Duration(2), v)
        v = Duration(100, 100) + Duration(300)
        self.assertEquals(Duration(400, 100), v)
        v = Duration(100, 100) + Duration(300, 300)
        self.assertEquals(Duration(400, 400), v)
        v = Duration(100, 100) + Duration(300, -101)
        self.assertEquals(Duration(399, 999999999), v)

        # test subtraction
        try:
            v = Duration(1,0) - 1
            failed = True
        except: pass
        self.failIf(failed, "Duration - non duration must fail")
        try:
            # NOTE(review): `Time` is unbound here too -- this is a masked
            # NameError, not a real Duration - Time check; see note above.
            v = Duration(1, 0) - Time(1,0)
            failed = True
        except: pass
        self.failIf(failed, "Duration - Time must fail")

        v = Duration(1,0) - Duration(1, 0)
        self.assertEquals(Duration(), v)
        v = Duration(-1,-1) - Duration(1, 1)
        self.assertEquals(Duration(-3, 999999998), v)
        v = Duration(1) - Duration(0, 1000000000)
        self.assertEquals(Duration(), v)
        v = Duration(2) - Duration(0, 1000000000)
        self.assertEquals(Duration(1), v)
        v = Duration(100, 100) - Duration(300)
        self.assertEquals(Duration(-200, 100), v)
        v = Duration(100, 100) - Duration(300, 101)
        self.assertEquals(Duration(-201, 999999999), v)

        # test abs
        self.assertEquals(abs(Duration()), Duration())
        self.assertEquals(abs(Duration(1)), Duration(1))
        self.assertEquals(abs(Duration(-1)), Duration(1))
        self.assertEquals(abs(Duration(0,-1)), Duration(0,1))
        self.assertEquals(abs(Duration(-1,-1)), Duration(1,1))

    def test_set_rostime(self):
        """_set_rostime must drive both get_rostime() and get_time()."""
        from rospy.rostime import _set_rostime
        from rospy import Time
        self.assertAlmostEqual(time.time(), rospy.get_time(), 1)
        for t in [Time.from_sec(1.0), Time.from_sec(4.0)]:
            _set_rostime(t)
            self.assertEquals(t, rospy.get_rostime())
            self.assertEquals(t.to_time(), rospy.get_time())

    def test_get_rostime(self):
        rospy.rostime.switch_to_wallclock()
        self.assertAlmostEqual(time.time(), rospy.get_time(), 1)
        self.assertAlmostEqual(time.time(), rospy.get_rostime().to_time(), 1)
        #rest of get_rostime implicitly tested by update_rostime tests

    def test_sleep(self):
        """rospy.sleep must work on wallclock, wake sleepers when simulated
        time jumps forward, and raise when time moves backwards."""
        # test wallclock sleep
        rospy.rostime.switch_to_wallclock()
        rospy.sleep(0.1)
        rospy.sleep(rospy.Duration.from_sec(0.1))

        from rospy.rostime import _set_rostime
        from rospy import Time
        t = Time.from_sec(1.0)
        _set_rostime(t)
        self.assertEquals(t, rospy.get_rostime())
        self.assertEquals(t.to_time(), rospy.get_time())

        import threading
        #start sleeper
        self.failIf(test_sleep_done)
        sleepthread = threading.Thread(target=sleeper, args=())
        sleepthread.setDaemon(True)
        sleepthread.start()
        time.sleep(1.0) #make sure thread is spun up
        self.failIf(test_sleep_done)
        t = Time.from_sec(1000000.0)
        _set_rostime(t)
        time.sleep(0.5) #give sleeper time to wakeup
        self.assert_(test_sleep_done, "sleeper did not wake up")

        #start duration sleeper
        self.failIf(test_duration_sleep_done)
        dursleepthread = threading.Thread(target=duration_sleeper, args=())
        dursleepthread.setDaemon(True)
        dursleepthread.start()
        time.sleep(1.0) #make sure thread is spun up
        self.failIf(test_duration_sleep_done)
        t = Time.from_sec(2000000.0)
        _set_rostime(t)
        time.sleep(0.5) #give sleeper time to wakeup
        # BUGFIX: previously asserted test_sleep_done (already True from the
        # phase above), so the duration sleeper was never actually verified.
        self.assert_(test_duration_sleep_done, "duration sleeper did not wake up")

        #start backwards sleeper
        self.failIf(test_backwards_sleep_done)
        backsleepthread = threading.Thread(target=backwards_sleeper, args=())
        backsleepthread.setDaemon(True)
        backsleepthread.start()
        time.sleep(1.0) #make sure thread is spun up
        self.failIf(test_backwards_sleep_done)
        t = Time.from_sec(1.0)
        _set_rostime(t)
        time.sleep(0.5) #give sleeper time to wakeup
        self.assert_(test_backwards_sleep_done, "backwards sleeper was not given an exception")
# Completion flag for duration_sleeper; polled by TestRospyTime.test_sleep.
test_duration_sleep_done = False
def duration_sleeper():
    """Thread target: block on a Duration-based rospy.sleep, then set the
    module-level flag so the test can observe that the sleeper woke up."""
    global test_duration_sleep_done
    rospy.sleep(rospy.Duration(10000.0))
    test_duration_sleep_done = True
# Completion flag for sleeper; polled by TestRospyTime.test_sleep.
test_sleep_done = False
def sleeper():
    """Thread target: block on a float-seconds rospy.sleep, then set the
    module-level flag so the test can observe that the sleeper woke up."""
    global test_sleep_done
    rospy.sleep(10000.0)
    test_sleep_done = True
# Completion flag for backwards_sleeper; polled by TestRospyTime.test_sleep.
test_backwards_sleep_done = False
def backwards_sleeper():
    """Thread target: sleep, expecting ROSException when simulated time is
    moved backwards; set the flag only if that exception is raised."""
    global test_backwards_sleep_done
    try:
        rospy.sleep(10000.0)
    except rospy.ROSException:
        test_backwards_sleep_done = True
| |
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule

    This is a general function, given points on a curve. For computing the
    area under the ROC-curve, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as for
        an ROC curve. If the curve is non-ascending, the result will be wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    # The trapezoidal rule needs at least one segment.
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    sign = 1
    if reorder:
        # Sort by x, breaking ties with y, so the trapezoids line up.
        sort_idx = np.lexsort((y, x))
        x, y = x[sort_idx], y[sort_idx]
    else:
        x_steps = np.diff(x)
        if np.any(x_steps < 0):
            if not np.all(x_steps <= 0):
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
            # x is monotonically decreasing: integrate with flipped sign.
            sign = -1

    return sign * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores

    This score corresponds to the area under the precision-recall curve.

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _binary_ap(y_true, y_score, sample_weight=None):
        # AP for one label: area under that label's precision-recall curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    # _average_binary_score dispatches over the requested averaging scheme.
    return _average_binary_score(_binary_ap, y_true, y_score, average,
                                 sample_weight=sample_weight)
@deprecated("Function 'auc_score' has been renamed to "
            "'roc_auc_score' and will be removed in release 0.16.")
def auc_score(y_true, y_score):
    """Compute Area Under the Curve (AUC) from prediction scores

    Note: this implementation is restricted to the binary classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels.
    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> auc_score(y_true, y_scores)
    0.75
    """
    # Thin backward-compatibility wrapper: delegates to roc_auc_score.
    return roc_auc_score(y_true, y_score)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _binary_auc(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined unless both classes appear in y_true.
        if np.unique(y_true).size != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score,
                                sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    # _average_binary_score dispatches over the requested averaging scheme.
    return _average_binary_score(_binary_auc, y_true, y_score, average,
                                 sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)

    # ensure binary classification if pos_label is not specified
    # (only the label sets {0,1}, {-1,1} or a single one of 0/-1/1 are
    # accepted without an explicit pos_label)
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values
    # (stable mergesort, then reversed, so ties keep a deterministic order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar weight of 1 broadcasts in the cumsum below
        weight = 1.

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: everything scored above a threshold that is not a
        # true positive must be a false positive
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds

    Note: this implementation is restricted to the binary classification task.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    x axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)

    precision = tps / (tps + fps)
    recall = tps / tps[-1]

    # Drop the flat segment after full recall is first reached, then reverse
    # so recall comes out decreasing; cap the curve with the conventional
    # (precision=1, recall=0) end point, which has no threshold.
    full_recall_idx = tps.searchsorted(tps[-1])
    reversed_tail = slice(full_recall_idx, None, -1)
    precision = np.r_[precision[reversed_tail], 1]
    recall = np.r_[recall[reversed_tail], 0]
    return precision, recall, thresholds[reversed_tail]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)

    Note: this implementation is restricted to the binary classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}. If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.
    pos_label : int
        Label considered as positive and others are considered negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].
    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Prepend a synthetic "no instances predicted" point so the curve
        # starts at the origin; its threshold is arbitrarily max score + 1.
        fps = np.r_[0, fps]
        tps = np.r_[0, tps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _as_rate(counts, warn_msg):
        # Normalize cumulative counts to rates; all-NaN (with a warning)
        # when the corresponding class is absent from y_true.
        if counts[-1] <= 0:
            warnings.warn(warn_msg, UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _as_rate(fps, "No negative samples in y_true, "
                        "false positive value should be meaningless")
    tpr = _as_rate(tps, "No positive samples in y_true, "
                        "true positive value should be meaningless")
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problems, where the goal is
    to give a better rank to the labels associated with each sample. The
    obtained score is always strictly greater than 0 and the best value is 1.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Reject badly formatted arrays; allow the degenerate one-label case.
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator" and \
            not (y_type == "binary" and y_true.ndim == 2):
        raise ValueError("{0} format is not supported".format(y_type))

    # CSR layout gives direct access to each sample's relevant label indices.
    y_true = csr_matrix(y_true)
    # Negate scores so that rankdata ranks high scores first.
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    total = 0.
    for row, (lo, hi) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[lo:hi]

        if relevant.size == 0 or relevant.size == n_labels:
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            total += 1.
            continue

        row_scores = y_score[row]
        rank = rankdata(row_scores, 'max')[relevant]
        L = rankdata(row_scores[relevant], 'max')
        total += np.divide(L, rank, dtype=float).mean()

    return total / n_samples
| |
# encoding=utf8
# Web Database Classification - hw2
# Advanced Database Systems
# Pedro Ferro Freitas - pff2108
# Roberto Jose de Amorim - rja2139
import urllib2
import base64
import json
import subprocess
from collections import defaultdict
from sys import argv
# Command-line configuration. The commented values below are sample/test
# settings kept for reference.
# bing = 'hTvGEgXTQ8lDLYr8nnHocn7n9GSwF5antgnogEhNDTc'
# t_es = 0.6
# t_ec = 100
# site = 'hardwarecentral.com'
bing = argv[1]         # Bing Search API account key
t_es = float(argv[2])  # specificity threshold (t_es)
t_ec = int(argv[3])    # coverage threshold (t_ec)
site = argv[4]         # web site (database) to classify
def run_query(query):
    """Run one site-restricted Bing probe query.

    Returns a tuple (top-4 result entries, total match count) from the
    Azure Marketplace Bing Search API.
    """
    # Execute query
    quoted = urllib2.quote("'site:" + site + " " + query + "'")
    request_url = 'https://api.datamarket.azure.com/Bing/SearchWeb/v1/Composite?Query=' + quoted + '&$top=4&$format=json'
    # Azure Marketplace Basic auth uses the account key as both user and password.
    credentials = base64.b64encode(bing + ':' + bing)
    request = urllib2.Request(request_url,
                              headers={'Authorization': 'Basic ' + credentials})
    payload = urllib2.urlopen(request).read()
    # payload contains the xml/json response from Bing.
    tree = json.loads(payload)
    first = tree['d']['results'][0]
    return first['Web'], int(first['WebTotal'])
class DatabaseClassifier(object):
    # Classifies a web database into a two-level category hierarchy by
    # issuing probe queries (QProber-style) and comparing per-category
    # coverage and specificity against the t_ec / t_es thresholds.
    def __init__(self):
        # Total result count per parent category (specificity denominator).
        self.sum_coverage = defaultdict(int)
        # Result count per top-level category.
        self.coverage = defaultdict(int)
        # Specificity per top-level category.
        self.specificity = defaultdict(int)
        # Same three maps for the second (leaf) level of the hierarchy.
        self.sum_coverage_sub = defaultdict(int)
        self.coverage_sub = defaultdict(int)
        self.specificity_sub = defaultdict(int)
        # Winning category path; starts at the hierarchy root.
        self.category = ['Root']
        # Number of probe queries issued per category.
        self.probe_count = defaultdict(int)
        # Top-4 result URLs keyed by (category, probe index).
        self.url_list = defaultdict(list)

    def process_root_list(self):
        # Performs Bing searches for all keywords in the root file
        winner = 'Root'
        with open(winner.lower() + '.txt') as f:
            i = 1
            for line in f:
                # Each line is "<category> <probe query>".
                value = line.split(' ', 1)
                cat = value[0]
                current_query = value[1].strip()
                result_top4, result_count = run_query(current_query)
                self.probe_count[winner] += 1
                self.coverage[cat] += result_count
                self.sum_coverage[winner] += result_count
                for result in result_top4:
                    # This stores the top four results for each query to later extract the summaries
                    self.url_list[(winner, i)].append(result['Url'].encode('utf-8'))
                i += 1
        # Descend one level using the root-level statistics gathered above.
        sub_winner = self.process_sub_list(winner)
        winner = (winner, sub_winner)
        self.category = winner

    def process_sub_list(self, parent):
        # Performs Bing searches for all keywords in the category(ies) most significant in the previous searches
        winner = []
        for key in sorted(self.coverage.keys()):
            result_coverage = self.coverage[key]
            # Normalization to calculate specificity
            result_specificity = float(result_coverage) / self.sum_coverage[parent]
            self.specificity[key] = result_specificity
            print 'Specificity for category ' + str(key) + ' is ' + str(result_specificity)
            print 'Coverage for category ' + str(key) + ' is ' + str(result_coverage)
            if result_specificity > t_es and result_coverage > t_ec:
                # If specificity and coverage are larger than thresholds, we run the queries for that subcategory
                # Reset the sub-level statistics before probing this subcategory.
                self.sum_coverage_sub = defaultdict(int)
                self.coverage_sub = defaultdict(int)
                self.specificity_sub = defaultdict(int)
                with open(key.lower() + '.txt') as f:
                    i = 1
                    for line in f:
                        value = line.split(' ', 1)
                        cat = value[0]
                        current_query = value[1].strip()
                        result_top4, result_count = run_query(current_query)
                        self.probe_count[key] += 1
                        self.coverage_sub[cat] += result_count
                        self.sum_coverage_sub[key] += result_count
                        for result in result_top4:
                            # This stores the top four results for each query to later extract the summaries
                            self.url_list[(key, i)].append(result['Url'].encode('utf-8'))
                        i += 1
                sub_key = self.process_final_specificity(key)
                key = (key, sub_key)
                winner.append(key)
        return winner

    def process_final_specificity(self, parent):
        # Calculates the specificity for the leaf categories
        winner = []
        for key in sorted(self.coverage_sub.keys()):
            result_coverage = self.coverage_sub[key]
            # Normalization to calculate specificity
            result_specificity = self.specificity[parent] * result_coverage / self.sum_coverage_sub[parent]
            self.specificity_sub[key] = result_specificity
            print 'Specificity for category ' + str(key) + ' is ' + str(result_specificity)
            print 'Coverage for category ' + str(key) + ' is ' + str(result_coverage)
            if result_specificity > t_es and result_coverage > t_ec:
                # If specificity and coverage are larger than thresholds, that leaf category is chosen for the database
                winner.append(key)
        return winner

    def print_categories(self):
        # Prints the detected category(ies) for each database.
        # self.category is ('Root', [(cat, [leaf, ...]), ...]).
        if len(self.category[1]) > 0:
            for name1 in self.category[1]:
                if len(name1[1]) > 0:
                    for name2 in name1[1]:
                        print self.category[0] + '/' + name1[0] + '/' + name2
                else:
                    print self.category[0] + '/' + name1[0]
        else:
            print self.category[0]
class ContentSummarizer(object):
    # Builds word-frequency content summaries for the winning categories by
    # fetching the pages returned by the probe queries.
    def __init__(self):
        # Word frequencies for the root summary.
        self.word_count = defaultdict(int)
        # Word frequencies for the current sub-category summary.
        self.word_count_sub = defaultdict(int)
        # Top-4 result URLs per (category, probe index), from the classifier.
        self.url_list = defaultdict(list)
        # Marks URLs already processed so each page is counted once.
        self.url_read = defaultdict(int)
        self.categories = []
        self.probe_count = defaultdict(int)

    def load_classifier(self, classifier):
        # Load relevant information from classifier to content summarizer
        self.probe_count = classifier.probe_count
        self.categories = classifier.category
        self.url_list = classifier.url_list

    def summary(self):
        # This first part extracts summaries for the categories
        for entry in self.categories[1]:
            category = entry[0]
            print '\nCreating Content Summary for: ' + category
            self.word_count_sub = defaultdict(int)
            for count in range(1, self.probe_count[category] + 1):
                # Get the URL list of the top 4 results in each probe query
                listing = self.url_list[(category, count)]
                if listing:
                    print str(count) + '/' + str(self.probe_count[category])
                    for url in listing:
                        # For each URL, extract all text from the page
                        print '\tGetting page: ' + url
                        if self.url_read[url] == 0:
                            self.process_text(url, category == 'Root')
                            self.url_read[url] = 1
            output_text_3 = file(category + '-' + site + '.txt', 'w')
            for word in sorted(self.word_count_sub.keys()):
                # Outputs the file as instructed in the assignment
                output_text_3.write('%s#%i\n' % (word, self.word_count_sub[word]))
                output_text_3.flush()
            output_text_3.close()
        # This second part extracts summaries for the root part
        category = self.categories[0]
        print '\nCreating Content Summary for: ' + category
        for count in range(1, self.probe_count[category] + 1):
            # Get the URL list of the top 4 results in each probe query
            listing = self.url_list[(category, count)]
            if listing:
                print str(count) + '/' + str(self.probe_count[category])
                for url in listing:
                    # For each URL, extract all text from the page
                    print '\tGetting page: ' + url
                    if self.url_read[url] == 0:
                        self.process_text(url, category == 'Root')
                        self.url_read[url] = 1
        output_text_2 = file('Root' + '-' + site + '.txt', 'w')
        for word in sorted(self.word_count.keys()):
            # Outputs the file as instructed in the assignment
            output_text_2.write('%s#%i\n' % (word, self.word_count[word]))
            output_text_2.flush()
        output_text_2.close()

    def process_text(self, url, root_flag):
        # lynx is used to extract text from HTML files, as instructed in the assignment
        p = subprocess.Popen("lynx '" + url + "' --dump", stdout=subprocess.PIPE, shell=True)
        (output, err) = p.communicate()
        # `reading` is False while we are inside a bracketed span that was
        # opened on a previous line and not yet closed.
        reading = True
        for line in output.split('\n'):
            if line == 'References':
                # The References keyword means that the whole textual content has been processed already
                break
            if line:
                current = 0
                # This whole routine is meant to discard all information surrounded by brackets
                if reading:
                    while line.find('[', current) > 0:
                        current = line.find('[', current)
                        if line.find(']', current) > 0:
                            c2 = line.find(']', current)
                            line = line[:current] + line[c2 + 1:]
                        else:
                            # Bracket stays open past the end of this line.
                            line = line[:current]
                            reading = False
                else:
                    if line.find(']', current) > 0:
                        # The multi-line bracketed span closes here; resume.
                        current = line.find(']', current)
                        line = line[current + 1:]
                        current = 0
                        reading = True
                        while line.find('[', current) > 0:
                            current = line.find('[', current)
                            if line.find(']', current) > 0:
                                c2 = line.find(']', current)
                                line = line[:current] + line[c2 + 1:]
                            else:
                                line = line[:current]
                                reading = False
                    else:
                        # Entire line is inside an open bracket: drop it.
                        line = ''
            # Non-alphabetic characters are considered word separators
            if line:
                new_line = ''
                for character in line:
                    if character.isalpha():
                        new_line += character.lower()
                    else:
                        new_line += ' '
                phrase = new_line.split()
                for word in phrase:
                    # The routine that counts word frequencies
                    self.word_count[word] += 1
                    if root_flag is False:
                        self.word_count_sub[word] += 1
# --- Script entry point: classify the site, then build content summaries ---
# Call the methods for database classification
print 'Classifying for website ' + site + '\n'
db_classifier = DatabaseClassifier()
db_classifier.process_root_list()
print '\n\nClassification for ' + site + ': '
db_classifier.print_categories()
# Call the methods for summary creation
print '\n\nExtracting topic content summaries...'
c_summarizer = ContentSummarizer()
c_summarizer.load_classifier(db_classifier)
c_summarizer.summary()
| |
from gym.envs.registration import registry, register, make, spec
# Each register() call below adds an environment spec to the global gym
# registry; reward_threshold is the score at which the task is "solved".
# Algorithmic
# ----------------------------------------
register(
    id='Copy-v0',
    entry_point='gym.envs.algorithmic:CopyEnv',
    timestep_limit=200,
    reward_threshold=25.0,
)
register(
    id='RepeatCopy-v0',
    entry_point='gym.envs.algorithmic:RepeatCopyEnv',
    timestep_limit=200,
    reward_threshold=75.0,
)
register(
    id='ReversedAddition-v0',
    entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
    kwargs={'rows' : 2},
    timestep_limit=200,
    reward_threshold=25.0,
)
register(
    id='ReversedAddition3-v0',
    entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
    kwargs={'rows' : 3},
    timestep_limit=200,
    reward_threshold=25.0,
)
register(
    id='DuplicatedInput-v0',
    entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
    timestep_limit=200,
    reward_threshold=9.0,
)
register(
    id='Reverse-v0',
    entry_point='gym.envs.algorithmic:ReverseEnv',
    timestep_limit=200,
    reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
    id='CartPole-v0',
    entry_point='gym.envs.classic_control:CartPoleEnv',
    timestep_limit=200,
    reward_threshold=195.0,
)
register(
    id='CartPole-v1',
    entry_point='gym.envs.classic_control:CartPoleEnv',
    timestep_limit=500,
    reward_threshold=475.0,
)
register(
    id='MountainCar-v0',
    entry_point='gym.envs.classic_control:MountainCarEnv',
    timestep_limit=200,
    reward_threshold=-110.0,
)
register(
    id='MountainCarContinuous-v0',
    entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
    timestep_limit=999,
    reward_threshold=90.0,
)
register(
    id='Pendulum-v0',
    entry_point='gym.envs.classic_control:PendulumEnv',
    timestep_limit=200,
)
register(
    id='Acrobot-v1',
    entry_point='gym.envs.classic_control:AcrobotEnv',
    timestep_limit=500,
)
# Box2d
# ----------------------------------------
register(
    id='LunarLander-v2',
    entry_point='gym.envs.box2d:LunarLander',
    timestep_limit=1000,
    reward_threshold=200,
)
register(
    id='LunarLanderContinuous-v2',
    entry_point='gym.envs.box2d:LunarLanderContinuous',
    timestep_limit=1000,
    reward_threshold=200,
)
register(
    id='BipedalWalker-v2',
    entry_point='gym.envs.box2d:BipedalWalker',
    timestep_limit=1600,
    reward_threshold=300,
)
register(
    id='BipedalWalkerHardcore-v2',
    entry_point='gym.envs.box2d:BipedalWalkerHardcore',
    timestep_limit=2000,
    reward_threshold=300,
)
register(
    id='CarRacing-v0',
    entry_point='gym.envs.box2d:CarRacing',
    timestep_limit=1000,
    reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
    id='Blackjack-v0',
    entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
    id='FrozenLake-v0',
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
    kwargs={'map_name' : '4x4'},
    timestep_limit=100,
    reward_threshold=0.78, # optimum = .8196
)
register(
    id='FrozenLake8x8-v0',
    entry_point='gym.envs.toy_text:FrozenLakeEnv',
    kwargs={'map_name' : '8x8'},
    timestep_limit=200,
    reward_threshold=0.99, # optimum = 1
)
register(
    id='NChain-v0',
    entry_point='gym.envs.toy_text:NChainEnv',
    timestep_limit=1000,
)
register(
    id='Roulette-v0',
    entry_point='gym.envs.toy_text:RouletteEnv',
    timestep_limit=100,
)
register(
    id='Taxi-v1',
    entry_point='gym.envs.toy_text.taxi:TaxiEnv',
    timestep_limit=200,
    reward_threshold=9.7, # optimum = 10.2
)
register(
    id='GuessingGame-v0',
    entry_point='gym.envs.toy_text.guessing_game:GuessingGame',
    timestep_limit=200,
)
register(
    id='HotterColder-v0',
    entry_point='gym.envs.toy_text.hotter_colder:HotterColder',
    timestep_limit=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
    id='Reacher-v1',
    entry_point='gym.envs.mujoco:ReacherEnv',
    timestep_limit=50,
    reward_threshold=-3.75,
)
register(
    id='InvertedPendulum-v1',
    entry_point='gym.envs.mujoco:InvertedPendulumEnv',
    reward_threshold=950.0,
)
register(
    id='InvertedDoublePendulum-v1',
    entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
    reward_threshold=9100.0,
)
register(
    id='HalfCheetah-v1',
    entry_point='gym.envs.mujoco:HalfCheetahEnv',
    reward_threshold=4800.0,
)
register(
    id='Hopper-v1',
    entry_point='gym.envs.mujoco:HopperEnv',
    reward_threshold=3800.0,
)
register(
    id='Swimmer-v1',
    entry_point='gym.envs.mujoco:SwimmerEnv',
    reward_threshold=360.0,
)
register(
    id='Walker2d-v1',
    entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
    id='Ant-v1',
    entry_point='gym.envs.mujoco:AntEnv',
    reward_threshold=6000.0,
)
register(
    id='Humanoid-v1',
    entry_point='gym.envs.mujoco:HumanoidEnv',
)
register(
    id='HumanoidStandup-v1',
    entry_point='gym.envs.mujoco:HumanoidStandupEnv',
)
# Atari
# ----------------------------------------
# Registers two variants per game and observation type: the default one with
# a randomized frame skip, and a "Deterministic" one with a fixed skip.
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
    'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
    'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
    'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
    'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
    'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
    'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
    'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
    'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
    for obs_type in ['image', 'ram']:
        # space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
        name = ''.join([g.capitalize() for g in game.split('_')])
        if obs_type == 'ram':
            name = '{}-ram'.format(name)
        nondeterministic = False
        if game == 'elevator_action' and obs_type == 'ram':
            # ElevatorAction-ram-v0 seems to yield slightly
            # non-deterministic observations about 10% of the time. We
            # should track this down eventually, but for now we just
            # mark it as nondeterministic.
            nondeterministic = True
        register(
            id='{}-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type},
            timestep_limit=10000,
            nondeterministic=nondeterministic,
        )
        # space_invaders is special-cased with a smaller frame skip.
        if game == 'space_invaders':
            frameskip = 3
        else:
            frameskip = 4
        register(
            # Use a deterministic frame skip.
            id='{}Deterministic-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
            timestep_limit=10000,
            nondeterministic=nondeterministic,
        )
# Board games
# ----------------------------------------
register(
    id='Go9x9-v0',
    entry_point='gym.envs.board_game:GoEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'pachi:uct:_2400',
        'observation_type': 'image3c',
        'illegal_move_mode': 'lose',
        'board_size': 9,
    },
    # The pachi player seems not to be determistic given a fixed seed.
    # (Reproduce by running 'import gym; h = gym.make('Go9x9-v0'); h.seed(1); h.reset(); h.step(15); h.step(16); h.step(17)' a few times.)
    #
    # This is probably due to a computation time limit.
    nondeterministic=True,
)
register(
    id='Go19x19-v0',
    entry_point='gym.envs.board_game:GoEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'pachi:uct:_2400',
        'observation_type': 'image3c',
        'illegal_move_mode': 'lose',
        'board_size': 19,
    },
    nondeterministic=True,
)
register(
    id='Hex9x9-v0',
    entry_point='gym.envs.board_game:HexEnv',
    kwargs={
        'player_color': 'black',
        'opponent': 'random',
        'observation_type': 'numpy3c',
        'illegal_move_mode': 'lose',
        'board_size': 9,
    },
)
# Soccer
# ----------------------------------------
register(
    id='Soccer-v0',
    entry_point='gym.envs.soccer:SoccerEnv',
    timestep_limit=1000,
    reward_threshold=1.0,
    nondeterministic=True,
)
register(
    id='SoccerEmptyGoal-v0',
    entry_point='gym.envs.soccer:SoccerEmptyGoalEnv',
    timestep_limit=1000,
    reward_threshold=10.0,
    nondeterministic=True,
)
register(
    id='SoccerAgainstKeeper-v0',
    entry_point='gym.envs.soccer:SoccerAgainstKeeperEnv',
    timestep_limit=1000,
    reward_threshold=8.0,
    nondeterministic=True,
)
# Debugging
# ----------------------------------------
# local_only environments are not uploaded to the scoreboard.
register(
    id='OneRoundDeterministicReward-v0',
    entry_point='gym.envs.debugging:OneRoundDeterministicRewardEnv',
    local_only=True
)
register(
    id='TwoRoundDeterministicReward-v0',
    entry_point='gym.envs.debugging:TwoRoundDeterministicRewardEnv',
    local_only=True
)
register(
    id='OneRoundNondeterministicReward-v0',
    entry_point='gym.envs.debugging:OneRoundNondeterministicRewardEnv',
    local_only=True
)
register(
    id='TwoRoundNondeterministicReward-v0',
    entry_point='gym.envs.debugging:TwoRoundNondeterministicRewardEnv',
    local_only=True,
)
# Parameter tuning
# ----------------------------------------
register(
    id='ConvergenceControl-v0',
    entry_point='gym.envs.parameter_tuning:ConvergenceControl',
)
register(
    id='CNNClassifierTraining-v0',
    entry_point='gym.envs.parameter_tuning:CNNClassifierTraining',
)
# Safety
# ----------------------------------------
# interpretability envs
register(
    id='PredictActionsCartpole-v0',
    entry_point='gym.envs.safety:PredictActionsCartpoleEnv',
    timestep_limit=200,
)
register(
    id='PredictObsCartpole-v0',
    entry_point='gym.envs.safety:PredictObsCartpoleEnv',
    timestep_limit=200,
)
# semi_supervised envs
# probably the easiest:
register(
    id='SemisuperPendulumNoise-v0',
    entry_point='gym.envs.safety:SemisuperPendulumNoiseEnv',
    timestep_limit=200,
)
# somewhat harder because of higher variance:
register(
    id='SemisuperPendulumRandom-v0',
    entry_point='gym.envs.safety:SemisuperPendulumRandomEnv',
    timestep_limit=200,
)
# probably the hardest because you only get a constant number of rewards in total:
register(
    id='SemisuperPendulumDecay-v0',
    entry_point='gym.envs.safety:SemisuperPendulumDecayEnv',
    timestep_limit=200,
)
# off_switch envs
register(
    id='OffSwitchCartpole-v0',
    entry_point='gym.envs.safety:OffSwitchCartpoleEnv',
    timestep_limit=200,
)
register(
    id='OffSwitchCartpoleProb-v0',
    entry_point='gym.envs.safety:OffSwitchCartpoleProbEnv',
    timestep_limit=200,
)
| |
import os
import sys
import json
import tempfile
import shutil
import utils
import logging
from urlparse import urlparse
from zips import UnzipUtil
from hashes import HashUtil
from cache import DirectoryCacheManager
from downloads import Downloader
from downloads import CurlDownloader
from utils import safe_makedirs
# Module-wide logger shared by the helper classes below.
_log = logging.getLogger('cloudfoundry')
class CloudFoundryUtil(object):
    """Helpers for bootstrapping a CloudFoundry build pack run."""

    @staticmethod
    def initialize():
        """Build and return the build pack context dictionary.

        Populates a FormattedDict from the process environment, decodes the
        VCAP_* JSON variables, records the build pack / build / cache / temp
        directories from sys.argv, ensures the directories exist and
        initializes logging.
        """
        # Open stdout unbuffered so output is not lost if the process dies.
        if hasattr(sys.stdout, 'fileno'):
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
        ctx = utils.FormattedDict()
        # Add environment variables
        ctx.update(os.environ)
        # Convert JSON env variables (format=False: take the raw value,
        # do not expand placeholders before json.loads)
        ctx['VCAP_APPLICATION'] = json.loads(ctx.get('VCAP_APPLICATION',
                                                     '{}',
                                                     format=False))
        ctx['VCAP_SERVICES'] = json.loads(ctx.get('VCAP_SERVICES',
                                                  '{}',
                                                  format=False))
        # Build Pack Location
        ctx['BP_DIR'] = os.path.dirname(os.path.dirname(sys.argv[0]))
        # User's Application Files, build droplet here
        ctx['BUILD_DIR'] = sys.argv[1]
        # Cache space for the build pack.  A conditional expression is used
        # instead of the old `and/or` idiom, which silently collapsed a
        # falsy (empty string) argument to None.
        ctx['CACHE_DIR'] = sys.argv[2] if len(sys.argv) == 3 else None
        # Temp space
        if 'TMPDIR' not in ctx:
            ctx['TMPDIR'] = tempfile.gettempdir()
        # Make sure cache & build directories exist
        if not os.path.exists(ctx['BUILD_DIR']):
            os.makedirs(ctx['BUILD_DIR'])
        if ctx['CACHE_DIR'] and not os.path.exists(ctx['CACHE_DIR']):
            os.makedirs(ctx['CACHE_DIR'])
        # Add place holder for extensions
        ctx['EXTENSIONS'] = []
        # Init Logging
        CloudFoundryUtil.init_logging(ctx)
        _log.info('CloudFoundry Initialized.')
        _log.debug("CloudFoundry Context Setup [%s]", ctx)
        return ctx

    @staticmethod
    def init_logging(ctx):
        """Configure logging.

        With BP_DEBUG set, log DEBUG to the console; otherwise log at
        BP_LOG_LEVEL (default INFO) to <BUILD_DIR>/.bp/logs/bp.log.
        """
        logFmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
        if ctx.get('BP_DEBUG', False):
            logging.basicConfig(level=logging.DEBUG, format=logFmt)
        else:
            logLevelStr = ctx.get('BP_LOG_LEVEL', 'INFO')
            logLevel = getattr(logging, logLevelStr, logging.INFO)
            logDir = os.path.join(ctx['BUILD_DIR'], '.bp', 'logs')
            safe_makedirs(logDir)
            logging.basicConfig(level=logLevel, format=logFmt,
                                filename=os.path.join(logDir, 'bp.log'))

    @staticmethod
    def load_json_config_file_from(folder, cfgFile):
        """Load the JSON config file located at folder/cfgFile."""
        return CloudFoundryUtil.load_json_config_file(os.path.join(folder,
                                                                   cfgFile))

    @staticmethod
    def load_json_config_file(cfgPath):
        """Load JSON from cfgPath; return {} if the file does not exist."""
        if os.path.exists(cfgPath):
            _log.debug("Loading config from [%s]", cfgPath)
            with open(cfgPath, 'rt') as cfgFile:
                return json.load(cfgFile)
        return {}
class CloudFoundryInstaller(object):
    # Downloads, verifies, caches and unpacks binaries and files into the
    # application droplet, driven by keys in the build pack context `ctx`.
    def __init__(self, ctx):
        self._log = _log
        self._ctx = ctx
        self._unzipUtil = UnzipUtil(ctx)
        self._hashUtil = HashUtil(ctx)
        self._dcm = DirectoryCacheManager(ctx)
        # _get_downloader returns a class; the second call instantiates it.
        self._dwn = self._get_downloader(ctx)(ctx)

    def _get_downloader(self, ctx):
        """Select the downloader class from ctx['DOWNLOAD_METHOD'].

        Supported methods are 'python' (default), 'curl' and 'custom'
        (fully-qualified class name in ctx['DOWNLOAD_CLASS']).  Falls back
        to the python Downloader when a custom class cannot be resolved.
        """
        method = ctx.get('DOWNLOAD_METHOD', 'python')
        if method == 'python':
            self._log.debug('Using python downloader.')
            return Downloader
        elif method == 'curl':
            self._log.debug('Using cURL downloader.')
            return CurlDownloader
        elif method == 'custom':
            fullClsName = ctx['DOWNLOAD_CLASS']
            self._log.debug('Using custom downloader [%s].', fullClsName)
            dotLoc = fullClsName.rfind('.')
            if dotLoc >= 0:
                clsName = fullClsName[dotLoc + 1: len(fullClsName)]
                modName = fullClsName[0:dotLoc]
                m = __import__(modName, globals(), locals(), [clsName])
                try:
                    return getattr(m, clsName)
                except AttributeError:
                    self._log.exception(
                        'WARNING: DOWNLOAD_CLASS not found!')
            else:
                self._log.error(
                    'WARNING: DOWNLOAD_CLASS invalid, must include '
                    'package name!')
        # Fallback: unknown method or custom class resolution failed.
        return Downloader

    def _is_url(self, val):
        # Anything with a URL scheme (http://, file://, ...) counts as a URL.
        return urlparse(val).scheme != ''

    def install_binary_direct(self, url, hsh, installDir,
                              fileName=None, strip=False):
        """Download, verify, cache and extract a binary archive.

        url        -> location of the archive
        hsh        -> expected digest, or a URL to download the digest from
        installDir -> directory to extract into
        fileName   -> cache key / local name; defaults to the last path
                      segment of url
        strip      -> passed to the unzip utility to strip the archive's
                      top-level directory
        """
        self._log.debug("Installing direct [%s]", url)
        if not fileName:
            fileName = url.split('/')[-1]
        if self._is_url(hsh):
            digest = self._dwn.download_direct(hsh)
        else:
            digest = hsh
        self._log.debug(
            "Installing [%s] with digest [%s] into [%s] with "
            "name [%s] stripping [%s]",
            url, digest, installDir, fileName, strip)
        fileToInstall = self._dcm.get(fileName, digest)
        if fileToInstall is None:
            # Cache miss: download to TMPDIR, hash it, and store in cache.
            self._log.debug('File [%s] not in cache.', fileName)
            fileToInstall = os.path.join(self._ctx['TMPDIR'], fileName)
            self._dwn.download(url, fileToInstall)
            digest = self._hashUtil.calculate_hash(fileToInstall)
            fileToInstall = self._dcm.put(fileName, fileToInstall, digest)
        return self._unzipUtil.extract(fileToInstall,
                                       installDir,
                                       strip)

    def install_binary(self, installKey):
        """Install the binary described by <installKey>_* context keys."""
        self._log.debug('Installing [%s]', installKey)
        url = self._ctx['%s_DOWNLOAD_URL' % installKey]
        # Hash URL defaults to "<url>.<algorithm>" next to the download.
        hashUrl = self._ctx.get(
            '%s_HASH_DOWNLOAD_URL' % installKey,
            "%s.%s" % (url, self._ctx['CACHE_HASH_ALGORITHM']))
        installDir = os.path.join(self._ctx['BUILD_DIR'],
                                  self._ctx.get(
                                      '%s_PACKAGE_INSTALL_DIR' % installKey,
                                      installKey.lower()))
        strip = self._ctx.get('%s_STRIP' % installKey, False)
        return self.install_binary_direct(url, hashUrl, installDir,
                                          strip=strip)

    def _install_from(self, fromPath, fromLoc, toLocation=None, ignore=None):
        """Copy file or directory from a location to the droplet

        Copies a file or directory from a location to the application
        droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative build pack
            fromLoc    -> root of the from path.  Full path to file or
                          directory to be copied is fromLoc + fromPath
            toLocation -> optional location where to copy the file
                          relative to app droplet.  If not specified
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._log.debug("Install file [%s] from [%s]", fromPath, fromLoc)
        fullPathFrom = os.path.join(fromLoc, fromPath)
        if os.path.exists(fullPathFrom):
            fullPathTo = os.path.join(
                self._ctx['BUILD_DIR'],
                ((toLocation is None) and fromPath or toLocation))
            safe_makedirs(os.path.dirname(fullPathTo))
            self._log.debug("Copying [%s] to [%s]", fullPathFrom, fullPathTo)
            if os.path.isfile(fullPathFrom):
                shutil.copy(fullPathFrom, fullPathTo)
            else:
                utils.copytree(fullPathFrom, fullPathTo, ignore=ignore)

    def install_from_build_pack(self, fromPath, toLocation=None, ignore=None):
        """Copy file or directory from the build pack to the droplet

        Copies a file or directory from the build pack to the application
        droplet. Directories are copied recursively, but specific files
        in those directories can be ignored by specifying the ignore
        parameter.

            fromPath   -> file to copy, relative build pack
            toLocation -> optional location where to copy the file
                          relative to app droplet.  If not specified
                          uses fromPath.
            ignore     -> an optional callable that is passed to
                          the ignore argument of shutil.copytree.
        """
        self._install_from(
            fromPath,
            self._ctx['BP_DIR'],
            toLocation,
            ignore)

    def install_from_application(self, fromPath, toLocation, ignore=None):
        """Copy file or directory from one place to another in the application

        Copies a file or directory from one place to another place within the
        application droplet.

            fromPath   -> file or directory to copy, relative
                          to application droplet.
            toLocation -> location where to copy the file,
                          relative to app droplet.
            ignore     -> optional callable that is passed to the
                          ignore argument of shutil.copytree
        """
        self._install_from(
            fromPath,
            self._ctx['BUILD_DIR'],
            toLocation,
            ignore)
| |
""" Change index / Reorder / Reverse order of limits of Sums and Products"""
from __future__ import print_function, division
from sympy.concrete import Product, Sum
from sympy import S
class ReorderError(NotImplementedError):
    """
    Exception raised when trying to reorder dependent limits.
    """
    def __init__(self, expr, msg):
        message = "%s could not be reordered: %s." % (expr, msg)
        super(ReorderError, self).__init__(message)
def index(expr, x):
    """
    Return the index of a limit variable.

    Usage
    =====

    ``index(expr, x)`` returns the index of the limit variable ``x`` in the
    limits of ``expr``. Note that we start counting with 0 at the inner-most
    limits tuple.

    Raises ``ValueError`` when ``x`` does not occur exactly once among the
    limit variables, and ``NotImplementedError`` when ``expr`` is neither a
    Sum nor a Product (previously such expressions silently returned None).

    Examples
    ========

    >>> from sympy.concrete.simplification import index
    >>> from sympy.abc import x, y, a, b, c, d
    >>> from sympy import Sum, Product
    >>> index(Sum(x*y, (x, a, b), (y, c, d)), x)
    0
    >>> index(Sum(x*y, (x, a, b), (y, c, d)), y)
    1
    >>> index(Product(x*y, (x, a, b), (y, c, d)), x)
    0
    >>> index(Product(x*y, (x, a, b), (y, c, d)), y)
    1

    See Also
    ========

    sympy.concrete.simplification.change_index,
    sympy.concrete.simplification.reorder_limit,
    sympy.concrete.simplification.reorder,
    sympy.concrete.simplification.reverse_order
    """
    if isinstance(expr, (Sum, Product)):
        variables = [limit[0] for limit in expr.limits]
        if variables.count(x) != 1:
            raise ValueError(expr, "Number of instances of variable not equal to one")
        return variables.index(x)
    # Fail loudly for unsupported expression types, mirroring the error
    # style used by change_index.
    raise NotImplementedError(expr, "index only implemented for Sum and Product")
def change_index(expr, var, trafo, newvar=None):
    r"""
    Change index of a Sum or Product.

    Perform a linear transformation `x \mapsto a x + b` on the index variable
    `x`. For `a` the only values allowed are `\pm 1`. A new variable to be used
    after the change of index can also be specified.

    Usage
    =====

    ``change_index(expr, var, trafo, newvar=None)`` where ``var`` specifies the
    index variable `x` to transform. The transformation ``trafo`` must be linear
    and given in terms of ``var``. If the optional argument ``newvar`` is
    provided then ``var`` gets replaced by ``newvar`` in the final expression.

    Examples
    ========

    >>> from sympy.concrete.simplification import change_index
    >>> from sympy import Sum, Product, simplify
    >>> from sympy.abc import x, y, a, b, c, d, u, v, i, j, k, l

    >>> S = Sum(x, (x, a, b))
    >>> S.doit()
    -a**2/2 + a/2 + b**2/2 + b/2

    >>> Sn = change_index(S, x, x + 1, y)
    >>> Sn
    Sum(y - 1, (y, a + 1, b + 1))
    >>> Sn.doit()
    -a**2/2 + a/2 + b**2/2 + b/2

    >>> Sn = change_index(S, x, -x, y)
    >>> Sn
    Sum(-y, (y, -b, -a))
    >>> Sn.doit()
    -a**2/2 + a/2 + b**2/2 + b/2

    >>> Sn = change_index(S, x, x+u)
    >>> Sn
    Sum(-u + x, (x, a + u, b + u))
    >>> Sn.doit()
    -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
    >>> simplify(Sn.doit())
    -a**2/2 + a/2 + b**2/2 + b/2

    >>> Sn = change_index(S, x, -x - u, y)
    >>> Sn
    Sum(-u - y, (y, -b - u, -a - u))
    >>> Sn.doit()
    -a**2/2 - a*u + a/2 + b**2/2 + b*u + b/2 - u*(-a + b + 1) + u
    >>> simplify(Sn.doit())
    -a**2/2 + a/2 + b**2/2 + b/2

    >>> P = Product(i*j**2, (i, a, b), (j, c, d))
    >>> P
    Product(i*j**2, (i, a, b), (j, c, d))
    >>> P2 = change_index(P, i, i+3, k)
    >>> P2
    Product(j**2*(k - 3), (k, a + 3, b + 3), (j, c, d))
    >>> P3 = change_index(P2, j, -j, l)
    >>> P3
    Product(l**2*(k - 3), (k, a + 3, b + 3), (l, -d, -c))

    When dealing with symbols only, we can make a
    general linear transformation:

    >>> Sn = change_index(S, x, u*x+v, y)
    >>> Sn
    Sum((-v + y)/u, (y, b*u + v, a*u + v))
    >>> Sn.doit()
    -v*(a*u - b*u + 1)/u + (a**2*u**2/2 + a*u*v + a*u/2 - b**2*u**2/2 - b*u*v + b*u/2 + v)/u
    >>> simplify(Sn.doit())
    a**2*u/2 + a/2 - b**2*u/2 + b/2

    However, the last result can be inconsistent with usual
    summation where the index increment is always 1. This is
    obvious as we get back the original value only for ``u``
    equal +1 or -1.

    See Also
    ========

    sympy.concrete.simplification.index,
    sympy.concrete.simplification.reorder_limit,
    sympy.concrete.simplification.reorder,
    sympy.concrete.simplification.reverse_order
    """
    if newvar is None:
        newvar = var
    limits = []
    for limit in expr.limits:
        if limit[0] == var:
            p = trafo.as_poly(var)
            if p.degree() != 1:
                raise ValueError("Index transformation is not linear")
            # trafo == alpha*var + beta
            alpha = p.coeff_monomial(var)
            beta = p.coeff_monomial(S.One)
            if alpha.is_number:
                if alpha == S.One:
                    limits.append((newvar, alpha*limit[1] + beta, alpha*limit[2] + beta))
                elif alpha == S.NegativeOne:
                    # alpha == -1 swaps the lower and upper limits.
                    limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
                else:
                    raise ValueError("Linear transformation results in non-linear summation stepsize")
            else:
                # Note that the case of alpha being symbolic can give issues if alpha < 0.
                limits.append((newvar, alpha*limit[2] + beta, alpha*limit[1] + beta))
        else:
            limits.append(limit)
    # NOTE(review): alpha and beta are only bound inside the loop above, so
    # this line raises NameError when ``var`` does not appear among the
    # limits of ``expr`` — confirm callers always pass a valid index variable.
    function = expr.function.subs(var, (var - beta)/alpha)
    function = function.subs(var, newvar)
    if isinstance(expr, Sum):
        return Sum(function, *tuple(limits))
    elif isinstance(expr, Product):
        return Product(function, *tuple(limits))
    else:
        raise NotImplementedError(expr, "change_index only implemented for Sum and Product")
def reorder(expr, *arg):
    """
    Reorder limits in an expression containing a Sum or a Product.

    Usage
    =====

    ``reorder(expr, *arg)`` swaps pairs of limits in ``expr``. Each element
    of ``arg`` is a two-tuple of selectors; a selector is either a numerical
    limit position (counted from the inner-most limit) or an index variable
    name. The swaps are applied left to right.

    Examples
    ========

    >>> from sympy.concrete.simplification import reorder
    >>> from sympy import Sum, Product
    >>> from sympy.abc import x, y, z, a, b, c, d, e, f

    >>> reorder(Sum(x*y, (x, a, b), (y, c, d)), (x, y))
    Sum(x*y, (y, c, d), (x, a, b))

    >>> reorder(Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)), (x, y), (x, z), (y, z))
    Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))

    Numerical positions (counted from the inner-most limit) and variable
    names can be mixed freely:

    >>> reorder(Sum(x**2, (x, a, b), (x, c, d)), (0, 1))
    Sum(x**2, (x, c, d), (x, a, b))

    >>> reorder(Sum(x*y, (x, a, b), (y, c, d)), (y, 0))
    Sum(x*y, (y, c, d), (x, a, b))

    See Also
    ========

    sympy.concrete.simplification.index,
    sympy.concrete.simplification.reorder_limit,
    sympy.concrete.simplification.change_index,
    sympy.concrete.simplification.reverse_order
    """
    result = expr
    for pair in arg:
        if len(pair) != 2:
            raise ValueError(pair, "Invalid number of arguments")
        # Resolve variable-name selectors against the *original* expression;
        # integer selectors are already numerical positions.
        positions = [sel if isinstance(sel, int) else index(expr, sel)
                     for sel in pair]
        result = reorder_limit(result, positions[0], positions[1])
    return result
def reorder_limit(expr, x, y):
    """
    Interchange two limit tuples of a Sum or Product expression.

    Usage
    =====

    ``reorder_limit(expr, x, y)`` swaps the limits at the integer positions
    ``x`` and ``y`` of ``expr``, which has to be either a Sum or a Product.
    A ``ReorderError`` is raised when a bound of either selected limit
    depends on one of the index variables, since swapping would then change
    the meaning of the expression.

    Examples
    ========

    >>> from sympy.concrete.simplification import reorder_limit
    >>> from sympy.abc import x, y, z, a, b, c, d, e, f
    >>> from sympy import Sum, Product

    >>> reorder_limit(Sum(x*y*z, (x, a, b), (y, c, d), (z, e, f)), 0, 2)
    Sum(x*y*z, (z, e, f), (y, c, d), (x, a, b))
    >>> reorder_limit(Sum(x**2, (x, a, b), (x, c, d)), 1, 0)
    Sum(x**2, (x, c, d), (x, a, b))

    >>> reorder_limit(Product(x*y*z, (x, a, b), (y, c, d), (z, e, f)), 0, 2)
    Product(x*y*z, (z, e, f), (y, c, d), (x, a, b))

    See Also
    ========

    sympy.concrete.simplification.index,
    sympy.concrete.simplification.change_index,
    sympy.concrete.simplification.reorder,
    sympy.concrete.simplification.reverse_order
    """
    bound_vars = set(limit[0] for limit in expr.limits)
    first = expr.limits[x]
    second = expr.limits[y]
    # The swap is only safe when no lower/upper bound of either limit
    # references any of the index variables.
    dependent = any(
        set(bound.free_symbols).intersection(bound_vars)
        for lim in (first, second) for bound in (lim[1], lim[2]))
    if dependent:
        raise ReorderError(expr, "could not interchange the two limits specified")
    new_limits = list(expr.limits)
    new_limits[x] = second
    new_limits[y] = first
    if isinstance(expr, Sum):
        return Sum(expr.function, *new_limits)
    if isinstance(expr, Product):
        return Product(expr.function, *new_limits)
    raise NotImplementedError(expr, "reorder only implemented for Sum and Product")
def reverse_order(expr, *indices):
    """
    Reverse the order of a limit in a Sum or Product.

    Usage
    =====

    ``reverse_order(expr, *indices)`` reverses the limits selected by
    ``indices`` in ``expr``, which can be either a ``Sum`` or a ``Product``.
    Each selector is either an index variable name or a numerical position
    counted from the inner-most limit tuple. Reversing a Sum limit negates
    the summand; reversing a Product limit inverts the factor.

    Examples
    ========

    >>> from sympy.concrete.simplification import reverse_order
    >>> from sympy import Sum, Product
    >>> from sympy.abc import x, y, a, b, c, d

    >>> reverse_order(Sum(x, (x, 0, 3)), x)
    Sum(-x, (x, 4, -1))

    >>> reverse_order(Sum(x*y, (x, 1, 5), (y, 0, 6)), x, y)
    Sum(x*y, (x, 6, 0), (y, 7, -1))

    >>> reverse_order(Sum(x, (x, a, b)), x)
    Sum(-x, (x, b + 1, a - 1))

    >>> reverse_order(Sum(x, (x, a, b)), 0)
    Sum(-x, (x, b + 1, a - 1))

    >>> reverse_order(Product(x, (x, a, b)), x)
    Product(1/x, (x, b + 1, a - 1))

    While one should prefer variable names when specifying which limits to
    reverse, the index counting notation comes in handy in case there are
    several symbols with the same name, and both notations can be mixed:

    >>> reverse_order(Sum(x*y, (x, a, b), (y, 2, 5)), x, 1)
    Sum(x*y, (x, b + 1, a - 1), (y, 6, 1))

    See Also
    ========

    sympy.concrete.simplification.index,
    sympy.concrete.simplification.change_index,
    sympy.concrete.simplification.reorder_limit,
    sympy.concrete.simplification.reorder

    References
    ==========

    .. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
           Volume 28 Issue 2, April 1981, Pages 305-350
           http://dl.acm.org/citation.cfm?doid=322248.322255
    """
    if not (isinstance(expr, Sum) or isinstance(expr, Product)):
        # Anything that is not a Sum or Product passes through untouched.
        return expr
    # Normalize every selector to a numerical limit position.
    positions = set(sel if isinstance(sel, int) else index(expr, sel)
                    for sel in indices)
    sign = 1
    new_limits = []
    for pos, limit in enumerate(expr.limits):
        if pos in positions:
            # Each reversed limit flips the overall sign/exponent once.
            sign = -sign
            limit = (limit[0], limit[2] + 1, limit[1] - 1)
        new_limits.append(limit)
    if isinstance(expr, Sum):
        return Sum(sign * expr.function, *new_limits)
    return Product(expr.function ** sign, *new_limits)
| |
"""
The "engine room" of django mailer.
Methods here actually handle the sending of queued messages.
"""
from django.utils.encoding import smart_str
from django_mailer import constants, models, settings
from lockfile import FileLock, AlreadyLocked, LockTimeout
from socket import error as SocketError
import logging
import smtplib
import tempfile
import time
import os
if constants.EMAIL_BACKEND_SUPPORT:
from django.core.mail import get_connection
else:
from django.core.mail import SMTPConnection as get_connection
LOCK_PATH = settings.LOCK_PATH or os.path.join(tempfile.gettempdir(),
'send_mail')
logger = logging.getLogger('django_mailer.engine')
def _message_queue(block_size):
    """
    Yield queued (non-deferred) messages, re-querying in blocks.

    Fetching in blocks means newly queued, higher prioritised messages get
    picked up while a large backlog is still being iterated.

    To avoid an infinite loop, yielded messages *must* be deleted or
    deferred by the consumer.
    """
    def fetch_block():
        # Re-evaluate the queryset each time so new messages are seen.
        queryset = models.QueuedMessage.objects.non_deferred().select_related()
        return queryset[:block_size] if block_size else queryset

    block = fetch_block()
    while block:
        for queued_message in block:
            yield queued_message
        block = fetch_block()
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started again
    while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated in
    blocks, allowing new prioritised messages to be inserted during iteration
    of a large number of queued messages.

    ``backend`` is passed through to ``get_connection`` when the installed
    Django supports pluggable email backends.
    """
    lock = FileLock(LOCK_PATH)

    logger.debug("Acquiring lock...")
    try:
        # lockfile has a bug dealing with a negative LOCK_WAIT_TIMEOUT (which
        # is the default if it's not provided) systems which use a LinkFileLock
        # so ensure that it is never a negative number.
        lock.acquire(settings.LOCK_WAIT_TIMEOUT or 0)
    except AlreadyLocked:
        logger.debug("Lock already in place. Exiting.")
        return
    except LockTimeout:
        logger.debug("Waiting for the lock timed out. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()

    sent = deferred = skipped = 0

    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        try:
            for message in _message_queue(block_size):
                result = send_queued_message(message, smtp_connection=connection,
                                             blacklist=blacklist)
                if result == constants.RESULT_SENT:
                    sent += 1
                elif result == constants.RESULT_FAILED:
                    deferred += 1
                elif result == constants.RESULT_SKIPPED:
                    skipped += 1
        finally:
            # Fix: previously the connection was only closed on the success
            # path, leaking the SMTP connection if sending raised.
            connection.close()
    finally:
        logger.debug("Releasing lock...")
        lock.release()
        logger.debug("Lock released.")

    logger.debug("")
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info

    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def send_loop(empty_queue_sleep=None):
    """
    Loop indefinitely, sending all queued messages whenever any appear.

    The polling interval (in seconds) can be provided as the
    ``empty_queue_sleep`` argument. The default is attempted to be
    retrieved from the ``MAILER_EMPTY_QUEUE_SLEEP`` setting (or if not
    set, 30s is used).
    """
    if not empty_queue_sleep:
        empty_queue_sleep = settings.EMPTY_QUEUE_SLEEP
    while True:
        # Poll until at least one message is queued, then drain the queue.
        while not models.QueuedMessage.objects.all():
            logger.debug("Sleeping for %s seconds before checking queue "
                         "again." % empty_queue_sleep)
            time.sleep(empty_queue_sleep)
        send_all()
def send_queued_message(queued_message, smtp_connection=None, blacklist=None,
                        log=True):
    """
    Send a queued message, returning a response code as to the action taken.

    The response codes can be found in ``django_mailer.constants``. The
    response will be either ``RESULT_SKIPPED`` for a blacklisted email,
    ``RESULT_FAILED`` for a deferred message or ``RESULT_SENT`` for a
    successful sent message.

    To allow optimizations if multiple messages are to be sent, an SMTP
    connection can be provided and a list of blacklisted email addresses.
    Otherwise an SMTP connection will be opened to send this message and the
    email recipient address checked against the ``Blacklist`` table.

    If the message recipient is blacklisted, the message will be removed from
    the queue without being sent. Otherwise, the message is attempted to be
    sent with an SMTP failure resulting in the message being flagged as
    deferred so it can be tried again later.

    By default, a log is created as to the action. Either way, the original
    message is not deleted.
    """
    message = queued_message.message
    if smtp_connection is None:
        smtp_connection = get_connection()
    opened_connection = False

    if blacklist is None:
        # No pre-fetched blacklist supplied: check the database directly.
        blacklisted = models.Blacklist.objects.filter(email=message.to_address)
    else:
        blacklisted = message.to_address in blacklist

    log_message = ''
    if blacklisted:
        logger.info("Not sending to blacklisted email: %s" %
                    message.to_address.encode("utf-8"))
        queued_message.delete()
        result = constants.RESULT_SKIPPED
    else:
        try:
            logger.info("Sending message to %s: %s" %
                        (message.to_address.encode("utf-8"),
                         message.subject.encode("utf-8")))
            opened_connection = smtp_connection.open()
            smtp_connection.connection.sendmail(message.from_address,
                                                [message.to_address],
                                                smart_str(message.encoded_message))
            queued_message.delete()
            result = constants.RESULT_SENT
        # Fix: ``except (...), err`` is Python-2-only syntax (a SyntaxError
        # on Python 3); PEP 3110 ``as err`` works on Python 2.6+ as well.
        except (SocketError, smtplib.SMTPSenderRefused,
                smtplib.SMTPRecipientsRefused,
                smtplib.SMTPAuthenticationError) as err:
            queued_message.defer()
            logger.warning("Message to %s deferred due to failure: %s" %
                           (message.to_address.encode("utf-8"), err))
            log_message = unicode(err)
            result = constants.RESULT_FAILED
    if log:
        models.Log.objects.create(message=message, result=result,
                                  log_message=log_message)
    if opened_connection:
        smtp_connection.close()
    return result
def send_message(email_message, smtp_connection=None):
    """
    Send an EmailMessage, returning a response code as to the action taken.

    The response codes can be found in ``django_mailer.constants``. The
    response will be either ``RESULT_FAILED`` for a failed send or
    ``RESULT_SENT`` for a successfully sent message.

    To allow optimizations if multiple messages are to be sent, an SMTP
    connection can be provided. Otherwise an SMTP connection will be opened
    to send this message.

    This function does not perform any logging or queueing.
    """
    if smtp_connection is None:
        smtp_connection = get_connection()

    # Only close the connection later if we were the ones who opened it.
    opened_connection = False
    try:
        opened_connection = smtp_connection.open()
        smtp_connection.connection.sendmail(
            email_message.from_email,
            email_message.recipients(),
            email_message.message().as_string())
        result = constants.RESULT_SENT
    except (SocketError, smtplib.SMTPSenderRefused,
            smtplib.SMTPRecipientsRefused,
            smtplib.SMTPAuthenticationError):
        result = constants.RESULT_FAILED

    if opened_connection:
        smtp_connection.close()
    return result
| |
# -*- coding: utf-8 -*-
from tests import settings
from .resources import (PreAuthorization, PreAuthorizedPayIn)
from .test_base import BaseTest
from mangopay.utils import Money
from datetime import date
import responses
import time
class PreAuthorizationsTest(BaseTest):
    """Tests for the PreAuthorization and PreAuthorizedPayIn resources.

    All HTTP traffic is stubbed with the ``responses`` library; the mock
    payloads mirror MangoPay sandbox API responses. Helpers such as
    ``mock_natural_user``/``mock_card``/``register_mock`` come from
    ``BaseTest``.
    """

    @responses.activate
    def test_create_preauthorization(self):
        """Create a pre-authorization, then update it (cancel) in place."""
        self.mock_natural_user()
        self.mock_card()
        self.register_mock([
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1167495',
                'body': {
                    "FirstName": "Victor",
                    "LastName": "Hugo",
                    "Address": {
                        "AddressLine1": "AddressLine1",
                        "AddressLine2": "AddressLine2",
                        "City": "City",
                        "Region": "Region",
                        "PostalCode": "11222",
                        "Country": "FR"
                    },
                    "Birthday": int(time.mktime(date.today().timetuple())),
                    "Nationality": "FR",
                    "CountryOfResidence": "FR",
                    "Occupation": "Writer",
                    "IncomeRange": 6,
                    "ProofOfIdentity": None,
                    "ProofOfAddress": None,
                    "PersonType": "NATURAL",
                    "Email": "victor@hugo.com",
                    # NOTE(review): this Id (1169419) differs from the
                    # requested user id in the URL (1167495) — confirm
                    # whether that is intentional mock data.
                    "Id": "1169419",
                    "Tag": "custom tag",
                    "CreationDate": 1383321421,
                    "KYCLevel": "LIGHT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/card/direct',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "WAITING",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "DEFAULT",
                    "CardId": "1208983",
                    "SecureModeReturnURL": None,
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": True,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209003',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 1000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "CREATED",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "FORCE",
                    "CardId": "1208983",
                    "SecureModeReturnURL": "https://www.mysite.com/secure?preAuthorizationId=1209003",
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": None,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.PUT,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209003',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 1000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "CANCELED",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "FORCE",
                    "CardId": "1208983",
                    "SecureModeReturnURL": "https://www.mysite.com/secure?preAuthorizationId=1209003",
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": None,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.PUT,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209003',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 1000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "CREATED",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "FORCE",
                    "CardId": "1208983",
                    "SecureModeReturnURL": "https://www.mysite.com/secure?preAuthorizationId=1209003",
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": None,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209003',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 1000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "CREATED",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "FORCE",
                    "CardId": "1208983",
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            }])

        params = {
            "author": self.card.user,
            "card": self.card,
            "debited_funds": Money(amount=10000, currency='EUR'),
            "secure_mode": "DEFAULT",
            "secure_mode_return_url": "http://www.ulule.com/"
        }
        preauthorization = PreAuthorization(**params)

        # No primary key before the first save (POST).
        self.assertIsNone(preauthorization.get_pk())
        preauthorization.save()
        self.assertIsInstance(preauthorization, PreAuthorization)

        self.assertEqual(preauthorization.status, 'SUCCEEDED')
        self.assertEqual(preauthorization.payment_status, 'WAITING')
        # Mock response carries SecureModeReturnURL=None, overriding the
        # value passed in params.
        self.assertEqual(preauthorization.secure_mode_return_url, None)
        params.pop('secure_mode_return_url')

        self.assertEqual(preauthorization.debited_funds.amount, 10000)
        params.pop('debited_funds')

        # All remaining constructor params round-trip unchanged.
        for key, value in params.items():
            self.assertEqual(getattr(preauthorization, key), value)

        self.assertIsNotNone(preauthorization.get_pk())

        # Test update
        previous_pk = preauthorization.get_pk()

        preauthorization.payment_status = 'CANCELED'
        preauthorization.save()

        # An update (PUT) keeps the same primary key.
        self.assertEqual(previous_pk, preauthorization.get_pk())
        self.assertEqual(preauthorization.payment_status, 'CANCELED')

    @responses.activate
    def test_retrieve_preauthorization(self):
        """Fetch a saved pre-authorization by pk; unknown pk raises DoesNotExist."""
        self.mock_natural_user()
        self.mock_card()
        self.register_mock([
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1167495',
                'body': {
                    "FirstName": "Victor",
                    "LastName": "Hugo",
                    "Address": {
                        "AddressLine1": "AddressLine1",
                        "AddressLine2": "AddressLine2",
                        "City": "City",
                        "Region": "Region",
                        "PostalCode": "11222",
                        "Country": "FR"
                    },
                    "Birthday": int(time.mktime(date.today().timetuple())),
                    "Nationality": "FR",
                    "CountryOfResidence": "FR",
                    "Occupation": "Writer",
                    "IncomeRange": 6,
                    "ProofOfIdentity": None,
                    "ProofOfAddress": None,
                    "PersonType": "NATURAL",
                    "Email": "victor@hugo.com",
                    "Id": "1167495",
                    "Tag": "custom tag",
                    "CreationDate": 1383321421,
                    "KYCLevel": "LIGHT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/card/direct',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "WAITING",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "DEFAULT",
                    "CardId": "1208983",
                    "SecureModeRedirectURL": "https://api.sandbox.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": True,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209003',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1167495",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "CREATED",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "DEFAULT",
                    "CardId": "1208983",
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                # Unknown pre-authorization id -> 404, mapped to DoesNotExist.
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/1209004',
                'body': {"errors": []},
                'status': 404
            }])

        params = {
            "author": self.card.user,
            "card": self.card,
            "debited_funds": Money(amount=10000, currency='EUR'),
            "secure_mode": "DEFAULT",
            "secure_mode_return_url": "https://www.mysite.com/secure?preAuthorizationId=1209003"
        }
        preauthorization = PreAuthorization(**params)
        preauthorization.save()

        self.assertRaises(PreAuthorization.DoesNotExist, PreAuthorization.get, int(preauthorization.get_pk()) + 1)

        self.assertIsNotNone(preauthorization.get_pk())

        # Re-fetch the resource by its pk and check the fields round-trip.
        preauthorization = PreAuthorization.get(preauthorization.get_pk())

        self.assertIsNotNone(preauthorization.get_pk())

        # The GET mock omits SecureModeReturnURL, so the attribute is None.
        self.assertEqual(preauthorization.secure_mode_return_url, None)
        params.pop('secure_mode_return_url')

        self.assertEqual(preauthorization.debited_funds.amount, 10000)
        params.pop('debited_funds')

        for key, value in params.items():
            self.assertEqual(getattr(preauthorization, key), value)

    @responses.activate
    def test_create_succeeded_preauthorized_payin(self):
        """A pay-in against a successful pre-authorization succeeds."""
        self.mock_natural_user()
        self.mock_legal_user()
        self.mock_user_wallet()
        self.mock_card()
        self.register_mock([
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1167495',
                'body': {
                    "FirstName": "Victor",
                    "LastName": "Hugo",
                    "Address": {
                        "AddressLine1": "AddressLine1",
                        "AddressLine2": "AddressLine2",
                        "City": "City",
                        "Region": "Region",
                        "PostalCode": "11222",
                        "Country": "FR"
                    },
                    "Birthday": int(time.mktime(date.today().timetuple())),
                    "Nationality": "FR",
                    "CountryOfResidence": "FR",
                    "Occupation": "Writer",
                    "IncomeRange": 6,
                    "ProofOfIdentity": None,
                    "ProofOfAddress": None,
                    "PersonType": "NATURAL",
                    "Email": "victor@hugo.com",
                    "Id": "1167495",
                    "Tag": "custom tag",
                    "CreationDate": 1383321421,
                    "KYCLevel": "LIGHT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/card/direct',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "WAITING",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "DEFAULT",
                    "CardId": "1208983",
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": True,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/payins/PreAuthorized/direct',
                'body': {
                    "Id": "1209008",
                    "Tag": None,
                    "CreationDate": 1388653621,
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "AuthorId": "1208974",
                    "CreditedUserId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "CreditedFunds": {
                        "Currency": "EUR",
                        "Amount": 900
                    },
                    "Fees": {
                        "Currency": "EUR",
                        "Amount": 100
                    },
                    "Status": "SUCCEEDED",
                    "ExecutionDate": 1388653622,
                    "Type": "PAYIN",
                    "Nature": "REGULAR",
                    "CreditedWalletId": "1208991",
                    "DebitedWalletId": None,
                    "PaymentType": "PREAUTHORIZED",
                    "ExecutionType": "DIRECT",
                    "PreauthorizationId": "1209003"
                },
                'status': 200
            }])

        params = {
            "author": self.card.user,
            "card": self.card,
            "debited_funds": Money(amount=10000, currency='EUR'),
            "secure_mode": "DEFAULT",
            "secure_mode_return_url": "https://www.mysite.com/secure?preAuthorizationId=1209003"
        }
        preauthorization = PreAuthorization(**params)
        preauthorization.save()

        params = {
            "author": self.card.user,
            "debited_funds": Money(amount=10000, currency='EUR'),
            "fees": Money(amount=1, currency='EUR'),
            "credited_wallet": self.legal_user_wallet,
            "preauthorization": preauthorization,
            "secure_mode_return_url": "https://www.mysite.com/secure?preAuthorizationId=1209003"
        }
        preauthorized_payin = PreAuthorizedPayIn(**params)

        self.assertIsNone(preauthorized_payin.get_pk())
        preauthorized_payin.save()
        self.assertIsInstance(preauthorized_payin, PreAuthorizedPayIn)

        # NOTE(review): assertTrue's second argument is a failure *message*,
        # not an expected value — this line only checks truthiness. Confirm
        # whether assertEqual/startswith was intended.
        self.assertTrue(preauthorized_payin.secure_mode_return_url, "https://www.mysite.com/secure?preAuthorizationId=")
        params.pop('secure_mode_return_url')

        self.assertEqual(preauthorized_payin.debited_funds.amount, 10000)
        params.pop('debited_funds')

        # Fees come back from the mock response (100), not from the request (1).
        self.assertEqual(preauthorized_payin.fees.amount, 100)
        params.pop('fees')

        for key, value in params.items():
            self.assertEqual(getattr(preauthorized_payin, key), value)

        self.assertIsNotNone(preauthorized_payin.get_pk())
        self.assertEqual(preauthorized_payin.status, 'SUCCEEDED')
        self.assertEqual(preauthorized_payin.payment_type, 'PREAUTHORIZED')

    @responses.activate
    def test_create_failed_preauthorized_payin(self):
        """A pay-in exceeding the pre-authorized amount comes back FAILED."""
        self.mock_natural_user()
        self.mock_legal_user()
        self.mock_user_wallet()
        self.mock_card()
        self.register_mock([
            {
                'method': responses.GET,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1167495',
                'body': {
                    "FirstName": "Victor",
                    "LastName": "Hugo",
                    "Address": {
                        "AddressLine1": "AddressLine1",
                        "AddressLine2": "AddressLine2",
                        "City": "City",
                        "Region": "Region",
                        "PostalCode": "11222",
                        "Country": "FR"
                    },
                    "Birthday": int(time.mktime(date.today().timetuple())),
                    "Nationality": "FR",
                    "CountryOfResidence": "FR",
                    "Occupation": "Writer",
                    "IncomeRange": 6,
                    "ProofOfIdentity": None,
                    "ProofOfAddress": None,
                    "PersonType": "NATURAL",
                    "Email": "victor@hugo.com",
                    "Id": "1167495",
                    "Tag": "custom tag",
                    "CreationDate": 1383321421,
                    "KYCLevel": "LIGHT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/preauthorizations/card/direct',
                'body': {
                    "Id": "1209003",
                    "Tag": None,
                    "CreationDate": 1388653234,
                    "AuthorId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 10000
                    },
                    "AuthorizationDate": 1388653377,
                    "Status": "SUCCEEDED",
                    "PaymentStatus": "WAITING",
                    "ExpirationDate": 1389258177,
                    "PayInId": "1209008",
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "SecureMode": "DEFAULT",
                    "CardId": "1208983",
                    "SecureModeRedirectURL": "https://api-test.mangopay.com:443/Redirect/ACSWithoutValidation?token=8139ca555fd74fbbba14a50b7151a3e9",
                    "SecureModeNeeded": True,
                    "PaymentType": "CARD",
                    "ExecutionType": "DIRECT"
                },
                'status': 200
            },
            {
                'method': responses.POST,
                'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/payins/PreAuthorized/direct',
                'body': {
                    "Id": "1209008",
                    "Tag": None,
                    "CreationDate": 1388653621,
                    "ResultCode": "000000",
                    "ResultMessage": "Success",
                    "AuthorId": "1208974",
                    "CreditedUserId": "1208974",
                    "DebitedFunds": {
                        "Currency": "EUR",
                        "Amount": 30000
                    },
                    "CreditedFunds": {
                        "Currency": "EUR",
                        "Amount": 900
                    },
                    "Fees": {
                        "Currency": "EUR",
                        "Amount": 100
                    },
                    "Status": "FAILED",
                    "ExecutionDate": 1388653622,
                    "Type": "PAYIN",
                    "Nature": "REGULAR",
                    "CreditedWalletId": "1208991",
                    "DebitedWalletId": None,
                    "PaymentType": "PREAUTHORIZED",
                    "ExecutionType": "DIRECT",
                    "PreauthorizationId": "1209003"
                },
                'status': 200
            }])

        params = {
            "author": self.card.user,
            "card": self.card,
            "debited_funds": Money(amount=10000, currency='EUR'),
            "secure_mode": "DEFAULT",
            "secure_mode_return_url": "http://www.ulule.com/"
        }
        preauthorization = PreAuthorization(**params)
        preauthorization.save()

        self.assertEqual(preauthorization.status, 'SUCCEEDED')
        self.assertEqual(preauthorization.payment_status, 'WAITING')

        params = {
            "author": self.card.user,
            "debited_funds": Money(amount=30000, currency='EUR'),  # Amount is too high
            "fees": Money(amount=1, currency='EUR'),
            "credited_wallet": self.legal_user_wallet,
            "preauthorization": preauthorization,
            "secure_mode_url": "http://www.ulule.com/"
        }
        preauthorized_payin = PreAuthorizedPayIn(**params)
        preauthorized_payin.save()

        self.assertEqual(preauthorized_payin.status, 'FAILED')
        self.assertEqual(preauthorized_payin.payment_type, 'PREAUTHORIZED')
| |
import unittest
import helper as t
mapping_file = 'complete_db_mapping.json'
def setup():
    """Module-level test setup hook: delegate to the shared helper."""
    t.setup()
def teardown():
    """Module-level test teardown hook: delegate to the shared helper."""
    t.teardown()
#######################################################################
def test_import():
    """Import succeeds"""
    t.drop_schemas()
    # The table must not pre-exist: proves the import itself creates it.
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    t.imposm3_import(t.db_conf, './build/complete_db.pbf', mapping_file)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
def test_deploy():
    """Deploy succeeds"""
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    t.imposm3_deploy(t.db_conf, mapping_file)
    # Deploy moves the tables from the import schema into production.
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
#######################################################################
def test_imported_landusage():
    """Multipolygon relation is inserted"""
    t.assert_cached_node(1001, (13, 47.5))
    park = t.query_row(t.db_conf, 'osm_landusages', -1001)
    # A point inside the outer ring is covered by the polygon...
    assert park['geometry'].intersects(t.merc_point(13.4, 47.5))
    # ...but a point inside the relation's hole is not.
    assert not park['geometry'].intersects(t.merc_point(14.75, 47.75))
def test_missing_nodes():
    """Cache does not contain nodes from previous imports"""
    for node_id in (10001, 10002):
        t.assert_missing_node(node_id)
    place = t.query_row(t.db_conf, 'osm_places', 10000)
    assert place['name'] == 'Foo', place
def test_name_tags():
    """Road contains multiple names"""
    road = t.query_row(t.db_conf, 'osm_roads', 1101)
    # Each mapped name column carries the value of its source tag.
    assert road['name'] == 'name', road
    assert road['name:de'] == 'name:de', road
    assert road['name_en'] == 'name:en', road
def test_landusage_to_waterarea_1():
    """Parks inserted into landusages"""
    for way_id in (11001, 12001, 13001):
        t.assert_cached_way(way_id)
    osm_ids = (11001, -12001, -13001)
    # The parks must not show up in any of the waterarea tables...
    for suffix in ('', '_gen0', '_gen1'):
        for osm_id in osm_ids:
            assert not t.query_row(t.db_conf, 'osm_waterareas' + suffix, osm_id)
    # ...but must be present as type 'park' in every landusage table.
    for suffix in ('', '_gen0', '_gen1'):
        for osm_id in osm_ids:
            assert t.query_row(t.db_conf, 'osm_landusages' + suffix, osm_id)['type'] == 'park'
def test_changed_hole_tags_1():
    """Multipolygon relation with untagged hole"""
    # the untagged hole way (14011) must not appear as its own waterarea
    t.assert_cached_way(14001)
    t.assert_cached_way(14011)
    assert not t.query_row(t.db_conf, 'osm_waterareas', 14011)
    assert not t.query_row(t.db_conf, 'osm_waterareas', -14011)
    assert t.query_row(t.db_conf, 'osm_landusages', -14001)['type'] == 'park'
def test_split_outer_multipolygon_way_1():
    """Single outer way of multipolygon was inserted."""
    # relations are stored with negated IDs (-15001)
    park_15001 = t.query_row(t.db_conf, 'osm_landusages', -15001)
    assert park_15001['type'] == 'park'
    t.assert_almost_equal(park_15001['geometry'].area, 9816216452, -1)
    assert t.query_row(t.db_conf, 'osm_roads', 15002) == None
def test_merge_outer_multipolygon_way_1():
    """Splitted outer way of multipolygon was inserted."""
    park_16001 = t.query_row(t.db_conf, 'osm_landusages', -16001)
    assert park_16001['type'] == 'park'
    t.assert_almost_equal(park_16001['geometry'].area, 12779350582, -1)
    assert t.query_row(t.db_conf, 'osm_roads', 16002)['type'] == 'residential'
def test_broken_multipolygon_ways():
    """MultiPolygons with broken outer ways are handled."""
    # outer way does not merge (17002 has one node)
    assert t.query_row(t.db_conf, 'osm_landusages', -17001) == None
    assert t.query_row(t.db_conf, 'osm_roads', 17001)['type'] == 'residential'
    assert t.query_row(t.db_conf, 'osm_roads', 17002) == None
    # outer way does not merge (17102 has no nodes)
    assert t.query_row(t.db_conf, 'osm_landusages', -17101) == None
    assert t.query_row(t.db_conf, 'osm_roads', 17101)['type'] == 'residential'
    assert t.query_row(t.db_conf, 'osm_roads', 17102) == None
def test_node_way_inserted_twice():
    """Way with multiple mappings is inserted twice in same table"""
    # query_row returns a list when the same ID occurs multiple times
    rows = t.query_row(t.db_conf, 'osm_roads', 18001)
    rows.sort(key=lambda x: x['type'])
    assert rows[0]['type'] == 'residential'
    assert rows[1]['type'] == 'tram'
def test_node_way_ref_after_delete_1():
    """Nodes reference way"""
    # both nodes must record way 20001 as a dependency in the cache
    data = t.cache_query(nodes=[20001, 20002], deps=True)
    assert '20001' in data['nodes']['20001']['ways']
    assert '20001' in data['nodes']['20002']['ways']
    assert t.query_row(t.db_conf, 'osm_roads', 20001)['type'] == 'residential'
    assert t.query_row(t.db_conf, 'osm_barrierpoints', 20001)['type'] == 'block'
def test_way_rel_ref_after_delete_1():
    """Ways reference relation"""
    data = t.cache_query(ways=[21001], deps=True)
    # list(...) so the comparison also holds on Python 3, where dict.keys()
    # returns a view object that never compares equal to a list
    assert list(data['ways']['21001']['relations'].keys()) == ['21001']
    assert t.query_row(t.db_conf, 'osm_roads', 21001)['type'] == 'residential'
    assert t.query_row(t.db_conf, 'osm_landusages', -21001)['type'] == 'park'
def test_relation_way_not_inserted():
    """Part of relation was inserted only once."""
    # member ways with the relation's tags must not show up on their own
    park = t.query_row(t.db_conf, 'osm_landusages', -9001)
    assert park['type'] == 'park'
    assert park['name'] == 'rel 9001'
    assert t.query_row(t.db_conf, 'osm_landusages', 9009) == None
    park = t.query_row(t.db_conf, 'osm_landusages', -9101)
    assert park['type'] == 'park'
    assert park['name'] == 'rel 9101'
    assert t.query_row(t.db_conf, 'osm_landusages', 9109) == None
    scrub = t.query_row(t.db_conf, 'osm_landusages', 9110)
    assert scrub['type'] == 'scrub'
def test_relation_ways_inserted():
    """Outer ways of multipolygon are inserted. """
    park = t.query_row(t.db_conf, 'osm_landusages', -9201)
    assert park['type'] == 'park'
    assert park['name'] == '9209'
    # outer ways of multipolygon stand for their own
    road = t.query_row(t.db_conf, 'osm_roads', 9209)
    assert road['type'] == 'secondary'
    assert road['name'] == '9209'
    road = t.query_row(t.db_conf, 'osm_roads', 9210)
    assert road['type'] == 'residential'
    assert road['name'] == '9210'
    park = t.query_row(t.db_conf, 'osm_landusages', -9301)
    assert park['type'] == 'park'
    assert park['name'] == '' # no name on relation
    # outer ways of multipolygon stand for their own
    road = t.query_row(t.db_conf, 'osm_roads', 9309)
    assert road['type'] == 'secondary'
    assert road['name'] == '9309'
    road = t.query_row(t.db_conf, 'osm_roads', 9310)
    assert road['type'] == 'residential'
    assert road['name'] == '9310'
def test_relation_way_inserted():
    """Part of relation was inserted twice."""
    # the relation becomes a landusage, the member way also becomes a road
    park = t.query_row(t.db_conf, 'osm_landusages', -8001)
    assert park['type'] == 'park'
    assert park['name'] == 'rel 8001'
    assert t.query_row(t.db_conf, 'osm_roads', 8009)["type"] == 'residential'
def test_single_node_ways_not_inserted():
    """Ways with single/duplicate nodes are not inserted."""
    assert not t.query_row(t.db_conf, 'osm_roads', 30001)
    assert not t.query_row(t.db_conf, 'osm_roads', 30002)
    assert not t.query_row(t.db_conf, 'osm_roads', 30003)
def test_polygon_with_duplicate_nodes_is_valid():
    """Polygon with duplicate nodes is valid."""
    geom = t.query_row(t.db_conf, 'osm_landusages', 30005)['geometry']
    assert geom.is_valid
    # duplicate node was dropped: triangle ring = 3 points + closing point
    assert len(geom.exterior.coords) == 4
def test_incomplete_polygons():
    """Non-closed/incomplete polygons are not inserted."""
    assert not t.query_row(t.db_conf, 'osm_landusages', 30004)
    assert not t.query_row(t.db_conf, 'osm_landusages', 30006)
def test_residential_to_secondary():
    """Residential road is not in roads_gen0/1."""
    # counterpart to test_residential_to_secondary2 after the diff update
    assert t.query_row(t.db_conf, 'osm_roads', 40001)['type'] == 'residential'
    assert not t.query_row(t.db_conf, 'osm_roads_gen0', 40001)
    assert not t.query_row(t.db_conf, 'osm_roads_gen1', 40001)
def test_relation_before_remove():
    """Relation and way is inserted."""
    assert t.query_row(t.db_conf, 'osm_buildings', 50011)['type'] == 'yes'
    assert t.query_row(t.db_conf, 'osm_landusages', -50021)['type'] == 'park'
def test_relation_without_tags():
    """Relation without tags is inserted."""
    # tags come from the member way, geometry comes from the relation (-50121)
    assert t.query_row(t.db_conf, 'osm_buildings', 50111) == None
    assert t.query_row(t.db_conf, 'osm_buildings', -50121)['type'] == 'yes'
def test_duplicate_ids():
    """Relation/way with same ID is inserted."""
    # ways keep the positive ID, relations the negated one
    assert t.query_row(t.db_conf, 'osm_buildings', 51001)['type'] == 'way'
    assert t.query_row(t.db_conf, 'osm_buildings', -51001)['type'] == 'mp'
    assert t.query_row(t.db_conf, 'osm_buildings', 51011)['type'] == 'way'
    assert t.query_row(t.db_conf, 'osm_buildings', -51011)['type'] == 'mp'
def test_generalized_banana_polygon_is_valid():
    """Generalized polygons are valid."""
    park = t.query_row(t.db_conf, 'osm_landusages', 7101)
    # geometry is not valid
    assert not park['geometry'].is_valid, park
    park = t.query_row(t.db_conf, 'osm_landusages_gen0', 7101)
    # but simplified geometries are valid
    assert park['geometry'].is_valid, park
    park = t.query_row(t.db_conf, 'osm_landusages_gen1', 7101)
    assert park['geometry'].is_valid, park
def test_generalized_linestring_is_valid():
    """Generalized linestring is valid."""
    road = t.query_row(t.db_conf, 'osm_roads', 7201)
    # geometry is not simple, but valid
    # check that geometry 'survives' simplification
    assert not road['geometry'].is_simple, road['geometry'].wkt
    assert road['geometry'].is_valid, road['geometry'].wkt
    assert road['geometry'].length > 1000000
    road = t.query_row(t.db_conf, 'osm_roads_gen0', 7201)
    # but simplified geometries are simple
    assert road['geometry'].is_valid, road['geometry'].wkt
    assert road['geometry'].length > 1000000
    road = t.query_row(t.db_conf, 'osm_roads_gen1', 7201)
    assert road['geometry'].is_valid, road['geometry'].wkt
    assert road['geometry'].length > 1000000
def test_ring_with_gap():
    """Multipolygon and way with gap (overlapping but different endpoints) gets closed"""
    park = t.query_row(t.db_conf, 'osm_landusages', -7301)
    assert park['geometry'].is_valid, park
    park = t.query_row(t.db_conf, 'osm_landusages', 7311)
    assert park['geometry'].is_valid, park
def test_updated_nodes1():
    """Zig-Zag line is inserted."""
    # counterpart to test_updated_way2: length changes after straightening
    road = t.query_row(t.db_conf, 'osm_roads', 60000)
    t.assert_almost_equal(road['geometry'].length, 14035.61150207768)
def test_update_node_to_coord_1():
    """Node is inserted with tag."""
    # only tagged nodes keep their tags in the cache and reach the DB
    coords = t.cache_query(nodes=(70001, 70002))
    assert coords['nodes']["70001"]["tags"] == {"amenity": "police"}
    assert "tags" not in coords['nodes']["70002"]
    assert t.query_row(t.db_conf, 'osm_amenities', 70001)
    assert not t.query_row(t.db_conf, 'osm_amenities', 70002)
#######################################################################
def test_update():
    """Diff import applies"""
    # all tests below this point run against the post-diff database state
    t.imposm3_update(t.db_conf, './build/complete_db.osc.gz', mapping_file)
#######################################################################
def test_updated_landusage():
    """Multipolygon relation was modified"""
    t.assert_cached_node(1001, (13.5, 47.5))
    landusage_1001 = t.query_row(t.db_conf, 'osm_landusages', -1001)
    # point not in polygon after update
    assert not landusage_1001['geometry'].intersects(t.merc_point(13.4, 47.5))
def test_partial_delete():
    """Deleted relation but nodes are still cached"""
    t.assert_cached_node(2001)
    t.assert_cached_way(2001)
    t.assert_cached_way(2002)
    # the relation is gone from the DB under both possible IDs
    assert not t.query_row(t.db_conf, 'osm_landusages', -2001)
    assert not t.query_row(t.db_conf, 'osm_landusages', 2001)
def test_updated_nodes():
    """Nodes were added, modified or deleted"""
    t.assert_missing_node(10000)
    t.assert_cached_node(10001, (10.0, 40.0))
    t.assert_cached_node(10002, (10.1, 40.0))
    # names changed by the diff update
    place_10001 = t.query_row(t.db_conf, 'osm_places', 10001)
    assert place_10001['name'] == 'Bar', place_10001
    place_10002 = t.query_row(t.db_conf, 'osm_places', 10002)
    assert place_10002['name'] == 'Baz', place_10002
def test_landusage_to_waterarea_2():
    """Parks converted to water moved from landusages to waterareas"""
    # inverse of test_landusage_to_waterarea_1: after the diff update the
    # same IDs exist only in the waterarea tables
    t.assert_cached_way(11001)
    t.assert_cached_way(12001)
    t.assert_cached_way(13001)
    assert not t.query_row(t.db_conf, 'osm_landusages', 11001)
    assert not t.query_row(t.db_conf, 'osm_landusages', -12001)
    assert not t.query_row(t.db_conf, 'osm_landusages', -13001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen0', 11001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen0', -12001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen0', -13001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen1', 11001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen1', -12001)
    assert not t.query_row(t.db_conf, 'osm_landusages_gen1', -13001)
    assert t.query_row(t.db_conf, 'osm_waterareas', 11001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas', -12001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas', -13001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen0', 11001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen0', -12001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen0', -13001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen1', 11001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen1', -12001)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_waterareas_gen1', -13001)['type'] == 'water'
def test_changed_hole_tags_2():
    """Newly tagged hole is inserted"""
    # counterpart to test_changed_hole_tags_1: after the update the hole
    # way carries its own tags and becomes a waterarea
    t.assert_cached_way(14001)
    t.assert_cached_way(14011)
    assert t.query_row(t.db_conf, 'osm_waterareas', 14011)['type'] == 'water'
    assert t.query_row(t.db_conf, 'osm_landusages', -14001)['type'] == 'park'
    t.assert_almost_equal(t.query_row(t.db_conf, 'osm_waterareas', 14011)['geometry'].area, 26672000000, -6)
    t.assert_almost_equal(t.query_row(t.db_conf, 'osm_landusages', -14001)['geometry'].area, 10373600000, -6)
def test_split_outer_multipolygon_way_2():
    """Splitted outer way of multipolygon was inserted"""
    data = t.cache_query(ways=[15001, 15002], deps=True)
    # list(...) so the comparisons also hold on Python 3, where dict.keys()
    # returns a view object that never compares equal to a list
    assert list(data['ways']['15001']['relations'].keys()) == ['15001']
    assert list(data['ways']['15002']['relations'].keys()) == ['15001']
    assert t.query_row(t.db_conf, 'osm_landusages', 15001) == None
    park_15001 = t.query_row(t.db_conf, 'osm_landusages', -15001)
    assert park_15001['type'] == 'park'
    t.assert_almost_equal(park_15001['geometry'].area, 9816216452, -1)
    assert t.query_row(t.db_conf, 'osm_roads', 15002)['type'] == 'residential'
def test_merge_outer_multipolygon_way_2():
    """Merged outer way of multipolygon was inserted"""
    data = t.cache_query(ways=[16001, 16002], deps=True)
    # list(...) so the comparison also holds on Python 3, where dict.keys()
    # returns a view; the sorted(...) comparison below is already portable
    assert list(data['ways']['16001']['relations'].keys()) == ['16001']
    assert data['ways']['16002'] == None
    data = t.cache_query(relations=[16001], full=True)
    assert sorted(data['relations']['16001']['ways'].keys()) == ['16001', '16011']
    assert t.query_row(t.db_conf, 'osm_landusages', 16001) == None
    park_16001 = t.query_row(t.db_conf, 'osm_landusages', -16001)
    assert park_16001['type'] == 'park'
    t.assert_almost_equal(park_16001['geometry'].area, 12779350582, -1)
    assert t.query_row(t.db_conf, 'osm_roads', 16002) == None
def test_node_way_ref_after_delete_2():
    """Node does not reference deleted way"""
    data = t.cache_query(nodes=[20001, 20002], deps=True)
    assert 'ways' not in data['nodes']['20001']
    assert data['nodes']['20002'] == None
    assert t.query_row(t.db_conf, 'osm_roads', 20001) == None
    assert t.query_row(t.db_conf, 'osm_barrierpoints', 20001)['type'] == 'block'
def test_way_rel_ref_after_delete_2():
    """Way does not reference deleted relation"""
    data = t.cache_query(ways=[21001], deps=True)
    assert 'relations' not in data['ways']['21001']
    assert t.query_row(t.db_conf, 'osm_roads', 21001)['type'] == 'residential'
    assert t.query_row(t.db_conf, 'osm_landusages', 21001) == None
    assert t.query_row(t.db_conf, 'osm_landusages', -21001) == None
def test_residential_to_secondary2():
    """New secondary (from residential) is now in roads_gen0/1."""
    # counterpart to test_residential_to_secondary before the diff update
    assert t.query_row(t.db_conf, 'osm_roads', 40001)['type'] == 'secondary'
    assert t.query_row(t.db_conf, 'osm_roads_gen0', 40001)['type'] == 'secondary'
    assert t.query_row(t.db_conf, 'osm_roads_gen1', 40001)['type'] == 'secondary'
def test_relation_after_remove():
    """Relation is deleted and way is still present."""
    assert t.query_row(t.db_conf, 'osm_buildings', 50011)['type'] == 'yes'
    assert t.query_row(t.db_conf, 'osm_landusages', 50021) == None
    assert t.query_row(t.db_conf, 'osm_landusages', -50021) == None
def test_relation_without_tags2():
    """Relation without tags is removed."""
    t.cache_query(ways=[50111], deps=True)
    assert t.cache_query(relations=[50121], deps=True)['relations']["50121"] == None
    # the former relation geometry is replaced by the way itself
    assert t.query_row(t.db_conf, 'osm_buildings', 50111)['type'] == 'yes'
    assert t.query_row(t.db_conf, 'osm_buildings', 50121) == None
    assert t.query_row(t.db_conf, 'osm_buildings', -50121) == None
def test_duplicate_ids2():
    """Only relation/way with same ID was deleted."""
    # deleting one of the pair must not remove the other
    assert t.query_row(t.db_conf, 'osm_buildings', 51001)['type'] == 'way'
    assert t.query_row(t.db_conf, 'osm_buildings', -51001) == None
    assert t.query_row(t.db_conf, 'osm_buildings', -51011)['type'] == 'mp'
    assert t.query_row(t.db_conf, 'osm_buildings', 51011) == None
def test_updated_way2():
    """All nodes of straightened way are updated."""
    road = t.query_row(t.db_conf, 'osm_roads', 60000)
    # new length 0.1 degree
    t.assert_almost_equal(road['geometry'].length, 20037508.342789244/180.0/10.0)
def test_update_node_to_coord_2():
    """Node becomes coord after tags are removed."""
    # tags swapped between the two nodes by the diff update
    coords = t.cache_query(nodes=(70001, 70002))
    assert "tags" not in coords['nodes']["70001"]
    assert coords['nodes']["70002"]["tags"] == {"amenity": "police"}
    assert not t.query_row(t.db_conf, 'osm_amenities', 70001)
    assert t.query_row(t.db_conf, 'osm_amenities', 70002)
#######################################################################
def test_deploy_and_revert_deploy():
    """Revert deploy succeeds"""
    # initial state: tables only live in the production schema
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
    # import again to have a new import schema
    t.imposm3_import(t.db_conf, './build/complete_db.pbf', mapping_file)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    # deploy moves import->production and production->backup
    t.imposm3_deploy(t.db_conf, mapping_file)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
    # revert restores the pre-deploy schema layout
    t.imposm3_revert_deploy(t.db_conf, mapping_file)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
def test_remove_backup():
    """Remove backup succeeds"""
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
    # deploy moves the previous production tables into the backup schema
    t.imposm3_deploy(t.db_conf, mapping_file)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
    # remove-backups drops only the backup schema tables
    t.imposm3_remove_backups(t.db_conf, mapping_file)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_IMPORT)
    assert t.table_exists('osm_roads', schema=t.TEST_SCHEMA_PRODUCTION)
    assert not t.table_exists('osm_roads', schema=t.TEST_SCHEMA_BACKUP)
| |
from typing import List, Tuple, Dict, Any, Optional
from ray.job_config import JobConfig
from ray._private.client_mode_hook import (
_explicitly_disable_client_mode,
_explicitly_enable_client_mode,
)
import os
import sys
import logging
import threading
import grpc
import ray.ray_constants as ray_constants
from ray._private.ray_logging import setup_logger
logger = logging.getLogger(__name__)
# This version string is incremented to indicate breaking changes in the
# protocol that require upgrading the client version.
CURRENT_PROTOCOL_VERSION = "2022-02-22"
class _ClientContext:
    """State of one Ray Client connection: the ClientAPI facade, the gRPC
    client worker, and (optionally) a locally started server."""
    def __init__(self):
        from ray.util.client.api import ClientAPI
        self.api = ClientAPI()
        # Set by connect(); None means this context is not connected.
        self.client_worker = None
        # gRPC server handle, only set when init() starts a local server.
        self._server = None
        self._connected_with_init = False
        self._inside_client_test = False
    def connect(
        self,
        conn_str: str,
        job_config: JobConfig = None,
        secure: bool = False,
        metadata: List[Tuple[str, str]] = None,
        connection_retries: int = 3,
        namespace: str = None,
        *,
        ignore_version: bool = False,
        _credentials: Optional[grpc.ChannelCredentials] = None,
        ray_init_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Connect the Ray Client to a server.
        Args:
            conn_str: Connection string, in the form "[host]:port"
            job_config: The job config of the server.
            secure: Whether to use a TLS secured gRPC channel
            metadata: gRPC metadata to send on connect
            connection_retries: number of connection attempts to make
            ignore_version: whether to ignore Python or Ray version mismatches.
                This should only be used for debugging purposes.
        Returns:
            Dictionary of connection info, e.g., {"num_clients": 1}.
        """
        # Delay imports until connect to avoid circular imports.
        from ray.util.client.worker import Worker
        if self.client_worker is not None:
            if self._connected_with_init:
                # NOTE(review): returns None here, not the conn-info dict
                # promised by the annotation -- a repeated connect() after
                # init() yields no connection info.
                return
            raise Exception("ray.init() called, but ray client is already connected")
        if not self._inside_client_test:
            # If we're calling a client connect specifically and we're not
            # currently in client mode, ensure we are.
            _explicitly_enable_client_mode()
        if namespace is not None:
            job_config = job_config or JobConfig()
            job_config.set_ray_namespace(namespace)
        # Logging level/format may be overridden through ray_init_kwargs.
        logging_level = ray_constants.LOGGER_LEVEL
        logging_format = ray_constants.LOGGER_FORMAT
        if ray_init_kwargs is not None:
            if ray_init_kwargs.get("logging_level") is not None:
                logging_level = ray_init_kwargs["logging_level"]
            if ray_init_kwargs.get("logging_format") is not None:
                logging_format = ray_init_kwargs["logging_format"]
        setup_logger(logging_level, logging_format)
        try:
            self.client_worker = Worker(
                conn_str,
                secure=secure,
                _credentials=_credentials,
                metadata=metadata,
                connection_retries=connection_retries,
            )
            self.api.worker = self.client_worker
            self.client_worker._server_init(job_config, ray_init_kwargs)
            conn_info = self.client_worker.connection_info()
            self._check_versions(conn_info, ignore_version)
            self._register_serializers()
            return conn_info
        except Exception:
            # Roll back to a clean disconnected state on any failure.
            self.disconnect()
            raise
    def _register_serializers(self):
        """Register the custom serializer addons at the client side.
        The server side should have already registered the serializers via
        regular worker's serialization_context mechanism.
        """
        import ray.serialization_addons
        from ray.util.serialization import StandaloneSerializationContext
        ctx = StandaloneSerializationContext()
        ray.serialization_addons.apply(ctx)
    def _check_versions(self, conn_info: Dict[str, Any], ignore_version: bool) -> None:
        """Warn (when ignoring) or raise RuntimeError when the client's Python
        minor version or client protocol version differs from the server's."""
        local_major_minor = f"{sys.version_info[0]}.{sys.version_info[1]}"
        if not conn_info["python_version"].startswith(local_major_minor):
            version_str = f"{local_major_minor}.{sys.version_info[2]}"
            msg = (
                "Python minor versions differ between client and server:"
                + f" client is {version_str},"
                + f" server is {conn_info['python_version']}"
            )
            if ignore_version or "RAY_IGNORE_VERSION_MISMATCH" in os.environ:
                logger.warning(msg)
            else:
                raise RuntimeError(msg)
        if CURRENT_PROTOCOL_VERSION != conn_info["protocol_version"]:
            msg = (
                "Client Ray installation incompatible with server:"
                + f" client is {CURRENT_PROTOCOL_VERSION},"
                + f" server is {conn_info['protocol_version']}"
            )
            if ignore_version or "RAY_IGNORE_VERSION_MISMATCH" in os.environ:
                logger.warning(msg)
            else:
                raise RuntimeError(msg)
    def disconnect(self):
        """Disconnect the Ray Client."""
        from ray.util.client.api import ClientAPI
        if self.client_worker is not None:
            self.client_worker.close()
        # Replace the API facade so stale worker references cannot leak.
        self.api = ClientAPI()
        self.client_worker = None
    # remote can be called outside of a connection, which is why it
    # exists on the same API layer as connect() itself.
    def remote(self, *args, **kwargs):
        """remote is the hook stub passed on to replace `ray.remote`.
        This sets up remote functions or actors, as the decorator,
        but does not execute them.
        Args:
            args: opaque arguments
            kwargs: opaque keyword arguments
        """
        return self.api.remote(*args, **kwargs)
    def __getattr__(self, key: str):
        # Delegate unknown attribute lookups to the API object when connected.
        if self.is_connected():
            return getattr(self.api, key)
        elif key in ["is_initialized", "_internal_kv_initialized"]:
            # Client is not connected, thus Ray is not considered initialized.
            return lambda: False
        else:
            raise Exception(
                "Ray Client is not connected. " "Please connect by calling `ray.init`."
            )
    def is_connected(self) -> bool:
        """Return True when a client worker exists and reports connected."""
        if self.client_worker is None:
            return False
        return self.client_worker.is_connected()
    def init(self, *args, **kwargs):
        """Start a local client server on 127.0.0.1:50051 and connect to it."""
        if self._server is not None:
            raise Exception("Trying to start two instances of ray via client")
        import ray.util.client.server.server as ray_client_server
        server_handle, address_info = ray_client_server.init_and_serve(
            "127.0.0.1:50051", *args, **kwargs
        )
        self._server = server_handle.grpc_server
        self.connect("127.0.0.1:50051")
        self._connected_with_init = True
        return address_info
    def shutdown(self, _exiting_interpreter=False):
        """Disconnect and, when init() started a local server, stop it."""
        self.disconnect()
        import ray.util.client.server.server as ray_client_server
        if self._server is None:
            return
        ray_client_server.shutdown_with_server(self._server, _exiting_interpreter)
        self._server = None
# All connected contexts are stored here.
# The set is guarded by `_lock` for thread safety.
_all_contexts = set()
_lock = threading.Lock()
# This is the default context which is used when allow_multiple is not True
_default_context = _ClientContext()
class RayAPIStub:
    """This class stands in as the replacement API for the `import ray` module.
    Much like the ray module, this mostly delegates the work to the
    _client_worker. As parts of the ray API are covered, they are piped through
    here or on the client worker API.
    """
    def __init__(self):
        # Per-thread context handler; each thread can point at its own
        # _ClientContext via set_context().
        self._cxt = threading.local()
        self._cxt.handler = _default_context
        self._inside_client_test = False
    def get_context(self):
        """Return this thread's active context, installing the default
        context for threads that have not set one yet."""
        try:
            return self._cxt.__getattribute__("handler")
        except AttributeError:
            # threading.local attributes are per-thread: a thread other than
            # the creator starts without `handler`, so fall back to default.
            self._cxt.handler = _default_context
            return self._cxt.handler
    def set_context(self, cxt):
        """Install `cxt` (a fresh context when None) as this thread's
        handler and return the previously active one."""
        old_cxt = self.get_context()
        if cxt is None:
            self._cxt.handler = _ClientContext()
        else:
            self._cxt.handler = cxt
        return old_cxt
    def is_default(self):
        # True when this thread still uses the shared default context.
        return self.get_context() == _default_context
    def connect(self, *args, **kw_args):
        """Connect the active context and register it in _all_contexts."""
        self.get_context()._inside_client_test = self._inside_client_test
        conn = self.get_context().connect(*args, **kw_args)
        global _lock, _all_contexts
        with _lock:
            _all_contexts.add(self._cxt.handler)
        return conn
    def disconnect(self, *args, **kw_args):
        """Disconnect the active context; disconnecting the default context
        disconnects every registered context."""
        global _lock, _all_contexts, _default_context
        with _lock:
            if _default_context == self.get_context():
                for cxt in _all_contexts:
                    cxt.disconnect(*args, **kw_args)
                _all_contexts = set()
            else:
                self.get_context().disconnect(*args, **kw_args)
            if self.get_context() in _all_contexts:
                _all_contexts.remove(self.get_context())
            if len(_all_contexts) == 0:
                # last connection gone: leave client mode globally
                _explicitly_disable_client_mode()
    def remote(self, *args, **kwargs):
        # stub for `ray.remote`; delegates to the active context
        return self.get_context().remote(*args, **kwargs)
    def __getattr__(self, name):
        # any other ray.* attribute is resolved by the active context
        return self.get_context().__getattr__(name)
    def is_connected(self, *args, **kwargs):
        return self.get_context().is_connected(*args, **kwargs)
    def init(self, *args, **kwargs):
        """Start a local server via the active context and register it."""
        ret = self.get_context().init(*args, **kwargs)
        global _lock, _all_contexts
        with _lock:
            _all_contexts.add(self._cxt.handler)
        return ret
    def shutdown(self, *args, **kwargs):
        """Shut down the active context; shutting down the default context
        shuts down every registered context."""
        global _lock, _all_contexts
        with _lock:
            if _default_context == self.get_context():
                for cxt in _all_contexts:
                    cxt.shutdown(*args, **kwargs)
                _all_contexts = set()
            else:
                self.get_context().shutdown(*args, **kwargs)
            if self.get_context() in _all_contexts:
                _all_contexts.remove(self.get_context())
            if len(_all_contexts) == 0:
                _explicitly_disable_client_mode()
# Module-level singleton imported by users as `from ray_client import ray`.
ray = RayAPIStub()
def num_connected_contexts():
    """Return the number of client connections active."""
    # Reading a module-level name needs no `global` declaration; the lock
    # still guards the snapshot of the registry size.
    with _lock:
        active = len(_all_contexts)
    return active
# Someday we might add methods in this module so that someone who
# tries to `import ray_client as ray` -- as a module, instead of
# `from ray_client import ray` -- as the API stub
# still gets expected functionality. This is the way the ray package
# worked in the past.
#
# This really calls for PEP 562: https://www.python.org/dev/peps/pep-0562/
# But until Python 3.6 is EOL, here we are.
| |
'''
zstack security group test class
@author: Youyk
'''
import zstackwoodpecker.header.security_group as sg_header
import zstackwoodpecker.header.vm as vm_header
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
class ZstackTestSecurityGroup(sg_header.TestSecurityGroup):
    def __init__(self):
        # creation option object passed to net_ops.create_security_group()
        self.sg_creation_option = None
        #save SG rules based on target address: {allowedCidr:[rule1, rule2]}
        # one dict per protocol/direction combination
        self.icmp_ingress_rule = {}
        self.icmp_egress_rule = {}
        self.tcp_ingress_rule = {}
        self.tcp_egress_rule = {}
        self.udp_ingress_rule = {}
        self.udp_egress_rule = {}
        #nic_dict will save attached nic per l3_uuid based: l3_uuid:[nic_uuid1, nic_uuid2]
        self.nic_dict = {}
        super(ZstackTestSecurityGroup, self).__init__()
    def __hash__(self):
        # hash by SG uuid so wrappers can be stored in sets/dict keys
        return hash(self.security_group.uuid)
    def __eq__(self, other):
        # two wrappers are equal when they wrap the same security group
        return self.security_group.uuid == other.security_group.uuid
    def create(self):
        # create the SG through the API with the stored creation option
        self.security_group = net_ops.create_security_group(self.sg_creation_option)
        super(ZstackTestSecurityGroup, self).create()
#target_nics are nic_uuid list. The attach() API should not be directly called by test case. Test case should call zstack_test_sg_vm.ZstackTestSgVm.attach() API.
    def attach(self, target_nic_uuids, ipv6 = None):
        """Attach the given nic uuids to this security group.

        Attaches each nic's L3 network to the SG first (skipping L3s that
        are already attached), then adds all nics in one API call.
        """
        added_l3_uuid = []
        for nic in target_nic_uuids:
            l3_uuid = test_lib.lib_get_l3_uuid_by_nic(nic)
            self._add_nic(l3_uuid, nic)
            #In zstack, need attach Nic's L3 to SG, before add Nic to SG
            # If SG has been attached to L3, it should not be attached again.
            if l3_uuid in added_l3_uuid:
                continue
            conditions = res_ops.gen_query_conditions('uuid', '=', self.security_group.uuid)
            sg = res_ops.query_resource(res_ops.SECURITY_GROUP, conditions)[0]
            if l3_uuid in sg.attachedL3NetworkUuids:
                added_l3_uuid.append(l3_uuid)
                continue
            added_l3_uuid.append(l3_uuid)
            # NOTE(review): the L3 attach is skipped for ipv6 -- confirm
            # whether ipv6 L3s are expected to be attached elsewhere
            if not ipv6:
                net_ops.attach_security_group_to_l3(self.security_group.uuid, l3_uuid)
        net_ops.add_nic_to_security_group(self.security_group.uuid, target_nic_uuids)
        super(ZstackTestSecurityGroup, self).attach(target_nic_uuids)
#The dettach() API should not be directly called by test case. Test case should call zstack_test_sg_vm.ZstackTestSgVm.detach() API.
    def detach(self, target_nic_uuid):
        """Detach one nic from this SG (use ZstackTestSgVm.detach() in tests)."""
        net_ops.remove_nic_from_security_group(self.security_group.uuid, [target_nic_uuid])
        self._rm_nic(target_nic_uuid)
        # only notify the parent class once the last nic is gone
        if not self.nic_dict:
            super(ZstackTestSecurityGroup, self).detach(target_nic_uuid)
    def detach_l3(self, l3_uuid):
        """Detach this SG from a whole L3 network (drops all its nics)."""
        #FIXME: this operation has global impaction, if the other parallel case are doing security group testing on the same l3. E.g. the 1st case is testing sg connection, while the 2nd case might call detach_l3.
        self._rm_l3(l3_uuid)
        net_ops.detach_security_group_from_l3(self.security_group.uuid, l3_uuid)
#The delete() API should not be directly called by test case. Test case should call zstack_test_sg_vm.ZstackTestSgVm.delete_sg() API.
    def delete(self):
        """Delete the SG (use ZstackTestSgVm.delete_sg() in tests)."""
        net_ops.delete_security_group(self.security_group.uuid)
        super(ZstackTestSecurityGroup, self).delete()
        # no SG left, so the nic bookkeeping is reset as well
        self.nic_dict = {}
#The check() API should not be directly called by test case. Test case should call zstack_test_sg_vm.ZstackTestSgVm.check() API.
    def check(self):
        """Check SG state (use ZstackTestSgVm.check() in tests)."""
        import zstackwoodpecker.zstack_test.checker_factory as checker_factory
        #self.update()
        #checker = checker_factory.CheckerFactory().create_checker(self)
        #checker.check()
        super(ZstackTestSecurityGroup, self).check()
#check attached VM's status. If VM is destroyed, it should be removed from nic_dict
#This function doesn't work, if destroyed VM structure is not exist in DB. The next function should be called instead.
#When test vm is destroyed, the update action is similar with detach()
#def update(self):
# for test_vm in self.test_vm_list:
# if test_vm.state == vm_header.DESTROYED or test_vm.state == vm_header.EXPUNGED:
# self.delete_vm(test_vm)
def delete_vm(self, test_vm):
all_nics = self.get_all_attached_nics()
for nic in test_vm.vm.vmNics:
if nic.uuid in all_nics:
test_util.test_logger('Test [vm:] %s is destroyed, need to remove its [nic:] %s from attached list. ' % (test_vm.vm.uuid, nic.uuid))
self._rm_nic(nic.uuid)
target_nic = nic.uuid
if not self.nic_dict:
super(ZstackTestSecurityGroup, self).detach(target_nic)
    def set_creation_option(self, sg_creation_option):
        # stored for later use by create()
        self.sg_creation_option = sg_creation_option
    def get_creation_option(self):
        # option previously stored by set_creation_option()
        return self.sg_creation_option
    def add_rule(self, target_rule_objs, remote_security_group_uuid=None):
        """Add rules through the API and mirror them into the local
        per-protocol/per-direction rule dicts. Returns the created rules."""
        rules = net_ops.add_rules_to_security_group(self.security_group.uuid, target_rule_objs, remote_security_group_uuid).rules
        for rule in rules:
            if rule.protocol == inventory.TCP:
                if rule.type == inventory.INGRESS:
                    self._add_rule(rule, self.tcp_ingress_rule)
                else:
                    self._add_rule(rule, self.tcp_egress_rule)
            elif rule.protocol == inventory.UDP:
                if rule.type == inventory.INGRESS:
                    self._add_rule(rule, self.udp_ingress_rule)
                else:
                    self._add_rule(rule, self.udp_egress_rule)
            elif rule.protocol == inventory.ICMP:
                if rule.type == inventory.INGRESS:
                    self._add_rule(rule, self.icmp_ingress_rule)
                else:
                    self._add_rule(rule, self.icmp_egress_rule)
        return rules
def _add_rule(self, target_rule, rule_dict):
if rule_dict.has_key(target_rule.allowedCidr):
current_rule_uuids = []
for rule in rule_dict[target_rule.allowedCidr]:
current_rule_uuids.append(rule.uuid)
if not target_rule.uuid in current_rule_uuids:
rule_dict[target_rule.allowedCidr].append(target_rule)
else:
rule_dict[target_rule.allowedCidr] = [target_rule]
    def delete_rule_by_uuids(self, target_rule_uuids):
        """Remove rules from the local dicts, then delete them via the API."""
        for rule_uuid in target_rule_uuids:
            rule = test_lib.lib_get_sg_rule_by_uuid(rule_uuid)
            if rule.protocol == inventory.TCP:
                if rule.type == inventory.INGRESS:
                    self._delete_rule(rule, self.tcp_ingress_rule)
                else:
                    self._delete_rule(rule, self.tcp_egress_rule)
            elif rule.protocol == inventory.UDP:
                if rule.type == inventory.INGRESS:
                    self._delete_rule(rule, self.udp_ingress_rule)
                else:
                    self._delete_rule(rule, self.udp_egress_rule)
            elif rule.protocol == inventory.ICMP:
                if rule.type == inventory.INGRESS:
                    self._delete_rule(rule, self.icmp_ingress_rule)
                else:
                    self._delete_rule(rule, self.icmp_egress_rule)
        net_ops.remove_rules_from_security_group(target_rule_uuids)
    def delete_rule(self, target_rule_objs):
        # Resolve the rule objects to their backend uuids, then reuse the
        # uuid-based deletion path.
        target_rule_uuids = test_lib.lib_get_sg_rule_uuid_by_rule_obj(self.security_group.uuid, target_rule_objs)
        self.delete_rule_by_uuids(target_rule_uuids)
def _delete_rule(self, target_rule, rule_dict):
if not rule_dict.has_key(target_rule.allowedCidr):
return
for rule in rule_dict[target_rule.allowedCidr]:
if rule.uuid == target_rule.uuid:
rule_dict[target_rule.allowedCidr].remove(rule)
break
if not rule_dict[target_rule.allowedCidr]:
rule_dict.pop(target_rule.allowedCidr)
def _add_nic(self, l3_uuid, nic_uuid):
if self.nic_dict.has_key(l3_uuid):
if not nic_uuid in self.nic_dict[l3_uuid]:
self.nic_dict[l3_uuid].append(nic_uuid)
else:
self.nic_dict[l3_uuid] = [nic_uuid]
def _rm_nic(self, nic_uuid):
for key, value in self.nic_dict.iteritems():
if nic_uuid in value:
self.nic_dict[key].remove(nic_uuid)
if not self.nic_dict[key]:
self.nic_dict.pop(key)
return
def _rm_l3(self, l3_uuid):
if self.nic_dict.has_key(l3_uuid):
self.nic_dict.pop(l3_uuid)
    def get_all_l3(self):
        # The nic index is keyed by l3 network uuid.
        return self.nic_dict.keys()
def get_attached_nics_by_l3(self, l3_uuid):
if self.nic_dict.has_key(l3_uuid):
return self.nic_dict[l3_uuid]
else:
return None
def get_all_attached_nics(self):
nics = []
for l3 in self.get_all_l3():
nics.extend(self.get_attached_nics_by_l3(l3))
return nics
    # All allowedCidr keys that currently hold TCP ingress rules.
    def get_tcp_ingress_all_addr(self):
        return self.tcp_ingress_rule.keys()
    def get_tcp_ingress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no TCP ingress rule exists for this CIDR.
        return self.tcp_ingress_rule[allowedCidr]
def get_tcp_ingress_all_rule(self):
rules= []
for addr in self.get_tcp_ingress_all_addr():
rules.extend(self.get_tcp_ingress_rule_by_addr(addr))
return rules
    # All allowedCidr keys that currently hold TCP egress rules.
    def get_tcp_egress_all_addr(self):
        return self.tcp_egress_rule.keys()
    def get_tcp_egress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no TCP egress rule exists for this CIDR.
        return self.tcp_egress_rule[allowedCidr]
def get_tcp_egress_all_rule(self):
rules= []
for addr in self.get_tcp_egress_all_addr():
rules.extend(self.get_tcp_egress_rule_by_addr(addr))
return rules
    # All allowedCidr keys that currently hold UDP ingress rules.
    def get_udp_ingress_all_addr(self):
        return self.udp_ingress_rule.keys()
    def get_udp_ingress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no UDP ingress rule exists for this CIDR.
        return self.udp_ingress_rule[allowedCidr]
def get_udp_ingress_all_rule(self):
rules= []
for addr in self.get_udp_ingress_all_addr():
rules.extend(self.get_udp_ingress_rule_by_addr(addr))
return rules
    # All allowedCidr keys that currently hold UDP egress rules.
    def get_udp_egress_all_addr(self):
        return self.udp_egress_rule.keys()
    def get_udp_egress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no UDP egress rule exists for this CIDR.
        return self.udp_egress_rule[allowedCidr]
def get_udp_egress_all_rule(self):
rules= []
for addr in self.get_udp_egress_all_addr():
rules.extend(self.get_udp_egress_rule_by_addr(addr))
return rules
    # All allowedCidr keys that currently hold ICMP ingress rules.
    def get_icmp_ingress_all_addr(self):
        return self.icmp_ingress_rule.keys()
    def get_icmp_ingress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no ICMP ingress rule exists for this CIDR.
        return self.icmp_ingress_rule[allowedCidr]
def get_icmp_ingress_all_rule(self):
rules= []
for addr in self.get_icmp_ingress_all_addr():
rules.extend(self.get_icmp_ingress_rule_by_addr(addr))
return rules
    # All allowedCidr keys that currently hold ICMP egress rules.
    def get_icmp_egress_all_addr(self):
        return self.icmp_egress_rule.keys()
    def get_icmp_egress_rule_by_addr(self, allowedCidr):
        # Raises KeyError when no ICMP egress rule exists for this CIDR.
        return self.icmp_egress_rule[allowedCidr]
def get_icmp_egress_all_rule(self):
rules= []
for addr in self.get_icmp_egress_all_addr():
rules.extend(self.get_icmp_egress_rule_by_addr(addr))
return rules
def get_all_rules(self):
return self.get_tcp_egress_all_rule() + self.get_tcp_ingress_all_rule() + self.get_udp_ingress_all_rule() + self.get_udp_egress_all_rule() + self.get_icmp_ingress_all_rule() + self.get_icmp_egress_all_rule()
| |
from base import InterfaceObject, Seat, Currency
from pyticketswitch.util import (
to_int_or_return, to_float_or_none,
to_float_summed, format_price_with_symbol
)
import performance as perf_objs
import event as event_objs
import availability
class Order(InterfaceObject):
    """Object that represents a TSW order.

    Information relating to a TSW order is accessible with this
    object, e.g. price information, seats, the event. Orders should
    be created with the Core.create_order method, the constructor is
    intended for internal use.
    """

    def __init__(
        self,
        order_id=None,
        core_order=None,
        core_currency=None,
        **settings
    ):
        self.order_id = order_id
        self._core_order = core_order
        self._core_currency = core_currency
        self._self_print_url = None
        self._self_print_relative_url = None
        # False means "not yet computed"; the property caches a list or None.
        self._requested_seats = False
        super(Order, self).__init__(**settings)

    @property
    def item_number(self):
        """Integer for identifying the order within the bundle."""
        return to_int_or_return(
            self._core_order.item_number
        )

    @property
    def self_print_url(self):
        """Self-print URL, or None when not set (empty values map to None)."""
        if self._self_print_url:
            return self._self_print_url
        else:
            return None

    @self_print_url.setter
    def self_print_url(self, value):
        self._self_print_url = value

    @property
    def self_print_relative_url(self):
        """Relative self-print URL, or None when not set."""
        if self._self_print_relative_url:
            return self._self_print_relative_url
        else:
            return None

    @self_print_relative_url.setter
    def self_print_relative_url(self, value):
        self._self_print_relative_url = value

    @property
    def event_desc(self):
        """Description of the event on this order."""
        return self._core_order.event_desc

    @property
    def venue_desc(self):
        """Description of the venue on this order."""
        return self._core_order.venue_desc

    @property
    def total_combined_float(self):
        """Float value of the total combined price."""
        if self._core_order.total_combined:
            return to_float_or_none(
                self._core_order.total_combined
            )
        else:
            # Fall back to summing seat price and surcharge when the core
            # response carries no combined total.
            return to_float_summed(
                self._core_order.total_seatprice,
                self._core_order.total_surcharge
            )

    @property
    def total_combined(self):
        """Formatted string value of the total combined price with currency
        symbol.
        """
        return format_price_with_symbol(
            str(self.total_combined_float),
            self._core_currency.currency_pre_symbol,
            self._core_currency.currency_post_symbol
        )

    @property
    def total_inc_despatch_float(self):
        """Float value of the total combined price including despatch."""
        total = self.total_combined_float
        if self.despatch_method and self.despatch_method.cost_float:
            total += self.despatch_method.cost_float
        return total

    @property
    def total_inc_despatch(self):
        """Formatted string value of the total combined price including
        despatch with currency symbol.
        """
        return format_price_with_symbol(
            str(self.total_inc_despatch_float),
            self._core_currency.currency_pre_symbol,
            self._core_currency.currency_post_symbol
        )

    @property
    def currency(self):
        """Currency object for this order, or None when unavailable."""
        if self._core_currency:
            return Currency(
                core_currency=self._core_currency
            )
        return None

    @property
    def price_band_code(self):
        """Code of the price band this order was placed in."""
        return self._core_order.price_band_code

    @property
    def average_price_per_ticket_float(self):
        """Float combined price divided by the number of tickets."""
        return (
            self.total_combined_float /
            float(self._core_order.total_no_of_tickets)
        )

    @property
    def performance(self):
        """Performance object for this order (cached; None when absent)."""
        if not hasattr(self, '_performance'):
            if self._core_order.performance:
                self._performance = perf_objs.Performance(
                    core_performance=self._core_order.performance,
                    **self._internal_settings()
                )
            else:
                self._performance = None
        return self._performance

    @property
    def event(self):
        """Event object for this order (cached; None when absent)."""
        if not hasattr(self, '_event'):
            if self._core_order.event:
                self._event = event_objs.Event(
                    event_id=self._core_order.event.event_id,
                    core_event=self._core_order.event,
                    **self._internal_settings()
                )
            else:
                self._event = None
        return self._event

    @property
    def concessions(self):
        """List of Concession objects on this order (cached)."""
        if not hasattr(self, '_concessions'):
            self._concessions = []
            for discount in self._core_order.discounts:
                self._concessions.append(availability.Concession(
                    core_discount=discount,
                    core_currency=self._core_currency,
                    **self._internal_settings()
                ))
        return self._concessions

    @property
    def despatch_method(self):
        """DespatchMethod object for this order, or None when absent."""
        if not hasattr(self, '_despatch_method'):
            if self._core_order.despatch_method:
                self._despatch_method = availability.DespatchMethod(
                    core_despatch_method=self._core_order.despatch_method,
                    core_currency=self._core_currency,
                    **self._internal_settings()
                )
            else:
                # Bug fix: the attribute was previously never assigned on
                # this branch, so the return below raised AttributeError
                # whenever the core order had no despatch method.
                self._despatch_method = None
        return self._despatch_method

    @property
    def all_seats(self):
        """List of Seat objects on this order."""
        seats = []
        for con in self.concessions:
            seats = seats + con.seats
        return seats

    @property
    def all_seat_ids(self):
        """List of Seat Ids on this order."""
        return [s.seat_id for s in self.all_seats if s.seat_id]

    @property
    def ticket_type_desc(self):
        """Description of the ticket type on this order."""
        return self._core_order.ticket_type_desc

    @property
    def ticket_type_code(self):
        """Code of the ticket type on this order."""
        return self._core_order.ticket_type_code

    @property
    def backend_purchase_reference(self):
        """Supplier reference for this order."""
        return self._core_order.backend_purchase_reference

    @property
    def has_seat_with_restricted_view(self):
        """Boolean to indicate if any seats have restricted views."""
        restricted = False
        for con in self.concessions:
            if con.has_restricted_view:
                restricted = True
        return restricted

    @property
    def unique_seat_text(self):
        """List of unique seat text strings on this order."""
        seat_text = []
        for con in self.concessions:
            for text in con.unique_seat_text:
                if text not in seat_text:
                    seat_text.append(text)
        return seat_text

    @property
    def requested_seats(self):
        """If specific seats were requested, they will be listed here."""
        if self._requested_seats is False:
            if self._core_order.requested_seats:
                self._requested_seats = []
                for seat in self._core_order.requested_seats:
                    self._requested_seats.append(
                        Seat(core_seat=seat)
                    )
            else:
                self._requested_seats = None
        return self._requested_seats

    @property
    def requested_seat_ids(self):
        """List of requested Seat Ids on this order."""
        # Bug fix: requested_seats is None when no seats were requested, so
        # guard before iterating instead of raising TypeError.
        requested = self.requested_seats or []
        return [r.seat_id for r in requested if r.seat_id]

    @property
    def seat_request_status(self):
        """Describes the status of the request for specific seats, i.e. were
        the specified seats successfully selected."""
        return self._core_order.seat_request_status

    @property
    def got_all_requested_seats(self):
        """Boolean indicating if the requested seats were successfully
        reserved.

        Returns True if all requested seats were reserved, False if some or
        none of the requested seats were reserved and None if no seats were
        requested.
        """
        if self.seat_request_status == 'not_requested':
            return None
        elif self.seat_request_status == 'got_all':
            return True
        return False
| |
# -*- coding: utf-8 -*-
import datetime
import hashlib
import logging
import os
import alerts
import enhancements
import jsonschema
import ruletypes
import yaml
import yaml.scanner
from opsgenie import OpsGenieAlerter
from staticconf.loader import yaml_loader
from util import dt_to_ts
from util import dt_to_unix
from util import dt_to_unixms
from util import EAException
from util import ts_to_dt
from util import unix_to_dt
from util import unixms_to_dt
# schema for rule yaml
# Load the schema once at import time; the with-block closes the file
# handle (the original passed a bare open() and leaked it).
with open(os.path.join(os.path.dirname(__file__), 'schema.yaml')) as _schema_file:
    rule_schema = jsonschema.Draft4Validator(yaml.load(_schema_file))

# Required global (config.yaml) and local (rule.yaml) configuration options
required_globals = frozenset(['run_every', 'rules_folder', 'es_host', 'es_port', 'writeback_index', 'buffer_time'])
required_locals = frozenset(['alert', 'type', 'name', 'index'])

# Used to map the names of rules to their classes
rules_mapping = {
    'frequency': ruletypes.FrequencyRule,
    'any': ruletypes.AnyRule,
    'spike': ruletypes.SpikeRule,
    'blacklist': ruletypes.BlacklistRule,
    'whitelist': ruletypes.WhitelistRule,
    'change': ruletypes.ChangeRule,
    'flatline': ruletypes.FlatlineRule,
    'new_term': ruletypes.NewTermsRule,
    'cardinality': ruletypes.CardinalityRule
}

# Used to map names of alerts to their classes
alerts_mapping = {
    'email': alerts.EmailAlerter,
    'jira': alerts.JiraAlerter,
    'opsgenie': OpsGenieAlerter,
    'debug': alerts.DebugAlerter,
    'command': alerts.CommandAlerter
}
def get_module(module_name):
    """Load ``module.file.object`` and return the named object.

    Raises EAException when the dotted path cannot be resolved.
    """
    try:
        module_path, module_class = module_name.rsplit('.', 1)
        base_module = __import__(module_path, globals(), locals(), [module_class])
        return getattr(base_module, module_class)
    except (ImportError, AttributeError, ValueError) as err:
        raise EAException("Could not import module %s: %s" % (module_name, err))
def load_configuration(filename, conf, args=None):
    """ Load a yaml rule file and fill in the relevant fields with objects.

    :param filename: The name of a rule configuration file.
    :param conf: The global configuration dictionary, used for populating defaults.
    :return: The rule configuration, a dictionary.
    """
    try:
        rule = yaml_loader(filename)
    except yaml.scanner.ScannerError as e:
        # Malformed YAML is reported with the offending filename attached.
        raise EAException('Could not parse file %s: %s' % (filename, e))
    # Record where the rule came from so it can be reloaded on change.
    rule['rule_file'] = filename
    # Normalise timedeltas/defaults, then resolve alerts, enhancements
    # and the rule type into live objects.
    load_options(rule, conf, args)
    load_modules(rule, args)
    return rule
def load_options(rule, conf, args=None):
    """ Converts time objects, sets defaults, and validates some settings.

    :param rule: A dictionary of parsed YAML from a rule config file.
    :param conf: The global configuration dictionary, used for populating defaults.
    :raises EAException: on schema violations, bad time formats or missing
        required options.
    """
    try:
        rule_schema.validate(rule)
    except jsonschema.ValidationError as e:
        raise EAException("Invalid Rule: %s\n%s" % (rule.get('name'), e))
    try:
        # Set all time based parameters: each option arrives as a dict of
        # timedelta keyword arguments (e.g. {'minutes': 5}).
        if 'timeframe' in rule:
            rule['timeframe'] = datetime.timedelta(**rule['timeframe'])
        if 'realert' in rule:
            rule['realert'] = datetime.timedelta(**rule['realert'])
        else:
            rule['realert'] = datetime.timedelta(minutes=1)
        if 'aggregation' in rule:
            rule['aggregation'] = datetime.timedelta(**rule['aggregation'])
        if 'query_delay' in rule:
            rule['query_delay'] = datetime.timedelta(**rule['query_delay'])
        if 'buffer_time' in rule:
            rule['buffer_time'] = datetime.timedelta(**rule['buffer_time'])
        if 'exponential_realert' in rule:
            rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert'])
        if 'kibana4_start_timedelta' in rule:
            rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta'])
        if 'kibana4_end_timedelta' in rule:
            rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta'])
    except (KeyError, TypeError) as e:
        raise EAException('Invalid time format used: %s' % (e))

    # Set defaults
    # NOTE: 'realert' is always assigned above, so its setdefault here is a
    # harmless no-op kept for safety.
    rule.setdefault('realert', datetime.timedelta(seconds=0))
    rule.setdefault('aggregation', datetime.timedelta(seconds=0))
    rule.setdefault('query_delay', datetime.timedelta(seconds=0))
    rule.setdefault('timestamp_field', '@timestamp')
    rule.setdefault('filter', [])
    rule.setdefault('timestamp_type', 'iso')
    rule.setdefault('_source_enabled', True)
    rule.setdefault('use_local_time', True)
    rule.setdefault('es_port', conf.get('es_port'))
    rule.setdefault('es_host', conf.get('es_host'))

    # Set timestamp_type conversion function, used when generating queries and processing hits
    rule['timestamp_type'] = rule['timestamp_type'].strip().lower()
    if rule['timestamp_type'] == 'iso':
        rule['ts_to_dt'] = ts_to_dt
        rule['dt_to_ts'] = dt_to_ts
    elif rule['timestamp_type'] == 'unix':
        rule['ts_to_dt'] = unix_to_dt
        rule['dt_to_ts'] = dt_to_unix
    elif rule['timestamp_type'] == 'unix_ms':
        rule['ts_to_dt'] = unixms_to_dt
        rule['dt_to_ts'] = dt_to_unixms
    else:
        raise EAException('timestamp_type must be one of iso, unix, or unix_ms')

    # Set email options from global config
    rule.setdefault('smtp_host', conf.get('smtp_host', 'localhost'))
    # Bug fix: this block previously tested 'smtp_host' and stuffed the
    # *port* value into 'smtp_host'; the intent is to inherit 'smtp_port'
    # from the global config.
    if 'smtp_port' in conf:
        rule.setdefault('smtp_port', conf.get('smtp_port'))
    rule.setdefault('from_addr', conf.get('from_addr', 'ElastAlert'))
    if 'email_reply_to' in conf:
        rule.setdefault('email_reply_to', conf['email_reply_to'])

    # Make sure we have required options
    if required_locals - frozenset(rule.keys()):
        raise EAException('Missing required option(s): %s' % (', '.join(required_locals - frozenset(rule.keys()))))

    if 'include' in rule and type(rule['include']) != list:
        raise EAException('include option must be a list')

    # A list query_key becomes a compound key; keep the joined form as the
    # canonical 'query_key'.
    if isinstance(rule.get('query_key'), list):
        rule['compound_query_key'] = rule['query_key']
        rule['query_key'] = ','.join(rule['query_key'])

    # Add QK, CK and timestamp to include
    include = rule.get('include', ['*'])
    if 'query_key' in rule:
        include.append(rule['query_key'])
    if 'compound_query_key' in rule:
        include += rule['compound_query_key']
    if 'compare_key' in rule:
        include.append(rule['compare_key'])
    if 'top_count_keys' in rule:
        include += rule['top_count_keys']
    include.append(rule['timestamp_field'])
    rule['include'] = list(set(include))

    # Change top_count_keys to .raw
    if 'top_count_keys' in rule and rule.get('raw_count_keys', True):
        keys = rule.get('top_count_keys')
        rule['top_count_keys'] = [key + '.raw' if not key.endswith('.raw') else key for key in keys]

    # Check that generate_kibana_url is compatible with the filters
    if rule.get('generate_kibana_link'):
        for es_filter in rule.get('filter'):
            if es_filter:
                if 'not' in es_filter:
                    es_filter = es_filter['not']
                if 'query' in es_filter:
                    es_filter = es_filter['query']
                # list() makes the subscript work on Python 3 dict views too.
                if list(es_filter.keys())[0] not in ('term', 'query_string', 'range'):
                    raise EAException('generate_kibana_link is incompatible with filters other than term, query_string and range. '
                                      'Consider creating a dashboard and using use_kibana_dashboard instead.')

    # Check that doc_type is provided if use_count/terms_query
    if rule.get('use_count_query') or rule.get('use_terms_query'):
        if 'doc_type' not in rule:
            raise EAException('doc_type must be specified.')

    # Check that query_key is set if use_terms_query
    if rule.get('use_terms_query'):
        if 'query_key' not in rule:
            raise EAException('query_key must be specified with use_terms_query')

    # Warn if use_strf_index is used with %y, %M or %D
    # (%y = short year, %M = minutes, %D = full date)
    if rule.get('use_strftime_index'):
        for token in ['%y', '%M', '%D']:
            if token in rule.get('index'):
                logging.warning('Did you mean to use %s in the index? '
                                'The index will be formatted like %s' % (token,
                                                                         datetime.datetime.now().strftime(rule.get('index'))))
def load_modules(rule, args=None):
    """ Loads things that could be modules. Enhancements, alerts and rule type. """
    # Set match enhancements
    # Each name is either a class in the enhancements module or a dotted
    # import path resolved via get_module.
    match_enhancements = []
    for enhancement_name in rule.get('match_enhancements', []):
        if enhancement_name in dir(enhancements):
            enhancement = getattr(enhancements, enhancement_name)
        else:
            enhancement = get_module(enhancement_name)
        if not issubclass(enhancement, enhancements.BaseEnhancement):
            raise EAException("Enhancement module %s not a subclass of BaseEnhancement" % (enhancement_name))
        match_enhancements.append(enhancement(rule))
    rule['match_enhancements'] = match_enhancements
    # Convert all alerts into Alerter objects
    # A single alert name is normalised to a one-element list first.
    rule_alerts = []
    if type(rule['alert']) != list:
        rule['alert'] = [rule['alert']]
    for alert in rule['alert']:
        if alert in alerts_mapping:
            rule_alerts.append(alerts_mapping[alert])
        else:
            rule_alerts.append(get_module(alert))
            # Only dynamically imported alerts are type-checked here;
            # entries from alerts_mapping are trusted.
            if not issubclass(rule_alerts[-1], alerts.Alerter):
                raise EAException('Alert module %s is not a subclass of Alerter' % (alert))
    rule['alert'] = rule_alerts
    # Convert rule type into RuleType object
    if rule['type'] in rules_mapping:
        rule['type'] = rules_mapping[rule['type']]
    else:
        rule['type'] = get_module(rule['type'])
        if not issubclass(rule['type'], ruletypes.RuleType):
            raise EAException('Rule module %s is not a subclass of RuleType' % (rule['type']))
    # Make sure we have required alert and type options
    # required_options come from the classes themselves, before instantiation.
    reqs = rule['type'].required_options
    for alert in rule['alert']:
        reqs = reqs.union(alert.required_options)
    if reqs - frozenset(rule.keys()):
        raise EAException('Missing required option(s): %s' % (', '.join(reqs - frozenset(rule.keys()))))
    # Instantiate alert
    try:
        rule['alert'] = [alert(rule) for alert in rule['alert']]
    except (KeyError, EAException) as e:
        raise EAException('Error initiating alert %s: %s' % (rule['alert'], e))
    # Instantiate rule
    try:
        rule['type'] = rule['type'](rule, args)
    except (KeyError, EAException) as e:
        raise EAException('Error initializing rule %s: %s' % (rule['name'], e))
def get_file_paths(conf, use_rule=None):
    """Return the list of rule files to load.

    A ``use_rule`` that points at an existing file bypasses the
    rules_folder scan (and the .yaml suffix check) entirely; otherwise
    the folder is walked recursively for .yaml files, optionally
    filtered to the one named by ``use_rule``.
    """
    if use_rule and os.path.isfile(use_rule):
        return [use_rule]
    found = []
    for root, _, filenames in os.walk(conf['rules_folder']):
        for name in filenames:
            if use_rule and use_rule != name:
                continue
            if name.endswith('.yaml'):
                found.append(os.path.join(root, name))
    return found
def load_rules(args):
    """ Creates a conf dictionary for ElastAlerter. Loads the global
    config file and then each rule found in rules_folder.

    :param args: The parsed arguments to ElastAlert
    :return: The global configuration, a dictionary.
    """
    names = []
    filename = args.config
    conf = yaml_loader(filename)
    use_rule = args.rule

    # Make sure we have all required globals
    if required_globals - frozenset(conf.keys()):
        raise EAException('%s must contain %s' % (filename, ', '.join(required_globals - frozenset(conf.keys()))))

    conf.setdefault('max_query_size', 100000)
    conf.setdefault('disable_rules_on_error', True)

    # Convert run_every, buffer_time into a timedelta object
    try:
        conf['run_every'] = datetime.timedelta(**conf['run_every'])
        conf['buffer_time'] = datetime.timedelta(**conf['buffer_time'])
        if 'alert_time_limit' in conf:
            conf['alert_time_limit'] = datetime.timedelta(**conf['alert_time_limit'])
        else:
            conf['alert_time_limit'] = datetime.timedelta(days=2)
        if 'old_query_limit' in conf:
            conf['old_query_limit'] = datetime.timedelta(**conf['old_query_limit'])
        else:
            conf['old_query_limit'] = datetime.timedelta(weeks=1)
    except (KeyError, TypeError) as e:
        raise EAException('Invalid time format used: %s' % (e))

    # Load each rule configuration file
    rules = []
    rule_files = get_file_paths(conf, use_rule)
    for rule_file in rule_files:
        try:
            rule = load_configuration(rule_file, conf, args)
            # NOTE: raising inside this try means the duplicate-name error
            # is re-wrapped by the except below with the filename attached.
            if rule['name'] in names:
                raise EAException('Duplicate rule named %s' % (rule['name']))
        except EAException as e:
            raise EAException('Error loading file %s: %s' % (rule_file, e))

        rules.append(rule)
        names.append(rule['name'])

    if not rules:
        # NOTE(review): logging.exception outside an except block logs
        # "NoneType: None" as the traceback; logging.error may be intended.
        logging.exception('No rules loaded. Exiting')
        exit(1)

    conf['rules'] = rules
    return conf
def get_rule_hashes(conf, use_rule=None):
    """Map each rule file path to the SHA1 digest of its contents.

    Used to detect on-disk rule changes between runs.
    """
    rule_mod_times = {}
    for rule_file in get_file_paths(conf, use_rule):
        # Read as bytes: hashlib requires bytes under Python 3 and the
        # digest is byte-for-byte identical under Python 2.
        with open(rule_file, 'rb') as fh:
            rule_mod_times[rule_file] = hashlib.sha1(fh.read()).digest()
    return rule_mod_times
| |
import threading
import abc
import time
from GA import GA
from GA import GAStop
from server import BColors
class SingletonMixin(object):
    """Mixin providing a lazily created, lock-guarded shared instance."""

    __singleton_lock = threading.Lock()
    __singleton_instance = None

    @classmethod
    def instance(cls):
        """Return the one shared instance, creating it on first call.

        Double-checked locking: the unlocked fast path avoids lock
        contention once the instance exists; the check is repeated under
        the lock so only one thread ever constructs it.
        """
        if cls.__singleton_instance is not None:
            return cls.__singleton_instance
        with cls.__singleton_lock:
            if cls.__singleton_instance is None:
                cls.__singleton_instance = cls()
        return cls.__singleton_instance
class Observer(metaclass=abc.ABCMeta):
    """
    Define an updating interface for objects that should be notified of
    changes in a subject.
    """
    def __init__(self):
        # Subject being observed and the last generation count received
        # from it; both are populated later by the concrete observer.
        self._subject = None
        self._generation_count = None
    @abc.abstractmethod
    def update(self, arg):
        # Called by the subject with its new state; concrete observers
        # must override this.
        pass
class SATController(Observer, SingletonMixin):
    """Singleton controller that runs the GA SAT solver, printing progress
    to the console and pushing encoded PROGRESS/FINISHED messages to every
    client of the optional server thread."""
    def __init__(self):
        Observer.__init__(self)
        # GA instance currently loaded (None when idle); the server thread
        # used to broadcast messages; millisecond timestamps bracketing a
        # solve; and the thread the GA runs on.
        self.GA = None
        self.server_thread = None
        self.time_started = None
        self.time_finished = None
        self.ga_thread = None
    def update(self, arg):
        # Observer callback invoked by the GA; ``arg`` is the current
        # generation count.  Broadcasts a PROGRESS message and logs a
        # one-line status to stdout.
        from SATSolver.RequestHandler import encode
        self._generation_count = arg
        # Empty strings stand in for clause lists before the GA has a best
        # individual / current child.
        if self.GA.best_individual is None:
            best_true_clauses = ''
        else:
            best_true_clauses = self.GA.true_clauses(self.GA.best_individual)
        if self.GA.current_child is None:
            new_true_clauses = ''
        else:
            new_true_clauses = self.GA.true_clauses(self.GA.current_child)
        encoded_message = encode("PROGRESS", [[self._generation_count, self.GA.max_generations],
                                              [self.time_started],
                                              [self.GA.best_individual_fitness],
                                              [str(self.GA.best_individual)],
                                              [self.GA.current_child_fitness],
                                              [str(self.GA.current_child)],
                                              [self.GA.numberOfVariables],
                                              [self.GA.numberOfClauses],
                                              [best_true_clauses],
                                              [new_true_clauses]]
                                 )
        if self.server_thread is not None:
            self.server_thread.push_to_all(encoded_message)
        # Elapsed time formatted in seconds above one second, else ms.
        time_elapsed = int(time.time()*1000)-self.time_started
        if time_elapsed >= 1000:
            time_elapsed = str(time_elapsed/1000) + 's'
        else:
            time_elapsed = str(time_elapsed) + 'ms'
        print("Generations: " + str(self._generation_count) + "/" + str(self.GA.max_generations) + "\t|\tElapsed Time: "
              + time_elapsed + "\t|\tBest Individual's Fitness: "
              + str(self.GA.best_individual_fitness))
    def send_update(self, msg):
        # Broadcast an already-encoded message to all connected clients.
        self.server_thread.push_to_all(msg)
    def has_ga_instance(self):
        # True while a GA is loaded (either solving or awaiting start).
        return self.GA is not None
    def create_ga(self, ga_parameters):
        # Build a GA from the non-None parameters only, and register this
        # controller as the GA's progress observer.
        new_params = {key: ga_parameters[key] for key in ga_parameters.keys() if ga_parameters[key] is not None}
        self.GA = GA(**new_params)
        self.GA.attach(self)
    def start_ga(self):
        # Run the GA to completion, report the outcome (console + clients),
        # then drop the GA instance.  A GAStop raised by the client takes
        # the except branch below.
        try:
            self.time_started = int(time.time()*1000)
            result = self.GA.gasat()
            self.time_finished = int(time.time()*1000)
            time_elapsed = self.time_finished - self.time_started
            if time_elapsed >= 1000:
                time_elapsed = str(time_elapsed / 1000) + 's'
            else:
                time_elapsed = str(time_elapsed) + 'ms'
            # Fitness 0 means every clause is satisfied.
            if result.fitness == 0:
                print(BColors.OKGREEN + "Successfully found a solution in " +
                      time_elapsed + BColors.ENDC)
                print('A solution is: ' + str(result))
            else:
                print(BColors.FAIL + "Could not find a solution in the given amount of generations." + BColors.ENDC)
                print('The best solution found is: ' + str(result))
            if self.server_thread is not None:
                from SATSolver.RequestHandler import encode
                encoded_message = encode("FINISHED", [
                    result.fitness == 0,
                    result.fitness,
                    [self._generation_count, self.GA.max_generations],
                    self.time_started,
                    self.time_finished,
                    str(result),
                    self.GA.true_clauses(result)
                ])
                # Brief pause so the final PROGRESS push is delivered
                # before the FINISHED message.
                time.sleep(0.1)
                self.server_thread.push_to_all(encoded_message)
            self.GA = None
        except GAStop:
            # Solving was interrupted by the client.
            self.time_finished = int(time.time() * 1000)
            time_elapsed = self.time_finished - self.time_started
            if time_elapsed >= 1000:
                time_elapsed = str(time_elapsed / 1000) + 's'
            else:
                time_elapsed = str(time_elapsed) + 'ms'
            if self.GA.best_individual is None:
                # Stopped before any individual was evaluated: report an
                # empty FINISHED payload.
                print(BColors.FAIL + "Could not find a solution, solving stopped by client." + BColors.ENDC)
                if self.server_thread is not None:
                    from SATSolver.RequestHandler import encode
                    encoded_message = encode("FINISHED", [
                        False,
                        None,
                        [0, 0],
                        self.time_started,
                        self.time_finished,
                        '',
                        ''
                    ])
                    self.server_thread.push_to_all(encoded_message)
                self.GA = None
                return
            # Otherwise report the best individual found so far.
            result = self.GA.best_individual
            if result.fitness == 0:
                print(BColors.OKGREEN + "Successfully found a solution in " +
                      time_elapsed + BColors.ENDC)
                print('A solution is: ' + str(result))
            else:
                print(BColors.FAIL + "Could not find a solution, solving stopped by client." + BColors.ENDC)
                print('The best solution found is: ' + str(result))
            if self.server_thread is not None:
                from SATSolver.RequestHandler import encode
                encoded_message = encode("FINISHED", [
                    result.fitness == 0,
                    result.fitness,
                    [self._generation_count, self.GA.max_generations],
                    self.time_started,
                    self.time_finished,
                    str(self.GA.best_individual),
                    self.GA.true_clauses(self.GA.best_individual)
                ])
                self.server_thread.push_to_all(encoded_message)
            self.GA = None
    def parse_formula(self, raw_formula, local=True):
        """
        Takes a list of lines read from the input file and
        """
        # Read all the lines from the file that aren't comments
        # NOTE(review): assumes DIMACS CNF layout — header line holding the
        # variable/clause counts in fields 2 and 3, 'c' comment lines,
        # clauses terminated by 0 — confirm against the input files used.
        if local:
            lines = [line.replace("\n", "") for line in raw_formula if line[0] != "c" and line.strip() != ""]
            number_of_variables, number_of_clauses = int(lines[0].split()[2]), int(lines[0].split()[3])
        else:
            number_of_variables, number_of_clauses = int(raw_formula[0].split()[2]), int(raw_formula[0].split()[3])
            lines = raw_formula
        formula = []
        # Go through the lines and create numberOfClauses clauses
        line = 1
        # for line in range(1, len(lines)):
        try:
            while line < len(lines):
                clause = []
                # We need a while loop as a clause may be split over many lines, but eventually ends with a 0
                end_of_clause = False
                while line < len(lines) and not end_of_clause:
                    # Split the line and append a list of all integers, excluding 0, to clause
                    clause.append([int(variable.strip()) for variable in lines[line].split()
                                   if int(variable.strip()) != 0])
                    # If this line ended with a 0, we reached the end of the clause
                    if int(lines[line].split()[-1].strip()) == 0:
                        end_of_clause = True
                        line += 1
                    # Otherwise continue reading this clause from the next line
                    else:
                        line += 1
                # clause is now a list of lists, so we need to flatten it and convert it to a list
                formula.append(tuple([item for sublist in clause for item in sublist]))
        except Exception as e:
            # Re-raise with the offending line number prepended for debugging.
            raise Exception(str(line) + ' ' + str(e))
        return formula, number_of_variables, number_of_clauses
| |
"""Algorithms for computing symbolic roots of polynomials. """
from __future__ import print_function, division
import math
from sympy.core.symbol import Dummy, Symbol, symbols
from sympy.core import S, I, pi
from sympy.core.compatibility import ordered
from sympy.core.mul import expand_2arg, Mul
from sympy.core.power import Pow
from sympy.core.relational import Eq
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, igcd, comp
from sympy.core.exprtools import factor_terms
from sympy.core.logic import fuzzy_not
from sympy.ntheory import divisors, isprime, nextprime
from sympy.functions import exp, sqrt, im, cos, acos, Piecewise
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polytools import Poly, cancel, factor, gcd_list, discriminant
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.polys.polyerrors import (PolynomialError, GeneratorsNeeded,
DomainError)
from sympy.polys.polyquinticconst import PolyQuintic
from sympy.polys.rationaltools import together
from sympy.simplify import simplify, powsimp
from sympy.utilities import public
from sympy.core.compatibility import reduce, range
def roots_linear(f):
    """Returns a list of roots of a linear polynomial."""
    dom = f.get_domain()
    r = -f.nth(0)/f.nth(1)
    if not dom.is_Numerical:
        # Symbolic coefficients: tidy the root — factor over composite
        # domains, otherwise fall back to general simplification.
        r = factor(r) if dom.is_Composite else simplify(r)
    return [r]
def roots_quadratic(f):
    """Returns a list of roots of a quadratic polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    a, b, c = f.all_coeffs()
    dom = f.get_domain()
    def _sqrt(d):
        # remove squares from square root since both will be represented
        # in the results; a similar thing is happening in roots() but
        # must be duplicated here because not all quadratics are binomials
        co = []
        other = []
        for di in Mul.make_args(d):
            if di.is_Pow and di.exp.is_Integer and di.exp % 2 == 0:
                co.append(Pow(di.base, di.exp//2))
            else:
                other.append(di)
        if co:
            d = Mul(*other)
            co = Mul(*co)
            return co*sqrt(d)
        return sqrt(d)
    def _simplify(expr):
        # Composite domains factor more usefully than they simplify.
        if dom.is_Composite:
            return factor(expr)
        else:
            return simplify(expr)
    if c is S.Zero:
        # x*(a*x + b) = 0: one root is 0, the other -b/a.
        r0, r1 = S.Zero, -b/a
        if not dom.is_Numerical:
            r1 = _simplify(r1)
        elif r1.is_negative:
            # Keep the negative root first for numerical coefficients.
            r0, r1 = r1, r0
    elif b is S.Zero:
        # a*x**2 + c = 0: roots are +/- sqrt(-c/a).
        r = -c/a
        if not dom.is_Numerical:
            r = _simplify(r)
        R = _sqrt(r)
        r0 = -R
        r1 = R
    else:
        # General case via the quadratic formula with discriminant d.
        d = b**2 - 4*a*c
        A = 2*a
        B = -b/A
        if not dom.is_Numerical:
            d = _simplify(d)
            B = _simplify(B)
        D = factor_terms(_sqrt(d)/A)
        r0 = B - D
        r1 = B + D
        if a.is_negative:
            # Swap so the ordering matches the positive-leading case.
            r0, r1 = r1, r0
        elif not dom.is_Numerical:
            r0, r1 = [expand_2arg(i) for i in (r0, r1)]
    return [r0, r1]
def roots_cubic(f, trig=False):
    """Returns a list of roots of a cubic polynomial.

    References
    ==========
    [1] https://en.wikipedia.org/wiki/Cubic_function, General formula for roots,
    (accessed November 17, 2014).
    """
    if trig:
        # Trigonometric method: valid when the discriminant D is positive
        # (three distinct real roots).
        a, b, c, d = f.all_coeffs()
        p = (3*a*c - b**2)/3/a**2
        q = (2*b**3 - 9*a*b*c + 27*a**2*d)/(27*a**3)
        D = 18*a*b*c*d - 4*b**3*d + b**2*c**2 - 4*a*c**3 - 27*a**2*d**2
        if (D > 0) == True:
            rv = []
            for k in range(3):
                rv.append(2*sqrt(-p/3)*cos(acos(3*q/2/p*sqrt(-3/p))/3 - k*2*pi/3))
            # Shift back from the depressed cubic substitution x -> x - b/(3a).
            return [i - b/3/a for i in rv]
    # Work with the monic polynomial x**3 + a*x**2 + b*x + c from here on.
    _, a, b, c = f.monic().all_coeffs()
    if c is S.Zero:
        # Zero constant term: factor out x and solve the remaining quadratic.
        x1, x2 = roots([1, a, b], multiple=True)
        return [x1, S.Zero, x2]
    # Depressed cubic t**3 + p*t + q via the substitution x = t - a/3.
    p = b - a**2/3
    q = c - a*b/3 + 2*a**3/27
    pon3 = p/3
    aon3 = a/3
    u1 = None
    if p is S.Zero:
        if q is S.Zero:
            # Triple root at -a/3.
            return [-aon3]*3
        if q.is_real:
            if q.is_positive:
                u1 = -root(q, 3)
            elif q.is_negative:
                u1 = root(-q, 3)
    elif q is S.Zero:
        # t*(t**2 + p) = 0.
        y1, y2 = roots([1, 0, p], multiple=True)
        return [tmp - aon3 for tmp in [y1, S.Zero, y2]]
    elif q.is_real and q.is_negative:
        u1 = -root(-q/2 + sqrt(q**2/4 + pon3**3), 3)
    coeff = I*sqrt(3)/2
    if u1 is None:
        # Fall back to the general (Cardano) formula in terms of the cube
        # roots of unity u1, u2, u3.
        u1 = S(1)
        u2 = -S.Half + coeff
        u3 = -S.Half - coeff
        a, b, c, d = S(1), a, b, c
        D0 = b**2 - 3*a*c
        D1 = 2*b**3 - 9*a*b*c + 27*a**2*d
        C = root((D1 + sqrt(D1**2 - 4*D0**3))/2, 3)
        return [-(b + uk*C + D0/C/uk)/3/a for uk in [u1, u2, u3]]
    u2 = u1*(-S.Half + coeff)
    u3 = u1*(-S.Half - coeff)
    if p is S.Zero:
        return [u1 - aon3, u2 - aon3, u3 - aon3]
    soln = [
        -u1 + pon3/u1 - aon3,
        -u2 + pon3/u2 - aon3,
        -u3 + pon3/u3 - aon3
    ]
    return soln
def _roots_quartic_euler(p, q, r, a):
    """
    Descartes-Euler solution of the quartic equation

    Parameters
    ==========

    p, q, r: coefficients of ``x**4 + p*x**2 + q*x + r``
    a: shift of the roots

    Notes
    =====

    This is a helper function for ``roots_quartic``.

    Look for solutions of the form ::

      ``x1 = sqrt(R) - sqrt(A + B*sqrt(R))``
      ``x2 = -sqrt(R) - sqrt(A - B*sqrt(R))``
      ``x3 = -sqrt(R) + sqrt(A - B*sqrt(R))``
      ``x4 = sqrt(R) + sqrt(A + B*sqrt(R))``

    To satisfy the quartic equation one must have
    ``p = -2*(R + A); q = -4*B*R; r = (R - A)**2 - B**2*R``
    so that ``R`` must satisfy the Descartes-Euler resolvent equation
    ``64*R**3 + 32*p*R**2 + (4*p**2 - 16*r)*R - q**2 = 0``

    If the resolvent does not have a rational solution, return None;
    in that case it is likely that the Ferrari method gives a simpler
    solution.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.polys.polyroots import _roots_quartic_euler
    >>> p, q, r = -S(64)/5, -S(512)/125, -S(1024)/3125
    >>> _roots_quartic_euler(p, q, r, S(0))[0]
    -sqrt(32*sqrt(5)/125 + 16/5) + 4*sqrt(5)/5
    """
    # solve the resolvent equation
    x = Symbol('x')
    eq = 64*x**3 + 32*p*x**2 + (4*p**2 - 16*r)*x - q**2
    # cubics=False: we only want roots the cubic formula is not needed for,
    # i.e. rational ones; irrational resolvent roots would not simplify.
    xsols = list(roots(Poly(eq, x), cubics=False).keys())
    xsols = [sol for sol in xsols if sol.is_rational]
    if not xsols:
        return None
    # Take the largest rational resolvent root for the simplest radicals.
    R = max(xsols)
    c1 = sqrt(R)
    B = -q*c1/(4*R)
    A = -R - p/2
    c2 = sqrt(A + B)
    c3 = sqrt(A - B)
    # The four root expressions from the Notes section, shifted by -a.
    return [c1 - c2 - a, -c1 - c3 - a, -c1 + c3 - a, c1 + c2 - a]
def roots_quartic(f):
    r"""
    Returns a list of roots of a quartic polynomial.

    There are many references for solving quartic expressions available [1-5].
    This reviewer has found that many of them require one to select from among
    2 or more possible sets of solutions and that some solutions work when one
    is searching for real roots but don't work when searching for complex roots
    (though this is not always stated clearly). The following routine has been
    tested and found to be correct for 0, 2 or 4 complex roots.

    The quasisymmetric case solution [6] looks for quartics that have the form
    `x**4 + A*x**3 + B*x**2 + C*x + D = 0` where `(C/A)**2 = D`.

    Although no general solution that is always applicable for all
    coefficients is known to this reviewer, certain conditions are tested
    to determine the simplest 4 expressions that can be returned:

      1) `f = c + a*(a**2/8 - b/2) == 0`
      2) `g = d - a*(a*(3*a**2/256 - b/16) + c/4) = 0`
      3) if `f != 0` and `g != 0` and `p = -d + a*c/4 - b**2/12` then
        a) `p == 0`
        b) `p != 0`

    Examples
    ========

    >>> from sympy import Poly, symbols, I
    >>> from sympy.polys.polyroots import roots_quartic

    >>> r = roots_quartic(Poly('x**4-6*x**3+17*x**2-26*x+20'))

    >>> # 4 complex roots: 1+-I*sqrt(3), 2+-I
    >>> sorted(str(tmp.evalf(n=2)) for tmp in r)
    ['1.0 + 1.7*I', '1.0 - 1.7*I', '2.0 + 1.0*I', '2.0 - 1.0*I']

    References
    ==========

    1. http://mathforum.org/dr.math/faq/faq.cubic.equations.html
    2. http://en.wikipedia.org/wiki/Quartic_function#Summary_of_Ferrari.27s_method
    3. http://planetmath.org/encyclopedia/GaloisTheoreticDerivationOfTheQuarticFormula.html
    4. http://staff.bath.ac.uk/masjhd/JHD-CA.pdf
    5. http://www.albmath.org/files/Math_5713.pdf
    6. http://www.statemaster.com/encyclopedia/Quartic-equation
    7. eqworld.ipmnet.ru/en/solutions/ae/ae0108.pdf
    """
    _, a, b, c, d = f.monic().all_coeffs()

    if not d:
        # Zero constant term: x = 0 is a root, remainder is a cubic.
        return [S.Zero] + roots([1, a, b, c], multiple=True)
    elif (c/a)**2 == d:
        # Quasisymmetric case [6]: reduces to two quadratics.
        x, m = f.gen, c/a

        g = Poly(x**2 + a*x + b - 2*m, x)

        z1, z2 = roots_quadratic(g)

        h1 = Poly(x**2 - z1*x + m, x)
        h2 = Poly(x**2 - z2*x + m, x)

        r1 = roots_quadratic(h1)
        r2 = roots_quadratic(h2)

        return r1 + r2
    else:
        # Depressed quartic y**4 + e*y**2 + f*y + g via x -> y - a/4.
        # NOTE: `f` is deliberately rebound from the Poly to a coefficient
        # here; the Poly is no longer needed.
        a2 = a**2
        e = b - 3*a2/8
        f = c + a*(a2/8 - b/2)
        g = d - a*(a*(3*a2/256 - b/16) + c/4)
        aon4 = a/4

        if f is S.Zero:
            # Biquadratic: y**4 + e*y**2 + g, solve as quadratic in y**2.
            y1, y2 = [sqrt(tmp) for tmp in
                      roots([1, e, g], multiple=True)]
            return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]
        if g is S.Zero:
            # y = 0 is a root of the depressed quartic; remainder is cubic.
            y = [S.Zero] + roots([1, 0, e, f], multiple=True)
            return [tmp - aon4 for tmp in y]
        else:
            # Descartes-Euler method, see [7]
            sols = _roots_quartic_euler(e, f, g, aon4)
            if sols:
                return sols
            # Ferrari method, see [1, 2].
            # (e, f, g from the depressed form above are reused directly;
            # an earlier revision recomputed them here redundantly.)
            p = -e**2/12 - g
            q = -e**3/108 + e*g/3 - f**2/8
            TH = Rational(1, 3)

            def _ans(y):
                # Assemble the four roots from a resolvent-cubic root y.
                w = sqrt(e + 2*y)
                arg1 = 3*e + 2*y
                arg2 = 2*f/w
                ans = []
                for s in [-1, 1]:
                    root = sqrt(-(arg1 + s*arg2))
                    for t in [-1, 1]:
                        ans.append((s*w - t*root)/2 - aon4)
                return ans

            # p == 0 case
            y1 = -5*e/6 - q**TH
            if p.is_zero:
                return _ans(y1)

            # if p != 0 then u below is not 0
            root = sqrt(q**2/4 + p**3/27)
            r = -q/2 + root  # or -q/2 - root
            u = r**TH  # primary root of solve(x**3 - r, x)
            y2 = -5*e/6 + u - p/u/3
            if fuzzy_not(p.is_zero):
                return _ans(y2)

            # sort it out once they know the values of the coefficients
            return [Piecewise((a1, Eq(p, 0)), (a2, True))
                    for a1, a2 in zip(_ans(y1), _ans(y2))]
def roots_binomial(f):
    """Returns a list of roots of a binomial polynomial. If the domain is ZZ
    then the roots will be sorted with negatives coming before positives.
    The ordering will be the same for any numerical coefficients as long as
    the assumptions tested are correct, otherwise the ordering will not be
    sorted (but will be canonical).
    """
    n = f.degree()

    # A binomial a*x**n + b: the roots are the n-th roots of -b/a.
    a, b = f.nth(n), f.nth(0)
    base = -cancel(b/a)
    alpha = root(base, n)

    if alpha.is_number:
        alpha = alpha.expand(complex=True)

    # define some parameters that will allow us to order the roots.
    # If the domain is ZZ this is guaranteed to return roots sorted
    # with reals before non-real roots and non-real sorted according
    # to real part and imaginary part, e.g. -1, 1, -1 + I, 2 - I
    neg = base.is_negative
    even = n % 2 == 0
    if neg:
        # `big` distinguishes |base| < 1 from |base| >= 1 for a negative
        # base with even n; it affects the pairing order below.
        if even == True and (base + 1).is_positive:
            big = True
        else:
            big = False

    # get the indices in the right order so the computed
    # roots will be sorted when the domain is ZZ
    ks = []
    imax = n//2
    if even:
        ks.append(imax)
        imax -= 1
    if not neg:
        ks.append(0)
    for i in range(imax, 0, -1):
        if neg:
            ks.extend([i, -i])
        else:
            ks.extend([-i, i])
    if neg:
        ks.append(0)
        if big:
            for i in range(0, len(ks), 2):
                # NOTE(review): `pair` is rebuilt reversed but never written
                # back into `ks`, so this loop has no observable effect —
                # looks like `ks[i: i + 2] = pair` was intended; confirm
                # against the upstream implementation before changing.
                pair = ks[i: i + 2]
                pair = list(reversed(pair))

    # compute the roots
    # zeta runs over the n-th roots of unity; each root is alpha*zeta.
    roots, d = [], 2*I*pi/n
    for k in ks:
        zeta = exp(k*d).expand(complex=True)
        roots.append((alpha*zeta).expand(power_base=False))

    return roots
def _inv_totient_estimate(m):
    """
    Find ``(L, U)`` such that ``L <= phi^-1(m) <= U``.

    Examples
    ========

    >>> from sympy.polys.polyroots import _inv_totient_estimate

    >>> _inv_totient_estimate(192)
    (192, 840)
    >>> _inv_totient_estimate(400)
    (400, 1750)
    """
    # Only primes p with (p - 1) | m can divide a totient preimage of m.
    candidates = [d + 1 for d in divisors(m) if isprime(d + 1)]

    prod_p, prod_p1 = 1, 1
    for prime in candidates:
        prod_p *= prime
        prod_p1 *= prime - 1

    # First (coarse) upper bound: m * prod(p) / prod(p - 1).
    L = m
    U = int(math.ceil(m*(float(prod_p)/prod_p1)))

    # Refine: accumulate the smallest odd primes until their running
    # product exceeds the coarse bound, then back the last one out.
    running = prime = 2
    small = []
    while running <= U:
        prime = nextprime(prime)
        small.append(prime)
        running *= prime
    running //= prime  # undo the prime that pushed the product past U

    denom = 1
    for prime in small[:-1]:
        denom *= prime - 1

    U = int(math.ceil(m*(float(running)/denom)))
    return L, U
def roots_cyclotomic(f, factor=False):
    """Compute roots of cyclotomic polynomials. """
    # Identify which cyclotomic polynomial Phi_n equals f by scanning the
    # candidate range derived from the inverse-totient estimate.
    L, U = _inv_totient_estimate(f.degree())

    for n in range(L, U + 1):
        g = cyclotomic_poly(n, f.gen, polys=True)

        if f == g:
            break
    else:  # pragma: no cover
        raise RuntimeError("failed to find index of a cyclotomic polynomial")

    roots = []

    if not factor:
        # get the indices in the right order so the computed
        # roots will be sorted
        h = n//2
        # ks: the exponents coprime to n, i.e. the primitive n-th roots.
        ks = [i for i in range(1, n + 1) if igcd(i, n) == 1]
        # sort so that conjugate pairs come out adjacent and ordered
        ks.sort(key=lambda x: (x, -1) if x <= h else (abs(x - n), 1))

        d = 2*I*pi/n

        for k in reversed(ks):
            roots.append(exp(k*d).expand(complex=True))
    else:
        # Factor f over QQ(zeta_n) and read each root off a linear factor
        # as the negated trailing coefficient.
        g = Poly(f, extension=root(-1, n))

        for h, _ in ordered(g.factor_list()[1]):
            roots.append(-h.TC())

    return roots
def roots_quintic(f):
    """
    Calculate exact roots of a solvable quintic.

    Returns a list of the five roots if ``f`` is an irreducible, solvable
    quintic in the standard form ``x**5 + p*x**3 + q*x**2 + r*x + s``
    (after monic normalization); otherwise returns ``[]`` so the caller
    can fall back to other methods.
    """
    result = []
    coeff_5, coeff_4, p, q, r, s = f.all_coeffs()

    # Eqn must be of the form x^5 + px^3 + qx^2 + rx + s
    if coeff_4:
        return result

    if coeff_5 != 1:
        l = [p/coeff_5, q/coeff_5, r/coeff_5, s/coeff_5]
        if not all(coeff.is_Rational for coeff in l):
            return result
        f = Poly(f/coeff_5)
    quintic = PolyQuintic(f)

    # Eqn standardized. Algo for solving starts here
    if not f.is_irreducible:
        return result

    f20 = quintic.f20
    # Check if f20 has linear factors over domain Z
    if f20.is_irreducible:
        return result

    # Now, we know that f is solvable
    for _factor in f20.factor_list()[1]:
        if _factor[0].is_linear:
            theta = _factor[0].root(0)
            break
    d = discriminant(f)
    delta = sqrt(d)
    # zeta = a fifth root of unity
    zeta1, zeta2, zeta3, zeta4 = quintic.zeta
    T = quintic.T(theta, d)
    tol = S(1e-10)
    alpha = T[1] + T[2]*delta
    alpha_bar = T[1] - T[2]*delta
    beta = T[3] + T[4]*delta
    beta_bar = T[3] - T[4]*delta

    disc = alpha**2 - 4*beta
    disc_bar = alpha_bar**2 - 4*beta_bar

    l0 = quintic.l0(theta)

    l1 = _quintic_simplify((-alpha + sqrt(disc)) / S(2))
    l4 = _quintic_simplify((-alpha - sqrt(disc)) / S(2))

    l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / S(2))
    l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / S(2))

    order = quintic.order(theta, d)
    test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )
    # Comparing floats
    if not comp(test, 0, tol):
        l2, l3 = l3, l2

    # Now we have correct order of l's
    R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4
    R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4
    R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4
    R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4

    Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]
    sol = Symbol('sol')

    # Simplifying improves performace a lot for exact expressions
    R1 = _quintic_simplify(R1)
    R2 = _quintic_simplify(R2)
    R3 = _quintic_simplify(R3)
    R4 = _quintic_simplify(R4)

    # Solve imported here. Causing problems if imported as 'solve'
    # and hence the changed name
    from sympy.solvers.solvers import solve as _solve
    a, b = symbols('a b', cls=Dummy)
    _sol = _solve( sol**5 - a - I*b, sol)
    for i in range(5):
        _sol[i] = factor(_sol[i])
    R1 = R1.as_real_imag()
    R2 = R2.as_real_imag()
    R3 = R3.as_real_imag()
    R4 = R4.as_real_imag()

    # Each R_k yields five fifth-root candidates; exactly one per k
    # belongs to the correct branch, chosen by the numeric tests below.
    for i, root in enumerate(_sol):
        Res[1][i] = _quintic_simplify(root.subs({ a: R1[0], b: R1[1] }))
        Res[2][i] = _quintic_simplify(root.subs({ a: R2[0], b: R2[1] }))
        Res[3][i] = _quintic_simplify(root.subs({ a: R3[0], b: R3[1] }))
        Res[4][i] = _quintic_simplify(root.subs({ a: R4[0], b: R4[1] }))

    for i in range(1, 5):
        for j in range(5):
            Res_n[i][j] = Res[i][j].n()
            Res[i][j] = _quintic_simplify(Res[i][j])
    r1 = Res[1][0]
    r1_n = Res_n[1][0]

    # Pick the r4 candidate whose product with r1 is (numerically) real.
    for i in range(5):
        if comp(im(r1_n*Res_n[4][i]), 0, tol):
            r4 = Res[4][i]
            break

    # Now we have various Res values. Each will be a list of five
    # values. We have to pick one r value from those five for each Res
    u, v = quintic.uv(theta, d)
    testplus = (u + v*delta*sqrt(5)).n()
    testminus = (u - v*delta*sqrt(5)).n()

    # Evaluated numbers suffixed with _n
    # We will use evaluated numbers for calculation. Much faster.
    r4_n = r4.n()
    r2 = r3 = None

    for i in range(5):
        r2temp_n = Res_n[2][i]
        for j in range(5):
            # Again storing away the exact number and using
            # evaluated numbers in computations
            r3temp_n = Res_n[3][j]
            if( comp( r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus, 0, tol) and
                comp( r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus, 0, tol ) ):
                r2 = Res[2][i]
                r3 = Res[3][j]
                break
        # `is not None` rather than truthiness: a legitimately zero r2
        # would otherwise fail to stop the search.
        if r2 is not None:
            break

    # Now, we have r's so we can get roots
    x1 = (r1 + r2 + r3 + r4)/5
    x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5
    x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5
    x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5
    x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5
    result = [x1, x2, x3, x4, x5]

    # Now check if solutions are distinct
    saw = set()
    for r in result:
        r = r.n(2)
        if r in saw:
            # Roots were identical. Abort, return []
            # and fall back to usual solve
            return []
        saw.add(r)
    return result
def _quintic_simplify(expr):
    """Canonicalize a quintic-solver intermediate expression: combine
    powers, cancel common factors, then put it over a common denominator."""
    return together(cancel(powsimp(expr)))
def _integer_basis(poly):
    """Compute coefficient basis for a polynomial over integers.

    Returns the integer ``div`` such that substituting ``x = div*y``
    ``p(x) = m*q(y)`` where the coefficients of ``q`` are smaller
    than those of ``p``.

    For example ``x**5 + 512*x + 1024 = 0``
    with ``div = 4`` becomes ``y**5 + 2*y + 1 = 0``

    Returns the integer ``div`` or ``None`` if there is no possible scaling.

    Examples
    ========

    >>> from sympy.polys import Poly
    >>> from sympy.abc import x
    >>> from sympy.polys.polyroots import _integer_basis
    >>> p = Poly(x**5 + 512*x + 1024, x, domain='ZZ')
    >>> _integer_basis(p)
    4
    """
    exponents, coefficients = list(zip(*poly.terms()))

    exponents, = list(zip(*exponents))
    coefficients = list(map(abs, coefficients))

    # Scaling only helps when the leading coefficient is the small end;
    # otherwise no divisor can shrink the polynomial.
    if coefficients[0] < coefficients[-1]:
        coefficients = list(reversed(coefficients))
        deg = exponents[0]
        exponents = [deg - k for k in reversed(exponents)]
    else:
        return None

    # The constant-term entry never constrains the divisor.
    exponents = exponents[:-1]
    coefficients = coefficients[:-1]

    # Try candidate divisors largest-first (skipping the trivial 1); the
    # first one for which div**exponent divides every coefficient wins.
    for div in reversed(divisors(gcd_list(coefficients))[1:]):
        if all(coeff % div**expo == 0
               for expo, coeff in zip(exponents, coefficients)):
            return div

    return None
def preprocess_roots(poly):
    """Try to get rid of symbolic coefficients from ``poly``. """
    # `coeff` accumulates the scaling factor pulled out of the polynomial;
    # roots of the returned poly are multiplied by it afterwards.
    coeff = S.One

    try:
        _, poly = poly.clear_denoms(convert=True)
    except DomainError:
        return coeff, poly

    poly = poly.primitive()[1]
    poly = poly.retract()

    # TODO: This is fragile. Figure out how to make this independent of construct_domain().
    if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):
        poly = poly.inject()

        # strips: per-generator exponent sequences across all monomials;
        # the first strip belongs to the main generator.
        strips = list(zip(*poly.monoms()))
        gens = list(poly.gens[1:])

        base, strips = strips[0], strips[1:]

        for gen, strip in zip(list(gens), strips):
            reverse = False

            if strip[0] < strip[-1]:
                strip = reversed(strip)
                reverse = True

            ratio = None

            # A generator can be eliminated if its exponents are a constant
            # multiple of the main generator's exponents.
            for a, b in zip(base, strip):
                if not a and not b:
                    continue
                elif not a or not b:
                    break
                elif b % a != 0:
                    break
                else:
                    _ratio = b // a

                    if ratio is None:
                        ratio = _ratio
                    elif ratio != _ratio:
                        break
            else:
                # Loop completed without break: eliminate `gen` by
                # evaluating it at 1 and folding it into `coeff`.
                if reverse:
                    ratio = -ratio

                poly = poly.eval(gen, 1)
                coeff *= gen**(-ratio)
                gens.remove(gen)

        if gens:
            poly = poly.eject(*gens)

    # Over ZZ, also try a pure integer rescaling x -> basis*y.
    if poly.is_univariate and poly.get_domain().is_ZZ:
        basis = _integer_basis(poly)

        if basis is not None:
            n = poly.degree()

            def func(k, coeff):
                # Divide each coefficient by basis**(n - exponent).
                return coeff//basis**(n - k[0])

            poly = poly.termwise(func)
            coeff *= basis

    return coeff, poly
@public
def roots(f, *gens, **flags):
    """
    Computes symbolic roots of a univariate polynomial.

    Given a univariate polynomial f with symbolic coefficients (or
    a list of the polynomial's coefficients), returns a dictionary
    with its roots and their multiplicities.

    Only roots expressible via radicals will be returned.  To get
    a complete set of roots use RootOf class or numerical methods
    instead. By default cubic and quartic formulas are used in
    the algorithm. To disable them because of unreadable output
    set ``cubics=False`` or ``quartics=False`` respectively. If cubic
    roots are real but are expressed in terms of complex numbers
    (casus irreducibilis [1]) the ``trig`` flag can be set to True to
    have the solutions returned in terms of cosine and inverse cosine
    functions.

    To get roots from a specific domain set the ``filter`` flag with
    one of the following specifiers: Z, Q, R, I, C. By default all
    roots are returned (this is equivalent to setting ``filter='C'``).

    By default a dictionary is returned giving a compact result in
    case of multiple roots.  However to get a list containing all
    those roots set the ``multiple`` flag to True; the list will
    have identical roots appearing next to each other in the result.
    (For a given Poly, the all_roots method will give the roots in
    sorted numerical order.)

    Examples
    ========

    >>> from sympy import Poly, roots
    >>> from sympy.abc import x, y

    >>> roots(x**2 - 1, x)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-1, x)
    >>> roots(p)
    {-1: 1, 1: 1}

    >>> p = Poly(x**2-y, x, y)

    >>> roots(Poly(p, x))
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots(x**2 - y, x)
    {-sqrt(y): 1, sqrt(y): 1}

    >>> roots([1, 0, -1])
    {-1: 1, 1: 1}

    References
    ==========

    1. http://en.wikipedia.org/wiki/Cubic_function#Trigonometric_.28and_hyperbolic.29_method
    """
    from sympy.polys.polytools import to_rational_coeffs
    flags = dict(flags)

    auto = flags.pop('auto', True)
    cubics = flags.pop('cubics', True)
    trig = flags.pop('trig', False)
    quartics = flags.pop('quartics', True)
    quintics = flags.pop('quintics', False)
    multiple = flags.pop('multiple', False)
    filter = flags.pop('filter', None)
    predicate = flags.pop('predicate', None)

    if isinstance(f, list):
        if gens:
            raise ValueError('redundant generators given')

        x = Dummy('x')

        poly, i = {}, len(f) - 1

        for coeff in f:
            poly[i], i = sympify(coeff), i - 1

        f = Poly(poly, x, field=True)
    else:
        try:
            f = Poly(f, *gens, **flags)
            # BUGFIX: `f.length` (the bound method) was compared to 2,
            # which is always False and made this branch unreachable;
            # call it.  Also `others` was never initialized.
            if f.length() == 2 and f.degree() != 1:
                # check for foo**n factors in the constant
                n = f.degree()
                npow_bases = []
                others = []
                expr = f.as_expr()
                con = expr.as_independent(*gens)[0]
                for p in Mul.make_args(con):
                    if p.is_Pow and not p.exp % n:
                        npow_bases.append(p.base**(p.exp/n))
                    else:
                        others.append(p)
                if npow_bases:
                    # Replace the n-th-power content with a dummy B**n,
                    # solve, then substitute the base back into the roots.
                    b = Mul(*npow_bases)
                    B = Dummy()
                    d = roots(Poly(expr - con + B**n*Mul(*others), *gens,
                        **flags), *gens, **flags)
                    rv = {}
                    for k, v in d.items():
                        rv[k.subs(B, b)] = v
                    return rv

        except GeneratorsNeeded:
            if multiple:
                return []
            else:
                return {}

    if f.is_multivariate:
        raise PolynomialError('multivariate polynomials are not supported')

    def _update_dict(result, root, k):
        # Accumulate multiplicity `k` for `root` in the result dict.
        if root in result:
            result[root] += k
        else:
            result[root] = k

    def _try_decompose(f):
        """Find roots using functional decomposition. """
        factors, roots = f.decompose(), []

        for root in _try_heuristics(factors[0]):
            roots.append(root)

        for factor in factors[1:]:
            previous, roots = list(roots), []

            for root in previous:
                g = factor - Poly(root, f.gen)

                for root in _try_heuristics(g):
                    roots.append(root)

        return roots

    def _try_heuristics(f):
        """Find roots using formulas and some tricks. """
        if f.is_ground:
            return []
        if f.is_monomial:
            return [S(0)]*f.degree()

        if f.length() == 2:
            if f.degree() == 1:
                return list(map(cancel, roots_linear(f)))
            else:
                return roots_binomial(f)

        result = []

        # Peel off a rational root at +-1 if present, to lower the degree.
        for i in [-1, 1]:
            if not f.eval(i):
                f = f.quo(Poly(f.gen - i, f.gen))
                result.append(i)
                break

        n = f.degree()

        if n == 1:
            result += list(map(cancel, roots_linear(f)))
        elif n == 2:
            result += list(map(cancel, roots_quadratic(f)))
        elif f.is_cyclotomic:
            result += roots_cyclotomic(f)
        elif n == 3 and cubics:
            result += roots_cubic(f, trig=trig)
        elif n == 4 and quartics:
            result += roots_quartic(f)
        elif n == 5 and quintics:
            result += roots_quintic(f)

        return result

    # Factor out x**k; the zero root (if any) is added back at the end.
    (k,), f = f.terms_gcd()

    if not k:
        zeros = {}
    else:
        zeros = {S(0): k}

    coeff, f = preprocess_roots(f)

    if auto and f.get_domain().has_Ring:
        f = f.to_field()

    rescale_x = None
    translate_x = None

    result = {}

    if not f.is_ground:
        if not f.get_domain().is_Exact:
            for r in f.nroots():
                _update_dict(result, r, 1)
        elif f.degree() == 1:
            result[roots_linear(f)[0]] = 1
        elif f.length() == 2:
            roots_fun = roots_quadratic if f.degree() == 2 else roots_binomial
            for r in roots_fun(f):
                _update_dict(result, r, 1)
        else:
            _, factors = Poly(f.as_expr()).factor_list()
            if len(factors) == 1 and f.degree() == 2:
                for r in roots_quadratic(f):
                    _update_dict(result, r, 1)
            else:
                if len(factors) == 1 and factors[0][1] == 1:
                    if f.get_domain().is_EX:
                        # Try rewriting with rational coefficients first:
                        # a rescaling/translation may expose solvable form.
                        res = to_rational_coeffs(f)
                        if res:
                            if res[0] is None:
                                translate_x, f = res[2:]
                            else:
                                rescale_x, f = res[1], res[-1]
                            result = roots(f)
                            if not result:
                                for root in _try_decompose(f):
                                    _update_dict(result, root, 1)
                    else:
                        for root in _try_decompose(f):
                            _update_dict(result, root, 1)
                else:
                    for factor, k in factors:
                        for r in _try_heuristics(Poly(factor, f.gen, field=True)):
                            _update_dict(result, r, k)

    if coeff is not S.One:
        _result, result, = result, {}

        for root, k in _result.items():
            result[coeff*root] = k

    result.update(zeros)

    if filter not in [None, 'C']:
        handlers = {
            'Z': lambda r: r.is_Integer,
            'Q': lambda r: r.is_Rational,
            'R': lambda r: r.is_real,
            'I': lambda r: r.is_imaginary,
        }

        try:
            query = handlers[filter]
        except KeyError:
            raise ValueError("Invalid filter: %s" % filter)

        for zero in dict(result).keys():
            if not query(zero):
                del result[zero]

    if predicate is not None:
        for zero in dict(result).keys():
            if not predicate(zero):
                del result[zero]

    # Undo the rational-coefficient transformations, if any were applied.
    if rescale_x:
        result1 = {}
        for k, v in result.items():
            result1[k*rescale_x] = v
        result = result1

    if translate_x:
        result1 = {}
        for k, v in result.items():
            result1[k + translate_x] = v
        result = result1

    if not multiple:
        return result
    else:
        zeros = []

        for zero in ordered(result):
            zeros.extend([zero]*result[zero])

        return zeros
def root_factors(f, *gens, **args):
    """
    Returns all factors of a univariate polynomial.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.polys.polyroots import root_factors

    >>> root_factors(x**2 - y, x)
    [x - sqrt(y), x + sqrt(y)]
    """
    args = dict(args)
    filter = args.pop('filter', None)

    F = Poly(f, *gens, **args)

    if not F.is_Poly:
        return [f]

    if F.is_multivariate:
        raise ValueError('multivariate polynomials are not supported')

    x = F.gens[0]
    zeros = roots(F, filter=filter)

    if zeros:
        factors = []
        # One linear factor per root, repeated by multiplicity.
        for zero, multiplicity in ordered(zeros.items()):
            factors.extend([Poly(x - zero, x)]*multiplicity)

        # Roots not expressible in radicals leave a residual cofactor.
        if len(factors) < F.degree():
            product = reduce(lambda p, q: p*q, factors)
            factors.append(F.quo(product))
    else:
        factors = [F]

    if not isinstance(f, Poly):
        factors = [f.as_expr() for f in factors]

    return factors
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations:
"""ServiceEndpointPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    service_endpoint_policy_name: str,
    parameters: "_models.ServiceEndpointPolicy",
    **kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicy"]:
    """Creates or updates a service Endpoint Policies.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param service_endpoint_policy_name: The name of the service endpoint policy.
    :type service_endpoint_policy_name: str
    :param parameters: Parameters supplied to the create or update service endpoint policy
     operation.
    :type parameters: ~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicy
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicy]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling_arg = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceEndpointPolicy"]
    poll_delay = kwargs.pop('polling_interval', self._config.polling_interval)
    continuation = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if continuation is None:
        # No saved poller state: issue the initial PUT and keep the raw
        # pipeline response (cls identity lambda) for the poller.
        initial_response = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            service_endpoint_policy_name=service_endpoint_policy_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These kwargs only apply to the initial request, not to polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def deserialize_final(pipeline_response):
        # Convert the terminal LRO response into the model (or caller's type).
        result = self._deserialize('ServiceEndpointPolicy', pipeline_response)
        if custom_cls:
            return custom_cls(pipeline_response, result, {})
        return result

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling_arg is True:
        polling_method = AsyncARMPolling(poll_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling_arg is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling_arg

    if continuation:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=continuation,
            client=self._client,
            deserialization_callback=deserialize_final
        )
    return AsyncLROPoller(self._client, initial_response, deserialize_final, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'}  # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    service_endpoint_policy_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.ServiceEndpointPolicy":
    """Updates tags of a service endpoint policy.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param service_endpoint_policy_name: The name of the service endpoint policy.
    :type service_endpoint_policy_name: str
    :param parameters: Parameters supplied to update service endpoint policy tags.
    :type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ServiceEndpointPolicy, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicy
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceEndpointPolicy"]
    errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Expand the operation URL template with the serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self.update_tags.metadata['url'], **path_args)  # type: ignore

    # Query string, headers, and serialized JSON body for the PATCH.
    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", "application/json", 'str'),
    }  # type: Dict[str, Any]
    body = self._serialize.body(parameters, 'TagsObject')

    request = self._client.patch(request_url, params, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'}  # type: ignore
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
    """Gets all the service endpoint policies in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicyListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceEndpointPolicyListResult"]
    errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"

    def build_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", "application/json", 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation links already embed their own query string.
            return self._client.get(next_link, {}, headers)
        path_args = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(self.list.metadata['url'], **path_args)  # type: ignore
        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, params, headers)

    async def unpack_page(pipeline_response):
        # Deserialize one page and return (continuation link, items).
        page = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
        items = page.value
        if custom_cls:
            items = custom_cls(items)
        return page.next_link or None, AsyncList(items)

    async def fetch_page(next_link=None):
        request = build_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(fetch_page, unpack_page)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
    """Gets all service endpoint Policies in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.ServiceEndpointPolicyListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceEndpointPolicyListResult"]
    errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"

    def build_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", "application/json", 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation links already embed their own query string.
            return self._client.get(next_link, {}, headers)
        path_args = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(self.list_by_resource_group.metadata['url'], **path_args)  # type: ignore
        params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, params, headers)

    async def unpack_page(pipeline_response):
        # Deserialize one page and return (continuation link, items).
        page = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
        items = page.value
        if custom_cls:
            items = custom_cls(items)
        return page.next_link or None, AsyncList(items)

    async def fetch_page(next_link=None):
        request = build_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(fetch_page, unpack_page)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'}  # type: ignore
| |
# Python test set -- part 6, built-in types
# NOTE: this is Python 2 code (print statements, `raise E, msg`, long literals).
from test_support import *
print '6. Built-in types'
print '6.1 Truth value testing'
# Falsy values: None, numeric zero of every type, and empty containers.
if None: raise TestFailed, 'None is true instead of false'
if 0: raise TestFailed, '0 is true instead of false'
if 0L: raise TestFailed, '0L is true instead of false'
if 0.0: raise TestFailed, '0.0 is true instead of false'
if '': raise TestFailed, '\'\' is true instead of false'
if (): raise TestFailed, '() is true instead of false'
if []: raise TestFailed, '[] is true instead of false'
if {}: raise TestFailed, '{} is true instead of false'
# Truthy values: non-zero numbers and non-empty containers.
if not 1: raise TestFailed, '1 is false instead of true'
if not 1L: raise TestFailed, '1L is false instead of true'
if not 1.0: raise TestFailed, '1.0 is false instead of true'
if not 'x': raise TestFailed, '\'x\' is false instead of true'
if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
if not [1]: raise TestFailed, '[1] is false instead of true'
if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
# Functions, classes, modules and plain instances are all true by default.
def f(): pass
class C: pass
import sys
x = C()
if not f: raise TestFailed, 'f is false instead of true'
if not C: raise TestFailed, 'C is false instead of true'
if not sys: raise TestFailed, 'sys is false instead of true'
if not x: raise TestFailed, 'x is false instead of true'
print '6.2 Boolean operations'
if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
if 1 and 1: pass
else: raise TestFailed, '1 and 1 is false instead of false'
if not 1: raise TestFailed, 'not 1 is true instead of false'
print '6.3 Comparisons'
# Chained comparisons evaluate pairwise (a < b <= c means a<b and b<=c).
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: raise TestFailed, 'int comparisons failed'
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: raise TestFailed, 'long int comparisons failed'
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: raise TestFailed, 'float comparisons failed'
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: raise TestFailed, 'string comparisons failed'
if 0 in [0] and 0 not in [1]: pass
else: raise TestFailed, 'membership test failed'
# 'is' compares identity: two distinct empty-list literals are not the same object.
if None is None and [] is not []: pass
else: raise TestFailed, 'identity test failed'
print '6.4 Numeric types (mostly conversions)'
# Mixed-type numeric comparisons coerce to a common type.
if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
    raise TestFailed, 'int/long/float value not equal'
# int() and long() truncate toward zero rather than rounding.
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: raise TestFailed, 'int() does not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
    raise TestFailed, 'int mul commutativity'
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
# int(long) must round-trip at the edges of the native int range and
# raise OverflowError just beyond them (Python 2 int is machine-sized).
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)+1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
# The most negative int is -sys.maxint - 1, hence the extra x = x-1 step.
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)-1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
# Regression check: %-formatting of a string longer than 100 characters.
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
print '6.5.2 Tuples'
if len(()) != 0: raise TestFailed, 'len(())'
if len((1,)) != 1: raise TestFailed, 'len((1,))'
if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
else: raise TestFailed, 'in/not in tuple'
print '6.5.3 Lists'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
# Assigning a list into a slice of itself must copy, not alias.
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
    raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
    raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
    raise TestFailed, "list self-slice-assign (center)"
print '6.5.3a Additional list operations'
# Indexing and slicing must accept long integers as well as plain ints.
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
# Slice assignment, including the empty insert-at-position form a[1:1].
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
del a[1:4]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1]
if a != []: raise TestFailed, 'list item deletion [-1]'
# Same deletions again, with long-integer indices and slice bounds.
a=range(0,5)
del a[1L:4L]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0L]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1L]
if a != []: raise TestFailed, 'list item deletion [-1]'
a.append(0)
a.append(1)
a.append(2)
if a != [0,1,2]: raise TestFailed, 'list append'
a.insert(0, -2)
a.insert(1, -1)
a.insert(2,0)
if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
if a.count(0) != 2: raise TestFailed, ' list count'
if a.index(0) != 2: raise TestFailed, 'list index'
a.remove(0)
if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
a.reverse()
if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
a.sort()
if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
def revcmp(a, b): return cmp(b, a)
a.sort(revcmp)
if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = range(12)
z.sort(myComparison)
# Test extreme cases with long ints
a = [0,1,2,3,4]
if a[ -pow(2,128L): 3 ] != [0,1,2]:
raise TestFailed, "list slicing with too-small long integer"
if a[ 3: pow(2,145L) ] != [3,4]:
raise TestFailed, "list slicing with too-large long integer"
print '6.6 Mappings == Dictionaries'
d = {}
if d.keys() != []: raise TestFailed, '{}.keys()'
if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
if len(d) != 0: raise TestFailed, 'len({})'
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed, 'len(dict)'
# keys() order is arbitrary, so sort before comparing.
k = d.keys()
k.sort()
if k != ['a', 'b']: raise TestFailed, 'dict keys()'
if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
else: raise TestFailed, 'dict keys()'
if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
# clear(), update() (later calls overwrite earlier keys), and copy().
d = {1:1, 2:2, 3:3}
d.clear()
if d != {}: raise TestFailed, 'dict clear'
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy'
if {}.copy() != {}: raise TestFailed, 'empty dict copy'
# dict.get()
d = {}
if d.get('c') is not None: raise TestFailed, 'missing {} get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg'
d = {'a' : 1, 'b' : 2}
if d.get('c') is not None: raise TestFailed, 'missing dict get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg'
if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg'
if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg'
# dict.setdefault()
d = {}
if d.setdefault('key0') is not None:
    raise TestFailed, 'missing {} setdefault, no 2nd arg'
if d.setdefault('key0') is not None:
    raise TestFailed, 'present {} setdefault, no 2nd arg'
# setdefault with a default returns the stored value, so appends accumulate.
d.setdefault('key', []).append(3)
if d['key'][0] != 3:
    raise TestFailed, 'missing {} setdefault, w/ 2nd arg'
d.setdefault('key', []).append(4)
if len(d['key']) != 2:
    raise TestFailed, 'present {} setdefault, w/ 2nd arg'
# dict.popitem()
# Drain dicts of many sizes via popitem() and check each (key, value) pair;
# keys are built with backtick repr, so int(key) must equal the value.
for copymode in -1, +1:
    # -1: b has same structure as a
    # +1: b is a.copy()
    for log2size in range(12):
        size = 2**log2size
        a = {}
        b = {}
        for i in range(size):
            a[`i`] = i
            if copymode < 0:
                b[`i`] = i
        if copymode > 0:
            b = a.copy()
        for i in range(size):
            ka, va = ta = a.popitem()
            if va != int(ka): raise TestFailed, "a.popitem: %s" % str(ta)
            kb, vb = tb = b.popitem()
            if vb != int(kb): raise TestFailed, "b.popitem: %s" % str(tb)
            if copymode < 0 and ta != tb:
                raise TestFailed, "a.popitem != b.popitem: %s, %s" % (
                    str(ta), str(tb))
        if a: raise TestFailed, 'a not empty after popitems: %s' % str(a)
        if b: raise TestFailed, 'b not empty after popitems: %s' % str(b)
| |
import math
import string
import sys
import struct
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import array
import scipy.stats as ss
import scipy as sp
#import astropy.io.fits as pyfits
#import cosmocalc as cc
import datetime
#import asciitable
import astropy.io.ascii as ascii
import astropy
import astropy.cosmology
####
#### Name: geometry.py
#### Author: Greg Snyder gsnyder@stsci.edu
#### Purpose: Generates parameter setup file for mock survey fields from
#### continuous-volume hydrodynamical simulations using Sunrise (Jonsson 2006).
#### Disclaimer: This code is provided AS-IS with absolutely NO warranty.
#### It is largely meant as a guide rather than ideal code.
#### It can and should be replaced with other lightcone generation techniques.
#### I make no claims about the immediate usability of this code.
#### That said, I am happy to field questions and discuss issues
#### related to this code. And also to help use it.
#### License: ?
#### Credit: Users should cite Snyder et al. (2017)
#### AND ALSO Kitzbichler & White 2007, Henriques et al. 2013, Overzier et al. 2013, etc.
#### These papers were used to create the lightcone generation algorithm below.
####
class Cosmology:
    """Thin wrapper around an astropy flat LambdaCDM cosmology.

    Precomputes redshift-vs-distance grids so callers (e.g. LightCone) can
    convert with fast ``np.interp`` lookups instead of repeated astropy calls.

    NOTE(review): WV is stored but never passed to FlatLambdaCDM — flatness
    fixes Ode0 = 1 - Om0. It appears to be kept only for interface
    compatibility with the older cosmocalc-based code; confirm before removal.
    """

    def __init__(self, H0=70.0, WM=0.27, WV=0.73, WB=0.0456):
        # Keep the raw input parameters exactly as supplied.
        self.H0 = H0
        self.WM = WM
        self.WV = WV
        self.WB = WB
        self.thisc = astropy.cosmology.FlatLambdaCDM(H0=H0, Om0=WM, Ob0=WB)
        # Log-spaced redshift grid over z = 1e-3 .. 1e2 with the matching
        # comoving and angular-diameter distances in Mpc.
        self.redshift_grid = np.logspace(-3, 2, 100)
        self.comoving_mpc_grid = self.thisc.comoving_distance(self.redshift_grid).value
        self.DA_mpc_grid = self.thisc.angular_diameter_distance(self.redshift_grid).value
class ReplicatedBox:
    """One periodic replication of the simulation box along the sight line.

    Records the box origin in the lab frame and the point where the viewing
    ray enters it; additional per-box attributes (egress points, redshifts,
    etc.) are attached later by the lightcone construction code.
    """

    def __init__(self, v_lab, v_ingress):
        # Note the deliberate rename: the lab-frame origin is exposed as
        # ``v_origin`` even though the parameter is called ``v_lab``.
        self.v_origin = v_lab
        self.v_ingress = v_ingress
class LightCone:
    """A mock-survey lightcone built by replicating a periodic simulation box."""

    def __init__(self, boxSize, cosmology, name="A Lightcone"):
        """Store the geometry inputs; the cone itself is built by BasicCone().

        :param boxSize: comoving side length of the simulation box.
        :param cosmology: Cosmology instance providing distance/redshift grids.
        :param name: human-readable label used in printouts and file headers.
        """
        self.name = name
        self.cosmology = cosmology
        self.L = boxSize
        # Corner vectors of the cone's far face — allocated here (contents
        # uninitialized) and filled in by BasicCone().
        self.v1 = np.ndarray(shape=(3))
        self.v2 = np.ndarray(shape=(3))
        self.v3 = np.ndarray(shape=(3))
        self.v4 = np.ndarray(shape=(3))
        # ReplicatedBox instances in traversal order, appended by ComputeBoxes().
        self.boxlist = []
def BasicCone(self, n, m, namelist, zlist, manual_dist_limit=0.0, manual_fov_arcmin=0.0):
    """Configure the cone for viewing direction (n, m, n*m) and build its boxes.

    Follows the Kitzbichler & White (2007)-style construction: integer (n, m)
    choices give a long thin cone whose sight line does not revisit the same
    box replication until comoving distance |(n, m, n*m)| * L.

    :param n, m: integers defining the viewing direction (n, m, n*m).
    :param namelist: snapshot labels, parallel to ``zlist``.
    :param zlist: snapshot redshifts (numpy array).
    :param manual_dist_limit: comoving depth override [Mpc]; 0.0 means
        "stop at the first repeat distance".
    :param manual_fov_arcmin: square field-of-view override [arcmin];
        0.0 means "use the cone's delta_b opening angle".
    """
    self.n = 1.0*n
    self.m = 1.0*m
    self.namelist = namelist
    self.zlist = zlist
    # Comoving distance at which the sight line first revisits a replication.
    self.dist_firstRep = np.linalg.norm(np.asarray([self.n,self.m,self.n*self.m])*self.L)
    self.dist_limit = manual_dist_limit
    if manual_dist_limit==0.0:
        self.dist_limit = self.dist_firstRep
    # Redshift of the first-repeat distance via the cosmology's interp grids.
    self.redshift_firstRep = np.interp(self.dist_firstRep,self.cosmology.comoving_mpc_grid,self.cosmology.redshift_grid)
    self.numRep = int(self.n*self.m)
    # Comoving extent of the cone's far face (small-angle geometry).
    self.x_com = np.asarray( [(self.n - 0.5/self.m)*self.L, (self.n + 0.5/self.m)*self.L] )
    self.y_com = np.asarray( [(self.m - 0.5/self.n)*self.L, (self.m + 0.5/self.n)*self.L] )
    self.z_com = np.asarray([self.n*self.m*self.L])
    # Angular extents of the cone in radians.
    self.delta_a_rad = (1.0/(self.n*self.m**2)) #small angle approx?
    self.delta_b_rad = (1.0/(self.m*self.n**2))
    print("WARNING: I'm pretty sure you are assuming that the survey area is small, because I am making some small-angle approximations! If you are looking for surveys of bigger than ~degree scales, please fix me!")
    # Convert the manual FOV from arcmin -> degrees -> radians.
    self.square_fov_rad = (manual_fov_arcmin/60.0)*(math.pi/180.0)
    if manual_fov_arcmin==0.0:
        self.square_fov_rad = self.delta_b_rad
    # Corner vectors of the far-face rectangle.
    self.v1 = np.asarray([(self.x_com)[0],(self.y_com[0]),(self.z_com)[0]])
    self.v2 = np.asarray([(self.x_com)[1],(self.y_com[0]),(self.z_com)[0]])
    self.v3 = np.asarray([(self.x_com)[1],(self.y_com[1]),(self.z_com)[0]])
    self.v4 = np.asarray([(self.x_com)[0],(self.y_com[1]),(self.z_com)[0]])
    self.xaxis = np.asarray([1.0,0.0,0.0])
    # u3: unit vector along the viewing direction; u1, u2 complete an
    # orthonormal basis spanning the image plane.
    self.u3 = np.asarray([(self.n),(self.m),(self.n*self.m)])#/(self.n**2 + self.m**2 + (self.n*self.m)**2)**(0.5)
    self.u3 = self.u3/np.linalg.norm(self.u3)
    self.primaryaxis = np.asarray([0.0,0.0,1.0])
    self.u1 = np.cross(self.u3,self.xaxis)#np.cross(self.xaxis,self.u3)
    self.u1 = self.u1/np.linalg.norm(self.u1)
    self.u2 = np.cross(self.u3,self.u1)
    self.u2 = self.u2/np.linalg.norm(self.u2)
    self.origin=np.asarray([0.0,0.0,0.0])
    # Index of the lowest-redshift snapshot: the ray starts near z ~ 0.
    self.snapindex = np.where(self.zlist == np.min(self.zlist))
    self.BasicInfo()
    self.ComputeBoxes()
def BasicInfo(self):
    """Print a human-readable summary of the cone geometry to stdout."""
    print("\n")
    print("Information about: ", self.name)
    print("\t Comoving Single Box L = ", self.L)
    print("\t Basic info: n,m = ", self.n, self.m)
    print("\t Approx. Comoving distance at first repeat: ", round(self.dist_firstRep,2))
    print("\t Approx. Redshift at first repeat: ", round(self.redshift_firstRep,2))
    print("\t Number of replications: ", self.numRep)
    print(" ")
    print("\t X range [Mpc] = ", self.x_com)
    print("\t Y range [Mpc] = ", self.y_com)
    print("\t Z height [Mpc] = ", self.z_com)
    # Angular extents converted from radians to arcminutes.
    print("\n\t del A, arcmin: {:5.2f}".format(self.delta_a_rad*(180.0/math.pi)*60.0))
    print("\t del B, arcmin: {:5.2f}".format(self.delta_b_rad*(180.0/math.pi)*60.0))
    print("\n\t Direction Unit Vector: ", self.u3)
    print("\t Alpha Unit Vector: ", self.u1)
    print("\t Delta Unit Vector: ", self.u2)
    # If (u1, u2, u3) form a right-handed orthonormal basis, u1 x u2 == u3.
    print("\t Test, should be Direction vector: ", np.cross(self.u1,self.u2))
    print(" ")
def export_runparams(self, filename,follow=False, follow_index=60, swapxy=False , swapxz=False ):
    """Write the Sunrise camera/geometry parameter file for this lightcone.

    Emits a commented header describing the cone, then one row per
    replicated box (all but the last entry of ``self.boxlist``) with
    ingress/egress/camera coordinates in both comoving h^-1 kpc and
    physical kpc; the column meanings are listed in the '## Column' lines.

    :param filename: output path, opened for writing (overwritten).
    :param follow: if True, every row reuses the geometry of
        ``boxlist[follow_index]`` instead of its own box — presumably a
        camera-flight / debugging mode; TODO confirm intended use.
    :param follow_index: which box to follow when ``follow`` is True.
    :param swapxy, swapxz: swap the corresponding components of the
        direction/alpha/delta vectors and the output column ordering.
    """
    # Work on scaled copies so the cone's unit vectors are not mutated.
    dirvector = 1.0*self.u3
    alpha_vector = 1.0*self.u1
    delta_vector = 1.0*self.u2
    # Output component ordering; remapped below if axes are swapped.
    xind=0
    yind=1
    zind=2
    if swapxy==True:
        temp=dirvector[0]
        dirvector[0]=dirvector[1] ; dirvector[1]=temp
        temp=alpha_vector[0]
        alpha_vector[0]=alpha_vector[1] ; alpha_vector[1]=temp
        temp=delta_vector[0]
        delta_vector[0]=delta_vector[1] ; delta_vector[1]=temp
        xind= 1 ; yind=0 ; zind=2
    if swapxz==True:
        temp=dirvector[0]
        dirvector[0]=dirvector[2] ; dirvector[2]=temp
        temp=alpha_vector[0]
        alpha_vector[0]=alpha_vector[2] ; alpha_vector[2]=temp
        temp=delta_vector[0]
        delta_vector[0]=delta_vector[2] ; delta_vector[2]=temp
        xind= 2 ; yind=1 ; zind=0
    f = open(filename,'w')
    # Header: each line is written to the file and echoed to stdout.
    line = '## ' + self.name + ', LightCone Created, '+ str(datetime.date.today()) + '\n' ; f.write(line) ; print(line)
    line = "## Comoving Single Box L = " + str(self.L) +'\n' ; f.write(line) ; print(line)
    # NOTE: this line also defines h (= H0/100) used for unit conversions below.
    line = "## HubbleParam = " + str(self.cosmology.H0/100.0) + '\n' ; f.write(line) ; print(line) ; h = self.cosmology.H0/100.0
    line = "## Basic info: n,m = " +str( self.n) + " , " + str( self.m) + '\n' ; f.write(line) ; print(line)
    line = "## Approx. Comoving distance at first repeat: " + str( round(self.dist_firstRep,6) ) + '\n' ; f.write(line) ; print(line)
    line = "## Approx. Redshift at first repeat: " + str( round(self.redshift_firstRep,6) ) + '\n' ; f.write(line) ; print(line)
    line = "## Number of replications: " + str( self.numRep) + '\n' ; f.write(line) ; print(line)
    line = "## del A, arcmin: {:10.5f}".format(self.delta_a_rad*(180.0/math.pi)*60.0) + '\n' ; f.write(line) ; print(line)
    line = "## del B, arcmin: {:10.5f}".format(self.delta_b_rad*(180.0/math.pi)*60.0) + '\n' ; f.write(line) ; print(line)
    line = "## At 0.04 arcsec/pixel, need > {:6.1f} pixels\n".format(self.square_fov_rad*(180.0/math.pi)*3600.0/0.04) ; f.write(line) ; print(line)
    line = "## Direction Unit Vector: " + str( dirvector ) + '\n' ; f.write(line) ; print(line)
    line = "## Alpha Unit Vector: " + str( alpha_vector ) + '\n' ; f.write(line) ; print(line)
    line = "## Delta Unit Vector: " + str( delta_vector ) + '\n' ; f.write(line) ; print(line)
    # boxlist[-2]: the last fully-traversed box; the final entry is partial.
    line = "## Buffered Cylindricial Radius Maximum: "+str( ((self.boxlist)[-2]).cylinder_radius_approx) + '\n' ; f.write(line) ; print(line)
    line = "## Column 1: ID#\n" ; f.write(line)
    line = "## Column 2: Snapshot Label\n" ; f.write(line)
    line = "## Column 3: Snapshot Redshift\n" ; f.write(line)
    line = "## Column 4: v_Ingress along x [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 5: v_Ingress along y [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 6: v_Ingress along z [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 7: v_Egress along x [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 8: v_Egress along y [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 9: v_Egress along z [Comoving h^-1 kpc]\n" ; f.write(line)
    line = "## Column 10: v_Ingress along x [Physical kpc]\n" ; f.write(line)
    line = "## Column 11: v_Ingress along y [Physical kpc]\n" ; f.write(line)
    line = "## Column 12: v_Ingress along z [Physical kpc]\n" ; f.write(line)
    line = "## Column 13: v_Camera along x [Physical kpc] \n" ; f.write(line)
    line = "## Column 14: v_Camera along y [Physical kpc] \n" ; f.write(line)
    line = "## Column 15: v_Camera along z [Physical kpc] \n" ; f.write(line)
    line = "## Column 16: v_Camera - v_Ingress along x [Physical kpc] \n" ; f.write(line)
    line = "## Column 17: v_Camera - v_Ingress along y [Physical kpc] \n" ; f.write(line)
    line = "## Column 18: v_Camera - v_Ingress along z [Physical kpc] \n" ; f.write(line)
    line = "## Column 19: Square Field of View (smaller axis) at v_Ingress [Physical kpc]\n" ; f.write(line)
    line = "## Column 20: Geometrically-appropriate redshift at center of box\n" ; f.write(line)
    line = "## Column 21: Radius buffered to subtend FOV [Comoving h^-1 kpc]\n" ; f.write(line)
    i=0
    MaxRadSize = ((self.boxlist)[-2]).cylinder_radius_approx
    # One data row per box; the final (partial) box is excluded.
    # NOTE(review): camera_offset and start_distance are assumed to be set
    # by ComputeBoxes (not visible in this view) — confirm.
    for box in (self.boxlist)[:-1]:
        if follow==True:
            followbox = (self.boxlist)[follow_index]
        if follow==False:
            followbox=box
        v_in_snap = followbox.v_ingress_local*1000.0*h#np.mod(box.v_ingress, self.L)*1000.0*h #in comoving kpc h^-1 units
        v_out_snap = followbox.v_egress_local*1000.0*h #np.mod(box.v_egress, self.L)*1000.0*h
        v_in_phys = followbox.v_ingress_local*1000.0/(1.0 + box.mid_z) # in physical kpc
        v_out_phys = followbox.v_egress_local*1000.0/(1.0 + box.mid_z) # in physical kpc
        v_cam_phys = v_in_phys - 1.0*box.camera_offset*1000.0*self.u3/(1.0 + box.mid_z) # in physical kpc, laboratory frame -- does Sunrise translate camera coords too?!?!
        v_cam_cent_phys = v_cam_phys - v_in_phys # in case we want to center on the ingress point
        fov_phys = 2.0*(box.start_distance)*math.sin(self.square_fov_rad/2.0)*1000.0/(1.0 + box.mid_z) #in physical kpc
        RadSize_snap = box.cylinder_radius_approx*1000.0*h #MaxRadSize*1000.0*h
        line = "{:5d} {:4s} {:7.4f} {:10.4f} {:10.4f} {:10.4f}" \
               " {:10.4f} {:10.4f} {:10.4f} {:10.4f} {:10.4f} {:10.4f}" \
               " {:10.4f} {:10.4f} {:10.4f} {:10.4f} {:10.4f} {:10.4f}" \
               " {:10.4f} {:7.4f} {:10.4f}\n".format(i,box.snaplabel,box.snapredshift,
               v_in_snap[xind], v_in_snap[yind], v_in_snap[zind],
               v_out_snap[xind], v_out_snap[yind], v_out_snap[zind],
               v_in_phys[xind], v_in_phys[yind], v_in_phys[zind],
               v_cam_phys[xind], v_cam_phys[yind], v_cam_phys[zind],
               v_cam_cent_phys[xind], v_cam_cent_phys[yind], v_cam_cent_phys[zind],
               fov_phys, box.mid_z, RadSize_snap) ; f.write(line) ; print(line)
        i=i+1
    f.close()
    def ComputeBoxes(self):
        """Trace the line of sight through periodic replications of the box.

        Starting from self.origin, walk the (unit) direction self.u3 one box
        replication at a time.  For each ReplicatedBox in self.boxlist this
        records ingress/egress points (global and box-local), near/mid/far
        redshifts interpolated from self.cosmology's grids, the snapshot whose
        redshift is closest to the segment midpoint, and approximate FOV
        geometry, until self.dist_limit comoving Mpc have been traversed.
        Also accumulates a rough cumulative volume fraction in self.volfrac.
        """
        print("\t Computing camera parameters for lightcone: ", self.name)
        distancetraveled=0.0
        ingress_point=self.origin
        ingress_snapindex = self.snapindex
        # Comoving distance to the starting snapshot's redshift (printed for info only).
        cmpc_from_z0 = np.interp((self.zlist)[ingress_snapindex], self.cosmology.redshift_grid, self.cosmology.comoving_mpc_grid)
        print("cmpc: ", cmpc_from_z0)
        self.boxlist.append(ReplicatedBox(self.origin,ingress_point))
        i=0
        # Nvec holds, per axis, how many box lengths out the next boundary lies.
        Nvec = np.asarray([1.0,1.0,1.0])
        volfrac = 0.0
        while (self.dist_limit - distancetraveled > 1e-10):
            box_i = (self.boxlist)[-1]
            box_i.num = i
            testvec = Nvec*np.asarray([self.L,self.L,self.L]) #boundary to test
            ftest = (testvec - box_i.v_ingress)/self.u3 #propagate to nearest boundary
            factor = np.min(ftest) #how far til we get one exit?
            ind_exit = np.where((ftest - factor) < 1e-10) #which axis/es was it?
            #print i, ftest, ind_exit[0]
            # Box-local coordinates: subtract the current replication's origin.
            box_i.v_ingress_local = box_i.v_ingress - (Nvec - 1.0)*self.L
            box_i.v_egress = box_i.v_ingress + factor*self.u3 #this is where the ray leaves this box
            box_i.v_egress_local = box_i.v_egress - (Nvec-1.0)*self.L
            Nvec[ind_exit[0]] = Nvec[ind_exit[0]] + 1.0 #iterate the boundary along these axes; note generically this could be - 1.0 if using arbitrary start/direction
            olddist = distancetraveled
            distancetraveled = np.linalg.norm(box_i.v_egress)
            mid_dist = olddist + (distancetraveled - olddist)/2.0
            # Redshifts at the near edge, midpoint, and far edge of this segment.
            mid_z = np.interp(mid_dist,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.far_z = np.interp(distancetraveled,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.near_z = np.interp(olddist,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid)
            box_i.mid_z = mid_z #this is used later
            box_i.mid_dist = mid_dist
            # Assign the snapshot whose redshift is closest to the segment midpoint.
            diffs = np.abs(self.zlist - mid_z)
            closest_ind = np.where(diffs == np.min(diffs)) # is this {snapshot selection} the only thing z is used for here?
            box_i.snaplabel = ((self.namelist)[closest_ind[0]])[0]
            box_i.snapredshift = ((self.zlist)[closest_ind[0]])[0]
            box_i.tot_distance_traveled_through = distancetraveled
            box_i.box_distance = (distancetraveled - olddist)
            box_i.start_distance = olddist
            box_i.camera_offset = box_i.start_distance#/(1.0 + box_i.mid_z) actually, let's keep this in co-moving units #distancetraveled/((1.0 + box_i.snapredshift)) - box_i.box_distance/(1.0 + box_i.snapredshift) #=~ olddist/(1+z) ...
            # Half-diagonal of the square FOV (small-angle) with 1% padding.
            box_i.cylinder_radius_approx = ((self.square_fov_rad/2.0)*(2.0**0.5)*1.01)*distancetraveled
            box_i.tot_fov_comoving = (self.square_fov_rad)*distancetraveled #small angle approx...
            #print closest_ind[0]
            self.boxlist.append(ReplicatedBox((Nvec-1.0)*self.L,box_i.v_egress)) #add the new box
            #can update/save some of its basic properties after this
            '''print i, "{:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:12.8f}, {:10.3f}, {:10.3f}, {:5s}".format( np.round_(distancetraveled,3),
            np.round_(self.delta_b_rad*distancetraveled,3),
            np.round_(self.delta_b_rad*np.interp(distancetraveled,self.cosmology.comoving_mpc_grid,self.cosmology.DA_mpc_grid),3),np.round_(np.interp(distancetraveled,self.cosmology.comoving_mpc_grid, self.cosmology.redshift_grid), 3), (self.L*np.round_(self.delta_b_rad*distancetraveled,3)**2)/(self.L**3),mid_dist, mid_z, (self.namelist)[closest_ind[0]])'''
            box_i.approx_volume_comoving = (self.L*np.round_(self.square_fov_rad*distancetraveled,3)**2)/(self.L**3)
            volfrac = volfrac + (self.L*np.round_(self.square_fov_rad*distancetraveled,3)**2)/(self.L**3)
            #, np.round_(box_i.v_ingress-box_i.v_origin,3)
            i=i+1
        self.volfrac = volfrac
        #print "Rough Cumulative Volume Fraction (of single box): ", self.volfrac
if __name__=="__main__":
    print("Exploring some things about setting up lightcones...")
    # Little-h and fiducial box size in comoving Mpc.
    h=0.6774
    L = 20.0/h
    #default HUDF-ish lightcone replication integers (n, m)
    n = 15.0 ; m = 14.0
    fakez = np.logspace(-3,2,100)
    # Angular extents (radians) of the two cone axes for an (n, m) scheme.
    delta_a_rad = (1.0/(n*m**2))
    delta_b_rad = (1.0/(m*n**2))
    skyPixel_arcsec = 0.04 #arcsec, ACS-like plate scale
    print("ideal ACS-ish scale: {:8.2f}".format(skyPixel_arcsec))
    # Pixel counts needed to sample each FOV axis at that plate scale.
    Npix_A = (delta_a_rad*(180.0/math.pi)*3600.0)/skyPixel_arcsec
    Npix_B = (delta_b_rad*(180.0/math.pi)*3600.0)/skyPixel_arcsec
    print("Npix_A: {:10.1f}".format(Npix_A))
    print("Npix_B: {:10.1f}".format(Npix_B))
    # 4 bytes per float32 pixel.
    GB_per_slice = 4.0*Npix_A*Npix_B/1e9
    print("GigaBytes per float: {:7.2f}".format(GB_per_slice))
    redshift = np.logspace(-3, 1, 40)
    Nz = (redshift.shape)[0]
    # Snapshot label vs. redshift table (col1=label, col2=redshift).
    data = ascii.read('tng_snaps_v_redshift.txt')
    zlist=data['col2'].data
    namelist=np.asarray(data['col1'].data,dtype=str)
    cosmology = Cosmology(H0=67.74,WM=0.3089,WV=1.0-0.3089,WB=0.0486)
    # Build the lightcone and export camera parameters for three axis orders.
    hudf_bigbox_wide = LightCone(25.0/h,cosmology,"Deep 25 Mpc")
    hudf_bigbox_wide.BasicCone(12.0, 11.0, namelist, zlist, manual_dist_limit=10000.0) #z~8
    hudf_bigbox_wide.export_runparams('foggie35_12_11_xyz.txt')
    hudf_bigbox_wide.export_runparams('foggie35_12_11_yxz.txt', swapxy=True)
    hudf_bigbox_wide.export_runparams('foggie35_12_11_zyx.txt', swapxz=True)
    mpcgrid = cosmology.comoving_mpc_grid
    zgrid = cosmology.redshift_grid
    # Candidate simulation box sizes (comoving Mpc) for the summary table.
    sizes=[75.0/0.6774,205.0/0.6774,750.0/0.6774,2000.0/0.6774]
    print("{:6s},{:6.0f},{:6.0f},{:6.0f},{:6.0f}".format('box', sizes[0],sizes[1],sizes[2],sizes[3]))

    def _print_cone_row(m, n, angle_scale):
        # One table row: FOV angle 1/(m*n^2) rad converted via angle_scale
        # (60.0 -> arcmin, 1.0 -> degrees), then the redshift reached by a
        # cone of length |(n, m, n*m)| * box_size for each box size.
        fov = 1.0/(m*n**2.0)*(180.0/math.pi)*angle_scale
        zmax = [np.interp(np.linalg.norm(np.asarray([n, m, n*m])*size), mpcgrid, zgrid)
                for size in sizes]
        print("{:6.1f},{:6.2f},{:6.2f},{:6.2f},{:6.2f}".format(
            fov, zmax[0], zmax[1], zmax[2], zmax[3]))

    # Same table the original emitted via six copy-pasted print blocks.
    _print_cone_row(10.0, 11.0, 60.0)
    _print_cone_row(8.0, 9.0, 60.0)
    _print_cone_row(6.0, 7.0, 60.0)
    _print_cone_row(5.0, 6.0, 60.0)
    # NOTE(review): the original printed the last two rows in degrees
    # (no *60.0 factor); preserved here — confirm that was intentional.
    _print_cone_row(2.0, 3.0, 1.0)
    _print_cone_row(2.0, 1.0, 1.0)
| |
import yaml
import random
import sys
import pprint
from decorator import decorator
from line_profiler import LineProfiler
# ANSI terminal escape codes used by debug_print to colorize output.
# 'reset all' restores the terminal's default text attributes.
color_ansi = {'yellow': '\x1b[33m',
              'red': '\x1b[31m',
              'blue': '\x1b[34m',
              'green': '\x1b[32m',
              'white': '\x1b[37m',
              'black': '\x1b[30m',
              'purple': '\x1b[35m',
              'reset all': '\x1b[0m'}
@decorator
def profile_each_line(func, *args, **kwargs):
    """
    Decorator: run *func* under line_profiler and print its per-line
    timing statistics to stdout after the call, even when it raises.
    Returns whatever *func* returns.
    """
    profiler = LineProfiler()
    wrapped = profiler(func)
    try:
        return wrapped(*args, **kwargs)
    finally:
        profiler.print_stats()
def get_supported_apps(apps_path='apps/'):
    """
    Returns a list of strings corresponding to the app_id's that are fully operational in the learning library.
    Usage: ::\n
        app_id_list = utils.get_supported_apps()
        print app_id_list
        >>> ['StochasticBanditsPureExploration', 'DuelingBanditsPureExploration', 'StochasticLinearBanditsExploreExploit', 'PoolBasedTripletMDS']
    """
    import os
    parent = os.path.dirname(apps_path)
    # First os.walk entry lists the immediate subdirectories of `parent`;
    # hidden ('.') and private ('_') directories are not apps.
    _, subdirs, _ = next(os.walk(parent))
    return [name for name in subdirs if name[0] not in {'.', '_'}]
def get_app(app_id, exp_uid, db, ell):
    """
    Returns an object corresponding to the app_id that contains methods like initExp,getQuery,etc.
    Usage: ::\n
        app = utils.get_app(app_id)
        print app
        >>> <next.apps.StochasticBanditsPureExploration.StochasticBanditsPureExploration.StochasticBanditsPureExploration object at 0x103c9dcd0>
    """
    # Inputs sometimes arrive as unicode, which breaks downstream lookups.
    app_id = str(app_id)
    module = __import__('next.apps.App', fromlist=[''])
    app_class = getattr(module, 'App')
    return app_class(app_id, exp_uid, db, ell)
def get_app_alg(app_id,alg_id):
    """
    Returns an instantiated algorithm object (class MyAlg) corresponding to alg_id.

    Note that each algorithm (with an alg_id) is a child of an app (with an
    app_id), hence the app_id input.
    Usage: ::\n
        alg = utils.get_app_alg(app_id,alg_id)
        print alg
        >>> <next.apps.PoolBasedTripletMDS.RandomSampling.RandomSampling.RandomSampling object at 0x103cb7e10>
    """
    # Coerce to str: inputs sometimes arrive unicode-formatted, which
    # causes errors inside __import__.
    app_id = str(app_id)
    alg_id = str(alg_id)
    # Fixed: the original passed alg_id a second time to .format(), but the
    # template has only two placeholders; the extra argument was dead.
    next_path = 'apps.{}.algs.{}'.format(app_id, alg_id)
    alg_module = __import__(next_path, fromlist=[''])
    alg_class = getattr(alg_module, 'MyAlg')
    return alg_class()
def getDocUID(exp_uid,alg_uid=None):
    """
    Return the namespace identifier for an app or algorithm instance.

    Each instance of an app (with an (app_id,exp_uid) pair) and an algorithm
    (with an (app_id,exp_uid,alg_id,alg_uid) tuple) gets its own namespace:
    just exp_uid for an app, or 'exp_uid-alg_uid' for an algorithm.
    Usage::\n
        print utils.getDocUID(exp_uid)
        >>> 'eee9d58c61d580029113ba593446d23a'
        print utils.getDocUID(exp_uid,alg_uid)
        >>> 'eee9d58c61d580029113ba593446d23a-f081d374abac6c009f5a74877f8b9f3c'
    """
    # `is None` rather than `== None`: identity test, immune to odd __eq__.
    if alg_uid is None:
        return exp_uid
    return exp_uid + "-" + alg_uid
import os
def getNewUID():
    """
    Returns a length-32 string of random hex generated from machine state
    (os.urandom) - good enough for cryptography.
    Probability of collision is 1 in 340282366920938463463374607431768211456.
    Used for unique identifiers all over the system.
    """
    import binascii
    # bytes.encode('hex') exists only on Python 2; binascii.hexlify works on
    # both 2 and 3. decode('ascii') yields a text string either way.
    uid = binascii.hexlify(os.urandom(16)).decode('ascii')
    return uid
from datetime import datetime
def datetimeNow(format='datetime'):
    """
    Returns the current datetime in the format used throughout the system.
    For consistency, one should ALWAYS call this method, do not make your own call to datetime.

    Pass format='string' to get the canonical string form via datetime2str;
    any other value returns the raw datetime object.
    Usage: ::\n
        utils.datetimeNow()
        >>> datetime.datetime(2015, 2, 17, 11, 5, 56, 27822)
    """
    now = datetime.now()
    return datetime2str(now) if format == 'string' else now
def datetime2str(obj_datetime):
    """
    Converts a datetime object into the canonical string form used in the system.
    For consistency, one should never use their own method of converting to string, always use this method.
    Usage: ::\n
        date = utils.datetimeNow()
        date_str = utils.datetime2str(date)
        print date_str
        >>> '2015-02-17 11:11:07.489925'
    """
    # Empty format spec delegates to str() for datetime objects.
    return '{}'.format(obj_datetime)
def str2datetime(str_time):
    """
    Parse a system-format datetime string back into a datetime object.
    (The original docstring described the inverse operation.)

    Accepts both '%Y-%m-%d %H:%M:%S.%f' and the microsecond-less
    '%Y-%m-%d %H:%M:%S'. For consistency, always use this method rather
    than parsing datetimes ad hoc.
    Usage: ::\n
        date = utils.datetimeNow()
        date_str = utils.datetime2str(date)
        utils.str2datetime(date_str)
    """
    try:
        return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        # No fractional seconds. A bare `except:` here used to mask real
        # bugs (e.g. passing a non-string); only catch the parse failure.
        return datetime.strptime(str_time,'%Y-%m-%d %H:%M:%S')
def _get_filename(target):
    """Return the filename key of a target record (its 'alt_description')."""
    return target['alt_description']

def filenames_to_ids(filenames, targets):
    """
    Recursively map filenames to target_ids using the targets list.

    `filenames` may be a flat list of names or a nested structure of
    lists/tuples/dicts of names; the container shape is preserved in the
    result. Raises ValueError when a filename has no matching target.
    """
    if not filenames:
        # Empty input: nothing to map (the original raised IndexError here).
        return []
    if isinstance(filenames[0], list):
        return [filenames_to_ids(files, targets) for files in filenames]
    if isinstance(filenames[0], tuple):
        return tuple(filenames_to_ids(files, targets) for files in filenames)
    if isinstance(filenames[0], dict):
        return {k: filenames_to_ids(v, targets) for k, v in filenames.items()}
    ids = {_get_filename(target): target['target_id'] for target in targets}
    not_in_targets = set(filenames) - set(ids)
    if not_in_targets:
        # Fixed: the original message was garbled ("in the not found the list").
        msg = 'Filenames specified in init.yaml not found in the list of targets: "{}"'
        raise ValueError(msg.format(not_in_targets))
    return [ids[filename] for filename in filenames]
def debug_print(*args, **kwargs):
    """
    Pretty-print each argument to stdout wrapped in an ANSI color.

    Strings are split on newlines and each line is pprint-formatted;
    other objects are pprint-formatted directly. A blank line is printed
    at the end. kwargs: color -- a key of color_ansi (default 'yellow').
    """
    color = kwargs.get('color', 'yellow')
    start = color_ansi[color]
    reset = color_ansi['reset all']
    for a in args:
        if isinstance(a, str):
            for line in a.split('\n'):
                for line2 in pprint.pformat(line).split('\n'):
                    # Parenthesized single-argument print is valid on both
                    # Python 2 and 3; the bare `print` statement was 2-only.
                    print('{}{}{}'.format(start, line2, reset))
        else:
            for line in pprint.pformat(a).split('\n'):
                print('{}{}{}'.format(start, line, reset))
    print('')
def random_string(length=20):
    """Return a random string of lowercase letters of the given length."""
    alphabet = 'qwertyuiopasdfghkjlzxcvbnm'
    return ''.join(random.choice(alphabet) for _ in range(length))
import time
def timeit(f):
    """
    Utility used to time the duration of code execution. This script can be composed with any other script.

    timeit(f)(*args, **kw) returns a tuple (f's result, elapsed seconds).
    Usage::\n
        def f(n):
            return n**n
        def g(n):
            return n,n**n
        answer0,dt = timeit(f)(3)
        answer1,answer2,dt = timeit(g)(3)
    """
    def timed(*args, **kw):
        start = time.time()
        out = f(*args, **kw)
        elapsed = time.time() - start
        # NOTE: callers that unpack `a, b, dt` rely on tuple results
        # flattening; see the original TODO about normalizing call sites.
        return out, elapsed
    return timed
| |
"""engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/msvc.py 3897 2009/01/13 06:45:54 scons"
import os.path
import re
import string
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
# Source-file suffixes routed to the C compiler vs. the C++ compiler.
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def _parse_msvc7_overrides(version,platform):
    """ Parse any overridden defaults for MSVS directory locations
    in MSVS .NET.

    Returns a dict mapping upper-cased directory kinds (e.g. 'INCLUDE',
    'LIBRARY', 'PATH') to the user's configured path strings, read either
    from the per-user VCComponents.dat file or from the registry defaults.
    Raises SCons.Errors.InternalError when the registry is unreadable or
    the expected entries are missing.
    """
    # First, we get the shell folder for this user:
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError, "No Windows registry module was found"
    comps = ""
    try:
        (comps, t) = SCons.Util.RegGetValue(SCons.Util.HKEY_CURRENT_USER,
                                            r'Software\Microsoft\Windows\CurrentVersion' +\
                                            r'\Explorer\Shell Folders\Local AppData')
    except SCons.Util.RegError:
        raise SCons.Errors.InternalError, \
              "The Local AppData directory was not found in the registry."
    # Per-user VC++ directory overrides live in VCComponents.dat beneath
    # the Local AppData folder found above.
    comps = comps + '\\Microsoft\\VisualStudio\\' + version + '\\VCComponents.dat'
    dirs = {}
    if os.path.exists(comps):
        # now we parse the directories from this file, if it exists.
        # We only look for entries after:
        # [VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories],
        # since this file could contain a number of things...
        lines = None
        try:
            import codecs
        except ImportError:
            pass
        else:
            try:
                # The file is normally UTF-16; fall back to UTF-8, then to
                # a plain byte read if codecs is unavailable entirely.
                f = codecs.open(comps, 'r', 'utf16')
                encoder = codecs.getencoder('ascii')
                lines = map(lambda l, e=encoder: e(l)[0], f.readlines())
            except (LookupError, UnicodeError):
                lines = codecs.open(comps, 'r', 'utf8').readlines()
        if lines is None:
            lines = open(comps, 'r').readlines()
        if 'x86' == platform: platform = 'Win32'
        found = 0
        for line in lines:
            # NOTE(review): line.strip() discards its result, so the
            # comparisons below see the unstripped line — confirm intended.
            line.strip()
            if line.find(r'[VC\VC_OBJECTS_PLATFORM_INFO\%s\Directories]'%platform) >= 0:
                found = 1
            elif line == '' or line[:1] == '[':
                # A new INI-style section header ends the Directories block.
                found = 0
            elif found == 1:
                # Entries look like 'Include Dirs=...'; strip ' Dirs' so the
                # key matches the names callers look up.
                kv = line.split('=', 1)
                if len(kv) == 2:
                    (key, val) = kv
                    key = key.replace(' Dirs','')
                    dirs[key.upper()] = val
        f.close()
    else:
        # since the file didn't exist, we have only the defaults in
        # the registry to work with.
        if 'x86' == platform: platform = 'Win32'
        try:
            K = 'SOFTWARE\\Microsoft\\VisualStudio\\' + version
            K = K + r'\VC\VC_OBJECTS_PLATFORM_INFO\%s\Directories'%platform
            k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,K)
            i = 0
            while 1:
                try:
                    (key,val,t) = SCons.Util.RegEnumValue(k,i)
                    key = key.replace(' Dirs','')
                    dirs[key.upper()] = val
                    i = i + 1
                except SCons.Util.RegError:
                    # Enumerating past the last value raises RegError.
                    break
        except SCons.Util.RegError:
            # if we got here, then we didn't find the registry entries:
            raise SCons.Errors.InternalError, "Unable to find MSVC paths in the registry."
    return dirs
def _parse_msvc8_overrides(version,platform,suite):
    """ Parse any overridden defaults for MSVC directory locations
    in MSVC 2005.

    Returns a dict mapping 'INCLUDE'/'LIBRARY'/'PATH' to the directories
    configured in the user's VS8 AutoSave settings XML file. Raises
    SCons.Errors.InternalError if the registry or the settings file
    cannot be located.
    """
    # In VS8 the user can change the location of the settings file that
    # contains the include, lib and binary paths. Try to get the location
    # from registry
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError, "No Windows registry module was found"
    # XXX This code assumes anything that isn't EXPRESS uses the default
    # registry key string. Is this really true for all VS suites?
    if suite == 'EXPRESS':
        s = '\\VCExpress\\'
    else:
        s = '\\VisualStudio\\'
    settings_path = ""
    try:
        (settings_path, t) = SCons.Util.RegGetValue(SCons.Util.HKEY_CURRENT_USER,
                                                    r'Software\Microsoft' + s + version +\
                                                    r'\Profile\AutoSaveFile')
        settings_path = settings_path.upper()
    except SCons.Util.RegError:
        raise SCons.Errors.InternalError, \
              "The VS8 settings file location was not found in the registry."
    # Look for potential environment variables in the settings path
    if settings_path.find('%VSSPV_VISUALSTUDIO_DIR%') >= 0:
        # First replace a special variable named %vsspv_visualstudio_dir%
        # that is not found in the OSs environment variables...
        try:
            (value, t) = SCons.Util.RegGetValue(SCons.Util.HKEY_CURRENT_USER,
                                                r'Software\Microsoft' + s + version +\
                                                r'\VisualStudioLocation')
            settings_path = settings_path.replace('%VSSPV_VISUALSTUDIO_DIR%', value)
        except SCons.Util.RegError:
            raise SCons.Errors.InternalError, "The VS8 settings file location was not found in the registry."
    if settings_path.find('%') >= 0:
        # Collect global environment variables
        env_vars = {}
        # Read all the global environment variables of the current user
        k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_CURRENT_USER, r'Environment')
        i = 0
        while 1:
            try:
                (key,val,t) = SCons.Util.RegEnumValue(k,i)
                env_vars[key.upper()] = val.upper()
                i = i + 1
            except SCons.Util.RegError:
                # Enumerating past the last value raises RegError.
                break
        # And some more variables that are not found in the registry
        env_vars['USERPROFILE'] = os.getenv('USERPROFILE')
        env_vars['SystemDrive'] = os.getenv('SystemDrive')
        # Keep substituting %VAR% tokens until a full pass over env_vars
        # makes no replacement (handles nested variable references).
        found_var = 1
        while found_var:
            found_var = 0
            for env_var in env_vars:
                if settings_path.find(r'%' + env_var + r'%') >= 0:
                    settings_path = settings_path.replace(r'%' + env_var + r'%', env_vars[env_var])
                    found_var = 1
    dirs = {}
    if os.path.exists(settings_path):
        # now we parse the directories from this file, if it exists.
        import xml.dom.minidom
        doc = xml.dom.minidom.parse(settings_path)
        user_settings = doc.getElementsByTagName('UserSettings')[0]
        tool_options = user_settings.getElementsByTagName('ToolsOptions')[0]
        tool_options_categories = tool_options.getElementsByTagName('ToolsOptionsCategory')
        # Map the VS option names onto the environment-variable-style keys
        # the rest of this module expects.
        environment_var_map = {
            'IncludeDirectories' : 'INCLUDE',
            'LibraryDirectories' : 'LIBRARY',
            'ExecutableDirectories' : 'PATH',
        }
        for category in tool_options_categories:
            category_name = category.attributes.get('name')
            if category_name is not None and category_name.value == 'Projects':
                subcategories = category.getElementsByTagName('ToolsOptionsSubCategory')
                for subcategory in subcategories:
                    subcategory_name = subcategory.attributes.get('name')
                    if subcategory_name is not None and subcategory_name.value == 'VCDirectories':
                        properties = subcategory.getElementsByTagName('PropertyValue')
                        for property in properties:
                            property_name = property.attributes.get('name')
                            if property_name is None:
                                continue
                            var_name = environment_var_map.get(property_name)
                            if var_name:
                                data = property.childNodes[0].data
                                # Values may be '|'-separated dest/value
                                # pairs; only the Win32 destination is kept.
                                value_list = string.split(data, '|')
                                if len(value_list) == 1:
                                    dirs[var_name] = value_list[0]
                                else:
                                    while value_list:
                                        dest, value = value_list[:2]
                                        del value_list[:2]
                                        # ToDo: Support for destinations
                                        # other than Win32
                                        if dest == 'Win32':
                                            dirs[var_name] = value
                                            break
    else:
        # There are no default directories in the registry for VS8 Express :(
        raise SCons.Errors.InternalError, "Unable to find MSVC paths in the registry."
    return dirs
def _get_msvc7_path(path, version, platform):
    """
    Get Visual Studio directories from version 7 (MSVS .NET)
    (it has a different registry structure than versions before it)

    `path` names the directory kind to fetch (e.g. 'INCLUDE', 'LIBRARY',
    'PATH'). Returns an os.pathsep-joined string with $(VAR) placeholders
    expanded; raises SCons.Errors.InternalError when the kind is not
    configured.
    """
    # first, look for a customization of the default values in the
    # registry: These are sometimes stored in the Local Settings area
    # for Visual Studio, in a file, so we have to parse it.
    dirs = _parse_msvc7_overrides(version,platform)
    if dirs.has_key(path):
        p = dirs[path]
    else:
        raise SCons.Errors.InternalError, \
              "Unable to retrieve the %s path from MS VC++."%path
    # collect some useful information for later expansions...
    paths = SCons.Tool.msvs.get_msvs_install_dirs(version)
    # expand the directory path variables that we support. If there
    # is a variable we don't support, then replace that entry with
    # "---Unknown Location VSInstallDir---" or something similar, to clue
    # people in that we didn't find something, and so env expansion doesn't
    # do weird things with the $(xxx)'s
    s = re.compile('\$\(([a-zA-Z0-9_]+?)\)')
    def repl(match, paths=paths):
        # Resolve a $(VAR) token: MSVS install dirs first, then the
        # process environment, else a visible "Unknown Location" marker.
        key = string.upper(match.group(1))
        if paths.has_key(key):
            return paths[key]
        else:
            # Now look in the global environment variables
            envresult = os.getenv(key)
            if not envresult is None:
                return envresult + '\\'
            else:
                return '---Unknown Location %s---' % match.group()
    rv = []
    for entry in p.split(os.pathsep):
        entry = s.sub(repl,entry).rstrip('\n\r')
        rv.append(entry)
    return string.join(rv,os.pathsep)
def _get_msvc8_path(path, version, platform, suite):
    """
    Get Visual Studio directories from version 8 (MSVS 2005)
    (it has a different registry structure than versions before it)

    Same contract as _get_msvc7_path, plus a `suite` argument (e.g.
    'EXPRESS') selecting which VS8 edition's settings to read. Unlike the
    VS7 variant, unknown $(VAR) tokens are NOT looked up in the process
    environment.
    """
    # first, look for a customization of the default values in the
    # registry: These are sometimes stored in the Local Settings area
    # for Visual Studio, in a file, so we have to parse it.
    dirs = _parse_msvc8_overrides(version, platform, suite)
    if dirs.has_key(path):
        p = dirs[path]
    else:
        raise SCons.Errors.InternalError, \
              "Unable to retrieve the %s path from MS VC++."%path
    # collect some useful information for later expansions...
    paths = SCons.Tool.msvs.get_msvs_install_dirs(version, suite)
    # expand the directory path variables that we support. If there
    # is a variable we don't support, then replace that entry with
    # "---Unknown Location VSInstallDir---" or something similar, to clue
    # people in that we didn't find something, and so env expansion doesn't
    # do weird things with the $(xxx)'s
    s = re.compile('\$\(([a-zA-Z0-9_]+?)\)')
    def repl(match, paths=paths):
        # Resolve a $(VAR) token against the MSVS install dirs only.
        key = string.upper(match.group(1))
        if paths.has_key(key):
            return paths[key]
        else:
            return '---Unknown Location %s---' % match.group()
    rv = []
    for entry in p.split(os.pathsep):
        entry = s.sub(repl,entry).rstrip('\n\r')
        rv.append(entry)
    return string.join(rv,os.pathsep)
def get_msvc_path(env, path, version):
    """
    Get a list of visualstudio directories (include, lib or path).
    Return a string delimited by the os.pathsep separator (';'). An
    exception will be raised if unable to access the registry or
    appropriate registry keys not found.
    """
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError, "No Windows registry module was found"
    # normalize the case for comparisons (since the registry is case
    # insensitive)
    path = string.upper(path)
    if path=='LIB':
        path= 'LIBRARY'
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(version)
    if version_num >= 8.0:
        platform = env.get('MSVS8_PLATFORM', 'x86')
        suite = SCons.Tool.msvs.get_default_visualstudio8_suite(env)
    else:
        platform = 'x86'
    # VS7 and VS8 store their configuration differently from older
    # releases; delegate to the version-specific helpers.
    if version_num >= 8.0:
        return _get_msvc8_path(path, str(version_num), platform, suite)
    elif version_num >= 7.0:
        return _get_msvc7_path(path, str(version_num), platform)
    # MSVC 6 and earlier: read the value straight out of the DevStudio
    # registry hive, preferring per-user over per-machine settings.
    path = string.upper(path + ' Dirs')
    K = ('Software\\Microsoft\\Devstudio\\%s\\' +
         'Build System\\Components\\Platforms\\Win32 (x86)\\Directories') % \
        (version)
    for base in (SCons.Util.HKEY_CURRENT_USER,
                 SCons.Util.HKEY_LOCAL_MACHINE):
        try:
            k = SCons.Util.RegOpenKeyEx(base,K)
            i = 0
            while 1:
                try:
                    (p,v,t) = SCons.Util.RegEnumValue(k,i)
                    if string.upper(p) == path:
                        return v
                    i = i + 1
                except SCons.Util.RegError:
                    # End of value enumeration for this hive.
                    break
        except SCons.Util.RegError:
            pass
    # if we got here, then we didn't find the registry entries:
    raise SCons.Errors.InternalError, "The %s path was not found in the registry."%path
def _get_msvc6_default_paths(version, use_mfc_dirs):
    """Return a 3-tuple of (INCLUDE, LIB, PATH) as the values of those
    three environment variables that should be set in order to execute
    the MSVC 6.0 tools properly, if the information wasn't available
    from the registry.

    `use_mfc_dirs` truthy prepends the ATL/MFC include and lib dirs.
    """
    MVSdir = None
    paths = {}
    exe_path = ''
    lib_path = ''
    include_path = ''
    try:
        paths = SCons.Tool.msvs.get_msvs_install_dirs(version)
        MVSdir = paths['VSINSTALLDIR']
    except (SCons.Util.RegError, SCons.Errors.InternalError, KeyError):
        # Registry lookup failed: fall back to MSDEVDIR or the stock
        # installation directory.
        if os.environ.has_key('MSDEVDIR'):
            MVSdir = os.path.normpath(os.path.join(os.environ['MSDEVDIR'],'..','..'))
        else:
            MVSdir = r'C:\Program Files\Microsoft Visual Studio'
    if MVSdir:
        if SCons.Util.can_read_reg and paths.has_key('VCINSTALLDIR'):
            MVSVCdir = paths['VCINSTALLDIR']
        else:
            MVSVCdir = os.path.join(MVSdir,'VC98')
        MVSCommondir = r'%s\Common' % MVSdir
        if use_mfc_dirs:
            mfc_include_ = r'%s\ATL\include;%s\MFC\include;' % (MVSVCdir, MVSVCdir)
            mfc_lib_ = r'%s\MFC\lib;' % MVSVCdir
        else:
            mfc_include_ = ''
            mfc_lib_ = ''
        include_path = r'%s%s\include' % (mfc_include_, MVSVCdir)
        lib_path = r'%s%s\lib' % (mfc_lib_, MVSVCdir)
        # Tools directory name depends on the Windows family (NT vs. 9x).
        if os.environ.has_key('OS') and os.environ['OS'] == "Windows_NT":
            osdir = 'WINNT'
        else:
            osdir = 'WIN95'
        exe_path = r'%s\tools\%s;%s\MSDev98\bin;%s\tools;%s\bin' % (MVSCommondir, osdir, MVSCommondir, MVSCommondir, MVSVCdir)
    return (include_path, lib_path, exe_path)
def _get_msvc7_default_paths(env, version, use_mfc_dirs):
    """Return a 3-tuple of (INCLUDE, LIB, PATH) as the values of those
    three environment variables that should be set in order to execute
    the MSVC .NET tools properly, if the information wasn't available
    from the registry.

    `use_mfc_dirs` truthy prepends the atlmfc include and lib dirs;
    Framework SDK directories are appended when found in the registry.
    """
    MVSdir = None
    paths = {}
    exe_path = ''
    lib_path = ''
    include_path = ''
    try:
        paths = SCons.Tool.msvs.get_msvs_install_dirs(version)
        MVSdir = paths['VSINSTALLDIR']
    except (KeyError, SCons.Util.RegError, SCons.Errors.InternalError):
        # Registry lookup failed: fall back to VSCOMNTOOLS or the stock
        # installation directory.
        if os.environ.has_key('VSCOMNTOOLS'):
            MVSdir = os.path.normpath(os.path.join(os.environ['VSCOMNTOOLS'],'..','..'))
        else:
            # last resort -- default install location
            MVSdir = r'C:\Program Files\Microsoft Visual Studio .NET'
    if MVSdir:
        if SCons.Util.can_read_reg and paths.has_key('VCINSTALLDIR'):
            MVSVCdir = paths['VCINSTALLDIR']
        else:
            MVSVCdir = os.path.join(MVSdir,'Vc7')
        MVSCommondir = r'%s\Common7' % MVSdir
        if use_mfc_dirs:
            mfc_include_ = r'%s\atlmfc\include;' % MVSVCdir
            mfc_lib_ = r'%s\atlmfc\lib;' % MVSVCdir
        else:
            mfc_include_ = ''
            mfc_lib_ = ''
        include_path = r'%s%s\include;%s\PlatformSDK\include' % (mfc_include_, MVSVCdir, MVSVCdir)
        lib_path = r'%s%s\lib;%s\PlatformSDK\lib' % (mfc_lib_, MVSVCdir, MVSVCdir)
        exe_path = r'%s\IDE;%s\bin;%s\Tools;%s\Tools\bin' % (MVSCommondir,MVSVCdir, MVSCommondir, MVSCommondir )
        # Append .NET Framework SDK and runtime directories when known.
        if SCons.Util.can_read_reg and paths.has_key('FRAMEWORKSDKDIR'):
            include_path = include_path + r';%s\include'%paths['FRAMEWORKSDKDIR']
            lib_path = lib_path + r';%s\lib'%paths['FRAMEWORKSDKDIR']
            exe_path = exe_path + r';%s\bin'%paths['FRAMEWORKSDKDIR']
        if SCons.Util.can_read_reg and paths.has_key('FRAMEWORKDIR') and paths.has_key('FRAMEWORKVERSION'):
            exe_path = exe_path + r';%s\%s'%(paths['FRAMEWORKDIR'],paths['FRAMEWORKVERSION'])
    return (include_path, lib_path, exe_path)
def _get_msvc8_default_paths(env, version, suite, use_mfc_dirs):
    """Return a 3-tuple of (INCLUDE, LIB, PATH) as the values of those
    three environment variables that should be set in order to execute
    the MSVC 8 tools properly, if the information wasn't available
    from the registry.

    `suite` selects the VS8 edition; `use_mfc_dirs` truthy adds the
    ATL/MFC directories. Paths are collected into lists and joined with
    os.pathsep at the end.
    """
    MVSdir = None
    paths = {}
    exe_paths = []
    lib_paths = []
    include_paths = []
    try:
        paths = SCons.Tool.msvs.get_msvs_install_dirs(version, suite)
        MVSdir = paths['VSINSTALLDIR']
    except (KeyError, SCons.Util.RegError, SCons.Errors.InternalError):
        # Registry lookup failed: fall back to VSCOMNTOOLS or the stock
        # installation directory.
        if os.environ.has_key('VSCOMNTOOLS'):
            MVSdir = os.path.normpath(os.path.join(os.environ['VSCOMNTOOLS'],'..','..'))
        else:
            # last resort -- default install location
            MVSdir = os.getenv('ProgramFiles') + r'\Microsoft Visual Studio 8'
    if MVSdir:
        if SCons.Util.can_read_reg and paths.has_key('VCINSTALLDIR'):
            MVSVCdir = paths['VCINSTALLDIR']
        else:
            MVSVCdir = os.path.join(MVSdir,'VC')
        MVSCommondir = os.path.join(MVSdir, 'Common7')
        include_paths.append( os.path.join(MVSVCdir, 'include') )
        lib_paths.append( os.path.join(MVSVCdir, 'lib') )
        for base, subdir in [(MVSCommondir,'IDE'), (MVSVCdir,'bin'),
                             (MVSCommondir,'Tools'), (MVSCommondir,r'Tools\bin')]:
            exe_paths.append( os.path.join( base, subdir) )
        if paths.has_key('PLATFORMSDKDIR'):
            PlatformSdkDir = paths['PLATFORMSDKDIR']
        else:
            PlatformSdkDir = os.path.join(MVSVCdir,'PlatformSDK')
        platform_include_path = os.path.join( PlatformSdkDir, 'Include' )
        include_paths.append( platform_include_path )
        lib_paths.append( os.path.join( PlatformSdkDir, 'Lib' ) )
        if use_mfc_dirs:
            # MFC/ATL headers ship inside the Platform SDK when one is
            # registered, otherwise under the compiler's atlmfc tree.
            if paths.has_key('PLATFORMSDKDIR'):
                include_paths.append( os.path.join( platform_include_path, 'mfc' ) )
                include_paths.append( os.path.join( platform_include_path, 'atl' ) )
            else:
                atlmfc_path = os.path.join( MVSVCdir, 'atlmfc' )
                include_paths.append( os.path.join( atlmfc_path, 'include' ) )
                lib_paths.append( os.path.join( atlmfc_path, 'lib' ) )
        # Append .NET Framework SDK and runtime directories when known.
        if SCons.Util.can_read_reg and paths.has_key('FRAMEWORKSDKDIR'):
            fwdir = paths['FRAMEWORKSDKDIR']
            include_paths.append( os.path.join( fwdir, 'include' ) )
            lib_paths.append( os.path.join( fwdir, 'lib' ) )
            exe_paths.append( os.path.join( fwdir, 'bin' ) )
        if SCons.Util.can_read_reg and paths.has_key('FRAMEWORKDIR') and paths.has_key('FRAMEWORKVERSION'):
            exe_paths.append( os.path.join( paths['FRAMEWORKDIR'], paths['FRAMEWORKVERSION'] ) )
    include_path = string.join( include_paths, os.pathsep )
    lib_path = string.join(lib_paths, os.pathsep )
    exe_path = string.join(exe_paths, os.pathsep )
    return (include_path, lib_path, exe_path)
def get_msvc_paths(env, version=None, use_mfc_dirs=0):
    """Return a 3-tuple of (INCLUDE, LIB, PATH) as the values
    of those three environment variables that should be set
    in order to execute the MSVC tools properly.

    Registry-configured values win; version-appropriate default install
    locations are used as fallbacks.
    """
    if not version:
        found = SCons.Tool.msvs.get_visualstudio_versions()
        if found:
            version = found[0]  # use highest version by default
        else:
            version = '6.0'
    # Some of the configured directories only appear if the user changes
    # them from the default.  Therefore, deduce the default directories
    # from the MSDev base installation so they can serve as fallbacks.
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(version)
    if version_num >= 8.0:
        suite = SCons.Tool.msvs.get_default_visualstudio8_suite(env)
        defpaths = _get_msvc8_default_paths(env, version, suite, use_mfc_dirs)
    elif version_num >= 7.0:
        defpaths = _get_msvc7_default_paths(env, version, use_mfc_dirs)
    else:
        defpaths = _get_msvc6_default_paths(version, use_mfc_dirs)
    # For each of the three path kinds, prefer the registry value and
    # fall back to the computed default on registry failure.
    results = []
    for keyword, fallback in zip(("include", "lib", "path"), defpaths):
        try:
            results.append(get_msvc_path(env, keyword, version))
        except (SCons.Util.RegError, SCons.Errors.InternalError):
            results.append(fallback)
    return tuple(results)
def get_msvc_default_paths(env, version=None, use_mfc_dirs=0):
    """Return a 3-tuple of (INCLUDE, LIB, PATH) as the values of those
    three environment variables that should be set in order to execute
    the MSVC tools properly. This will only return the default
    locations for the tools, not the values used by MSVS in their
    directory setup area. This can help avoid problems with different
    developers having different settings, and should allow the tools
    to run in most cases."""
    if not version and not SCons.Util.can_read_reg:
        version = '6.0'
    if not version:
        # Best effort: any failure while probing the registry simply
        # leaves `version` unset and we fall through with defaults.
        try:
            version = SCons.Tool.msvs.get_visualstudio_versions()[0]  # use highest version
        except KeyboardInterrupt:
            raise
        except:
            pass
    version_num, suite = SCons.Tool.msvs.msvs_parse_version(version)
    if version_num >= 8.0:
        suite = SCons.Tool.msvs.get_default_visualstudio8_suite(env)
        return _get_msvc8_default_paths(env, version, suite, use_mfc_dirs)
    if version_num >= 7.0:
        return _get_msvc7_default_paths(env, version, use_mfc_dirs)
    return _get_msvc6_default_paths(version, use_mfc_dirs)
def validate_vars(env):
    """Validate the PCH and PCHSTOP construction variables.

    Raises:
        SCons.Errors.UserError: if $PCH is set but $PCHSTOP is missing,
            or if $PCHSTOP is not a string.
    """
    if env.has_key('PCH') and env['PCH']:
        # $PCHSTOP names the header that ends the precompiled portion,
        # so it is mandatory whenever $PCH is in use.
        if not env.has_key('PCHSTOP'):
            # raise E(msg): valid on both Python 2 and 3, unlike the
            # old "raise E, msg" statement form.
            raise SCons.Errors.UserError(
                "The PCHSTOP construction must be defined if PCH is defined.")
        if not SCons.Util.is_String(env['PCHSTOP']):
            raise SCons.Errors.UserError(
                "The PCHSTOP construction variable must be a string: %r" % env['PCHSTOP'])
def pch_emitter(target, source, env):
    """Emitter for the PCH builder: ensure the companion .obj target.

    Returns targets ordered [pch, obj] — $PCHCOM addresses them as
    ${TARGETS[0]} and ${TARGETS[1]}, so the order is significant.
    """
    validate_vars(env)
    pch = None
    obj = None
    for node in target:
        ext = SCons.Util.splitext(str(node))[1]
        if ext == '.pch':
            pch = node
        elif ext == '.obj':
            obj = node
    # No explicit .obj target: derive one alongside the .pch.
    if not obj:
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'
    return ([pch, obj], source)
def object_emitter(target, source, env, parent_emitter):
    """Sets up the PCH dependencies for an object file.

    Delegates normal emitter work to `parent_emitter`, then makes every
    object depend on $PCH so objects rebuild when the precompiled
    header changes.
    """
    validate_vars(env)
    parent_emitter(target, source, env)
    if env.has_key('PCH') and env['PCH']:
        env.Depends(target, env['PCH'])
    return (target, source)
def static_object_emitter(target, source, env):
    # PCH-aware wrapper around the default static-object emitter.
    return object_emitter(target, source, env,
                          SCons.Defaults.StaticObjectEmitter)
def shared_object_emitter(target, source, env):
    # PCH-aware wrapper around the default shared-object emitter.
    return object_emitter(target, source, env,
                          SCons.Defaults.SharedObjectEmitter)
# Action/Builder pair for precompiled headers (.pch).  pch_emitter adds
# the companion .obj target; the source scanner lets the PCH source be
# scanned for #include dependencies like any other source file.
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
                                    emitter=pch_emitter,
                                    source_scanner=SCons.Tool.SourceFileScanner)

# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
                                    src_suffix='.rc',
                                    suffix='.res',
                                    src_builder=[],
                                    source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
    """
    Returns a key to identify unique batches of sources for compilation.

    If batching is enabled (via the $MSVC_BATCH setting), then all
    target+source pairs that use the same action, defined by the same
    environment, and have the same target and source directories, will
    be batched.

    Returning None specifies that the specified target+source should not
    be batched with other compilations.
    """
    batch = env.subst('$MSVC_BATCH')
    if batch in (None, '', '0'):
        # Batching disabled; no key.
        return None
    first_target = target[0]
    first_source = source[0]
    t_base = os.path.splitext(first_target.name)[0]
    s_base = os.path.splitext(first_source.name)[0]
    if t_base != s_base:
        # Differing base names force a separate compilation; no key.
        return None
    return (id(action), id(env), first_target.dir, first_source.dir)
def msvc_output_flag(target, source, env, for_signature):
    """
    Returns the correct /Fo flag for batching.

    If batching is disabled or there's only one source file, return an
    /Fo string that names the target explicitly.  Otherwise name only
    the first target's directory, where the Visual C/C++ compiler will
    put the .obj files.
    """
    if env.subst('$MSVC_BATCH') in (None, '', '0') or len(source) == 1:
        return '/Fo$TARGET'
    # The Visual C/C++ compiler requires a \ at the end of the /Fo
    # option to indicate an output directory.  os.sep is used so the
    # test(s) for this can run on non-Windows systems without a
    # hard-coded backslash messing up command-line argument parsing.
    return '/Fo${TARGET.dir}' + os.sep
# Compile actions for C and C++, static and shared variants.  All four
# share msvc_batch_key so that, when $MSVC_BATCH is enabled, compatible
# target+source pairs are compiled by one cl invocation, limited to
# $CHANGED_TARGETS.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
                              batch_key=msvc_batch_key,
                              targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
                                batch_key=msvc_batch_key,
                                targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
                                  batch_key=msvc_batch_key,
                                  targets='$CHANGED_TARGETS')
def generate(env):
    """Add Builders and construction variables for MSVC++ to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # TODO(batch): shouldn't reach in to cmdgen this way; necessary
    # for now to bypass the checks in Builder.DictCmdGenerator.__call__()
    # and allow .cc and .cpp to be compiled in the same command line.
    static_obj.cmdgen.source_ext_match = False
    shared_obj.cmdgen.source_ext_match = False
    # Register the batching-aware compile actions and PCH-aware emitters
    # for every C and C++ source suffix.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, CAction)
        shared_obj.add_action(suffix, ShCAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, CXXAction)
        shared_obj.add_action(suffix, ShCXXAction)
        static_obj.add_emitter(suffix, static_object_emitter)
        shared_obj.add_emitter(suffix, shared_object_emitter)
    # /Z7 embeds debug info when $PDB is set; the PCH flags use (/Yu)
    # an existing precompiled header when $PCH is set.
    env['CCPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Z7") or ""}'])
    env['CCPCHFLAGS'] = SCons.Util.CLVar(['${(PCH and "/Yu%s /Fp%s"%(PCHSTOP or "",File(PCH))) or ""}'])
    env['_MSVC_OUTPUT_FLAG'] = msvc_output_flag
    env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $CCPCHFLAGS $CCPDBFLAGS'
    env['CC'] = 'cl'
    env['CCFLAGS'] = SCons.Util.CLVar('/nologo')
    env['CFLAGS'] = SCons.Util.CLVar('')
    env['CCCOM'] = '$CC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CFLAGS $CCFLAGS $_CCCOMCOM'
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '$SHCC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCFLAGS $SHCCFLAGS $_CCCOMCOM'
    env['CXX'] = '$CC'
    # $( ... $) keeps /TP out of the command signature.
    env['CXXFLAGS'] = SCons.Util.CLVar('$( /TP $)')
    env['CXXCOM'] = '$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
    env['SHCXXCOM'] = '$SHCXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM'
    env['CPPDEFPREFIX'] = '/D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '/I'
    env['INCSUFFIX'] = ''
    # env.Append(OBJEMITTER = [static_object_emitter])
    # env.Append(SHOBJEMITTER = [shared_object_emitter])
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
    # Resource compiler (.rc -> .res) support.
    env['RC'] = 'rc'
    env['RCFLAGS'] = SCons.Util.CLVar('')
    env['RCSUFFIXES'] = ['.rc', '.rc2']
    env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS $RCFLAGS /fo$TARGET $SOURCES'
    env['BUILDERS']['RES'] = res_builder
    env['OBJPREFIX'] = ''
    env['OBJSUFFIX'] = '.obj'
    env['SHOBJPREFIX'] = '$OBJPREFIX'
    env['SHOBJSUFFIX'] = '$OBJSUFFIX'
    # Seed INCLUDE/LIB/PATH in the execution environment from the
    # detected Visual Studio installation.
    try:
        version = SCons.Tool.msvs.get_default_visualstudio_version(env)
        version_num, suite = SCons.Tool.msvs.msvs_parse_version(version)
        if version_num == 8.0:
            suite = SCons.Tool.msvs.get_default_visualstudio8_suite(env)
        use_mfc_dirs = env.get('MSVS_USE_MFC_DIRS', 0)
        if env.get('MSVS_IGNORE_IDE_PATHS', 0):
            _get_paths = get_msvc_default_paths
        else:
            _get_paths = get_msvc_paths
        include_path, lib_path, exe_path = _get_paths(env, version, use_mfc_dirs)
        # since other tools can set these, we just make sure that the
        # relevant stuff from MSVS is in there somewhere.
        env.PrependENVPath('INCLUDE', include_path)
        env.PrependENVPath('LIB', lib_path)
        env.PrependENVPath('PATH', exe_path)
    except (SCons.Util.RegError, SCons.Errors.InternalError):
        # No usable Visual Studio found; leave the environment as-is.
        pass
    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cc'
    env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
    env['PCHCOM'] = '$CXX $CXXFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Fo${TARGETS[1]} /Yc$PCHSTOP /Fp${TARGETS[0]} $CCPDBFLAGS $PCHPDBFLAGS'
    env['BUILDERS']['PCH'] = pch_builder
    if not env.has_key('ENV'):
        env['ENV'] = {}
    if not env['ENV'].has_key('SystemRoot'):    # required for dlls in the winsxs folders
        env['ENV']['SystemRoot'] = SCons.Platform.win32.get_system_root()
def exists(env):
    """Tool availability check: any MSVS install, or cl on the PATH."""
    if SCons.Tool.msvs.is_msvs_installed():
        # there's at least one version of MSVS installed.
        return 1
    return env.Detect('cl')
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from textwrap import dedent
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.specs import DescendantAddresses
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.target import Target
from pants_test.base_test import BaseTest
# TODO(Eric Ayers) There are methods in BuildFileAddressMapper that are missing
# explicit unit tests: addresses_in_spec_path, spec_to_address, spec_to_addresses
class BuildFileAddressMapperTest(BaseTest):
  """Tests for BuildFileAddressMapper resolve/resolve_spec/scan_addresses.

  Regex patterns passed to assertRaisesRegexp are raw strings so escape
  sequences such as \\s reach the re module unchanged instead of being
  subject to (deprecated) string-escape processing.
  """

  def test_resolve(self):
    build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    address, addressable = self.address_mapper.resolve(Address.parse('//:foo'))
    self.assertIsInstance(address, BuildFileAddress)
    self.assertEqual(build_file, address.build_file)
    self.assertEqual('foo', address.target_name)
    self.assertEqual(address.target_name, addressable.addressed_name)
    self.assertEqual(addressable.addressed_type, Target)

  def test_resolve_spec(self):
    self.add_to_build_file('BUILD', dedent("""
      target(name='foozle')
      target(name='baz')
      """))
    with self.assertRaises(AddressLookupError):
      self.address_mapper.resolve_spec('//:bad_spec')
    dependencies_addressable = self.address_mapper.resolve_spec('//:foozle')
    self.assertEqual(dependencies_addressable.addressed_type, Target)

  def test_scan_addresses(self):
    root_build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    subdir_build_file = self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    subdir_suffix_build_file = self.add_to_build_file('subdir/BUILD.suffix', 'target(name="baz")')
    # This file is not a valid BUILD file name; the scan must skip it.
    with open(os.path.join(self.build_root, 'BUILD.invalid.suffix'), 'w') as invalid_build_file:
      invalid_build_file.write('target(name="foobar")')
    self.assertEquals({BuildFileAddress(root_build_file, 'foo'),
                       BuildFileAddress(subdir_build_file, 'bar'),
                       BuildFileAddress(subdir_suffix_build_file, 'baz')},
                      self.address_mapper.scan_addresses())

  def test_scan_addresses_with_root(self):
    self.add_to_build_file('BUILD', 'target(name="foo")')
    subdir_build_file = self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    subdir_suffix_build_file = self.add_to_build_file('subdir/BUILD.suffix', 'target(name="baz")')
    # Scanning from subdir excludes the top-level //:foo target.
    subdir = os.path.join(self.build_root, 'subdir')
    self.assertEquals({BuildFileAddress(subdir_build_file, 'bar'),
                       BuildFileAddress(subdir_suffix_build_file, 'baz')},
                      self.address_mapper.scan_addresses(root=subdir))

  def test_scan_addresses_with_invalid_root(self):
    # A relative root is rejected.
    with self.assertRaises(BuildFileAddressMapper.InvalidRootError):
      self.address_mapper.scan_addresses(root='subdir')

  def test_raises_invalid_build_file_reference(self):
    # reference a BUILD file that doesn't exist
    with self.assertRaisesRegexp(BuildFileAddressMapper.InvalidBuildFileReference,
                                 r'^.*/non-existent-path does not contain any BUILD files.'
                                 r'\s+when translating spec //non-existent-path:a'):
      self.address_mapper.spec_to_address('//non-existent-path:a')

  def test_raises_address_not_in_one_build_file(self):
    self.add_to_build_file('BUILD', 'target(name="foo")')
    # Create an address that doesn't exist in an existing BUILD file
    address = Address.parse(':bar')
    with self.assertRaisesRegexp(BuildFileAddressMapper.AddressNotInBuildFile,
                                 r'^bar was not found in BUILD files from .*. '
                                 r'Perhaps you meant:'
                                 r'\s+:foo$'):
      self.address_mapper.resolve(address)

  def test_raises_address_not_in_two_build_files(self):
    self.add_to_build_file('BUILD.1', 'target(name="foo1")')
    self.add_to_build_file('BUILD.2', 'target(name="foo2")')
    # Create an address that doesn't exist in an existing BUILD file
    address = Address.parse(':bar')
    with self.assertRaisesRegexp(BuildFileAddressMapper.AddressNotInBuildFile,
                                 r'^bar was not found in BUILD files from .*. '
                                 r'Perhaps you meant one of:'
                                 r'\s+:foo1 \(from BUILD.1\)'
                                 r'\s+:foo2 \(from BUILD.2\)$'):
      self.address_mapper.resolve(address)

  def test_raises_address_invalid_address_error(self):
    with self.assertRaises(BuildFileAddressMapper.InvalidAddressError):
      self.address_mapper.resolve_spec("../foo")

  def test_raises_empty_build_file_error(self):
    # A BUILD file that defines no addressable targets.
    self.add_to_build_file('BUILD', 'pass')
    with self.assertRaises(BuildFileAddressMapper.EmptyBuildFileError):
      self.address_mapper.resolve_spec('//:foo')

  def test_address_lookup_error_hierarchy(self):
    # Every mapper error must be catchable as AddressLookupError.
    self.assertIsInstance(BuildFileAddressMapper.AddressNotInBuildFile(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.EmptyBuildFileError(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.InvalidBuildFileReference(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.InvalidAddressError(), AddressLookupError)
    self.assertIsInstance(BuildFileAddressMapper.BuildFileScanError(), AddressLookupError)
class BuildFileAddressMapperWithIgnoreTest(BaseTest):
  """Verifies that build_ignore_patterns excludes BUILD files from scans."""

  @property
  def build_ignore_patterns(self):
    # Ignore everything under 'subdir' for the tests in this class.
    return ['subdir']

  def test_scan_from_address_mapper(self):
    included_build_file = self.add_to_build_file('BUILD', 'target(name="foo")')
    self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    self.assertEquals({BuildFileAddress(included_build_file, 'foo')}, self.address_mapper.scan_addresses())

  def test_scan_from_context(self):
    self.add_to_build_file('BUILD', 'target(name="foo")')
    self.add_to_build_file('subdir/BUILD', 'target(name="bar")')
    build_graph = self.context().scan()
    self.assertEquals([tgt.address.spec for tgt in build_graph.targets()], ['//:foo'])
class BuildFileAddressMapperScanTest(BaseTest):
  """Tests for scan_specs over a small synthetic BUILD-file tree.

  FAIL_FAST_RE is a raw string (like NO_FAIL_FAST_RE already was) so
  regex escapes such as \\( are not subject to string-escape processing.
  """

  NO_FAIL_FAST_RE = re.compile(r"""^--------------------
.*
Exception message: name 'a_is_bad' is not defined
while executing BUILD file BuildFile\(bad/a/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed\.
.*
Exception message: name 'b_is_bad' is not defined
while executing BUILD file BuildFile\(bad/b/BUILD, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/b' failed\.
Invalid BUILD files for \[::\]$""", re.DOTALL)

  FAIL_FAST_RE = r"""^name 'a_is_bad' is not defined
while executing BUILD file BuildFile\(bad/a/BUILD\, FileSystemProjectTree\(.*\)\)
Loading addresses from 'bad/a' failed.$"""

  def setUp(self):
    super(BuildFileAddressMapperScanTest, self).setUp()

    def add_target(path, name):
      self.add_to_build_file(path, 'target(name="{name}")\n'.format(name=name))

    # Fixture layout: //:root, a:a, a:b, a/b:b, a/b:c.
    add_target('BUILD', 'root')
    add_target('a', 'a')
    add_target('a', 'b')
    add_target('a/b', 'b')
    add_target('a/b', 'c')

    self._spec_parser = CmdLineSpecParser(self.build_root)

  def test_bad_build_files(self):
    # With fail_fast=False both broken BUILD files are reported.
    self.add_to_build_file('bad/a', 'a_is_bad')
    self.add_to_build_file('bad/b', 'b_is_bad')
    with self.assertRaisesRegexp(AddressLookupError, self.NO_FAIL_FAST_RE):
      list(self.address_mapper.scan_specs([DescendantAddresses('')], fail_fast=False))

  def test_bad_build_files_fail_fast(self):
    # With fail_fast=True only the first broken BUILD file is reported.
    self.add_to_build_file('bad/a', 'a_is_bad')
    self.add_to_build_file('bad/b', 'b_is_bad')
    with self.assertRaisesRegexp(AddressLookupError, self.FAIL_FAST_RE):
      list(self.address_mapper.scan_specs([DescendantAddresses('')], fail_fast=True))

  def test_normal(self):
    self.assert_scanned([':root'], expected=[':root'])
    self.assert_scanned(['//:root'], expected=[':root'])
    self.assert_scanned(['a'], expected=['a'])
    self.assert_scanned(['a:a'], expected=['a'])
    self.assert_scanned(['a/b'], expected=['a/b'])
    self.assert_scanned(['a/b:b'], expected=['a/b'])
    self.assert_scanned(['a/b:c'], expected=['a/b:c'])

  def test_sibling(self):
    self.assert_scanned([':'], expected=[':root'])
    self.assert_scanned(['//:'], expected=[':root'])
    self.assert_scanned(['a:'], expected=['a', 'a:b'])
    self.assert_scanned(['//a:'], expected=['a', 'a:b'])
    self.assert_scanned(['a/b:'], expected=['a/b', 'a/b:c'])
    self.assert_scanned(['//a/b:'], expected=['a/b', 'a/b:c'])

  def test_sibling_or_descendents(self):
    self.assert_scanned(['::'], expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
    self.assert_scanned(['//::'], expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])
    self.assert_scanned(['a::'], expected=['a', 'a:b', 'a/b', 'a/b:c'])
    self.assert_scanned(['//a::'], expected=['a', 'a:b', 'a/b', 'a/b:c'])
    self.assert_scanned(['a/b::'], expected=['a/b', 'a/b:c'])
    self.assert_scanned(['//a/b::'], expected=['a/b', 'a/b:c'])

  def test_cmd_line_affordances(self):
    # Leading ./, embedded ../, trailing / and absolute paths all resolve.
    self.assert_scanned(['./:root'], expected=[':root'])
    self.assert_scanned(['//./:root'], expected=[':root'])
    self.assert_scanned(['//./a/../:root'], expected=[':root'])
    self.assert_scanned([os.path.join(self.build_root, './a/../:root')],
                        expected=[':root'])
    self.assert_scanned(['a/'], expected=['a'])
    self.assert_scanned(['./a/'], expected=['a'])
    self.assert_scanned([os.path.join(self.build_root, './a/')], expected=['a'])
    self.assert_scanned(['a/b/:b'], expected=['a/b'])
    self.assert_scanned(['./a/b/:b'], expected=['a/b'])
    self.assert_scanned([os.path.join(self.build_root, './a/b/:b')], expected=['a/b'])

  def test_cmd_line_spec_list(self):
    self.assert_scanned(['a', 'a/b'], expected=['a', 'a/b'])
    self.assert_scanned(['::'], expected=[':root', 'a', 'a:b', 'a/b', 'a/b:c'])

  def test_does_not_exist(self):
    with self.assertRaises(AddressLookupError):
      self.assert_scanned(['c'], expected=[])
    with self.assertRaises(AddressLookupError):
      self.assert_scanned(['c:'], expected=[])
    with self.assertRaises(AddressLookupError):
      self.assert_scanned(['c::'], expected=[])

  def test_build_ignore_patterns(self):
    expected_specs = [':root', 'a', 'a:b', 'a/b', 'a/b:c']

    # This bogus BUILD file gets in the way of parsing.
    self.add_to_build_file('some/dir', 'COMPLETELY BOGUS BUILDFILE)\n')
    with self.assertRaises(AddressLookupError):
      self.assert_scanned(['::'], expected=expected_specs)

    # Ignoring 'some' lets the scan succeed despite the bogus file.
    address_mapper_with_ignore = BuildFileAddressMapper(self.build_file_parser,
                                                        self.project_tree,
                                                        build_ignore_patterns=['some'])
    self.assert_scanned(['::'], expected=expected_specs, address_mapper=address_mapper_with_ignore)

  def test_exclude_target_regexps(self):
    # Excluding .*:b.* drops a:b and a/b:b (and a/b matches? no: spec 'a/b'
    # is target a/b:b, which is excluded).
    address_mapper_with_exclude = BuildFileAddressMapper(self.build_file_parser,
                                                        self.project_tree,
                                                        exclude_target_regexps=[r'.*:b.*'])
    self.assert_scanned(['::'], expected=[':root', 'a', 'a/b:c'],
                        address_mapper=address_mapper_with_exclude)

  def assert_scanned(self, specs_strings, expected, address_mapper=None):
    """Parse and scan the given specs, asserting the address set matches `expected`."""
    address_mapper = address_mapper or self.address_mapper

    def sort(addresses):
      return sorted(addresses, key=lambda address: address.spec)

    specs = [self._spec_parser.parse_spec(s) for s in specs_strings]
    self.assertEqual(sort(Address.parse(addr) for addr in expected),
                     sort(address_mapper.scan_specs(specs)))
| |
from django.http import HttpRequest
import mock
from nose.tools import assert_false
import amo
from amo.tests import TestCase, req_factory_factory
from amo.urlresolvers import reverse
from addons.models import Addon, AddonUser
from users.models import UserProfile
from .acl import (action_allowed, check_addon_ownership, check_ownership,
check_reviewer, match_rules)
def test_match_rules():
    """
    Unit tests for the match_rules method.
    """
    # Each of these rule strings contains something that grants Admin:%.
    matching = (
        '*:*',
        'Editors:*,Admin:EditAnyAddon,Admin:flagged,Admin:addons,'
        'Admin:EditAnyCollection',
        'Tests:*,Admin:serverstatus,Admin:users',
        'Admin:EditAnyAddon,Admin:EditAnyLocale,Editors:*,'
        'Admin:lists,Admin:applications,Admin:addons,Localizers:*',
        'Admin:EditAnyAddon',
        'Admin:ViewAnyStats,Admin:ViewAnyCollectionStats',
        'Admin:ViewAnyStats',
        'Editors:*,Admin:features',
        'Admin:Statistics',
        'Admin:Features,Editors:*',
        'Admin:%',
        'Admin:*',
        'Admin:Foo',
        'Admin:Bar',
    )
    for allowed in matching:
        assert match_rules(allowed, 'Admin', '%'), "%s != Admin:%%" % allowed

    # None of these grant Admin:%.
    non_matching = (
        'Doctors:*',
        'Stats:View',
        'CollectionStats:View',
        'Addons:Review',
        'Personas:Review',
        'Locales:Edit',
        'Locale.de:Edit',
        'Reviews:Edit',
        'None:None',
    )
    for denied in non_matching:
        assert not match_rules(denied, 'Admin', '%'), \
            "%s == Admin:%% and shouldn't" % denied
def test_anonymous_user():
    # Fake request must not have .groups, just like an anonymous user.
    # action_allowed must deny rather than blow up on the missing attr.
    fake_request = HttpRequest()
    assert_false(action_allowed(fake_request, amo.FIREFOX, 'Admin:%'))
class ACLTestCase(TestCase):
    """Test some basic ACLs by going to various locked pages on AMO."""
    fixtures = ['access/login.json']

    def test_admin_login_anon(self):
        # Login form for anonymous user on the admin page.
        # Anonymous hits are redirected to the login view with a ?to=
        # return path back to the page originally requested.
        url = '/en-US/admin/models/'
        r = self.client.get(url)
        self.assertRedirects(r, '%s?to=%s' % (reverse('users.login'), url))
class TestHasPerm(TestCase):
    # Coverage for check_addon_ownership()/check_ownership() across the
    # author roles (owner/dev/viewer/support) and add-on statuses.
    fixtures = ['base/apps', 'base/users', 'base/addon_3615']

    def setUp(self):
        assert self.client.login(username='del@icio.us', password='password')
        self.user = UserProfile.objects.get(email='del@icio.us')
        self.addon = Addon.objects.get(id=3615)
        self.au = AddonUser.objects.get(addon=self.addon, user=self.user)
        assert self.au.role == amo.AUTHOR_ROLE_OWNER
        # Mock request standing in for an authenticated add-on owner.
        self.request = mock.Mock()
        self.request.groups = ()
        self.request.amo_user = self.user
        self.request.user.is_authenticated.return_value = True

    def login_admin(self):
        # Switch the client session to the admin account and return it.
        assert self.client.login(username='admin@mozilla.com',
                                 password='password')
        return UserProfile.objects.get(email='admin@mozilla.com')

    def test_anonymous(self):
        self.request.user.is_authenticated.return_value = False
        self.client.logout()
        assert not check_addon_ownership(self.request, self.addon)

    def test_admin(self):
        self.request.amo_user = self.login_admin()
        self.request.groups = self.request.amo_user.groups.all()
        assert check_addon_ownership(self.request, self.addon)
        assert check_addon_ownership(self.request, self.addon, admin=True)
        # admin=False explicitly refuses the admin override.
        assert not check_addon_ownership(self.request, self.addon, admin=False)

    def test_require_author(self):
        assert check_ownership(self.request, self.addon, require_author=True)

    def test_require_author_when_admin(self):
        self.request.amo_user = self.login_admin()
        self.request.groups = self.request.amo_user.groups.all()
        assert check_ownership(self.request, self.addon, require_author=False)
        # require_author=True disables the admin shortcut.
        assert not check_ownership(self.request, self.addon,
                                   require_author=True)

    def test_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert not check_addon_ownership(self.request, self.addon)
        # Admins still pass the ownership check for disabled add-ons.
        self.test_admin()

    def test_deleted(self):
        self.addon.update(status=amo.STATUS_DELETED)
        assert not check_addon_ownership(self.request, self.addon)
        # Not even admins get ownership of deleted add-ons.
        self.request.amo_user = self.login_admin()
        self.request.groups = self.request.amo_user.groups.all()
        assert not check_addon_ownership(self.request, self.addon)

    def test_ignore_disabled(self):
        self.addon.update(status=amo.STATUS_DISABLED)
        assert check_addon_ownership(self.request, self.addon,
                                     ignore_disabled=True)

    def test_owner(self):
        # Only the OWNER role passes the default (owner-level) check.
        assert check_addon_ownership(self.request, self.addon)

        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)

        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)

        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon)

    def test_dev(self):
        # dev=True admits owner and dev roles, not viewer/support.
        assert check_addon_ownership(self.request, self.addon, dev=True)

        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, dev=True)

        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)

        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon, dev=True)

    def test_viewer(self):
        # viewer=True admits every author role.
        assert check_addon_ownership(self.request, self.addon, viewer=True)

        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)

        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)

        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, viewer=True)

    def test_support(self):
        # support=True admits owner and support roles only.
        # NOTE(review): this first call passes viewer=True — looks copied
        # from test_viewer; probably meant support=True (an owner passes
        # either way) — confirm before changing.
        assert check_addon_ownership(self.request, self.addon, viewer=True)

        self.au.role = amo.AUTHOR_ROLE_DEV
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)

        self.au.role = amo.AUTHOR_ROLE_VIEWER
        self.au.save()
        assert not check_addon_ownership(self.request, self.addon,
                                         support=True)

        self.au.role = amo.AUTHOR_ROLE_SUPPORT
        self.au.save()
        assert check_addon_ownership(self.request, self.addon, support=True)
class TestCheckReviewer(TestCase):
    # check_reviewer() with no `only` argument accepts either reviewer
    # permission; only='addon'/'persona' narrows the check to one queue.
    fixtures = ['base/user_2519']

    def setUp(self):
        self.user = UserProfile.objects.get()

    def test_no_perm(self):
        # No permissions at all: not a reviewer of any kind.
        req = req_factory_factory('noop', user=self.user)
        assert not check_reviewer(req)
        assert not check_reviewer(req, only='addon')
        assert not check_reviewer(req, only='persona')

    def test_perm_addons(self):
        self.grant_permission(self.user, 'Addons:Review')
        req = req_factory_factory('noop', user=self.user)
        assert check_reviewer(req)
        assert check_reviewer(req, only='addon')
        assert not check_reviewer(req, only='persona')

    def test_perm_themes(self):
        self.grant_permission(self.user, 'Personas:Review')
        req = req_factory_factory('noop', user=self.user)
        assert check_reviewer(req)
        assert not check_reviewer(req, only='addon')
        assert check_reviewer(req, only='persona')
| |
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_tunnels(object):
"""Auto generated class.
"""
    def __init__(self, **kwargs):
        # `callback` receives each generated <config> element tree;
        # it is required (kwargs.pop raises KeyError if missing).
        self._callback = kwargs.pop('callback')
def nsx_controller_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name = ET.SubElement(nsx_controller, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def nsx_controller_connection_addr_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
address = ET.SubElement(connection_addr, "address")
address.text = kwargs.pop('address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def nsx_controller_connection_addr_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
port = ET.SubElement(connection_addr, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def nsx_controller_connection_addr_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
method = ET.SubElement(connection_addr, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def nsx_controller_reconnect_interval(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
reconnect_interval = ET.SubElement(nsx_controller, "reconnect-interval")
reconnect_interval.text = kwargs.pop('reconnect_interval')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def nsx_controller_activate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
activate = ET.SubElement(nsx_controller, "activate")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def overlay_gateway_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name = ET.SubElement(overlay_gateway, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def overlay_gateway_gw_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
gw_type = ET.SubElement(overlay_gateway, "gw-type")
gw_type.text = kwargs.pop('gw_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def overlay_gateway_ip_interface_ve_ve_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
ip = ET.SubElement(overlay_gateway, "ip")
interface = ET.SubElement(ip, "interface")
ve = ET.SubElement(interface, "ve")
ve_id = ET.SubElement(ve, "ve-id")
ve_id.text = kwargs.pop('ve_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def overlay_gateway_ip_interface_ve_vrrp_extended_group(self, **kwargs):
    """Build an <overlay-gateway>/ip/interface/ve/<vrrp-extended-group> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ve = ET.SubElement(ET.SubElement(ET.SubElement(gw, "ip"), "interface"), "ve")
    ET.SubElement(ve, "vrrp-extended-group").text = kwargs.pop('vrrp_extended_group')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_ip_interface_loopback_loopback_id(self, **kwargs):
    """Build an <overlay-gateway>/ip/interface/loopback/<loopback-id> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    lo = ET.SubElement(ET.SubElement(ET.SubElement(gw, "ip"), "interface"), "loopback")
    ET.SubElement(lo, "loopback-id").text = kwargs.pop('loopback_id')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_rbridge_id_rb_add(self, **kwargs):
    """Build an <overlay-gateway>/attach/rbridge-id/<rb-add> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    rb = ET.SubElement(ET.SubElement(gw, "attach"), "rbridge-id")
    ET.SubElement(rb, "rb-add").text = kwargs.pop('rb_add')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_rbridge_id_rb_remove(self, **kwargs):
    """Build an <overlay-gateway>/attach/rbridge-id/<rb-remove> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    rb = ET.SubElement(ET.SubElement(gw, "attach"), "rbridge-id")
    ET.SubElement(rb, "rb-remove").text = kwargs.pop('rb_remove')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_vlan_vid(self, **kwargs):
    """Build an <overlay-gateway>/attach/vlan/<vid> config tree (keyed by mac) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    vlan = ET.SubElement(ET.SubElement(gw, "attach"), "vlan")
    ET.SubElement(vlan, "mac").text = kwargs.pop('mac')
    ET.SubElement(vlan, "vid").text = kwargs.pop('vid')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_vlan_mac(self, **kwargs):
    """Build an <overlay-gateway>/attach/vlan/<mac> config tree (keyed by vid) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    vlan = ET.SubElement(ET.SubElement(gw, "attach"), "vlan")
    ET.SubElement(vlan, "vid").text = kwargs.pop('vid')
    ET.SubElement(vlan, "mac").text = kwargs.pop('mac')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_map_vlan_vni_mapping_vid(self, **kwargs):
    """Build an <overlay-gateway>/map/vlan-vni-mapping/<vid> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    # local renamed from `map` to avoid shadowing the builtin
    map_el = ET.SubElement(gw, "map")
    mapping = ET.SubElement(map_el, "vlan-vni-mapping")
    ET.SubElement(mapping, "vid").text = kwargs.pop('vid')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_map_vlan_vni_mapping_vni(self, **kwargs):
    """Build an <overlay-gateway>/map/vlan-vni-mapping/<vni> config tree (keyed by vid) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    # local renamed from `map` to avoid shadowing the builtin
    mapping = ET.SubElement(ET.SubElement(gw, "map"), "vlan-vni-mapping")
    ET.SubElement(mapping, "vid").text = kwargs.pop('vid')
    ET.SubElement(mapping, "vni").text = kwargs.pop('vni')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_map_vlan_vni_auto(self, **kwargs):
    """Build an <overlay-gateway>/map/vlan/vni/<auto> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    # local renamed from `map` to avoid shadowing the builtin
    vni = ET.SubElement(ET.SubElement(ET.SubElement(gw, "map"), "vlan"), "vni")
    ET.SubElement(vni, "auto")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_site_name(self, **kwargs):
    """Build an <overlay-gateway>/site/<name> config tree and pass it to the callback.

    Bug fix: the auto-generated body called ``kwargs.pop('name')`` twice
    (once for the gateway key, once for the site name), so the second pop
    always raised ``KeyError`` and the method could never succeed. The
    value is now popped once; an optional ``site_name`` kwarg lets callers
    give the site a name distinct from the gateway key (it defaults to the
    gateway name for backward compatibility with the generated schema).
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    gw_name = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = gw_name
    site = ET.SubElement(overlay_gateway, "site")
    name = ET.SubElement(site, "name")
    name.text = kwargs.pop('site_name', gw_name)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_tunnel_dst_address(self, **kwargs):
    """Build an <overlay-gateway>/site/tunnel-dst/<address> config tree and pass it to the callback.

    Bug fix: the auto-generated body popped ``'name'`` twice (gateway key
    and site key share the kwarg name), so the second pop always raised
    ``KeyError``. The value is now popped once; an optional ``site_name``
    kwarg (defaulting to the gateway name) supplies the site key.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    gw_name = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = gw_name
    site = ET.SubElement(overlay_gateway, "site")
    site_key = ET.SubElement(site, "name")
    site_key.text = kwargs.pop('site_name', gw_name)
    tunnel_dst = ET.SubElement(site, "tunnel-dst")
    address = ET.SubElement(tunnel_dst, "address")
    address.text = kwargs.pop('address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_extend_vlan_add(self, **kwargs):
    """Build an <overlay-gateway>/site/extend/vlan/<add> config tree and pass it to the callback.

    Bug fix: the auto-generated body popped ``'name'`` twice, so the
    second pop always raised ``KeyError``. Popped once now; an optional
    ``site_name`` kwarg (defaulting to the gateway name) supplies the
    site key.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    gw_name = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = gw_name
    site = ET.SubElement(overlay_gateway, "site")
    site_key = ET.SubElement(site, "name")
    site_key.text = kwargs.pop('site_name', gw_name)
    extend = ET.SubElement(site, "extend")
    vlan = ET.SubElement(extend, "vlan")
    add = ET.SubElement(vlan, "add")
    add.text = kwargs.pop('add')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_extend_vlan_remove(self, **kwargs):
    """Build an <overlay-gateway>/site/extend/vlan/<remove> config tree and pass it to the callback.

    Bug fix: the auto-generated body popped ``'name'`` twice, so the
    second pop always raised ``KeyError``. Popped once now; an optional
    ``site_name`` kwarg (defaulting to the gateway name) supplies the
    site key.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    gw_name = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = gw_name
    site = ET.SubElement(overlay_gateway, "site")
    site_key = ET.SubElement(site, "name")
    site_key.text = kwargs.pop('site_name', gw_name)
    extend = ET.SubElement(site, "extend")
    vlan = ET.SubElement(extend, "vlan")
    remove = ET.SubElement(vlan, "remove")
    remove.text = kwargs.pop('remove')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_shutdown(self, **kwargs):
    """Build an <overlay-gateway>/site/<shutdown> config tree and pass it to the callback.

    Bug fix: the auto-generated body popped ``'name'`` twice, so the
    second pop always raised ``KeyError``. Popped once now; an optional
    ``site_name`` kwarg (defaulting to the gateway name) supplies the
    site key.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    gw_name = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = gw_name
    site = ET.SubElement(overlay_gateway, "site")
    site_key = ET.SubElement(site, "name")
    site_key.text = kwargs.pop('site_name', gw_name)
    shutdown = ET.SubElement(site, "shutdown")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_enable_statistics_stats_direction(self, **kwargs):
    """Build an <overlay-gateway>/enable/statistics/<stats-direction> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "stats-direction").text = kwargs.pop('stats_direction')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_enable_statistics_vlan_action(self, **kwargs):
    """Build an <overlay-gateway>/enable/statistics/<vlan-action> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "vlan-action").text = kwargs.pop('vlan_action')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_enable_statistics_vlan_list(self, **kwargs):
    """Build an <overlay-gateway>/enable/statistics/<vlan-list> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "vlan-list").text = kwargs.pop('vlan_list')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_session(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<session> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_direction(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<direction> config tree (keyed by session) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "direction").text = kwargs.pop('direction')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_remote_endpoint(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<remote-endpoint> config tree (keyed by session) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "remote-endpoint").text = kwargs.pop('remote_endpoint')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_vlan_leaf(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<vlan-leaf> config tree (keyed by session) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-leaf").text = kwargs.pop('vlan_leaf')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_vlan_add_remove(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<vlan-add-remove> config tree (keyed by session) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-add-remove").text = kwargs.pop('vlan_add_remove')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_monitor_vlan_range(self, **kwargs):
    """Build an <overlay-gateway>/monitor/<vlan-range> config tree (keyed by session) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-range").text = kwargs.pop('vlan_range')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_sflow_sflow_profile_name(self, **kwargs):
    """Build an <overlay-gateway>/sflow/<sflow-profile-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_sflow_sflow_remote_endpoint(self, **kwargs):
    """Build an <overlay-gateway>/sflow/<sflow-remote-endpoint> config tree (keyed by profile name) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow, "sflow-remote-endpoint").text = kwargs.pop('sflow_remote_endpoint')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs):
    """Build an <overlay-gateway>/sflow/<sflow-vlan-action> config tree (keyed by profile name) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow, "sflow-vlan-action").text = kwargs.pop('sflow_vlan_action')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_sflow_sflow_vlan_range(self, **kwargs):
    """Build an <overlay-gateway>/sflow/<sflow-vlan-range> config tree (keyed by profile name) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow, "sflow-vlan-range").text = kwargs.pop('sflow_vlan_range')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_mac_in_cg_mac_acl_in_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/mac/in/<mac-acl-in-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "in")
    ET.SubElement(inbound, "mac-acl-in-name").text = kwargs.pop('mac_acl_in_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_mac_in_cg_mac_acl_in_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/mac/in/<mac-acl-in-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "in")
    ET.SubElement(inbound, "mac-acl-in-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_mac_out_mac_acl_out_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/mac/out/<mac-acl-out-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "out")
    ET.SubElement(outbound, "mac-acl-out-name").text = kwargs.pop('mac_acl_out_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_mac_out_mac_acl_out_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/mac/out/<mac-acl-out-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "out")
    ET.SubElement(outbound, "mac-acl-out-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv4_in_cg_ipv4_acl_in_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv4/in/<ipv4-acl-in-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "in")
    ET.SubElement(inbound, "ipv4-acl-in-name").text = kwargs.pop('ipv4_acl_in_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv4_in_cg_ipv4_acl_in_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv4/in/<ipv4-acl-in-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "in")
    ET.SubElement(inbound, "ipv4-acl-in-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv4/out/<ipv4-acl-out-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "out")
    ET.SubElement(outbound, "ipv4-acl-out-name").text = kwargs.pop('ipv4_acl_out_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv4/out/<ipv4-acl-out-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "out")
    ET.SubElement(outbound, "ipv4-acl-out-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv6/in/<ipv6-acl-in-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "in")
    ET.SubElement(inbound, "ipv6-acl-in-name").text = kwargs.pop('ipv6_acl_in_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv6/in/<ipv6-acl-in-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "in")
    ET.SubElement(inbound, "ipv6-acl-in-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_name(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv6/out/<ipv6-acl-out-name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "out")
    ET.SubElement(outbound, "ipv6-acl-out-name").text = kwargs.pop('ipv6_acl_out_name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_dir(self, **kwargs):
    """Build an <overlay-gateway>/access-lists/ipv6/out/<ipv6-acl-out-dir> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "out")
    ET.SubElement(outbound, "ipv6-acl-out-dir")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_activate(self, **kwargs):
    """Build an <overlay-gateway>/<activate> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ET.SubElement(gw, "activate")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_name(self, **kwargs):
    """Build an <nsx-controller>/<name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_connection_addr_address(self, **kwargs):
    """Build an <nsx-controller>/connection-addr/<address> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    conn = ET.SubElement(ctrl, "connection-addr")
    ET.SubElement(conn, "address").text = kwargs.pop('address')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_connection_addr_port(self, **kwargs):
    """Build an <nsx-controller>/connection-addr/<port> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    conn = ET.SubElement(ctrl, "connection-addr")
    ET.SubElement(conn, "port").text = kwargs.pop('port')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_connection_addr_method(self, **kwargs):
    """Build an <nsx-controller>/connection-addr/<method> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    conn = ET.SubElement(ctrl, "connection-addr")
    ET.SubElement(conn, "method").text = kwargs.pop('method')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_reconnect_interval(self, **kwargs):
    """Build an <nsx-controller>/<reconnect-interval> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    ET.SubElement(ctrl, "reconnect-interval").text = kwargs.pop('reconnect_interval')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def nsx_controller_activate(self, **kwargs):
    """Build an <nsx-controller>/<activate> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    ctrl = ET.SubElement(cfg, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(ctrl, "name").text = kwargs.pop('name')
    ET.SubElement(ctrl, "activate")
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_name(self, **kwargs):
    """Build an <overlay-gateway>/<name> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_gw_type(self, **kwargs):
    """Build an <overlay-gateway>/<gw-type> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ET.SubElement(gw, "gw-type").text = kwargs.pop('gw_type')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_ip_interface_ve_ve_id(self, **kwargs):
    """Build an <overlay-gateway>/ip/interface/ve/<ve-id> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ve = ET.SubElement(ET.SubElement(ET.SubElement(gw, "ip"), "interface"), "ve")
    ET.SubElement(ve, "ve-id").text = kwargs.pop('ve_id')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_ip_interface_ve_vrrp_extended_group(self, **kwargs):
    """Build an <overlay-gateway>/ip/interface/ve/<vrrp-extended-group> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ve = ET.SubElement(ET.SubElement(ET.SubElement(gw, "ip"), "interface"), "ve")
    ET.SubElement(ve, "vrrp-extended-group").text = kwargs.pop('vrrp_extended_group')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_ip_interface_loopback_loopback_id(self, **kwargs):
    """Build an <overlay-gateway>/ip/interface/loopback/<loopback-id> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    lo = ET.SubElement(ET.SubElement(ET.SubElement(gw, "ip"), "interface"), "loopback")
    ET.SubElement(lo, "loopback-id").text = kwargs.pop('loopback_id')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_rbridge_id_rb_add(self, **kwargs):
    """Build an <overlay-gateway>/attach/rbridge-id/<rb-add> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    rb = ET.SubElement(ET.SubElement(gw, "attach"), "rbridge-id")
    ET.SubElement(rb, "rb-add").text = kwargs.pop('rb_add')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_rbridge_id_rb_remove(self, **kwargs):
    """Build an <overlay-gateway>/attach/rbridge-id/<rb-remove> config tree and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    rb = ET.SubElement(ET.SubElement(gw, "attach"), "rbridge-id")
    ET.SubElement(rb, "rb-remove").text = kwargs.pop('rb_remove')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_vlan_vid(self, **kwargs):
    """Build an <overlay-gateway>/attach/vlan/<vid> config tree (keyed by mac) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    vlan = ET.SubElement(ET.SubElement(gw, "attach"), "vlan")
    ET.SubElement(vlan, "mac").text = kwargs.pop('mac')
    ET.SubElement(vlan, "vid").text = kwargs.pop('vid')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_attach_vlan_mac(self, **kwargs):
    """Build an <overlay-gateway>/attach/vlan/<mac> config tree (keyed by vid) and pass it to the callback."""
    cfg = ET.Element("config")
    gw = ET.SubElement(cfg, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    vlan = ET.SubElement(ET.SubElement(gw, "attach"), "vlan")
    ET.SubElement(vlan, "vid").text = kwargs.pop('vid')
    ET.SubElement(vlan, "mac").text = kwargs.pop('mac')
    run = kwargs.pop('callback', self._callback)
    return run(cfg)
def overlay_gateway_map_vlan_vni_mapping_vid(self, **kwargs):
    """Build config for overlay-gateway/map/vlan-vni-mapping/vid.

    kwargs: name, vid, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    # local renamed from 'map' to avoid shadowing the builtin
    mapping = ET.SubElement(ET.SubElement(gw, "map"), "vlan-vni-mapping")
    ET.SubElement(mapping, "vid").text = kwargs.pop('vid')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_map_vlan_vni_mapping_vni(self, **kwargs):
    """Build config for overlay-gateway/map/vlan-vni-mapping/vni.

    kwargs: name, vid (list key), vni, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mapping = ET.SubElement(ET.SubElement(gw, "map"), "vlan-vni-mapping")
    ET.SubElement(mapping, "vid").text = kwargs.pop('vid')
    ET.SubElement(mapping, "vni").text = kwargs.pop('vni')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_map_vlan_vni_auto(self, **kwargs):
    """Build config enabling overlay-gateway/map/vlan/vni/auto (empty leaf).

    kwargs: name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    vni_node = ET.SubElement(ET.SubElement(ET.SubElement(gw, "map"), "vlan"), "vni")
    ET.SubElement(vni_node, "auto")  # presence-only leaf, no text
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_site_name(self, **kwargs):
    """Build config for overlay-gateway/site/name and pass it to the callback.

    kwargs: name (gateway key; also used as the site name unless a
    'site_name' override is supplied), optional callback.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # BUG FIX: the generated code called kwargs.pop('name') twice, so the
    # second pop always raised KeyError.  Pop once and reuse the value,
    # allowing an optional 'site_name' kwarg to override the site leaf.
    name = kwargs.pop('name')
    ET.SubElement(overlay_gateway, "name").text = name
    site = ET.SubElement(overlay_gateway, "site")
    ET.SubElement(site, "name").text = kwargs.pop('site_name', name)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_tunnel_dst_address(self, **kwargs):
    """Build config for overlay-gateway/site/tunnel-dst/address.

    kwargs: name (gateway key; also the site key unless 'site_name' is
    given), address, optional callback.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # BUG FIX: 'name' was popped twice in the generated code, raising
    # KeyError on the second pop.  Pop once; 'site_name' may override.
    name = kwargs.pop('name')
    ET.SubElement(overlay_gateway, "name").text = name
    site = ET.SubElement(overlay_gateway, "site")
    ET.SubElement(site, "name").text = kwargs.pop('site_name', name)
    tunnel_dst = ET.SubElement(site, "tunnel-dst")
    ET.SubElement(tunnel_dst, "address").text = kwargs.pop('address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_extend_vlan_add(self, **kwargs):
    """Build config for overlay-gateway/site/extend/vlan/add.

    kwargs: name (gateway key; also the site key unless 'site_name' is
    given), add (vlan range), optional callback.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # BUG FIX: 'name' was popped twice in the generated code, raising
    # KeyError on the second pop.  Pop once; 'site_name' may override.
    name = kwargs.pop('name')
    ET.SubElement(overlay_gateway, "name").text = name
    site = ET.SubElement(overlay_gateway, "site")
    ET.SubElement(site, "name").text = kwargs.pop('site_name', name)
    vlan = ET.SubElement(ET.SubElement(site, "extend"), "vlan")
    ET.SubElement(vlan, "add").text = kwargs.pop('add')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_extend_vlan_remove(self, **kwargs):
    """Build config for overlay-gateway/site/extend/vlan/remove.

    kwargs: name (gateway key; also the site key unless 'site_name' is
    given), remove (vlan range), optional callback.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # BUG FIX: 'name' was popped twice in the generated code, raising
    # KeyError on the second pop.  Pop once; 'site_name' may override.
    name = kwargs.pop('name')
    ET.SubElement(overlay_gateway, "name").text = name
    site = ET.SubElement(overlay_gateway, "site")
    ET.SubElement(site, "name").text = kwargs.pop('site_name', name)
    vlan = ET.SubElement(ET.SubElement(site, "extend"), "vlan")
    ET.SubElement(vlan, "remove").text = kwargs.pop('remove')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_site_shutdown(self, **kwargs):
    """Build config enabling overlay-gateway/site/shutdown (empty leaf).

    kwargs: name (gateway key; also the site key unless 'site_name' is
    given), optional callback.
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # BUG FIX: 'name' was popped twice in the generated code, raising
    # KeyError on the second pop.  Pop once; 'site_name' may override.
    name = kwargs.pop('name')
    ET.SubElement(overlay_gateway, "name").text = name
    site = ET.SubElement(overlay_gateway, "site")
    ET.SubElement(site, "name").text = kwargs.pop('site_name', name)
    ET.SubElement(site, "shutdown")  # presence-only leaf
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def overlay_gateway_enable_statistics_stats_direction(self, **kwargs):
    """Build config for overlay-gateway/enable/statistics/stats-direction.

    kwargs: name, stats_direction, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "stats-direction").text = kwargs.pop('stats_direction')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_enable_statistics_vlan_action(self, **kwargs):
    """Build config for overlay-gateway/enable/statistics/vlan-action.

    kwargs: name, vlan_action, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "vlan-action").text = kwargs.pop('vlan_action')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_enable_statistics_vlan_list(self, **kwargs):
    """Build config for overlay-gateway/enable/statistics/vlan-list.

    kwargs: name, vlan_list, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    stats = ET.SubElement(ET.SubElement(gw, "enable"), "statistics")
    ET.SubElement(stats, "vlan-list").text = kwargs.pop('vlan_list')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_session(self, **kwargs):
    """Build config for overlay-gateway/monitor/session.

    kwargs: name, session, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_direction(self, **kwargs):
    """Build config for overlay-gateway/monitor/direction.

    kwargs: name, session (list key), direction, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "direction").text = kwargs.pop('direction')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_remote_endpoint(self, **kwargs):
    """Build config for overlay-gateway/monitor/remote-endpoint.

    kwargs: name, session (list key), remote_endpoint, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "remote-endpoint").text = kwargs.pop('remote_endpoint')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_vlan_leaf(self, **kwargs):
    """Build config for overlay-gateway/monitor/vlan-leaf.

    kwargs: name, session (list key), vlan_leaf, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-leaf").text = kwargs.pop('vlan_leaf')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_vlan_add_remove(self, **kwargs):
    """Build config for overlay-gateway/monitor/vlan-add-remove.

    kwargs: name, session (list key), vlan_add_remove, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-add-remove").text = kwargs.pop('vlan_add_remove')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_monitor_vlan_range(self, **kwargs):
    """Build config for overlay-gateway/monitor/vlan-range.

    kwargs: name, session (list key), vlan_range, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    mon = ET.SubElement(gw, "monitor")
    ET.SubElement(mon, "session").text = kwargs.pop('session')
    ET.SubElement(mon, "vlan-range").text = kwargs.pop('vlan_range')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_sflow_sflow_profile_name(self, **kwargs):
    """Build config for overlay-gateway/sflow/sflow-profile-name.

    kwargs: name, sflow_profile_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow_node = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow_node, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_sflow_sflow_remote_endpoint(self, **kwargs):
    """Build config for overlay-gateway/sflow/sflow-remote-endpoint.

    kwargs: name, sflow_profile_name (list key), sflow_remote_endpoint,
    optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow_node = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow_node, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow_node, "sflow-remote-endpoint").text = kwargs.pop('sflow_remote_endpoint')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_sflow_sflow_vlan_action(self, **kwargs):
    """Build config for overlay-gateway/sflow/sflow-vlan-action.

    kwargs: name, sflow_profile_name (list key), sflow_vlan_action,
    optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow_node = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow_node, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow_node, "sflow-vlan-action").text = kwargs.pop('sflow_vlan_action')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_sflow_sflow_vlan_range(self, **kwargs):
    """Build config for overlay-gateway/sflow/sflow-vlan-range.

    kwargs: name, sflow_profile_name (list key), sflow_vlan_range,
    optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    sflow_node = ET.SubElement(gw, "sflow")
    ET.SubElement(sflow_node, "sflow-profile-name").text = kwargs.pop('sflow_profile_name')
    ET.SubElement(sflow_node, "sflow-vlan-range").text = kwargs.pop('sflow_vlan_range')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_mac_in_cg_mac_acl_in_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/mac/in/mac-acl-in-name.

    kwargs: name, mac_acl_in_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "in")
    ET.SubElement(inbound, "mac-acl-in-name").text = kwargs.pop('mac_acl_in_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_mac_in_cg_mac_acl_in_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/mac/in/mac-acl-in-dir.

    kwargs: name, optional callback.  NOTE(review): the generated code
    emits the mac-acl-in-dir leaf with no text — presumably a
    presence/empty leaf; confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "in")
    ET.SubElement(inbound, "mac-acl-in-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_mac_out_mac_acl_out_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/mac/out/mac-acl-out-name.

    kwargs: name, mac_acl_out_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "out")
    ET.SubElement(outbound, "mac-acl-out-name").text = kwargs.pop('mac_acl_out_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_mac_out_mac_acl_out_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/mac/out/mac-acl-out-dir.

    kwargs: name, optional callback.  NOTE(review): leaf emitted with no
    text, matching the generator's output — confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "mac"), "out")
    ET.SubElement(outbound, "mac-acl-out-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv4_in_cg_ipv4_acl_in_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv4/in/ipv4-acl-in-name.

    kwargs: name, ipv4_acl_in_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "in")
    ET.SubElement(inbound, "ipv4-acl-in-name").text = kwargs.pop('ipv4_acl_in_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv4_in_cg_ipv4_acl_in_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv4/in/ipv4-acl-in-dir.

    kwargs: name, optional callback.  NOTE(review): leaf emitted with no
    text, matching the generator's output — confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "in")
    ET.SubElement(inbound, "ipv4-acl-in-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv4/out/ipv4-acl-out-name.

    kwargs: name, ipv4_acl_out_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "out")
    ET.SubElement(outbound, "ipv4-acl-out-name").text = kwargs.pop('ipv4_acl_out_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv4/out/ipv4-acl-out-dir.

    kwargs: name, optional callback.  NOTE(review): leaf emitted with no
    text, matching the generator's output — confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv4"), "out")
    ET.SubElement(outbound, "ipv4-acl-out-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv6/in/ipv6-acl-in-name.

    kwargs: name, ipv6_acl_in_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "in")
    ET.SubElement(inbound, "ipv6-acl-in-name").text = kwargs.pop('ipv6_acl_in_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv6/in/ipv6-acl-in-dir.

    kwargs: name, optional callback.  NOTE(review): leaf emitted with no
    text, matching the generator's output — confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    inbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "in")
    ET.SubElement(inbound, "ipv6-acl-in-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_name(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv6/out/ipv6-acl-out-name.

    kwargs: name, ipv6_acl_out_name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "out")
    ET.SubElement(outbound, "ipv6-acl-out-name").text = kwargs.pop('ipv6_acl_out_name')
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_access_lists_ipv6_out_ipv6_acl_out_dir(self, **kwargs):
    """Build config for overlay-gateway/access-lists/ipv6/out/ipv6-acl-out-dir.

    kwargs: name, optional callback.  NOTE(review): leaf emitted with no
    text, matching the generator's output — confirm against the YANG model.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    outbound = ET.SubElement(
        ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6"), "out")
    ET.SubElement(outbound, "ipv6-acl-out-dir")
    send = kwargs.pop('callback', self._callback)
    return send(root)
def overlay_gateway_activate(self, **kwargs):
    """Build config enabling overlay-gateway/activate (empty leaf).

    kwargs: name, optional callback.
    """
    root = ET.Element("config")
    gw = ET.SubElement(root, "overlay-gateway",
                       xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(gw, "name").text = kwargs.pop('name')
    ET.SubElement(gw, "activate")  # presence-only leaf, no text
    send = kwargs.pop('callback', self._callback)
    return send(root)
| |
"""Test the Blink config flow."""
from blinkpy.auth import LoginError
from blinkpy.blinkpy import BlinkSetupError
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.blink import DOMAIN
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_form(hass):
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})

    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init["type"] == "form"
    assert init["errors"] == {}

    # Successful login without 2FA should create the entry directly.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.blink.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        done = await hass.config_entries.flow.async_configure(
            init["flow_id"],
            {"username": "blink@example.com", "password": "example"},
        )
        await hass.async_block_till_done()

    assert done["type"] == "create_entry"
    assert done["title"] == "blink"
    assert done["result"].unique_id == "blink@example.com"
    assert done["data"] == {
        "username": "blink@example.com",
        "password": "example",
        "device_id": "Home Assistant",
        "token": None,
        "host": None,
        "account_id": None,
        "client_id": None,
        "region_id": None,
    }
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_2fa(hass):
    """Test we get the 2fa form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # First stage: a required key pushes the flow to the 2fa step.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ) as mock_setup:
        step_2fa = await hass.config_entries.flow.async_configure(
            init["flow_id"],
            {"username": "blink@example.com", "password": "example"},
        )

    assert step_2fa["type"] == "form"
    assert step_2fa["step_id"] == "2fa"

    # Second stage: a valid pin completes the flow.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.send_auth_key",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.config_flow.Blink.setup_urls",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ) as mock_setup, patch(
        "homeassistant.components.blink.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        final = await hass.config_entries.flow.async_configure(
            step_2fa["flow_id"], {"pin": "1234"}
        )
        await hass.async_block_till_done()

    assert final["type"] == "create_entry"
    assert final["title"] == "blink"
    assert final["result"].unique_id == "blink@example.com"
    assert len(mock_setup.mock_calls) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_2fa_connect_error(hass):
    """Test we report a connect error during 2fa setup."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=True,
    ), patch("homeassistant.components.blink.async_setup", return_value=True):
        step_2fa = await hass.config_entries.flow.async_configure(
            init["flow_id"],
            {"username": "blink@example.com", "password": "example"},
        )

    assert step_2fa["type"] == "form"
    assert step_2fa["step_id"] == "2fa"

    # A BlinkSetupError while building URLs surfaces as cannot_connect.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.send_auth_key",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.config_flow.Blink.setup_urls",
        side_effect=BlinkSetupError,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ), patch(
        "homeassistant.components.blink.async_setup_entry", return_value=True
    ):
        final = await hass.config_entries.flow.async_configure(
            step_2fa["flow_id"], {"pin": "1234"}
        )

    assert final["type"] == "form"
    assert final["errors"] == {"base": "cannot_connect"}
async def test_form_2fa_invalid_key(hass):
    """Test we report an error if key is invalid."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=True,
    ), patch("homeassistant.components.blink.async_setup", return_value=True):
        step_2fa = await hass.config_entries.flow.async_configure(
            init["flow_id"],
            {"username": "blink@example.com", "password": "example"},
        )

    assert step_2fa["type"] == "form"
    assert step_2fa["step_id"] == "2fa"

    # send_auth_key returning False means the pin was rejected.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.send_auth_key",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.config_flow.Blink.setup_urls",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ), patch(
        "homeassistant.components.blink.async_setup_entry", return_value=True
    ):
        final = await hass.config_entries.flow.async_configure(
            step_2fa["flow_id"], {"pin": "1234"}
        )

    assert final["type"] == "form"
    assert final["errors"] == {"base": "invalid_access_token"}
async def test_form_2fa_unknown_error(hass):
    """Test we report an unknown error during 2fa setup."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=True,
    ), patch("homeassistant.components.blink.async_setup", return_value=True):
        step_2fa = await hass.config_entries.flow.async_configure(
            init["flow_id"],
            {"username": "blink@example.com", "password": "example"},
        )

    assert step_2fa["type"] == "form"
    assert step_2fa["step_id"] == "2fa"

    # An unexpected exception type maps to the generic "unknown" error.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup"
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.check_key_required",
        return_value=False,
    ), patch(
        "homeassistant.components.blink.config_flow.Auth.send_auth_key",
        return_value=True,
    ), patch(
        "homeassistant.components.blink.config_flow.Blink.setup_urls",
        side_effect=KeyError,
    ), patch(
        "homeassistant.components.blink.async_setup", return_value=True
    ), patch(
        "homeassistant.components.blink.async_setup_entry", return_value=True
    ):
        final = await hass.config_entries.flow.async_configure(
            step_2fa["flow_id"], {"pin": "1234"}
        )

    assert final["type"] == "form"
    assert final["errors"] == {"base": "unknown"}
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # A LoginError from Auth.startup should show the invalid_auth error.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup",
        side_effect=LoginError,
    ):
        outcome = await hass.config_entries.flow.async_configure(
            init["flow_id"], {"username": "blink@example.com", "password": "example"}
        )

    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "invalid_auth"}
async def test_form_unknown_error(hass):
    """Test we handle unknown error at startup."""
    init = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Any unexpected exception type maps to the generic "unknown" error.
    with patch(
        "homeassistant.components.blink.config_flow.Auth.startup",
        side_effect=KeyError,
    ):
        outcome = await hass.config_entries.flow.async_configure(
            init["flow_id"], {"username": "blink@example.com", "password": "example"}
        )

    assert outcome["type"] == "form"
    assert outcome["errors"] == {"base": "unknown"}
async def test_reauth_shows_user_step(hass):
    """Test reauth shows the user form."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "reauth"}
    )
    assert flow["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow["step_id"] == "user"
async def test_options_flow(hass):
    """Test config flow options."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={"username": "blink@example.com", "password": "example"},
        options={},
        entry_id=1,
        version=2,
    )
    entry.add_to_hass(hass)

    mock_auth = Mock(
        startup=Mock(return_value=True), check_key_required=Mock(return_value=False)
    )
    mock_blink = Mock()

    with patch(
        "homeassistant.components.blink.Auth", return_value=mock_auth
    ), patch(
        "homeassistant.components.blink.Blink", return_value=mock_blink
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

        options = await hass.config_entries.options.async_init(
            entry.entry_id, context={"show_advanced_options": False}
        )
        assert options["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert options["step_id"] == "simple_options"

        options = await hass.config_entries.options.async_configure(
            options["flow_id"],
            user_input={"scan_interval": 5},
        )
        assert options["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert options["data"] == {"scan_interval": 5}
        assert mock_blink.refresh_rate == 5
| |
# favorite.py
from error import PostgresError, WeasylError
import macro as m
import define as d
import welcome
import frienduser
import ignoreuser
import collection
from weasyl import media
def select_submit_query(userid, rating, otherid=None, backid=None, nextid=None, config=None):
    """Build the shared FROM/WHERE clause list for favorite-submission queries.

    Returns a list of SQL fragments; callers prepend a SELECT and join them.
    NOTE(review): values are interpolated with %i — safe only while all of
    userid/rating/otherid/backid/nextid are integers; verify at call sites.
    """
    if config is None:
        config = d.get_config(userid)

    clauses = [
        " FROM favorite fa INNER JOIN"
        " submission su ON fa.targetid = su.submitid"
        " INNER JOIN profile pr ON su.userid = pr.userid"
        " WHERE fa.type = 's' AND su.settings !~ 'h'"]

    if userid:
        # filter own content in SFW mode
        if d.is_sfw_mode():
            clauses.append(" AND (su.rating <= %i)" % (rating,))
        else:
            clauses.append(" AND (su.userid = %i OR su.rating <= %i)" % (userid, rating))
        clauses.append(m.MACRO_IGNOREUSER % (userid, "su"))
        clauses.append(m.MACRO_BLOCKTAG_SUBMIT % (userid, userid))
        clauses.append(m.MACRO_FRIENDUSER_SUBMIT % (userid, userid, userid))
    else:
        # anonymous viewers: rating cap plus no friends-only submissions
        clauses.append(" AND su.rating <= %i" % (rating,))
        clauses.append(" AND su.settings !~ 'f'")

    if otherid:
        clauses.append(" AND fa.userid = %i" % otherid)

    # keyset pagination anchored on the (otherid, backid/nextid) favorite
    if backid:
        clauses.append(" AND fa.unixtime > "
                       "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 's'))"
                       % (otherid, backid))
    elif nextid:
        clauses.append(" AND fa.unixtime < "
                       "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 's'))"
                       % (otherid, nextid))
    return clauses
def select_submit_count(userid, rating, otherid=None, backid=None, nextid=None, config=None):
    """Return how many favorited submissions are visible to *userid*."""
    sql = ["SELECT COUNT(submitid) "]
    sql.extend(select_submit_query(userid, rating, otherid, backid, nextid, config))
    return d.execute("".join(sql))[0][0]
def select_submit(userid, rating, limit, otherid=None, backid=None, nextid=None, config=None):
    """Return favorited submissions as media-populated dicts, newest first."""
    clauses = ["SELECT su.submitid, su.title, su.rating, fa.unixtime, su.userid, pr.username, su.subtype"]
    clauses.extend(select_submit_query(userid, rating, otherid, backid, nextid, config))
    # Paging backwards sorts ascending; the result is re-reversed below.
    clauses.append(" ORDER BY fa.unixtime%s LIMIT %i" % ("" if backid else " DESC", limit))
    results = []
    for row in d.execute("".join(clauses)):
        results.append({
            "contype": 10,
            "submitid": row[0],
            "title": row[1],
            "rating": row[2],
            "unixtime": row[3],
            "userid": row[4],
            "username": row[5],
            "subtype": row[6],
        })
    media.populate_with_submission_media(results)
    if backid:
        results.reverse()
    return results
def select_char(userid, rating, limit, otherid=None, backid=None, nextid=None, config=None):
    """Return favorited characters for a favorites page.

    Args:
        userid: viewing user id (falsy for guests).
        rating: maximum content rating the viewer may see.
        limit: page size.
        otherid: owner of the favorites list being browsed.
        backid / nextid: charid pagination anchors (newer / older page).
        config: viewer config string; looked up when omitted.

    Returns:
        A list of dicts, newest favorite first, with faked media items.
    """
    if config is None:
        config = d.get_config(userid)
    statement = ["""
        SELECT ch.charid, ch.char_name, ch.rating, fa.unixtime, ch.userid, pr.username, ch.settings
        FROM favorite fa
        INNER JOIN character ch ON fa.targetid = ch.charid
        INNER JOIN profile pr ON ch.userid = pr.userid
        WHERE fa.type = 'f'
        AND ch.settings !~ 'h'
    """]
    if userid:
        # filter own content in SFW mode
        if d.is_sfw_mode():
            statement.append(" AND (ch.rating <= %i)" % (rating,))
        else:
            # A logged-in user may always see their own characters.
            statement.append(" AND (ch.userid = %i OR ch.rating <= %i)" % (userid, rating))
        statement.append(m.MACRO_FRIENDUSER_CHARACTER % (userid, userid, userid))
        statement.append(m.MACRO_IGNOREUSER % (userid, "ch"))
        statement.append(m.MACRO_BLOCKTAG_CHAR % (userid, userid))
    else:
        statement.append(" AND ch.rating <= %i AND ch.settings !~ 'f'" % (rating,))
    if otherid:
        statement.append(" AND fa.userid = %i" % (otherid,))
        if backid:
            statement.append(" AND fa.unixtime > "
                             "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 'f'))"
                             % (otherid, backid))
        elif nextid:
            statement.append(" AND fa.unixtime < "
                             "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 'f'))"
                             % (otherid, nextid))
    statement.append(" ORDER BY fa.unixtime%s LIMIT %i" % ("" if backid else " DESC", limit))
    # Function-scope import, presumably to avoid a circular module import --
    # TODO confirm before hoisting to module level.
    from weasyl import character
    query = [{
        "contype": 20,
        "charid": i[0],
        "title": i[1],
        "rating": i[2],
        "unixtime": i[3],
        "userid": i[4],
        "username": i[5],
        "sub_media": character.fake_media_items(i[0], i[4], d.get_sysname(i[5]), i[6]),
    } for i in d.execute("".join(statement))]
    # Backwards pages come back oldest-first; reverse so callers always
    # receive newest-first order.
    return query[::-1] if backid else query
def select_journal(userid, rating, limit, otherid=None, backid=None, nextid=None, config=None):
    """Return favorited journals for a favorites page.

    Args:
        userid: viewing user id (falsy for guests).
        rating: maximum content rating the viewer may see.
        limit: page size.
        otherid: owner of the favorites list being browsed.
        backid / nextid: journalid pagination anchors (newer / older page).
        config: viewer config string; looked up when omitted.

    Returns:
        A list of dicts, newest favorite first, with populated user media.
    """
    if config is None:
        config = d.get_config(userid)
    statement = ["""
        SELECT jo.journalid, jo.title, jo.rating, fa.unixtime, jo.userid, pr.username, pr.config
        FROM favorite fa
        INNER JOIN journal jo ON fa.targetid = jo.journalid
        INNER JOIN profile pr ON jo.userid = pr.userid
        WHERE fa.type = 'j'
        AND jo.settings !~ 'h'
    """]
    if userid:
        # filter own content in SFW mode
        if d.is_sfw_mode():
            statement.append(" AND (jo.rating <= %i)" % (rating,))
        else:
            # A logged-in user may always see their own journals.
            statement.append(" AND (jo.userid = %i OR jo.rating <= %i)" % (userid, rating))
        statement.append(m.MACRO_FRIENDUSER_JOURNAL % (userid, userid, userid))
        statement.append(m.MACRO_IGNOREUSER % (userid, "jo"))
        statement.append(m.MACRO_BLOCKTAG_JOURNAL % (userid, userid))
    else:
        statement.append(" AND jo.rating <= %i AND jo.settings !~ 'f'" % (rating,))
    if otherid:
        statement.append(" AND fa.userid = %i" % (otherid,))
        if backid:
            statement.append(" AND fa.unixtime > "
                             "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 'j'))"
                             % (otherid, backid))
        elif nextid:
            statement.append(" AND fa.unixtime < "
                             "(SELECT unixtime FROM favorite WHERE (userid, targetid, type) = (%i, %i, 'j'))"
                             % (otherid, nextid))
    statement.append(" ORDER BY fa.unixtime%s LIMIT %i" % ("" if backid else " DESC", limit))
    query = [{
        "contype": 30,
        "journalid": i[0],
        "title": i[1],
        "rating": i[2],
        "unixtime": i[3],
        "userid": i[4],
        "username": i[5],
    } for i in d.execute("".join(statement))]
    media.populate_with_user_media(query)
    # Backwards pages come back oldest-first; reverse so callers always
    # receive newest-first order.
    return query[::-1] if backid else query
def insert(userid, submitid=None, charid=None, journalid=None):
    """Create a favorite for userid on one content item.

    Exactly one of submitid/charid/journalid is expected to be set.
    Raises WeasylError on a missing target, self-favorite, friends-only
    content the user can't see, or an ignore relationship in either
    direction; on success, notifies the owner and eligible collectors.
    """
    # Pick the table and id column for the supplied content type.
    if submitid:
        content_table, id_field, target = "submission", "submitid", submitid
    elif charid:
        content_table, id_field, target = "character", "charid", charid
    else:
        content_table, id_field, target = "journal", "journalid", journalid
    # query = (owner userid, settings string) for the target, or falsy.
    query = d.execute("SELECT userid, settings FROM %s WHERE %s = %i",
                      [content_table, id_field, target], options="single")
    if not query:
        raise WeasylError("TargetRecordMissing")
    elif userid == query[0]:
        raise WeasylError("CannotSelfFavorite")
    elif "f" in query[1] and not frienduser.check(userid, query[0]):
        # 'f' in settings marks friends-only content.
        raise WeasylError("FriendsOnly")
    elif ignoreuser.check(userid, query[0]):
        raise WeasylError("YouIgnored")
    elif ignoreuser.check(query[0], userid):
        raise WeasylError("contentOwnerIgnoredYou")
    try:
        d.execute("INSERT INTO favorite VALUES (%i, %i, '%s', %i)", [
            userid, d.get_targetid(submitid, charid, journalid),
            "s" if submitid else "f" if charid else "j", d.get_time()
        ])
    except PostgresError:
        # Presumably a unique-constraint violation: the favorite already
        # exists -- TODO confirm no other PostgresError can reach here.
        raise WeasylError("favoriteRecordExists")
    # create a list of users to notify
    notified = set(collection.find_owners(submitid))
    # conditions under which "other" should be notified
    def can_notify(other):
        other_jsonb = d.get_profile_settings(other)
        allow_notify = other_jsonb.allow_collection_notifs
        not_ignored = not ignoreuser.check(other, userid)
        return allow_notify and not_ignored
    notified = set(filter(can_notify, notified))
    # always notify for own content
    notified.add(query[0])
    for other in notified:
        welcome.favorite_insert(userid, submitid=submitid, charid=charid, journalid=journalid, otherid=other)
def remove(userid, submitid=None, charid=None, journalid=None):
    """Delete a favorite and retract its notifications."""
    kind = "s" if submitid else "f" if charid else "j"
    targetid = d.get_targetid(submitid, charid, journalid)
    d.execute("DELETE FROM favorite WHERE (userid, targetid, type) = (%i, %i, '%s')",
              [userid, targetid, kind])
    welcome.favorite_remove(userid, submitid=submitid, charid=charid, journalid=journalid)
def check(userid, submitid=None, charid=None, journalid=None):
    """Return whether userid has favorited the given content item."""
    if not userid:
        # Guests never have favorites.
        return False
    kind = "s" if submitid else "f" if charid else "j"
    targetid = d.get_targetid(submitid, charid, journalid)
    return d.execute(
        """
        SELECT EXISTS (
            SELECT 0 FROM favorite
            WHERE (userid, targetid, type) = (%i, %i, '%s')
        )
        """, [userid, targetid, kind], options="bool")
def count(id, contenttype='submission'):
    """Fetch the number of favorites recorded against a piece of content.

    Args:
        id (int): ID of the content to get the count for.
        contenttype (str): One of 'submission', 'journal', or 'character'.

    Returns:
        An int with the number of favorites.

    Raises:
        ValueError: if contenttype is not one of the accepted values.
    """
    type_codes = {'submission': 's', 'journal': 'j', 'character': 'f'}
    querytype = type_codes.get(contenttype)
    if querytype is None:
        raise ValueError("type should be one of 'submission', 'journal', or 'character'")
    return d.engine.execute(
        "SELECT COUNT(*) FROM favorite WHERE targetid = %s AND type = %s",
        [id, querytype]).scalar()
| |
# -*- coding: utf-8 -*-
import simplejson as json
import hashlib
import hmac
import logging
from dropbox.client import DropboxClient
from dropbox.client import DropboxOAuth2Flow
from flask import request
from flask import session
from flask import redirect
from flask import url_for
from flask import abort
from flask import render_template
from flask import flash
from flask import jsonify
import stripe
from validate_email import validate_email
from app import analytics
from app import app
from app import csrf
from app import db
from app.decorators import login_required_ajax
from app.kindleboxer import kindlebox
from app.kindleboxer import upload_welcome_pdf
from app.models import User
from app.models import KindleName
# Root-logger setup: log at INFO to the default stream handler.
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
log.info("Starting app log...")
# App configuration is read once at import time; missing keys fall back to
# harmless empty defaults.
DEBUG = app.config.get('DEBUG', False)
DROPBOX_APP_KEY = app.config.get('DROPBOX_APP_KEY', '')
DROPBOX_APP_SECRET = app.config.get('DROPBOX_APP_SECRET', '')
# Amazon Kindle email domains accepted for delivery addresses.
KINDLE_DOMAINS = ('kindle.com', 'kindle.cn')
stripe.api_key = app.config.get('STRIPE_API_KEY', '')
@app.errorhandler(404)
def page_not_found(e):
    """404 handler: render the shared error page."""
    body = render_kindlebox_template('error.html')
    return body, 404
@app.errorhandler(400)
def error(e):
    """400 handler: render the shared error page."""
    body = render_kindlebox_template('error.html')
    return body, 400
@app.route('/start')
def splash():
    """Landing page; passes through the optional ?donate flag."""
    donate_flag = request.args.get('donate')
    return render_kindlebox_template('splash.html', donate=donate_flag)
@app.route('/about')
def about():
    """Static about page."""
    return render_kindlebox_template('about.html')
@app.route('/')
def home():
    """Main page.

    Logged-out visitors are sent to the splash page unless ?redirect is
    set; mobile clients get a dedicated template.
    """
    dropbox_id = session.get('dropbox_id')
    if dropbox_id is None and not request.args.get('redirect'):
        return redirect(url_for('splash', **request.args))
    if request.MOBILE:
        return render_kindlebox_template('mobile/index.html')
    # Fall back to a blank, unsaved user when no one's logged in.
    user = User.query.filter_by(dropbox_id=dropbox_id).first()
    if user is None:
        user = User(dropbox_id)
    return render_kindlebox_template(
        'index.html',
        added_bookmarklet=user.added_bookmarklet,
        active=user.active,
        emailer=user.emailer,
        app_url=app.config['APP_URL'],
        mobile=request.MOBILE,
        donate=request.args.get('donate'))
@app.route('/added-bookmarklet', methods=['POST'])
@login_required_ajax
def added_bookmarklet(user):
    """Record that the logged-in user has installed the bookmarklet."""
    user.set_added_bookmarklet()
    db.session.commit()
    return jsonify({'success': True})
def _logout():
    # Drop the Dropbox session key; safe to call when not logged in.
    session.pop('dropbox_id', None)
@app.route('/login')
def login():
    """Kick off the Dropbox OAuth2 authorization flow."""
    # (Removed a stale commented-out _logout() call; the auth-finish handler
    # establishes the session itself.)
    return redirect(get_auth_flow().start())
@app.route('/logout')
def logout():
    """Clear the session and go home (redirect flag skips the splash page)."""
    home_url = url_for('home', redirect=True)
    _logout()
    return redirect(home_url)
def free_kindle_name(kindle_name):
    """Prefix the domain part with 'free.' (e.g. a@kindle.com -> a@free.kindle.com)."""
    local_part, domain = kindle_name.split('@', 1)
    return local_part + '@free.' + domain
def validate_kindle_name(kindle_name):
    """Normalize and validate a Kindle delivery address.

    Returns the free.kindle.* form of the address, or None when the address
    is not a valid email on an Amazon Kindle domain.
    """
    # Check for duplicates? Might end up blocking real users...
    kindle_name = kindle_name.lower()
    if kindle_name.endswith('@free.kindle.com'):
        kindle_name = kindle_name[:-len('@free.kindle.com')] + '@kindle.com'
    # Require the Kindle domain to be the entire domain part: a bare suffix
    # match would also accept lookalikes such as user@notkindle.com.
    if (any(kindle_name.endswith('@' + kindle_domain) for kindle_domain in
            KINDLE_DOMAINS) and
            validate_email(kindle_name)):
        return free_kindle_name(kindle_name)
    return None
@app.route('/activate', methods=['POST'])
def activate():
    """Activate kindlebox for the logged-in user.

    Expects a JSON-encoded list of Kindle addresses in the 'kindle_names'
    form field; valid names are stored, the account is marked active, and
    the welcome-PDF upload is queued.  Responds 400 on validation failure.
    """
    dropbox_id = session.get('dropbox_id')
    if dropbox_id is None:
        log.warn("Error activating, user with dropbox id {0} wasn't logged "
                 "in".format(dropbox_id))
        abort(400)
    user = User.query.filter_by(dropbox_id=dropbox_id).first()
    if user is None:
        log.warn("Error activating, user with dropbox id {0} doesn't "
                 "exist".format(dropbox_id))
        abort(400)
    # Re-activation of an already-active account skips straight to tracking.
    if not user.active:
        if 'kindle_names' not in request.form:
            log.warn("Error activating, user with dropbox id {0} submitted "
                     "no kindle names".format(dropbox_id))
            abort(400)
        # Add all the Kindle usernames.
        form_kindle_names = request.form.get('kindle_names')
        try:
            kindle_names = json.loads(form_kindle_names)
        except json.JSONDecodeError:
            log.warn("Error activating, user with dropbox id {dropbox_id} "
                     "submitted invalid kindle names "
                     "{kindle_names}".format(dropbox_id=dropbox_id,
                                             kindle_names=form_kindle_names))
            abort(400)
        if type(kindle_names) != list:
            log.warn("Error activating, user with dropbox id {dropbox_id} did "
                     "not submit list of kindle names "
                     "{kindle_names}".format(dropbox_id=dropbox_id,
                                             kindle_names=kindle_names))
            abort(400)
        log.info("User with dropbox id {dropbox_id} submitting list of kindle "
                 "names {kindle_names}".format(dropbox_id=dropbox_id,
                                               kindle_names=kindle_names))
        for kindle_name in kindle_names:
            kindle_name = validate_kindle_name(kindle_name)
            if kindle_name is None:
                # Invalid addresses are silently skipped.
                continue
            kindle_name_row = KindleName(user.id, kindle_name)
            db.session.add(kindle_name_row)
            db.session.flush()
        # TODO: Return an error to the client
        if user.kindle_names.first() is None:
            log.warn("Error activating, user with dropbox id {0} submitted "
                     "invalid kindle names".format(dropbox_id))
            abort(400)
        user.set_active(True)
        db.session.commit()
        try:
            # Queued as a background task; activation succeeds even if
            # queueing fails.
            upload_welcome_pdf.delay(user.dropbox_id)
        except:
            log.error("Unable to add upload welcome PDF task for dropbox id "
                      "{0}".format(user.dropbox_id), exc_info=True)
            pass
    analytics.track(str(user.id), 'Activated account')
    return redirect(url_for('home'))
@app.route('/deactivate', methods=['POST'])
@login_required_ajax
def deactivate(user):
    """Turn off delivery for the logged-in user and forget their Kindle names."""
    if user.active:
        user.kindle_names.delete()
        user.set_active(False)
        db.session.commit()
        analytics.track(str(user.id), 'Deactivated account')
    return jsonify({'success': True})
@app.route('/dropbox-auth-finish')
def dropbox_auth_finish():
    """
    Finish Dropbox auth. If successful, user is now logged in. If the dropbox
    ID is new, register a new user.
    """
    try:
        access_token, dropbox_id, url_state = (get_auth_flow().
                                               finish(request.args))
    except DropboxOAuth2Flow.BadRequestException:
        abort(400)
    except DropboxOAuth2Flow.BadStateException:
        abort(400)
    except DropboxOAuth2Flow.CsrfException:
        abort(403)
    except DropboxOAuth2Flow.NotApprovedException:
        flash('Not approved? Why not, bro?')
        return redirect(url_for('home'))
    except DropboxOAuth2Flow.ProviderException as e:
        # Fix: the old '"Auth error" + e' concatenated str with an exception
        # object and raised TypeError inside the handler; log lazily instead.
        app.logger.exception("Auth error: %s", e)
        abort(403)
    if dropbox_id is None:
        return redirect(url_for('home'))
    user = User.query.filter_by(dropbox_id=dropbox_id).first()
    new_user = user is None
    if new_user:
        user = User(dropbox_id)
        user.set_new_emailer()
        db.session.add(user)
    user.access_token = access_token
    (user.name, user.email) = get_dropbox_name_email(access_token)
    db.session.commit()
    if new_user:
        analytics.track(str(user.id), 'Registered')
    analytics.track(str(user.id), 'Logged in')
    session['dropbox_id'] = user.dropbox_id
    return redirect(url_for('home'))
@app.route('/dropbox-unlink')
def dropbox_unlink():
    """Disconnect the logged-in user's Dropbox account and log them out."""
    dropbox_id = session.get('dropbox_id')
    if dropbox_id is None:
        abort(403)
    user = User.query.filter_by(dropbox_id=dropbox_id).first()
    # Guard: a stale session can name a dropbox id with no user row; the old
    # code then raised AttributeError (HTTP 500) on setattr(None, ...).
    if user is None:
        abort(403)
    for attribute in ['active', 'access_token', 'cursor']:
        setattr(user, attribute, None)
    db.session.commit()
    _logout()
    return redirect(url_for('home'))
@csrf.exempt
@app.route('/dropbox-webhook', methods=['GET', 'POST'])
def verify():
    """Dropbox webhook endpoint.

    GET echoes back the verification challenge; POST validates the request
    signature and queues a sync task per affected Dropbox user.
    """
    if request.method != 'POST':
        return request.args.get('challenge', '')
    signature = request.headers.get('X-Dropbox-Signature')
    expected = hmac.new(DROPBOX_APP_SECRET, request.data,
                        hashlib.sha256).hexdigest()
    # Constant-time comparison: a plain != on an attacker-supplied signature
    # leaks matching-prefix length via timing.
    if not (signature and hmac.compare_digest(signature, expected)):
        abort(403)
    for dropbox_id in json.loads(request.data)['delta']['users']:
        kindlebox.delay(dropbox_id)
    return ''
@app.route('/donate', methods=['POST'])
def donate():
    """
    Handle donation request with Stripe. POST request must include stripe
    token, an amount, and optionally an email address.
    """
    token = request.form.get('stripeToken')
    if not token:
        return jsonify({
            'success': False,
            'message': "Need Stripe token",
        })
    amount = request.form.get('amount')
    try:
        # Stripe amounts are denominated in cents.
        amount = int(100 * float(amount))
    except (TypeError, ValueError):
        # float() raises TypeError for a missing field and ValueError for
        # non-numeric text; the old bare except hid unrelated bugs.
        return jsonify({
            'success': False,
            'message': "Invalid donation amount.",
        })
    email_address = request.form.get('emailAddress')
    try:
        stripe.Charge.create(
            amount=amount,
            currency="usd",
            source=token,
            description=email_address,
            receipt_email=email_address,
        )
        return jsonify({
            'success': True,
            'message': "",
        })
    except stripe.CardError:
        return jsonify({
            'success': False,
            'message': "Your card has been declined.",
        })
def get_auth_flow():
    """Build the Dropbox OAuth2 flow, forcing https outside of debug mode."""
    scheme_kwargs = {} if DEBUG else {'_scheme': "https"}
    redirect_uri = url_for('dropbox_auth_finish', _external=True,
                           **scheme_kwargs)
    return DropboxOAuth2Flow(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, redirect_uri,
                             session, 'dropbox-auth-csrf-token')
def get_dropbox_name_email(access_token):
    """Fetch the account's first name and email address from Dropbox."""
    account_info = DropboxClient(access_token).account_info()
    full_name = account_info.get('display_name', '')
    first_name = full_name.split(' ')[0]
    return (first_name, account_info.get('email'))
def get_logged_in_info():
    """Describe the current session's user for template rendering."""
    info = {
        'logged_in': False,
        'name': '',
    }
    dropbox_id = session.get('dropbox_id')
    if dropbox_id is None:
        return info
    user = User.query.filter_by(dropbox_id=dropbox_id).first()
    if user is not None:
        info['logged_in'] = True
        info['user_id'] = user.id
        info['name'] = user.name
    return info
def render_kindlebox_template(template, **args):
    """Render a template with app-wide context plus the login state."""
    args.update({
        'dev': app.config.get('DEV', False),
        'STRIPE_PUBLIC_KEY': app.config.get('STRIPE_PUBLIC_KEY', ''),
        'show_donations_modal': args.get('donate'),
    })
    args.update(get_logged_in_info())
    return render_template(template, **args)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import six
from six.moves import range
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common import timeutils
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import stack_resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.engine import support
from heat.engine import template
# Skeleton for the generated nested-stack template; _assemble_nested deep-copies
# it and fills in "resources" with one definition per group member.
template_template = {
    "heat_template_version": "2015-04-30",
    "resources": {}
}
class ResourceGroup(stack_resource.StackResource):
"""
A resource that creates one or more identically configured nested
resources.
In addition to the `refs` attribute, this resource implements synthetic
attributes that mirror those of the resources in the group. When
getting an attribute from this resource, however, a list of attribute
values for each resource in the group is returned. To get attribute values
for a single resource in the group, synthetic attributes of the form
`resource.{resource index}.{attribute name}` can be used. The resource ID
of a particular resource in the group can be obtained via the synthetic
attribute `resource.{resource index}`.
While each resource in the group will be identically configured, this
resource does allow for some index-based customization of the properties
of the resources in the group. For example::
resources:
my_indexed_group:
type: OS::Heat::ResourceGroup
properties:
count: 3
resource_def:
type: OS::Nova::Server
properties:
# create a unique name for each server
# using its index in the group
name: my_server_%index%
image: CentOS 6.5
flavor: 4GB Performance
would result in a group of three servers having the same image and flavor,
but names of `my_server_0`, `my_server_1`, and `my_server_2`. The variable
used for substitution can be customized by using the `index_var` property.
"""
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
COUNT, INDEX_VAR, RESOURCE_DEF, REMOVAL_POLICIES
) = (
'count', 'index_var', 'resource_def', 'removal_policies'
)
_RESOURCE_DEF_KEYS = (
RESOURCE_DEF_TYPE, RESOURCE_DEF_PROPERTIES, RESOURCE_DEF_METADATA,
) = (
'type', 'properties', 'metadata',
)
_REMOVAL_POLICIES_KEYS = (
REMOVAL_RSRC_LIST,
) = (
'resource_list',
)
_ROLLING_UPDATES_SCHEMA_KEYS = (
MIN_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME,
) = (
'min_in_service', 'max_batch_size', 'pause_time',
)
_UPDATE_POLICY_SCHEMA_KEYS = (ROLLING_UPDATE,) = ('rolling_update',)
ATTRIBUTES = (
REFS, ATTR_ATTRIBUTES,
) = (
'refs', 'attributes',
)
properties_schema = {
COUNT: properties.Schema(
properties.Schema.INTEGER,
_('The number of resources to create.'),
default=1,
constraints=[
constraints.Range(min=0),
],
update_allowed=True
),
INDEX_VAR: properties.Schema(
properties.Schema.STRING,
_('A variable that this resource will use to replace with the '
'current index of a given resource in the group. Can be used, '
'for example, to customize the name property of grouped '
'servers in order to differentiate them when listed with '
'nova client.'),
default="%index%",
constraints=[
constraints.Length(min=3)
],
support_status=support.SupportStatus(version='2014.2')
),
RESOURCE_DEF: properties.Schema(
properties.Schema.MAP,
_('Resource definition for the resources in the group. The value '
'of this property is the definition of a resource just as if '
'it had been declared in the template itself.'),
schema={
RESOURCE_DEF_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the resources in the group'),
required=True
),
RESOURCE_DEF_PROPERTIES: properties.Schema(
properties.Schema.MAP,
_('Property values for the resources in the group')
),
RESOURCE_DEF_METADATA: properties.Schema(
properties.Schema.MAP,
_('Supplied metadata for the resources in the group'),
support_status=support.SupportStatus(version='5.0.0')
),
},
required=True,
update_allowed=True
),
REMOVAL_POLICIES: properties.Schema(
properties.Schema.LIST,
_('Policies for removal of resources on update'),
schema=properties.Schema(
properties.Schema.MAP,
_('Policy to be processed when doing an update which '
'requires removal of specific resources.'),
schema={
REMOVAL_RSRC_LIST: properties.Schema(
properties.Schema.LIST,
_("List of resources to be removed "
"when doing an update which requires removal of "
"specific resources. "
"The resource may be specified several ways: "
"(1) The resource name, as in the nested stack, "
"(2) The resource reference returned from "
"get_resource in a template, as available via "
"the 'refs' attribute "
"Note this is destructive on update when specified; "
"even if the count is not being reduced, and once "
"a resource name is removed, it's name is never "
"reused in subsequent updates"
),
default=[]
),
},
),
update_allowed=True,
default=[],
support_status=support.SupportStatus(version='2015.1')
),
}
attributes_schema = {
REFS: attributes.Schema(
_("A list of resource IDs for the resources in the group"),
type=attributes.Schema.LIST
),
ATTR_ATTRIBUTES: attributes.Schema(
_("A map of resource names to the specified attribute of each "
"individual resource. "
"Requires heat_template_version: 2014-10-16."),
support_status=support.SupportStatus(version='2014.2'),
type=attributes.Schema.MAP
),
}
rolling_update_schema = {
MIN_IN_SERVICE: properties.Schema(
properties.Schema.INTEGER,
_('The minimum number of resources in service while '
'rolling updates are being executed.'),
constraints=[constraints.Range(min=0)],
default=0),
MAX_BATCH_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The maximum number of resources to replace at once.'),
constraints=[constraints.Range(min=0)],
default=1),
PAUSE_TIME: properties.Schema(
properties.Schema.NUMBER,
_('The number of seconds to wait between batches of '
'updates.'),
constraints=[constraints.Range(min=0)],
default=0),
}
update_policy_schema = {
ROLLING_UPDATE: properties.Schema(
properties.Schema.MAP,
schema=rolling_update_schema,
support_status=support.SupportStatus(version='5.0.0')
)
}
def __init__(self, name, json_snippet, stack):
super(ResourceGroup, self).__init__(name, json_snippet, stack)
self.update_policy = self.t.update_policy(self.update_policy_schema,
self.context)
def get_size(self):
return self.properties.get(self.COUNT)
def validate(self):
"""
Validation for update_policy
"""
super(ResourceGroup, self).validate()
if self.update_policy is not None:
self.update_policy.validate()
policy_name = self.ROLLING_UPDATE
if (policy_name in self.update_policy and
self.update_policy[policy_name] is not None):
pause_time = self.update_policy[policy_name][self.PAUSE_TIME]
if pause_time > 3600:
msg = _('Maximum %(arg1)s allowed is 1hr(3600s),'
' provided %(arg2)s seconds.') % dict(
arg1=self.PAUSE_TIME,
arg2=pause_time)
raise ValueError(msg)
def validate_nested_stack(self):
# Only validate the resource definition (which may be a
# nested template) if count is non-zero, to enable folks
# to disable features via a zero count if they wish
if not self.get_size():
return
test_tmpl = self._assemble_nested(["0"], include_all=True)
val_templ = template.Template(test_tmpl)
res_def = val_templ.resource_definitions(self.stack)["0"]
# make sure we can resolve the nested resource type
try:
self.stack.env.get_class(res_def.resource_type)
except exception.TemplateNotFound:
# its a template resource
pass
try:
name = "%s-%s" % (self.stack.name, self.name)
nested_stack = self._parse_nested_stack(
name,
test_tmpl,
self.child_params())
nested_stack.strict_validate = False
nested_stack.validate()
except Exception as ex:
msg = _("Failed to validate: %s") % six.text_type(ex)
raise exception.StackValidationFailed(message=msg)
def _name_blacklist(self):
"""Resolve the remove_policies to names for removal."""
nested = self.nested()
# To avoid reusing names after removal, we store a comma-separated
# blacklist in the resource data
db_rsrc_names = self.data().get('name_blacklist')
if db_rsrc_names:
current_blacklist = db_rsrc_names.split(',')
else:
current_blacklist = []
# Now we iterate over the removal policies, and update the blacklist
# with any additional names
rsrc_names = set(current_blacklist)
if nested:
for r in self.properties[self.REMOVAL_POLICIES]:
if self.REMOVAL_RSRC_LIST in r:
# Tolerate string or int list values
for n in r[self.REMOVAL_RSRC_LIST]:
str_n = six.text_type(n)
if str_n in nested:
rsrc_names.add(str_n)
continue
rsrc = nested.resource_by_refid(str_n)
if rsrc:
rsrc_names.add(rsrc.name)
# If the blacklist has changed, update the resource data
if rsrc_names != set(current_blacklist):
self.data_set('name_blacklist', ','.join(rsrc_names))
return rsrc_names
def _resource_names(self, size=None):
name_blacklist = self._name_blacklist()
if size is None:
size = self.get_size()
def is_blacklisted(name):
return name in name_blacklist
candidates = six.moves.map(six.text_type, itertools.count())
return itertools.islice(six.moves.filterfalse(is_blacklisted,
candidates),
size)
def _get_resources(self):
"""Get templates for resources."""
return [(resource.name, resource.t.render_hot())
for resource in grouputils.get_members(self)]
def _count_black_listed(self):
"""Get black list count"""
return len(self._name_blacklist()
& set(grouputils.get_member_names(self)))
def handle_create(self):
names = self._resource_names()
self.create_with_template(self._assemble_nested(names),
{},
self.stack.timeout_mins)
def _run_to_completion(self, template, timeout):
updater = self.update_with_template(template, {},
timeout)
while not super(ResourceGroup,
self).check_update_complete(updater):
yield
def check_update_complete(self, checkers):
for checker in checkers:
if not checker.started():
checker.start()
if not checker.step():
return False
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if tmpl_diff:
# parse update policy
if rsrc_defn.UPDATE_POLICY in tmpl_diff:
up = json_snippet.update_policy(self.update_policy_schema,
self.context)
self.update_policy = up
checkers = []
self.properties = json_snippet.properties(self.properties_schema,
self.context)
if prop_diff and self.RESOURCE_DEF in prop_diff:
updaters = self._try_rolling_update()
if updaters:
checkers.extend(updaters)
resizer = scheduler.TaskRunner(
self._run_to_completion,
self._assemble_nested_for_size(self.get_size()),
self.stack.timeout_mins)
checkers.append(resizer)
checkers[0].start()
return checkers
def _assemble_nested_for_size(self, new_capacity):
new_names = self._resource_names(new_capacity)
return self._assemble_nested(new_names)
def FnGetAtt(self, key, *path):
if key.startswith("resource."):
return grouputils.get_nested_attrs(self, key, False, *path)
names = self._resource_names()
if key == self.REFS:
vals = [grouputils.get_rsrc_id(self, key, False, n) for n in names]
return attributes.select_from_attribute(vals, path)
if key == self.ATTR_ATTRIBUTES:
if not path:
raise exception.InvalidTemplateAttribute(
resource=self.name, key=key)
return dict((n, grouputils.get_rsrc_attr(
self, key, False, n, *path)) for n in names)
path = [key] + list(path)
return [grouputils.get_rsrc_attr(self, key, False, n, *path)
for n in names]
def _build_resource_definition(self, include_all=False):
res_def = self.properties[self.RESOURCE_DEF]
if res_def[self.RESOURCE_DEF_PROPERTIES] is None:
res_def[self.RESOURCE_DEF_PROPERTIES] = {}
if res_def[self.RESOURCE_DEF_METADATA] is None:
del res_def[self.RESOURCE_DEF_METADATA]
if not include_all:
resource_def_props = res_def[self.RESOURCE_DEF_PROPERTIES]
clean = dict((k, v) for k, v in resource_def_props.items()
if v is not None)
res_def[self.RESOURCE_DEF_PROPERTIES] = clean
return res_def
def _handle_repl_val(self, res_name, val):
repl_var = self.properties[self.INDEX_VAR]
recurse = lambda x: self._handle_repl_val(res_name, x)
if isinstance(val, six.string_types):
return val.replace(repl_var, res_name)
elif isinstance(val, collections.Mapping):
return dict(zip(val, map(recurse, six.itervalues(val))))
elif isinstance(val, collections.Sequence):
return map(recurse, val)
return val
def _do_prop_replace(self, res_name, res_def_template):
res_def = copy.deepcopy(res_def_template)
props = res_def[self.RESOURCE_DEF_PROPERTIES]
if props:
props = self._handle_repl_val(res_name, props)
res_def[self.RESOURCE_DEF_PROPERTIES] = props
return res_def
def _assemble_nested(self, names, include_all=False):
res_def = self._build_resource_definition(include_all)
resources = dict((k, self._do_prop_replace(k, res_def))
for k in names)
child_template = copy.deepcopy(template_template)
child_template['resources'] = resources
return child_template
def _assemble_for_rolling_update(self, names, name_blacklist,
include_all=False):
old_resources = self._get_resources()
res_def = self._build_resource_definition(include_all)
child_template = copy.deepcopy(template_template)
resources = dict((k, v)
for k, v in old_resources if k not in name_blacklist)
resources.update(dict((k, self._do_prop_replace(k, res_def))
for k in names))
child_template['resources'] = resources
return child_template
def _try_rolling_update(self):
if self.update_policy[self.ROLLING_UPDATE]:
policy = self.update_policy[self.ROLLING_UPDATE]
return self._replace(policy[self.MIN_IN_SERVICE],
policy[self.MAX_BATCH_SIZE],
policy[self.PAUSE_TIME])
def _update_timeout(self, efft_capacity, efft_bat_sz, pause_sec):
batch_cnt = (efft_capacity + efft_bat_sz - 1) // efft_bat_sz
if pause_sec * (batch_cnt - 1) >= self.stack.timeout_secs():
msg = _('The current %s will result in stack update '
'timeout.') % rsrc_defn.UPDATE_POLICY
raise ValueError(msg)
update_timeout = self.stack.timeout_secs() - (
pause_sec * (batch_cnt - 1))
return update_timeout
def _replace(self, min_in_service, batch_size, pause_sec):
def pause_between_batch(pause_sec):
duration = timeutils.Duration(pause_sec)
while not duration.expired():
yield
def get_batched_names(names, batch_size):
for i in range(0, len(names), batch_size):
yield names[0:i + batch_size]
# blacklisted names exiting and new
name_blacklist = self._name_blacklist()
# blacklist count existing
num_blacklist = self._count_black_listed()
# current capacity not including existing blacklisted
curr_cap = len(self.nested()) - num_blacklist if self.nested() else 0
# final capacity expected after replace
capacity = min(curr_cap, self.get_size())
efft_bat_sz = min(batch_size, capacity)
efft_min_sz = min(min_in_service, capacity)
# effective capacity taking into account min_in_service and batch_size
efft_capacity = max(capacity - efft_bat_sz, efft_min_sz) + efft_bat_sz
# Reset effective capacity, if there are enough resources
if efft_capacity <= curr_cap:
efft_capacity = capacity
if efft_capacity > 0:
update_timeout = self._update_timeout(efft_capacity,
efft_bat_sz, pause_sec)
checkers = []
remainder = efft_capacity
# filtered names for effective capacity
new_names = self._resource_names(efft_capacity)
# batched names in reverse order, we've to add new
# resources if required before modifing existing
batched_names = get_batched_names(list(new_names)[::-1], efft_bat_sz)
while remainder > 0:
checkers.append(scheduler.TaskRunner(
self._run_to_completion,
self._assemble_for_rolling_update(next(batched_names),
name_blacklist),
update_timeout))
remainder -= efft_bat_sz
if remainder > 0 and pause_sec > 0:
checkers.append(scheduler.TaskRunner(pause_between_batch,
pause_sec))
return checkers
def child_template(self):
    """Return the nested stack template assembled for the current members."""
    return self._assemble_nested(self._resource_names())
def child_params(self):
    """The nested stack of a resource group takes no parameters."""
    return dict()
def handle_adopt(self, resource_data):
    """Adopt an existing nested stack described by resource_data."""
    member_names = self._resource_names()
    if not member_names:
        # nothing to adopt for an empty group
        return None
    nested_tmpl = self._assemble_nested(member_names)
    return self.create_with_template(nested_tmpl, {},
                                     adopt_data=resource_data)
def resource_mapping():
    """Map the Heat resource type name to its implementation class."""
    return {'OS::Heat::ResourceGroup': ResourceGroup}
# ---- (removed stray '|' file-concatenation artifact; it is a syntax error in Python) ----
import unittest
import socket
import struct
from scapy.layers.inet import IP, ICMP, TCP, UDP
from scapy.layers.ipsec import SecurityAssociation, ESP
from scapy.layers.l2 import Ether
from scapy.packet import raw, Raw
from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, IPv6ExtHdrHopByHop, \
IPv6ExtHdrFragment, IPv6ExtHdrDestOpt
from framework import VppTestCase, VppTestRunner
from util import ppp, reassemble4, fragment_rfc791, fragment_rfc8200
from vpp_papi import VppEnum
from vpp_ipsec import VppIpsecSpd, VppIpsecSpdEntry, \
VppIpsecSpdItfBinding
from ipaddress import ip_address
from re import search
from os import popen
class IPsecIPv4Params:
    """Parameter bundle for the IPv4 flavour of the IPsec tests."""
    # address-family constants
    addr_type = socket.AF_INET
    addr_any = "0.0.0.0"
    addr_bcast = "255.255.255.255"
    addr_len = 32
    is_ipv6 = 0

    def __init__(self):
        """Default SA ids, SPIs, algorithms and keys for IPv4 tests."""
        # remote host reachable through the tunnel (inner v4 and v6)
        self.remote_tun_if_host = '1.1.1.1'
        self.remote_tun_if_host6 = '1111::1'
        # tunnel-mode SA ids and SPIs (scapy side / VPP side)
        self.scapy_tun_sa_id = 100
        self.scapy_tun_spi = 1000
        self.vpp_tun_sa_id = 200
        self.vpp_tun_spi = 2000
        # transport-mode SA ids and SPIs
        self.scapy_tra_sa_id = 300
        self.scapy_tra_spi = 3000
        self.vpp_tra_sa_id = 400
        self.vpp_tra_spi = 4000
        # hop-limit / flow-label values for the outer and inner headers
        self.outer_hop_limit = 64
        self.inner_hop_limit = 255
        self.outer_flow_label = 0
        self.inner_flow_label = 0x12345
        # integrity algorithm: VPP enum id plus matching scapy name/key
        self.auth_algo_vpp_id = (VppEnum.vl_api_ipsec_integ_alg_t.
                                 IPSEC_API_INTEG_ALG_SHA1_96)
        self.auth_algo = 'HMAC-SHA1-96'  # scapy name
        self.auth_key = b'C91KUR9GYMm5GfkEvNjX'
        # cipher: VPP enum id plus matching scapy name/key
        self.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
                                  IPSEC_API_CRYPTO_ALG_AES_CBC_128)
        self.crypt_algo = 'AES-CBC'  # scapy name
        self.crypt_key = b'JPjyOWBeVEQiMe7h'
        self.salt = 0
        # ipsec_sad_flags bitmask (ESN, anti-replay, ...)
        self.flags = 0
        # optional UDP header for NAT-T encapsulation
        self.nat_header = None
        self.tun_flags = (VppEnum.vl_api_tunnel_encap_decap_flags_t.
                          TUNNEL_API_ENCAP_DECAP_FLAG_NONE)
        self.dscp = 0
        # when True, error counters are also read from the async crypto nodes
        self.async_mode = False
class IPsecIPv6Params:
    """Parameter bundle for the IPv6 flavour of the IPsec tests."""
    # address-family constants
    addr_type = socket.AF_INET6
    addr_any = "0::0"
    addr_bcast = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"
    addr_len = 128
    is_ipv6 = 1

    def __init__(self):
        """Default SA ids, SPIs, algorithms and keys for IPv6 tests."""
        # remote host reachable through the tunnel (inner v6 and v4)
        self.remote_tun_if_host = '1111:1111:1111:1111:1111:1111:1111:1111'
        self.remote_tun_if_host4 = '1.1.1.1'
        # tunnel-mode SA ids and SPIs (scapy side / VPP side)
        self.scapy_tun_sa_id = 500
        self.scapy_tun_spi = 3001
        self.vpp_tun_sa_id = 600
        self.vpp_tun_spi = 3000
        # transport-mode SA ids and SPIs
        self.scapy_tra_sa_id = 700
        self.scapy_tra_spi = 4001
        self.vpp_tra_sa_id = 800
        self.vpp_tra_spi = 4000
        # hop-limit / flow-label values for the outer and inner headers
        self.outer_hop_limit = 64
        self.inner_hop_limit = 255
        self.outer_flow_label = 0
        self.inner_flow_label = 0x12345
        # integrity algorithm: VPP enum id plus matching scapy name/key
        self.auth_algo_vpp_id = (VppEnum.vl_api_ipsec_integ_alg_t.
                                 IPSEC_API_INTEG_ALG_SHA1_96)
        self.auth_algo = 'HMAC-SHA1-96'  # scapy name
        self.auth_key = b'C91KUR9GYMm5GfkEvNjX'
        # cipher: VPP enum id plus matching scapy name/key
        self.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
                                  IPSEC_API_CRYPTO_ALG_AES_CBC_128)
        self.crypt_algo = 'AES-CBC'  # scapy name
        self.crypt_key = b'JPjyOWBeVEQiMe7h'
        self.salt = 0
        # ipsec_sad_flags bitmask (ESN, anti-replay, ...)
        self.flags = 0
        # optional UDP header for NAT-T encapsulation
        self.nat_header = None
        self.tun_flags = (VppEnum.vl_api_tunnel_encap_decap_flags_t.
                          TUNNEL_API_ENCAP_DECAP_FLAG_NONE)
        self.dscp = 0
        # when True, error counters are also read from the async crypto nodes
        self.async_mode = False
def mk_scapy_crypt_key(p):
    """Return the key in the form scapy expects.

    For salted counter-mode ciphers the 32-bit salt is appended to the
    key in network byte order; otherwise the key is used as-is.
    """
    salted_algos = ("AES-GCM", "AES-CTR")
    if p.crypt_algo not in salted_algos:
        return p.crypt_key
    return p.crypt_key + struct.pack("!I", p.salt)
def config_tun_params(p, encryption_type, tun_if):
    """Create the pair of scapy SecurityAssociations for tunnel mode.

    Attaches p.scapy_tun_sa (uses VPP's SPI, outer header remote->local)
    and p.vpp_tun_sa (uses scapy's SPI, outer header local->remote) to p.
    """
    ip_class_by_addr_type = {socket.AF_INET: IP, socket.AF_INET6: IPv6}
    use_esn = bool(p.flags & (VppEnum.vl_api_ipsec_sad_flags_t.
                              IPSEC_API_SAD_FLAG_USE_ESN))
    p.tun_dst = tun_if.remote_addr[p.addr_type]
    p.tun_src = tun_if.local_addr[p.addr_type]
    outer_ip = ip_class_by_addr_type[p.addr_type]
    # parameters shared by both directions
    common = dict(crypt_algo=p.crypt_algo,
                  crypt_key=mk_scapy_crypt_key(p),
                  auth_algo=p.auth_algo,
                  auth_key=p.auth_key,
                  nat_t_header=p.nat_header,
                  esn_en=use_esn)
    p.scapy_tun_sa = SecurityAssociation(
        encryption_type, spi=p.vpp_tun_spi,
        tunnel_header=outer_ip(src=p.tun_dst, dst=p.tun_src),
        **common)
    p.vpp_tun_sa = SecurityAssociation(
        encryption_type, spi=p.scapy_tun_spi,
        tunnel_header=outer_ip(src=p.tun_src, dst=p.tun_dst),
        **common)
def config_tra_params(p, encryption_type):
    """Create the pair of scapy SecurityAssociations for transport mode.

    p.scapy_tra_sa carries VPP's SPI, p.vpp_tra_sa carries scapy's SPI;
    both share the same keys, algorithms and ESN setting.
    """
    use_esn = bool(p.flags & (VppEnum.vl_api_ipsec_sad_flags_t.
                              IPSEC_API_SAD_FLAG_USE_ESN))
    common = dict(crypt_algo=p.crypt_algo,
                  crypt_key=mk_scapy_crypt_key(p),
                  auth_algo=p.auth_algo,
                  auth_key=p.auth_key,
                  nat_t_header=p.nat_header,
                  esn_en=use_esn)
    p.scapy_tra_sa = SecurityAssociation(encryption_type,
                                         spi=p.vpp_tra_spi,
                                         **common)
    p.vpp_tra_sa = SecurityAssociation(encryption_type,
                                       spi=p.scapy_tra_spi,
                                       **common)
class TemplateIpsec(VppTestCase):
    """Common fixture for the IPsec tests.

    TRANSPORT MODE::

         ------   encrypt  ---
        |tra_if| <-------> |VPP|
         ------   decrypt  ---

    TUNNEL MODE::

         ------   encrypt  ---   plain   ---
        |tun_if| <-------  |VPP| <------ |pg1|
         ------            ---           ---

         ------   decrypt  ---   plain   ---
        |tun_if| ------->  |VPP| ------> |pg1|
         ------            ---           ---
    """
    # SPD ids for the tunnel- and transport-mode policies
    tun_spd_id = 1
    tra_spd_id = 2

    def ipsec_select_backend(self):
        """ empty method to be overloaded when necessary """
        pass

    @classmethod
    def setUpClass(cls):
        super(TemplateIpsec, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TemplateIpsec, cls).tearDownClass()

    def setup_params(self):
        # subclasses may have pre-set the params before setUp runs
        if not hasattr(self, 'ipv4_params'):
            self.ipv4_params = IPsecIPv4Params()
        if not hasattr(self, 'ipv6_params'):
            self.ipv6_params = IPsecIPv6Params()
        # keyed by address family (socket.AF_INET / AF_INET6)
        self.params = {self.ipv4_params.addr_type: self.ipv4_params,
                       self.ipv6_params.addr_type: self.ipv6_params}

    def config_interfaces(self):
        # three pg interfaces, dual-stacked and resolved
        self.create_pg_interfaces(range(3))
        self.interfaces = list(self.pg_interfaces)
        for i in self.interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()

    def setUp(self):
        super(TemplateIpsec, self).setUp()
        self.setup_params()
        self.vpp_esp_protocol = (VppEnum.vl_api_ipsec_proto_t.
                                 IPSEC_API_PROTO_ESP)
        self.vpp_ah_protocol = (VppEnum.vl_api_ipsec_proto_t.
                                IPSEC_API_PROTO_AH)
        # interfaces must exist before a backend is selected
        self.config_interfaces()
        self.ipsec_select_backend()

    def unconfig_interfaces(self):
        for i in self.interfaces:
            i.admin_down()
            i.unconfig_ip4()
            i.unconfig_ip6()

    def tearDown(self):
        super(TemplateIpsec, self).tearDown()
        self.unconfig_interfaces()

    def show_commands_at_teardown(self):
        self.logger.info(self.vapi.cli("show hardware"))

    def gen_encrypt_pkts(self, p, sa, sw_intf, src, dst, count=1,
                         payload_size=54):
        # encrypted ICMP echo packets, IPv4 inner
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                sa.encrypt(IP(src=src, dst=dst) /
                           ICMP() / Raw(b'X' * payload_size))
                for i in range(count)]

    def gen_encrypt_pkts6(self, p, sa, sw_intf, src, dst, count=1,
                          payload_size=54):
        # encrypted ICMPv6 echo packets, IPv6 inner
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                sa.encrypt(IPv6(src=src, dst=dst,
                                hlim=p.inner_hop_limit,
                                fl=p.inner_flow_label) /
                           ICMPv6EchoRequest(id=0, seq=1,
                                             data='X' * payload_size))
                for i in range(count)]

    def gen_pkts(self, sw_intf, src, dst, count=1, payload_size=54):
        # plain-text ICMP echo packets, IPv4
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IP(src=src, dst=dst) / ICMP() / Raw(b'X' * payload_size)
                for i in range(count)]

    def gen_pkts6(self, p, sw_intf, src, dst, count=1, payload_size=54):
        # plain-text ICMPv6 echo packets, IPv6
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IPv6(src=src, dst=dst,
                     hlim=p.inner_hop_limit, fl=p.inner_flow_label) /
                ICMPv6EchoRequest(id=0, seq=1, data='X' * payload_size)
                for i in range(count)]
class IpsecTcp(object):
    """Mixin verifying TCP checksums on packets that VPP generates itself."""
    def verify_tcp_checksum(self):
        # start http cli server listener on http://0.0.0.0:80
        self.vapi.cli("http cli server")
        params = self.params[socket.AF_INET]
        # encrypted SYN towards the CLI server port
        syn = (Ether(src=self.tun_if.remote_mac,
                     dst=self.tun_if.local_mac) /
               params.scapy_tun_sa.encrypt(
                   IP(src=params.remote_tun_if_host,
                      dst=self.tun_if.local_ip4) /
                   TCP(flags='S', dport=80)))
        self.logger.debug(ppp("Sending packet:", syn))
        rxs = self.send_and_expect(self.tun_if, [syn], self.tun_if)
        # decrypt VPP's reply and check all its checksums
        decrypted = params.vpp_tun_sa.decrypt(rxs[0][IP])
        self.assert_packet_checksums_valid(decrypted)
class IpsecTcpTests(IpsecTcp):
    # Concrete unittest wrapper around the IpsecTcp verify helper.
    def test_tcp_checksum(self):
        """ verify checksum correctness for vpp generated packets """
        self.verify_tcp_checksum()
class IpsecTra4(object):
""" verify methods for Transport v4 """
def get_replay_counts(self, p):
    """Total 'SA replayed packet' errors for the v4 decrypt node(s).

    In async mode the post-crypto decrypt node keeps its own counter,
    so both nodes are summed.
    """
    nodes = [self.tra4_decrypt_node_name[0]]
    if p.async_mode:
        nodes.append(self.tra4_decrypt_node_name[p.async_mode])
    return sum(self.statistics.get_err_counter(
        '/err/%s/SA replayed packet' % n) for n in nodes)
def get_hash_failed_counts(self, p):
    """Count integrity/decrypt failures on the v4 decrypt node.

    AES-GCM is a combined-mode cipher, so authentication failures show
    up as 'ESP decryption failed'; other algorithms report a separate
    'Integrity check failed' error. Async mode adds the crypto-dispatch
    bad-hmac counter.
    """
    node = self.tra4_decrypt_node_name[p.async_mode]
    if ESP == self.encryption_type and p.crypt_algo == "AES-GCM":
        err_name = '/err/%s/ESP decryption failed' % node
    else:
        err_name = '/err/%s/Integrity check failed' % node
    total = self.statistics.get_err_counter(err_name)
    if p.async_mode:
        total += self.statistics.get_err_counter(
            '/err/crypto-dispatch/bad-hmac')
    return total
def verify_hi_seq_num(self):
    """Exercise high (64-bit/ESN) sequence numbers around wrap points.

    Drives the RX window past 2^32 and checks in/out-of-window
    acceptance for both anti-replay (AR) and non-AR configurations.
    """
    p = self.params[socket.AF_INET]
    saf = VppEnum.vl_api_ipsec_sad_flags_t
    esn_on = p.vpp_tra_sa.esn_en
    ar_on = p.flags & saf.IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY
    seq_cycle_node_name = \
        ('/err/%s/sequence number cycled (packet dropped)' %
         self.tra4_encrypt_node_name)
    replay_count = self.get_replay_counts(p)
    hash_failed_count = self.get_hash_failed_counts(p)
    seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
    # a few packets so we get the rx seq number above the window size and
    # thus can simulate a wrap with an out of window packet
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(63, 80)]
    recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
    # these 4 packets will all choose seq-num 0 to decrypt since none
    # are out of window when first checked. however, once #200 has
    # decrypted it will move the window to 200 and then #81 is out of
    # window. this packet should be dropped.
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=200)),
            (Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=81)),
            (Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=201)),
            (Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=202))]
    # if anti-replay is off then we won't drop #81
    n_rx = 3 if ar_on else 4
    self.send_and_expect(self.tra_if, pkts, self.tra_if, n_rx=n_rx)
    # this packet is one before the wrap
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=203))]
    recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
    # move the window over half way to a wrap
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=0x80000001))]
    recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
    # anti-replay will drop old packets, no anti-replay will not
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=0x44000001))]
    if ar_on:
        self.send_and_assert_no_replies(self.tra_if, pkts)
    else:
        recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
    if esn_on:
        #
        # validate wrapping the ESN
        #
        # wrap scapy's TX SA SN
        p.scapy_tra_sa.seq_num = 0x100000005
        # send a packet that wraps the window for both AR and no AR
        pkts = [(Ether(src=self.tra_if.remote_mac,
                       dst=self.tra_if.local_mac) /
                 p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                           dst=self.tra_if.local_ip4) /
                                        ICMP(),
                                        seq_num=0x100000005))]
        rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)
        for rx in rxs:
            decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
        # move the window forward to half way to the next wrap
        pkts = [(Ether(src=self.tra_if.remote_mac,
                       dst=self.tra_if.local_mac) /
                 p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                           dst=self.tra_if.local_ip4) /
                                        ICMP(),
                                        seq_num=0x180000005))]
        rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)
        # a packet less than 2^30 from the current position is:
        #  - AR: out of window and dropped
        #  - non-AR: accepted
        pkts = [(Ether(src=self.tra_if.remote_mac,
                       dst=self.tra_if.local_mac) /
                 p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                           dst=self.tra_if.local_ip4) /
                                        ICMP(),
                                        seq_num=0x170000005))]
        if ar_on:
            self.send_and_assert_no_replies(self.tra_if, pkts)
        else:
            self.send_and_expect(self.tra_if, pkts, self.tra_if)
        # a packet more than 2^30 from the current position is:
        #  - AR: out of window and dropped
        #  - non-AR: considered a wrap, but since it's not a wrap
        #    it won't decrypt and so will be dropped
        pkts = [(Ether(src=self.tra_if.remote_mac,
                       dst=self.tra_if.local_mac) /
                 p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                           dst=self.tra_if.local_ip4) /
                                        ICMP(),
                                        seq_num=0x130000005))]
        self.send_and_assert_no_replies(self.tra_if, pkts)
        # a packet less than 2^30 from the current position and is a
        # wrap; (the seq is currently at 0x180000005).
        #  - AR: out of window so considered a wrap, so accepted
        #  - non-AR: not considered a wrap, so won't decrypt
        p.scapy_tra_sa.seq_num = 0x260000005
        pkts = [(Ether(src=self.tra_if.remote_mac,
                       dst=self.tra_if.local_mac) /
                 p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                           dst=self.tra_if.local_ip4) /
                                        ICMP(),
                                        seq_num=0x260000005))]
        if ar_on:
            self.send_and_expect(self.tra_if, pkts, self.tra_if)
        else:
            self.send_and_assert_no_replies(self.tra_if, pkts)
        #
        # window positions are different now for AR/non-AR
        # move non-AR forward
        #
        if not ar_on:
            # a packet more than 2^30 from the current position and is a
            # wrap; (the seq is currently at 0x180000005).
            #  - AR: accepted
            #  - non-AR: not considered a wrap, so won't decrypt
            pkts = [(Ether(src=self.tra_if.remote_mac,
                           dst=self.tra_if.local_mac) /
                     p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                               dst=self.tra_if.local_ip4) /
                                            ICMP(),
                                            seq_num=0x200000005)),
                    (Ether(src=self.tra_if.remote_mac,
                           dst=self.tra_if.local_mac) /
                     p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                               dst=self.tra_if.local_ip4) /
                                            ICMP(),
                                            seq_num=0x200000006))]
            self.send_and_expect(self.tra_if, pkts, self.tra_if)
            pkts = [(Ether(src=self.tra_if.remote_mac,
                           dst=self.tra_if.local_mac) /
                     p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                               dst=self.tra_if.local_ip4) /
                                            ICMP(),
                                            seq_num=0x260000005))]
            self.send_and_expect(self.tra_if, pkts, self.tra_if)
def verify_tra_anti_replay(self):
    """Exercise the 32-bit anti-replay window.

    Covers: replayed packets, out-of-window drops, packets from a bogus
    SA (must not move the window), undersized runts, and TX sequence
    number wrap with and without ESN.
    """
    p = self.params[socket.AF_INET]
    esn_en = p.vpp_tra_sa.esn_en
    seq_cycle_node_name = \
        ('/err/%s/sequence number cycled (packet dropped)' %
         self.tra4_encrypt_node_name)
    replay_count = self.get_replay_counts(p)
    hash_failed_count = self.get_hash_failed_counts(p)
    seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
    if ESP == self.encryption_type:
        undersize_node_name = ('/err/%s/undersized packet' %
                               self.tra4_decrypt_node_name[0])
        undersize_count = self.statistics.get_err_counter(
            undersize_node_name)
    #
    # send packets with seq numbers 1->34
    # this means the window size is still in Case B (see RFC4303
    # Appendix A)
    #
    # for reasons i haven't investigated Scapy won't create a packet with
    # seq_num=0
    #
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(1, 34)]
    recv_pkts = self.send_and_expect(self.tra_if, pkts, self.tra_if)
    # replayed packets are dropped
    self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
    replay_count += len(pkts)
    self.assertEqual(self.get_replay_counts(p), replay_count)
    #
    # now send a batch of packets all with the same sequence number
    # the first packet in the batch is legitimate, the rest bogus
    #
    self.vapi.cli("clear error")
    self.vapi.cli("clear node counters")
    pkts = (Ether(src=self.tra_if.remote_mac,
                  dst=self.tra_if.local_mac) /
            p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                      dst=self.tra_if.local_ip4) /
                                   ICMP(),
                                   seq_num=35))
    recv_pkts = self.send_and_expect(self.tra_if, pkts * 8,
                                     self.tra_if, n_rx=1)
    replay_count += 7
    self.assertEqual(self.get_replay_counts(p), replay_count)
    #
    # now move the window over to 257 (more than one byte) and into Case A
    #
    self.vapi.cli("clear error")
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                     dst=self.tra_if.local_ip4) /
                                  ICMP(),
                                  seq_num=257))
    recv_pkts = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
    # replayed packets are dropped
    self.send_and_assert_no_replies(self.tra_if, pkt * 3, timeout=0.2)
    replay_count += 3
    self.assertEqual(self.get_replay_counts(p), replay_count)
    # the window size is 64 packets
    # in window are still accepted
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                     dst=self.tra_if.local_ip4) /
                                  ICMP(),
                                  seq_num=200))
    recv_pkts = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
    # a packet that does not decrypt does not move the window forward
    bogus_sa = SecurityAssociation(self.encryption_type,
                                   p.vpp_tra_spi,
                                   crypt_algo=p.crypt_algo,
                                   crypt_key=mk_scapy_crypt_key(p)[::-1],
                                   auth_algo=p.auth_algo,
                                   auth_key=p.auth_key[::-1])
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           bogus_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                               dst=self.tra_if.local_ip4) /
                            ICMP(),
                            seq_num=350))
    self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)
    hash_failed_count += 17
    self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
    # a malformed 'runt' packet
    # created by a mis-constructed SA
    if (ESP == self.encryption_type and p.crypt_algo != "NULL"):
        bogus_sa = SecurityAssociation(self.encryption_type,
                                       p.vpp_tra_spi)
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               bogus_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                   dst=self.tra_if.local_ip4) /
                                ICMP(),
                                seq_num=350))
        self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)
        undersize_count += 17
        self.assert_error_counter_equal(undersize_node_name,
                                        undersize_count)
    # which we can determine since this packet is still in the window
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                     dst=self.tra_if.local_ip4) /
                                  ICMP(),
                                  seq_num=234))
    self.send_and_expect(self.tra_if, [pkt], self.tra_if)
    #
    # out of window are dropped
    # this is Case B. So VPP will consider this to be a high seq num wrap
    # and so the decrypt attempt will fail
    #
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                     dst=self.tra_if.local_ip4) /
                                  ICMP(),
                                  seq_num=17))
    self.send_and_assert_no_replies(self.tra_if, pkt * 17, timeout=0.2)
    if esn_en:
        # an out of window error with ESN looks like a high sequence
        # wrap. but since it isn't then the verify will fail.
        hash_failed_count += 17
        self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
    else:
        replay_count += 17
        self.assertEqual(self.get_replay_counts(p), replay_count)
    # valid packet moves the window over to 258
    pkt = (Ether(src=self.tra_if.remote_mac,
                 dst=self.tra_if.local_mac) /
           p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                     dst=self.tra_if.local_ip4) /
                                  ICMP(),
                                  seq_num=258))
    rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
    decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
    #
    # move VPP's SA TX seq-num to just before the seq-number wrap.
    # then fire in a packet that VPP should drop on TX because it
    # causes the TX seq number to wrap; unless we're using extended
    # sequence numbers.
    #
    self.vapi.cli("test ipsec sa %d seq 0xffffffff" % p.scapy_tra_sa_id)
    self.logger.info(self.vapi.ppcli("show ipsec sa 0"))
    self.logger.info(self.vapi.ppcli("show ipsec sa 1"))
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(259, 280)]
    if esn_en:
        rxs = self.send_and_expect(self.tra_if, pkts, self.tra_if)
        #
        # in order for scapy to decrypt its SA's high order number needs
        # to wrap
        #
        p.vpp_tra_sa.seq_num = 0x100000000
        for rx in rxs:
            decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
        #
        # wrap scapy's TX high sequence number. VPP is in case B, so it
        # will consider this a high seq wrap also.
        # The low seq num we set it to will place VPP's RX window in Case A
        #
        p.scapy_tra_sa.seq_num = 0x100000005
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                         dst=self.tra_if.local_ip4) /
                                      ICMP(),
                                      seq_num=0x100000005))
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
        #
        # A packet that has seq num between (2^32-64) and 5 is within
        # the window
        #
        p.scapy_tra_sa.seq_num = 0xfffffffd
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                         dst=self.tra_if.local_ip4) /
                                      ICMP(),
                                      seq_num=0xfffffffd))
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
        #
        # While in case A we cannot wrap the high sequence number again
        # because VPP will consider this packet to be one that moves the
        # window forward
        #
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                         dst=self.tra_if.local_ip4) /
                                      ICMP(),
                                      seq_num=0x200000999))
        self.send_and_assert_no_replies(self.tra_if, [pkt], self.tra_if,
                                        timeout=0.2)
        hash_failed_count += 1
        self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
        #
        # but if we move the window forward to case B, then we can wrap
        # again
        #
        p.scapy_tra_sa.seq_num = 0x100000555
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                         dst=self.tra_if.local_ip4) /
                                      ICMP(),
                                      seq_num=0x100000555))
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
        p.scapy_tra_sa.seq_num = 0x200000444
        pkt = (Ether(src=self.tra_if.remote_mac,
                     dst=self.tra_if.local_mac) /
               p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                         dst=self.tra_if.local_ip4) /
                                      ICMP(),
                                      seq_num=0x200000444))
        rx = self.send_and_expect(self.tra_if, [pkt], self.tra_if)
        decrypted = p.vpp_tra_sa.decrypt(rx[0][IP])
    else:
        #
        # without ESN TX sequence numbers can't wrap and packets are
        # dropped from here on out.
        #
        self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
        seq_cycle_count += len(pkts)
        self.assert_error_counter_equal(seq_cycle_node_name,
                                        seq_cycle_count)
    # move the security-associations seq number on to the last we used
    self.vapi.cli("test ipsec sa %d seq 0x15f" % p.scapy_tra_sa_id)
    p.scapy_tra_sa.seq_num = 351
    p.vpp_tra_sa.seq_num = 351
def verify_tra_lost(self):
    """Check the SA 'lost' counter tracks holes in the RX sequence space."""
    p = self.params[socket.AF_INET]
    esn_en = p.vpp_tra_sa.esn_en
    #
    # send packets with seq numbers 1->34
    # this means the window size is still in Case B (see RFC4303
    # Appendix A)
    #
    # for reasons i haven't investigated Scapy won't create a packet with
    # seq_num=0
    #
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(1, 3)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if)
    self.assertEqual(p.tra_sa_out.get_lost(), 0)
    # skip a sequence number
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(4, 6)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if)
    self.assertEqual(p.tra_sa_out.get_lost(), 0)
    # the lost packets are counted until we get up past the first
    # sizeof(replay_window) packets
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(6, 100)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if)
    self.assertEqual(p.tra_sa_out.get_lost(), 1)
    # lots of holes in the sequence: every other seq number missing
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(100, 200, 2)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if, n_rx=50)
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(200, 300)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if)
    self.assertEqual(p.tra_sa_out.get_lost(), 51)
    # a big hole in the seq number space
    pkts = [(Ether(src=self.tra_if.remote_mac,
                   dst=self.tra_if.local_mac) /
             p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
                                       dst=self.tra_if.local_ip4) /
                                    ICMP(),
                                    seq_num=seq))
            for seq in range(400, 500)]
    self.send_and_expect(self.tra_if, pkts, self.tra_if)
    self.assertEqual(p.tra_sa_out.get_lost(), 151)
def verify_tra_basic4(self, count=1, payload_size=54):
    """ ipsec v4 transport basic test """
    # send count encrypted ICMP echoes, check lengths, checksums,
    # decryptability and the SA/node counters afterwards.
    self.vapi.cli("clear errors")
    self.vapi.cli("clear ipsec sa")
    try:
        p = self.params[socket.AF_INET]
        send_pkts = self.gen_encrypt_pkts(p, p.scapy_tra_sa, self.tra_if,
                                          src=self.tra_if.remote_ip4,
                                          dst=self.tra_if.local_ip4,
                                          count=count,
                                          payload_size=payload_size)
        recv_pkts = self.send_and_expect(self.tra_if, send_pkts,
                                         self.tra_if)
        for rx in recv_pkts:
            # IP total length must cover everything after the Ether header
            self.assertEqual(len(rx) - len(Ether()), rx[IP].len)
            self.assert_packet_checksums_valid(rx)
            try:
                decrypted = p.vpp_tra_sa.decrypt(rx[IP])
                self.assert_packet_checksums_valid(decrypted)
            except:
                self.logger.debug(ppp("Unexpected packet:", rx))
                raise
    finally:
        self.logger.info(self.vapi.ppcli("show error"))
        self.logger.info(self.vapi.ppcli("show ipsec all"))
    pkts = p.tra_sa_in.get_stats()['packets']
    self.assertEqual(pkts, count,
                     "incorrect SA in counts: expected %d != %d" %
                     (count, pkts))
    pkts = p.tra_sa_out.get_stats()['packets']
    self.assertEqual(pkts, count,
                     "incorrect SA out counts: expected %d != %d" %
                     (count, pkts))
    self.assertEqual(p.tra_sa_out.get_lost(), 0)
    self.assertEqual(p.tra_sa_in.get_lost(), 0)
    self.assert_packet_counter_equal(self.tra4_encrypt_node_name, count)
    self.assert_packet_counter_equal(self.tra4_decrypt_node_name[0], count)
class IpsecTra4Tests(IpsecTra4):
    """ UT test methods for Transport v4 """
    def test_tra_anti_replay(self):
        """ ipsec v4 transport anti-replay test """
        self.verify_tra_anti_replay()

    def test_tra_lost(self):
        """ ipsec v4 transport lost packet test """
        self.verify_tra_lost()

    def test_tra_basic(self, count=1):
        """ ipsec v4 transport basic test """
        # forward the caller-supplied count; previously it was hard-coded
        # to 1, silently ignoring the parameter. Default is unchanged, so
        # the unittest runner's no-arg invocation behaves as before.
        self.verify_tra_basic4(count=count)

    def test_tra_burst(self):
        """ ipsec v4 transport burst test """
        self.verify_tra_basic4(count=257)
class IpsecTra6(object):
    """ verify methods for Transport v6 """

    def verify_tra_basic6(self, count=1, payload_size=54):
        """ipsec v6 transport basic test: send count encrypted ICMPv6
        echoes, then verify lengths, decryptability and counters."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            p = self.params[socket.AF_INET6]
            send_pkts = self.gen_encrypt_pkts6(p, p.scapy_tra_sa, self.tra_if,
                                               src=self.tra_if.remote_ip6,
                                               dst=self.tra_if.local_ip6,
                                               count=count,
                                               payload_size=payload_size)
            recv_pkts = self.send_and_expect(self.tra_if, send_pkts,
                                             self.tra_if)
            for rx in recv_pkts:
                # plen excludes the fixed IPv6 header
                self.assertEqual(len(rx) - len(Ether()) - len(IPv6()),
                                 rx[IPv6].plen)
                try:
                    decrypted = p.vpp_tra_sa.decrypt(rx[IPv6])
                    self.assert_packet_checksums_valid(decrypted)
                except:
                    self.logger.debug(ppp("Unexpected packet:", rx))
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        pkts = p.tra_sa_in.get_stats()['packets']
        self.assertEqual(pkts, count,
                         "incorrect SA in counts: expected %d != %d" %
                         (count, pkts))
        pkts = p.tra_sa_out.get_stats()['packets']
        self.assertEqual(pkts, count,
                         "incorrect SA out counts: expected %d != %d" %
                         (count, pkts))
        self.assert_packet_counter_equal(self.tra6_encrypt_node_name, count)
        self.assert_packet_counter_equal(self.tra6_decrypt_node_name[0], count)

    def gen_encrypt_pkts_ext_hdrs6(self, sa, sw_intf, src, dst, count=1,
                                   payload_size=54):
        # encrypted ICMPv6 echoes (no extension headers on the inner packet)
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                sa.encrypt(IPv6(src=src, dst=dst) /
                           ICMPv6EchoRequest(id=0, seq=1,
                                             data='X' * payload_size))
                for i in range(count)]

    def gen_pkts_ext_hdrs6(self, sw_intf, src, dst, count=1, payload_size=54):
        # plain-text packets carrying hop-by-hop + fragment extension headers
        return [Ether(src=sw_intf.remote_mac, dst=sw_intf.local_mac) /
                IPv6(src=src, dst=dst) /
                IPv6ExtHdrHopByHop() /
                IPv6ExtHdrFragment(id=2, offset=200) /
                Raw(b'\xff' * 200)
                for i in range(count)]

    def verify_tra_encrypted6(self, p, sa, rxs):
        """Decrypt each received packet and check transport addresses.

        NOTE(review): the 'sa' argument is unused — decryption always
        goes through p.vpp_tra_sa (callers currently pass exactly that);
        confirm this is intended.
        """
        decrypted = []
        for rx in rxs:
            self.assert_packet_checksums_valid(rx)
            try:
                decrypt_pkt = p.vpp_tra_sa.decrypt(rx[IPv6])
                decrypted.append(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.tra_if.local_ip6)
                self.assert_equal(decrypt_pkt.dst, self.tra_if.remote_ip6)
            except:
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise
        return decrypted

    def verify_tra_66_ext_hdrs(self, p):
        """Transport-mode 6o6 with IPv6 extension headers around ESP."""
        count = 63
        #
        # check we can decrypt with options
        #
        tx = self.gen_encrypt_pkts_ext_hdrs6(p.scapy_tra_sa, self.tra_if,
                                             src=self.tra_if.remote_ip6,
                                             dst=self.tra_if.local_ip6,
                                             count=count)
        self.send_and_expect(self.tra_if, tx, self.tra_if)
        #
        # injecting a packet from ourselves to be routed off box is a hack
        # but it matches an outbound policy, so no regrets
        #
        # one extension before ESP
        tx = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
              IPv6(src=self.tra_if.local_ip6,
                   dst=self.tra_if.remote_ip6) /
              IPv6ExtHdrFragment(id=2, offset=200) /
              Raw(b'\xff' * 200))
        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)
        for dc in dcs:
            # for reasons i'm not going to investigate scapy does not
            # create the correct headers after decrypt. but reparsing
            # the ipv6 packet fixes it
            dc = IPv6(raw(dc[IPv6]))
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)
        # two extensions before ESP
        tx = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
              IPv6(src=self.tra_if.local_ip6,
                   dst=self.tra_if.remote_ip6) /
              IPv6ExtHdrHopByHop() /
              IPv6ExtHdrFragment(id=2, offset=200) /
              Raw(b'\xff' * 200))
        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)
        for dc in dcs:
            dc = IPv6(raw(dc[IPv6]))
            self.assertTrue(dc[IPv6ExtHdrHopByHop])
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)
        # two extensions before ESP, one after
        tx = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
              IPv6(src=self.tra_if.local_ip6,
                   dst=self.tra_if.remote_ip6) /
              IPv6ExtHdrHopByHop() /
              IPv6ExtHdrFragment(id=2, offset=200) /
              IPv6ExtHdrDestOpt() /
              Raw(b'\xff' * 200))
        rxs = self.send_and_expect(self.pg2, [tx], self.tra_if)
        dcs = self.verify_tra_encrypted6(p, p.vpp_tra_sa, rxs)
        for dc in dcs:
            dc = IPv6(raw(dc[IPv6]))
            self.assertTrue(dc[IPv6ExtHdrDestOpt])
            self.assertTrue(dc[IPv6ExtHdrHopByHop])
            self.assert_equal(dc[IPv6ExtHdrFragment].id, 2)
class IpsecTra6Tests(IpsecTra6):
    """ UT test methods for Transport v6 """
    def test_tra_basic6(self):
        """ ipsec v6 transport basic test """
        self.verify_tra_basic6(count=1)

    def test_tra_burst6(self):
        """ ipsec v6 transport burst test """
        self.verify_tra_basic6(count=257)
class IpsecTra6ExtTests(IpsecTra6):
    # Runs the extension-header variants against the v6 transport setup.
    def test_tra_ext_hdrs_66(self):
        """ ipsec 6o6 tra extension headers test """
        self.verify_tra_66_ext_hdrs(self.params[socket.AF_INET6])
class IpsecTra46Tests(IpsecTra4Tests, IpsecTra6Tests):
    """ UT test methods for Transport v6 and v4"""
    # inherits all test_* methods from both the v4 and v6 transport suites
    pass
class IpsecTun4(object):
    """ verify methods for Tunnel v4 """
    def verify_counters4(self, p, count, n_frags=None, worker=None):
        """Check SPD/SA/graph-node counters after a v4 tunnel exchange.

        :param p: params object holding the SAs/policies under test
        :param count: expected packet count on the decrypt (inbound) side
        :param n_frags: expected packet count on the encrypt (outbound)
            side; defaults to count when encryption produced no fragments
        :param worker: restrict the stats query to one worker thread
        """
        if not n_frags:
            n_frags = count
        if (hasattr(p, "spd_policy_in_any")):
            pkts = p.spd_policy_in_any.get_stats(worker)['packets']
            self.assertEqual(pkts, count,
                             "incorrect SPD any policy: expected %d != %d" %
                             (count, pkts))

        if (hasattr(p, "tun_sa_in")):
            pkts = p.tun_sa_in.get_stats(worker)['packets']
            self.assertEqual(pkts, count,
                             "incorrect SA in counts: expected %d != %d" %
                             (count, pkts))
            pkts = p.tun_sa_out.get_stats(worker)['packets']
            # bug fix: this assertion compares against n_frags, so the
            # failure message must report n_frags (the original printed
            # count, producing a misleading diagnostic)
            self.assertEqual(pkts, n_frags,
                             "incorrect SA out counts: expected %d != %d" %
                             (n_frags, pkts))

        self.assert_packet_counter_equal(self.tun4_encrypt_node_name, n_frags)
        self.assert_packet_counter_equal(self.tun4_decrypt_node_name[0], count)

    def verify_decrypted(self, p, rxs):
        """Verify decrypted packets carry the expected inner v4 addresses
        and valid checksums."""
        for rx in rxs:
            self.assert_equal(rx[IP].src, p.remote_tun_if_host)
            self.assert_equal(rx[IP].dst, self.pg1.remote_ip4)
            self.assert_packet_checksums_valid(rx)

    def verify_esp_padding(self, sa, esp_payload, decrypt_pkt):
        """Check ESP payload length: plaintext + 2 trailer bytes rounded
        up to the cipher block size (minimum 4), plus IV and ICV."""
        align = sa.crypt_algo.block_size
        if align < 4:
            align = 4
        exp_len = (len(decrypt_pkt) + 2 + (align - 1)) & ~(align - 1)
        exp_len += sa.crypt_algo.iv_size
        exp_len += sa.crypt_algo.icv_size or sa.auth_algo.icv_size
        self.assertEqual(exp_len, len(esp_payload))

    def verify_encrypted(self, p, sa, rxs):
        """Decrypt captured tunnel packets and verify addresses, padding
        and checksums; reassemble fragments before the final check."""
        decrypt_pkts = []
        for rx in rxs:
            if p.nat_header:
                self.assertEqual(rx[UDP].dport, 4500)
            self.assert_packet_checksums_valid(rx)
            self.assertEqual(len(rx) - len(Ether()), rx[IP].len)
            try:
                rx_ip = rx[IP]
                decrypt_pkt = p.vpp_tun_sa.decrypt(rx_ip)
                if not decrypt_pkt.haslayer(IP):
                    decrypt_pkt = IP(decrypt_pkt[Raw].load)
                if rx_ip.proto == socket.IPPROTO_ESP:
                    self.verify_esp_padding(sa, rx_ip[ESP].data, decrypt_pkt)
                decrypt_pkts.append(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip4)
                self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host)
            except:
                # log the offending packet(s) before re-raising so the
                # failure is debuggable from the test log
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise
        pkts = reassemble4(decrypt_pkts)
        for pkt in pkts:
            self.assert_packet_checksums_valid(pkt)

    def verify_tun_44(self, p, count=1, payload_size=64, n_rx=None):
        """Send count packets through the tunnel in both directions and
        verify decryption, encryption, outer addresses and counters."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec counters")
        self.vapi.cli("clear ipsec sa")
        if not n_rx:
            n_rx = count
        try:
            # inbound: encrypted in, plaintext expected out on pg1
            send_pkts = self.gen_encrypt_pkts(p, p.scapy_tun_sa, self.tun_if,
                                              src=p.remote_tun_if_host,
                                              dst=self.pg1.remote_ip4,
                                              count=count,
                                              payload_size=payload_size)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            self.verify_decrypted(p, recv_pkts)

            # outbound: plaintext in on pg1, encrypted expected on tun_if
            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
                                      dst=p.remote_tun_if_host, count=count,
                                      payload_size=payload_size)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts,
                                             self.tun_if, n_rx)
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)

            for rx in recv_pkts:
                self.assertEqual(rx[IP].src, p.tun_src)
                self.assertEqual(rx[IP].dst, p.tun_dst)

        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
            self.logger.info(self.vapi.ppcli("show ipsec sa 0"))
            self.logger.info(self.vapi.ppcli("show ipsec sa 4"))
        self.verify_counters4(p, count, n_rx)

    def verify_tun_dropped_44(self, p, count=1, payload_size=64, n_rx=None):
        """Verify traffic in both directions is dropped (no replies)."""
        self.vapi.cli("clear errors")
        if not n_rx:
            n_rx = count
        try:
            send_pkts = self.gen_encrypt_pkts(p, p.scapy_tun_sa, self.tun_if,
                                              src=p.remote_tun_if_host,
                                              dst=self.pg1.remote_ip4,
                                              count=count)
            self.send_and_assert_no_replies(self.tun_if, send_pkts)

            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
                                      dst=p.remote_tun_if_host, count=count,
                                      payload_size=payload_size)
            self.send_and_assert_no_replies(self.pg1, send_pkts)

        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))

    def verify_tun_reass_44(self, p):
        """Verify a fragmented encrypted packet is reassembled, decrypted
        and forwarded; reassembly is enabled only for this test."""
        self.vapi.cli("clear errors")
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip4=True)

        try:
            # one 1900-byte packet fragmented at 1400 -> two fragments in,
            # one reassembled/decrypted packet out
            send_pkts = self.gen_encrypt_pkts(p, p.scapy_tun_sa, self.tun_if,
                                              src=p.remote_tun_if_host,
                                              dst=self.pg1.remote_ip4,
                                              payload_size=1900,
                                              count=1)
            send_pkts = fragment_rfc791(send_pkts[0], 1400)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts,
                                             self.pg1, n_rx=1)
            self.verify_decrypted(p, recv_pkts)

            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
                                      dst=p.remote_tun_if_host, count=1)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts,
                                             self.tun_if)
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)

        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters4(p, 1, 1)
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip4=False)

    def verify_tun_64(self, p, count=1):
        """Verify IPv6 payloads carried over a v4 IPsec tunnel (6o4)."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            send_pkts = self.gen_encrypt_pkts6(p, p.scapy_tun_sa, self.tun_if,
                                               src=p.remote_tun_if_host6,
                                               dst=self.pg1.remote_ip6,
                                               count=count)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            for recv_pkt in recv_pkts:
                self.assert_equal(recv_pkt[IPv6].src, p.remote_tun_if_host6)
                self.assert_equal(recv_pkt[IPv6].dst, self.pg1.remote_ip6)
                self.assert_packet_checksums_valid(recv_pkt)
            send_pkts = self.gen_pkts6(p, self.pg1, src=self.pg1.remote_ip6,
                                       dst=p.remote_tun_if_host6, count=count)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            for recv_pkt in recv_pkts:
                try:
                    decrypt_pkt = p.vpp_tun_sa.decrypt(recv_pkt[IP])
                    if not decrypt_pkt.haslayer(IPv6):
                        decrypt_pkt = IPv6(decrypt_pkt[Raw].load)
                    self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip6)
                    self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host6)
                    self.assert_packet_checksums_valid(decrypt_pkt)
                except:
                    self.logger.error(ppp("Unexpected packet:", recv_pkt))
                    try:
                        self.logger.debug(
                            ppp("Decrypted packet:", decrypt_pkt))
                    except:
                        pass
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters4(p, count)

    def verify_keepalive(self, p):
        """Verify NAT-T keepalives and too-short packets are counted on
        the tunnel input node and not forwarded."""
        # the sizeof Raw is calculated to pad to the minimum ethernet
        # frame size of 64 bytes
        # NOTE(review): Padding(0 * 21) evaluates to Padding(0) -- if a
        # 21-byte pad was intended this should likely be b'\x00' * 21;
        # left unchanged pending confirmation
        pkt = (Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac) /
               IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4) /
               UDP(sport=333, dport=4500) /
               Raw(b'\xff') /
               Padding(0 * 21))
        self.send_and_assert_no_replies(self.tun_if, pkt*31)
        self.assert_error_counter_equal(
            '/err/%s/NAT Keepalive' % self.tun4_input_node, 31)

        pkt = (Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac) /
               IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4) /
               UDP(sport=333, dport=4500) /
               Raw(b'\xfe'))
        self.send_and_assert_no_replies(self.tun_if, pkt*31)
        self.assert_error_counter_equal(
            '/err/%s/Too Short' % self.tun4_input_node, 31)

        pkt = (Ether(src=self.tun_if.remote_mac, dst=self.tun_if.local_mac) /
               IP(src=p.remote_tun_if_host, dst=self.tun_if.local_ip4) /
               UDP(sport=333, dport=4500) /
               Raw(b'\xfe') /
               Padding(0 * 21))
        self.send_and_assert_no_replies(self.tun_if, pkt*31)
        self.assert_error_counter_equal(
            '/err/%s/Too Short' % self.tun4_input_node, 62)
class IpsecTun4Tests(IpsecTun4):
    """ UT test methods for Tunnel v4 """
    def test_tun_basic44(self):
        """ ipsec 4o4 tunnel basic test """
        self.verify_tun_44(self.params[socket.AF_INET], count=1)
        # bounce the tunnel interface to check traffic still flows
        # after a down/up cycle
        self.tun_if.admin_down()
        self.tun_if.resolve_arp()
        self.tun_if.admin_up()
        self.verify_tun_44(self.params[socket.AF_INET], count=1)
    def test_tun_reass_basic44(self):
        """ ipsec 4o4 tunnel basic reassembly test """
        self.verify_tun_reass_44(self.params[socket.AF_INET])
    def test_tun_burst44(self):
        """ ipsec 4o4 tunnel burst test """
        self.verify_tun_44(self.params[socket.AF_INET], count=127)
class IpsecTun6(object):
    """ verify methods for Tunnel v6 """
    def verify_counters6(self, p_in, p_out, count, worker=None):
        """Check inbound/outbound SA and graph-node counters after a
        v6 tunnel exchange; p_in and p_out may be the same params."""
        if (hasattr(p_in, "tun_sa_in")):
            pkts = p_in.tun_sa_in.get_stats(worker)['packets']
            self.assertEqual(pkts, count,
                             "incorrect SA in counts: expected %d != %d" %
                             (count, pkts))
        if (hasattr(p_out, "tun_sa_out")):
            pkts = p_out.tun_sa_out.get_stats(worker)['packets']
            self.assertEqual(pkts, count,
                             "incorrect SA out counts: expected %d != %d" %
                             (count, pkts))
        self.assert_packet_counter_equal(self.tun6_encrypt_node_name, count)
        self.assert_packet_counter_equal(self.tun6_decrypt_node_name[0], count)
    def verify_decrypted6(self, p, rxs):
        """Verify decrypted packets carry the expected inner v6 addresses
        and valid checksums."""
        for rx in rxs:
            self.assert_equal(rx[IPv6].src, p.remote_tun_if_host)
            self.assert_equal(rx[IPv6].dst, self.pg1.remote_ip6)
            self.assert_packet_checksums_valid(rx)
    def verify_encrypted6(self, p, sa, rxs):
        """Decrypt captured tunnel packets and verify outer hop-limit /
        flow-label as well as the inner addresses and fields."""
        for rx in rxs:
            self.assert_packet_checksums_valid(rx)
            self.assertEqual(len(rx) - len(Ether()) - len(IPv6()),
                             rx[IPv6].plen)
            self.assert_equal(rx[IPv6].hlim, p.outer_hop_limit)
            if p.outer_flow_label:
                self.assert_equal(rx[IPv6].fl, p.outer_flow_label)
            try:
                decrypt_pkt = p.vpp_tun_sa.decrypt(rx[IPv6])
                if not decrypt_pkt.haslayer(IPv6):
                    decrypt_pkt = IPv6(decrypt_pkt[Raw].load)
                self.assert_packet_checksums_valid(decrypt_pkt)
                self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip6)
                self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host)
                # inner hop-limit is expected to have been decremented
                # by one on the forwarding path
                self.assert_equal(decrypt_pkt.hlim, p.inner_hop_limit - 1)
                self.assert_equal(decrypt_pkt.fl, p.inner_flow_label)
            except:
                # log the offending packet(s) before re-raising so the
                # failure is debuggable from the test log
                self.logger.debug(ppp("Unexpected packet:", rx))
                try:
                    self.logger.debug(ppp("Decrypted packet:", decrypt_pkt))
                except:
                    pass
                raise
    def verify_drop_tun_tx_66(self, p_in, count=1, payload_size=64):
        """Verify plaintext traffic toward the tunnel gets no replies."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        send_pkts = self.gen_pkts6(p_in, self.pg1, src=self.pg1.remote_ip6,
                                   dst=p_in.remote_tun_if_host, count=count,
                                   payload_size=payload_size)
        self.send_and_assert_no_replies(self.tun_if, send_pkts)
        self.logger.info(self.vapi.cli("sh punt stats"))
    def verify_drop_tun_rx_66(self, p_in, count=1, payload_size=64):
        """Verify encrypted traffic into the tunnel gets no replies."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        send_pkts = self.gen_encrypt_pkts6(p_in, p_in.scapy_tun_sa,
                                           self.tun_if,
                                           src=p_in.remote_tun_if_host,
                                           dst=self.pg1.remote_ip6,
                                           count=count)
        self.send_and_assert_no_replies(self.tun_if, send_pkts)
    def verify_drop_tun_66(self, p_in, count=1, payload_size=64):
        """Verify both directions are dropped."""
        self.verify_drop_tun_tx_66(p_in, count=count,
                                   payload_size=payload_size)
        self.verify_drop_tun_rx_66(p_in, count=count,
                                   payload_size=payload_size)
    def verify_tun_66(self, p_in, p_out=None, count=1, payload_size=64):
        """Send count packets through the v6 tunnel in both directions
        and verify decryption, encryption, addresses and counters."""
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        if not p_out:
            p_out = p_in
        try:
            # inbound: encrypted in on tun_if, plaintext out on pg1
            send_pkts = self.gen_encrypt_pkts6(p_in, p_in.scapy_tun_sa,
                                               self.tun_if,
                                               src=p_in.remote_tun_if_host,
                                               dst=self.pg1.remote_ip6,
                                               count=count,
                                               payload_size=payload_size)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            self.verify_decrypted6(p_in, recv_pkts)
            # outbound: plaintext in on pg1, encrypted out on tun_if
            send_pkts = self.gen_pkts6(p_in, self.pg1, src=self.pg1.remote_ip6,
                                       dst=p_out.remote_tun_if_host,
                                       count=count,
                                       payload_size=payload_size)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            self.verify_encrypted6(p_out, p_out.vpp_tun_sa, recv_pkts)
            for rx in recv_pkts:
                self.assertEqual(rx[IPv6].src, p_out.tun_src)
                self.assertEqual(rx[IPv6].dst, p_out.tun_dst)
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters6(p_in, p_out, count)
    def verify_tun_reass_66(self, p):
        """Verify a fragmented encrypted v6 packet is reassembled,
        decrypted and forwarded; reassembly enabled only for this test."""
        self.vapi.cli("clear errors")
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip6=True)
        try:
            # one 1850-byte packet fragmented at 1400 -> fragments in,
            # a single reassembled/decrypted packet out
            send_pkts = self.gen_encrypt_pkts6(p, p.scapy_tun_sa, self.tun_if,
                                               src=p.remote_tun_if_host,
                                               dst=self.pg1.remote_ip6,
                                               count=1,
                                               payload_size=1850)
            send_pkts = fragment_rfc8200(send_pkts[0], 1, 1400, self.logger)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts,
                                             self.pg1, n_rx=1)
            self.verify_decrypted6(p, recv_pkts)
            send_pkts = self.gen_pkts6(p, self.pg1, src=self.pg1.remote_ip6,
                                       dst=p.remote_tun_if_host,
                                       count=1,
                                       payload_size=64)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts,
                                             self.tun_if)
            self.verify_encrypted6(p, p.vpp_tun_sa, recv_pkts)
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters6(p, p, 1)
        self.vapi.ip_reassembly_enable_disable(
            sw_if_index=self.tun_if.sw_if_index, enable_ip6=False)
    def verify_tun_46(self, p, count=1):
        """ ipsec 4o6 tunnel basic test """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        try:
            # inbound: v4 payload encrypted over the v6 tunnel
            send_pkts = self.gen_encrypt_pkts(p, p.scapy_tun_sa, self.tun_if,
                                              src=p.remote_tun_if_host4,
                                              dst=self.pg1.remote_ip4,
                                              count=count)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
            for recv_pkt in recv_pkts:
                self.assert_equal(recv_pkt[IP].src, p.remote_tun_if_host4)
                self.assert_equal(recv_pkt[IP].dst, self.pg1.remote_ip4)
                self.assert_packet_checksums_valid(recv_pkt)
            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
                                      dst=p.remote_tun_if_host4,
                                      count=count)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts, self.tun_if)
            for recv_pkt in recv_pkts:
                try:
                    # outer header is v6; decrypt then reparse inner v4
                    decrypt_pkt = p.vpp_tun_sa.decrypt(recv_pkt[IPv6])
                    if not decrypt_pkt.haslayer(IP):
                        decrypt_pkt = IP(decrypt_pkt[Raw].load)
                    self.assert_equal(decrypt_pkt.src, self.pg1.remote_ip4)
                    self.assert_equal(decrypt_pkt.dst, p.remote_tun_if_host4)
                    self.assert_packet_checksums_valid(decrypt_pkt)
                except:
                    self.logger.debug(ppp("Unexpected packet:", recv_pkt))
                    try:
                        self.logger.debug(ppp("Decrypted packet:",
                                              decrypt_pkt))
                    except:
                        pass
                    raise
        finally:
            self.logger.info(self.vapi.ppcli("show error"))
            self.logger.info(self.vapi.ppcli("show ipsec all"))
        self.verify_counters6(p, p, count)
class IpsecTun6Tests(IpsecTun6):
    """ UT test methods for Tunnel v6 """
    def test_tun_basic66(self):
        """ ipsec 6o6 tunnel basic test """
        self.verify_tun_66(self.params[socket.AF_INET6], count=1)
    def test_tun_reass_basic66(self):
        """ ipsec 6o6 tunnel basic reassembly test """
        self.verify_tun_reass_66(self.params[socket.AF_INET6])
    def test_tun_burst66(self):
        """ ipsec 6o6 tunnel burst test """
        # 257 packets -- presumably chosen to exceed a single VPP frame
        # (256); confirm against the framework's frame size
        self.verify_tun_66(self.params[socket.AF_INET6], count=257)
class IpsecTun6HandoffTests(IpsecTun6):
    """ UT test methods for Tunnel v6 with multiple workers """
    # run VPP with two worker threads so hand-off can occur
    vpp_worker_count = 2
    def test_tun_handoff_66(self):
        """ ipsec 6o6 tunnel worker hand-off test """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        N_PKTS = 15
        p = self.params[socket.AF_INET6]
        # inject alternately on worker 0 and 1. all counts on the SA
        # should be against worker 0
        for worker in [0, 1, 0, 1]:
            send_pkts = self.gen_encrypt_pkts6(p, p.scapy_tun_sa, self.tun_if,
                                               src=p.remote_tun_if_host,
                                               dst=self.pg1.remote_ip6,
                                               count=N_PKTS)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts,
                                             self.pg1, worker=worker)
            self.verify_decrypted6(p, recv_pkts)
            send_pkts = self.gen_pkts6(p, self.pg1, src=self.pg1.remote_ip6,
                                       dst=p.remote_tun_if_host,
                                       count=N_PKTS)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts,
                                             self.tun_if, worker=worker)
            self.verify_encrypted6(p, p.vpp_tun_sa, recv_pkts)
        # all counts against the first worker that was used
        self.verify_counters6(p, p, 4*N_PKTS, worker=0)
class IpsecTun4HandoffTests(IpsecTun4):
    """ UT test methods for Tunnel v4 with multiple workers """
    # run VPP with two worker threads so hand-off can occur
    vpp_worker_count = 2
    # NOTE(review): method name has a typo ("handooff"); renaming would
    # change the externally visible test ID, so it is left as-is
    def test_tun_handooff_44(self):
        """ ipsec 4o4 tunnel worker hand-off test """
        self.vapi.cli("clear errors")
        self.vapi.cli("clear ipsec sa")
        N_PKTS = 15
        p = self.params[socket.AF_INET]
        # inject alternately on worker 0 and 1. all counts on the SA
        # should be against worker 0
        for worker in [0, 1, 0, 1]:
            send_pkts = self.gen_encrypt_pkts(p, p.scapy_tun_sa, self.tun_if,
                                              src=p.remote_tun_if_host,
                                              dst=self.pg1.remote_ip4,
                                              count=N_PKTS)
            recv_pkts = self.send_and_expect(self.tun_if, send_pkts,
                                             self.pg1, worker=worker)
            self.verify_decrypted(p, recv_pkts)
            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
                                      dst=p.remote_tun_if_host,
                                      count=N_PKTS)
            recv_pkts = self.send_and_expect(self.pg1, send_pkts,
                                             self.tun_if, worker=worker)
            self.verify_encrypted(p, p.vpp_tun_sa, recv_pkts)
        # all counts against the first worker that was used
        self.verify_counters4(p, 4*N_PKTS, worker=0)
class IpsecTun46Tests(IpsecTun4Tests, IpsecTun6Tests):
    """ UT test methods for Tunnel v6 & v4 """
    # inherits all test_* methods from both the v4 and v6 tunnel suites
    pass
class IPSecIPv4Fwd(VppTestCase):
    """ Test IPSec by capturing and verifying IPv4 forwarded pkts """
    @classmethod
    def setUpConstants(cls):
        super(IPSecIPv4Fwd, cls).setUpConstants()

    def setUp(self):
        super(IPSecIPv4Fwd, self).setUp()
        # store SPD objects so we can remove configs on tear down
        self.spd_objs = []
        self.spd_policies = []

    def tearDown(self):
        # remove SPD policies
        for obj in self.spd_policies:
            obj.remove_vpp_config()
        self.spd_policies = []
        # remove SPD items (interface bindings first, then SPD)
        for obj in reversed(self.spd_objs):
            obj.remove_vpp_config()
        self.spd_objs = []
        # close down pg intfs
        for pg in self.pg_interfaces:
            pg.unconfig_ip4()
            pg.admin_down()
        super(IPSecIPv4Fwd, self).tearDown()

    def create_interfaces(self, num_ifs=2):
        """Create, bring up and configure pg0 .. pg<num_ifs-1>."""
        # create interfaces pg0 ... pg<num_ifs>
        self.create_pg_interfaces(range(num_ifs))
        for pg in self.pg_interfaces:
            # put the interface up
            pg.admin_up()
            # configure IPv4 address on the interface
            pg.config_ip4()
            # resolve ARP, so that we know VPP MAC
            pg.resolve_arp()
        self.logger.info(self.vapi.ppcli("show int addr"))

    def spd_create_and_intf_add(self, spd_id, pg_list):
        """Create an SPD and bind it to every interface in pg_list."""
        spd = VppIpsecSpd(self, spd_id)
        spd.add_vpp_config()
        self.spd_objs.append(spd)
        for pg in pg_list:
            spdItf = VppIpsecSpdItfBinding(self, spd, pg)
            spdItf.add_vpp_config()
            self.spd_objs.append(spdItf)

    def get_policy(self, policy_type):
        """Map a policy-type string to the corresponding SPD action enum.

        :raises Exception: for an unknown policy_type
        """
        e = VppEnum.vl_api_ipsec_spd_action_t
        if policy_type == "protect":
            return e.IPSEC_API_SPD_ACTION_PROTECT
        elif policy_type == "bypass":
            return e.IPSEC_API_SPD_ACTION_BYPASS
        elif policy_type == "discard":
            return e.IPSEC_API_SPD_ACTION_DISCARD
        else:
            # bug fix: the original passed policy_type as a second
            # Exception argument instead of %-formatting the message
            raise Exception("Invalid policy type: %s" % policy_type)

    def spd_add_rem_policy(self, spd_id, src_if, dst_if,
                           proto, is_out, priority, policy_type,
                           remove=False, all_ips=False):
        """Add (or remove) an SPD entry and track it for tear down.

        :param all_ips: match the full 0.0.0.0-255.255.255.255 range
            instead of the interfaces' remote addresses
        :returns: the VppIpsecSpdEntry that was added/removed
        """
        spd = VppIpsecSpd(self, spd_id)

        if all_ips:
            src_range_low = ip_address("0.0.0.0")
            src_range_high = ip_address("255.255.255.255")
            dst_range_low = ip_address("0.0.0.0")
            dst_range_high = ip_address("255.255.255.255")
        else:
            src_range_low = src_if.remote_ip4
            src_range_high = src_if.remote_ip4
            dst_range_low = dst_if.remote_ip4
            dst_range_high = dst_if.remote_ip4

        spdEntry = VppIpsecSpdEntry(self, spd, 0,
                                    src_range_low,
                                    src_range_high,
                                    dst_range_low,
                                    dst_range_high,
                                    proto,
                                    priority=priority,
                                    policy=self.get_policy(policy_type),
                                    is_outbound=is_out)

        if remove is False:
            spdEntry.add_vpp_config()
            self.spd_policies.append(spdEntry)
        else:
            spdEntry.remove_vpp_config()
            self.spd_policies.remove(spdEntry)
        self.logger.info(self.vapi.ppcli("show ipsec all"))
        return spdEntry

    def create_stream(self, src_if, dst_if, pkt_count,
                      src_prt=1234, dst_prt=5678):
        """Build pkt_count UDP packets from src_if's peer to dst_if's
        peer, each carrying trackable packet-info payload."""
        packets = []
        for i in range(pkt_count):
            # create packet info stored in the test case instance
            info = self.create_packet_info(src_if, dst_if)
            # convert the info into packet payload
            payload = self.info_to_payload(info)
            # create the packet itself
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
                 UDP(sport=src_prt, dport=dst_prt) /
                 Raw(payload))
            # store a copy of the packet in the packet info
            info.data = p.copy()
            # append the packet to the list
            packets.append(p)
        # return the created packet list
        return packets

    def verify_capture(self, src_if, dst_if, capture):
        """Verify captured packets match the saved stream, in order,
        with none missing."""
        packet_info = None
        for packet in capture:
            try:
                ip = packet[IP]
                udp = packet[UDP]
                # convert the payload to packet info object
                payload_info = self.payload_to_info(packet)
                # make sure the indexes match
                self.assert_equal(payload_info.src, src_if.sw_if_index,
                                  "source sw_if_index")
                self.assert_equal(payload_info.dst, dst_if.sw_if_index,
                                  "destination sw_if_index")
                packet_info = self.get_next_packet_info_for_interface2(
                    src_if.sw_if_index,
                    dst_if.sw_if_index,
                    packet_info)
                # make sure we didn't run out of saved packets
                self.assertIsNotNone(packet_info)
                self.assert_equal(payload_info.index, packet_info.index,
                                  "packet info index")
                saved_packet = packet_info.data  # fetch the saved packet
                # assert the values match
                self.assert_equal(ip.src, saved_packet[IP].src,
                                  "IP source address")
                # ... more assertions here
                self.assert_equal(udp.sport, saved_packet[UDP].sport,
                                  "UDP source port")
            except Exception:
                self.logger.error(ppp("Unexpected or invalid packet:",
                                      packet))
                raise
        remaining_packet = self.get_next_packet_info_for_interface2(
            src_if.sw_if_index,
            dst_if.sw_if_index,
            packet_info)
        self.assertIsNone(remaining_packet,
                          "Interface %s: Packet expected from interface "
                          "%s didn't arrive" % (dst_if.name, src_if.name))

    def verify_policy_match(self, pkt_count, spdEntry):
        """Assert the SPD entry's packet counter equals pkt_count."""
        self.logger.info(
            "XXXX %s %s", str(spdEntry), str(spdEntry.get_stats()))
        matched_pkts = spdEntry.get_stats().get('packets')
        self.logger.info(
            "Policy %s matched: %d pkts", str(spdEntry), matched_pkts)
        self.assert_equal(pkt_count, matched_pkts)
class SpdFlowCacheTemplate(IPSecIPv4Fwd):
    @classmethod
    def setUpConstants(cls):
        super(SpdFlowCacheTemplate, cls).setUpConstants()
        # Override this method with required cmdline parameters e.g.
        # cls.vpp_cmdline.extend(["ipsec", "{",
        #                         "ipv4-outbound-spd-flow-cache on",
        #                         "}"])
        # cls.logger.info("VPP modified cmdline is %s" % " "
        #                 .join(cls.vpp_cmdline))

    def setUp(self):
        super(SpdFlowCacheTemplate, self).setUp()

    def tearDown(self):
        super(SpdFlowCacheTemplate, self).tearDown()

    def get_spd_flow_cache_entries(self):
        """Return the current number of outbound SPD flow cache entries.

        'show ipsec spd' output:
        ip4-outbound-spd-flow-cache-entries: 0
        """
        show_ipsec_reply = self.vapi.cli("show ipsec spd")
        # match the relevant section of 'show ipsec spd' output
        regex_match = re.search(
            'ip4-outbound-spd-flow-cache-entries: (.*)',
            show_ipsec_reply, re.DOTALL)
        if regex_match is None:
            raise Exception("Unable to find spd flow cache entries \
            in \'show ipsec spd\' CLI output - regex failed to match")
        else:
            try:
                num_entries = int(regex_match.group(1))
            except ValueError:
                # bug fix: %-format the message instead of passing the
                # matched string as a second Exception argument
                raise Exception("Unable to get spd flow cache entries \
                from \'show ipsec spd\' string: %s" % regex_match.group(0))
            self.logger.info("%s", regex_match.group(0))
        return num_entries

    def verify_num_outbound_flow_cache_entries(self, expected_elements):
        """Assert the flow cache holds exactly expected_elements entries."""
        self.assertEqual(self.get_spd_flow_cache_entries(), expected_elements)

    def crc32_supported(self):
        """Return True if the host CPU advertises hardware crc32 support."""
        # lscpu is part of util-linux package, available on all Linux Distros
        stream = os.popen('lscpu')
        cpu_info = stream.read()
        # close the pipe so the child process is reaped
        stream.close()
        # feature/flag "crc32" on Aarch64 and "sse4_2" on x86
        # see vppinfra/crc32.h
        # bug fix: the original condition was
        #   if "crc32" or "sse4_2" in cpu_info:
        # which parses as ("crc32") or (...) and is therefore always
        # truthy; each substring must be tested against cpu_info
        if "crc32" in cpu_info or "sse4_2" in cpu_info:
            self.logger.info("\ncrc32 supported:\n" + cpu_info)
            return True
        else:
            self.logger.info("\ncrc32 NOT supported:\n" + cpu_info)
            return False
if __name__ == '__main__':
    # allow the module to be executed stand-alone under VPP's test runner
    unittest.main(testRunner=VppTestRunner)
| |
from collections.abc import Iterable
from numbers import Integral
import subprocess
import openmc
from .plots import _get_plot_image
def _process_CLI_arguments(volume=False, geometry_debug=False, particles=None,
plot=False, restart_file=None, threads=None,
tracks=False, event_based=None,
openmc_exec='openmc', mpi_args=None):
"""Converts user-readable flags in to command-line arguments to be run with
the OpenMC executable via subprocess.
Parameters
----------
volume : bool, optional
Run in stochastic volume calculation mode. Defaults to False.
geometry_debug : bool, optional
Turn on geometry debugging during simulation. Defaults to False.
particles : int, optional
Number of particles to simulate per generation.
plot : bool, optional
Run in plotting mode. Defaults to False.
restart_file : str, optional
Path to restart file to use
threads : int, optional
Number of OpenMP threads. If OpenMC is compiled with OpenMP threading
enabled, the default is implementation-dependent but is usually equal
to the number of hardware threads available (or a value set by the
:envvar:`OMP_NUM_THREADS` environment variable).
tracks : bool, optional
Write tracks for all particles. Defaults to False.
event_based : None or bool, optional
Turns on event-based parallelism if True. If None, the value in
the Settings will be used.
openmc_exec : str, optional
Path to OpenMC executable. Defaults to 'openmc'.
mpi_args : list of str, optional
MPI execute command and any additional MPI arguments to pass,
e.g. ['mpiexec', '-n', '8'].
.. versionadded:: 0.13.0
Returns
-------
args : Iterable of str
The runtime flags converted to CLI arguments of the OpenMC executable
"""
args = [openmc_exec]
if volume:
args.append('--volume')
if isinstance(particles, Integral) and particles > 0:
args += ['-n', str(particles)]
if isinstance(threads, Integral) and threads > 0:
args += ['-s', str(threads)]
if geometry_debug:
args.append('-g')
if event_based is not None:
if event_based:
args.append('-e')
if isinstance(restart_file, str):
args += ['-r', restart_file]
if tracks:
args.append('-t')
if plot:
args.append('-p')
if mpi_args is not None:
args = mpi_args + args
return args
def _run(args, output, cwd):
# Launch a subprocess
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
# Capture and re-print OpenMC output in real-time
lines = []
while True:
# If OpenMC is finished, break loop
line = p.stdout.readline()
if not line and p.poll() is not None:
break
lines.append(line)
if output:
# If user requested output, print to screen
print(line, end='')
# Raise an exception if return status is non-zero
if p.returncode != 0:
# Get error message from output and simplify whitespace
output = ''.join(lines)
if 'ERROR: ' in output:
_, _, error_msg = output.partition('ERROR: ')
elif 'what()' in output:
_, _, error_msg = output.partition('what(): ')
else:
error_msg = 'OpenMC aborted unexpectedly.'
error_msg = ' '.join(error_msg.split())
raise RuntimeError(error_msg)
def plot_geometry(output=True, openmc_exec='openmc', cwd='.'):
    """Invoke the OpenMC executable in geometry-plotting mode (``-p``).

    Parameters
    ----------
    output : bool, optional
        Capture OpenMC output from standard out
    openmc_exec : str, optional
        Path to OpenMC executable
    cwd : str, optional
        Path to working directory to run in

    Raises
    ------
    RuntimeError
        If the `openmc` executable returns a non-zero status
    """
    cmd = [openmc_exec, '-p']
    _run(cmd, output, cwd)
def plot_inline(plots, openmc_exec='openmc', cwd='.'):
    """Render one or more plots and display them inline in Jupyter.

    .. versionchanged:: 0.13.0
        The *convert_exec* argument was removed since OpenMC now produces
        .png images directly.

    Parameters
    ----------
    plots : Iterable of openmc.Plot
        Plots to display
    openmc_exec : str
        Path to OpenMC executable
    cwd : str, optional
        Path to working directory to run in

    Raises
    ------
    RuntimeError
        If the `openmc` executable returns a non-zero status
    """
    from IPython.display import display

    # accept a single plot object as well as any iterable of plots
    if not isinstance(plots, Iterable):
        plots = [plots]

    # write plots.xml, then run OpenMC in plotting mode (output suppressed)
    openmc.Plots(plots).export_to_xml()
    plot_geometry(False, openmc_exec, cwd)

    if plots is not None:
        display(*[_get_plot_image(p) for p in plots])
def calculate_volumes(threads=None, output=True, cwd='.',
                      openmc_exec='openmc', mpi_args=None):
    """Run OpenMC in stochastic volume-calculation mode.

    Before calling this, create a :class:`openmc.VolumeCalculation` and
    assign it to :attr:`openmc.Settings.volume_calculations`. For example:

    >>> vol = openmc.VolumeCalculation(domains=[cell1, cell2], samples=100000)
    >>> settings = openmc.Settings()
    >>> settings.volume_calculations = [vol]
    >>> settings.export_to_xml()
    >>> openmc.calculate_volumes()

    Parameters
    ----------
    threads : int, optional
        Number of OpenMP threads. If OpenMC is compiled with OpenMP
        threading enabled, the default is implementation-dependent but is
        usually equal to the number of hardware threads available (or a
        value set by the :envvar:`OMP_NUM_THREADS` environment variable).
    output : bool, optional
        Capture OpenMC output from standard out
    openmc_exec : str, optional
        Path to OpenMC executable. Defaults to 'openmc'.
    mpi_args : list of str, optional
        MPI execute command and any additional MPI arguments to pass,
        e.g. ['mpiexec', '-n', '8'].
    cwd : str, optional
        Path to working directory to run in. Defaults to the current
        working directory.

    Raises
    ------
    RuntimeError
        If the `openmc` executable returns a non-zero status

    See Also
    --------
    openmc.VolumeCalculation
    """
    cli = _process_CLI_arguments(volume=True, threads=threads,
                                 mpi_args=mpi_args, openmc_exec=openmc_exec)
    _run(cli, output, cwd)
def run(particles=None, threads=None, geometry_debug=False,
        restart_file=None, tracks=False, output=True, cwd='.',
        openmc_exec='openmc', mpi_args=None, event_based=False):
    """Run an OpenMC transport simulation.

    Parameters
    ----------
    particles : int, optional
        Number of particles to simulate per generation.
    threads : int, optional
        Number of OpenMP threads. If OpenMC is compiled with OpenMP
        threading enabled, the default is implementation-dependent but is
        usually equal to the number of hardware threads available (or a
        value set by the :envvar:`OMP_NUM_THREADS` environment variable).
    geometry_debug : bool, optional
        Turn on geometry debugging during simulation. Defaults to False.
    restart_file : str, optional
        Path to restart file to use
    tracks : bool, optional
        Write tracks for all particles. Defaults to False.
    output : bool
        Capture OpenMC output from standard out
    cwd : str, optional
        Path to working directory to run in. Defaults to the current
        working directory.
    openmc_exec : str, optional
        Path to OpenMC executable. Defaults to 'openmc'.
    mpi_args : list of str, optional
        MPI execute command and any additional MPI arguments to pass,
        e.g. ['mpiexec', '-n', '8'].
    event_based : bool, optional
        Turns on event-based parallelism, instead of default history-based

        .. versionadded:: 0.12

    Raises
    ------
    RuntimeError
        If the `openmc` executable returns a non-zero status
    """
    cli = _process_CLI_arguments(
        volume=False, particles=particles, geometry_debug=geometry_debug,
        threads=threads, restart_file=restart_file, tracks=tracks,
        event_based=event_based, openmc_exec=openmc_exec, mpi_args=mpi_args)
    _run(cli, output, cwd)
| |
from __future__ import unicode_literals
try:
from urllib.parse import parse_qs, urlencode, urlparse, quote, unquote
except ImportError:
from urllib import urlencode, quote, unquote
from urlparse import parse_qs, urlparse
from collections import namedtuple
import six
# To minimise memory consumption, we use a namedtuple to store all instance
# variables, as well as using the __slots__ attribute.
# Field order mirrors the dict produced by parse(); username, password and
# fragment are stored %-quoted by URL.__init__.
_URLTuple = namedtuple(
    "_URLTuple", "host username password scheme port path query fragment")
# Encoding helpers
def to_unicode(string):
    """
    Coerce the given value to the unicode text type.

    Bytestrings are decoded as UTF-8; text passes through unchanged;
    anything else is stringified with the interpreter's text type.
    """
    if isinstance(string, six.binary_type):
        return string.decode('utf8')
    if isinstance(string, six.text_type):
        return string
    return unicode(string) if six.PY2 else str(string)
def to_utf8(string):
    """
    Encode a value as a UTF-8 bytestring.

    Bytestrings pass through untouched; text is encoded; anything else is
    stringified with str(). Must distinguish bytes from text since either
    may be passed in.
    """
    if isinstance(string, six.binary_type):
        return string
    if isinstance(string, six.text_type):
        return string.encode('utf8')
    return str(string)
def dict_to_unicode(raw_dict):
    """
    Ensure all keys and values in a dict are unicode.

    The passed dict is assumed to have lists for all values; the returned
    dict likewise maps unicode keys to *lists* of unicode values.
    """
    decoded = {}
    for key, value in raw_dict.items():
        # bug fix: on Python 3, map() returns a lazy iterator that is
        # exhausted after one pass -- build a real list so the documented
        # "lists for all values" contract holds on both Python versions
        decoded[to_unicode(key)] = [to_unicode(v) for v in value]
    return decoded
def unicode_quote(string, safe='/'):
    """%-quote *string* (UTF-8 encoded first); None passes through."""
    return None if string is None else quote(to_utf8(string), to_utf8(safe))
def unicode_unquote(string):
    """Reverse %-quoting, returning unicode text; None passes through."""
    if string is None:
        return None
    # Python 3's unquote handles unicode natively; Python 2 needs a
    # bytes round-trip followed by a decode
    return unquote(string) if six.PY3 else to_unicode(unquote(to_utf8(string)))
def unicode_urlencode(query, doseq=True):
    """
    Custom wrapper around urlencode to support unicode

    Python urlencode doesn't handle unicode well so we need to convert to
    bytestrings before using it:
    http://stackoverflow.com/questions/6480723/urllib-urlencode-doesnt-like-unicode-values-how-about-this-workaround
    """
    encoded_query = {}
    for key, value in query.items():
        if isinstance(value, list):
            encoded_query[to_utf8(key)] = [to_utf8(v) for v in value]
        else:
            encoded_query[to_utf8(key)] = to_utf8(value)
    return urlencode(encoded_query, doseq)
def parse(url_str):
    """
    Extract all parts from a URL string and return them as a dictionary

    Handles userinfo robustly: a missing password (``user@host``) yields
    ``password=None`` instead of raising, and passwords containing ``:``
    are kept intact (the original ``split(':')`` raised ValueError in both
    cases).
    """
    url_str = to_unicode(url_str)
    result = urlparse(url_str)
    if '@' in result.netloc:
        # Split on the *last* '@' so a literal '@' inside the userinfo
        # does not corrupt the host.
        userinfo, host = result.netloc.rsplit('@', 1)
        # partition tolerates both a missing password and passwords that
        # themselves contain ':'.
        username, sep, password = userinfo.partition(':')
        if not sep:
            password = None
    else:
        username = password = None
        host = result.netloc
    if host and ':' in host:
        # Strip any port suffix; the port itself comes from result.port
        host = host.split(':')[0]
    return {'host': host,
            'username': username,
            'password': password,
            'scheme': result.scheme,
            'port': result.port,
            'path': result.path,
            'query': result.query,
            'fragment': result.fragment}
class URL(object):
    """
    The constructor can be used in two ways:
    1. Pass a URL string::
        >>> URL('http://www.google.com/search?q=testing').as_string()
        u'http://www.google.com/search?q=testing'
    2. Pass keyword arguments::
        >>> URL(host='www.google.com', path='/search', query='q=testing').as_string()
        u'http://www.google.com/search?q=testing'
    If you pass both a URL string and keyword args, then the values of keyword
    args take precedence.
    """
    # Single-slot class: all state lives in the _URLTuple, keeping instances
    # small and effectively immutable (mutators return new URL objects).
    __slots__ = ("_tuple",)
    def __init__(self, url_str=None, host=None, username=None, password=None,
                 scheme=None, port=None, path=None, query=None, fragment=None):
        if url_str is not None:
            params = parse(url_str)
        else:
            # Defaults
            params = {'scheme': 'http',
                      'username': None,
                      'password': None,
                      'host': None,
                      'port': None,
                      'path': '/',
                      'query': None,
                      'fragment': None}
        # Kwargs override the url_str
        for var in 'host username password scheme port path query fragment'.split():
            if locals()[var] is not None:
                params[var] = locals()[var]
        # Store the various components in %-encoded form
        self._tuple = _URLTuple(params['host'],
                                unicode_quote(params['username']),
                                unicode_quote(params['password']),
                                params['scheme'],
                                params['port'],
                                params['path'],
                                params['query'],
                                unicode_quote(params['fragment']))
    def __eq__(self, other):
        return self._tuple == other._tuple
    def __ne__(self, other):
        return self._tuple != other._tuple
    # Pickle support: serialize just the component tuple
    def __getstate__(self):
        return tuple(self._tuple)
    def __setstate__(self, state):
        self._tuple = _URLTuple(*state)
    def __hash__(self):
        return hash(self._tuple)
    def __repr__(self):
        return str(self._tuple)
    def __unicode__(self):
        url = self._tuple
        parts = ["%s://" % url.scheme if url.scheme else '',
                 self.netloc(),
                 url.path,
                 '?%s' % url.query if url.query else '',
                 '#%s' % url.fragment if url.fragment else '']
        if url.host is None:
            # Relative URL: omit scheme and netloc
            return ''.join(parts[2:])
        return ''.join(parts)
    __str__ = as_string = __unicode__
    # Accessors / Mutators
    # These use the jQuery overloading style whereby they become mutators if
    # extra args are passed
    def netloc(self):
        """
        Return the netloc
        """
        url = self._tuple
        if url.username and url.password:
            netloc = '%s:%s@%s' % (url.username, url.password, url.host)
        else:
            netloc = url.host
        if url.port:
            netloc = '%s:%s' % (netloc, url.port)
        return netloc
    def host(self, value=None):
        """
        Return the host
        :param string value: new host string
        """
        if value:
            return URL._mutate(self, host=value)
        return self._tuple.host
    domain = host
    def username(self, value=None):
        """
        Return or set the username
        :param string value: the new username to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, username=value)
        return unicode_unquote(self._tuple.username)
    def password(self, value=None):
        """
        Return or set the password
        :param string value: the new password to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, password=value)
        return unicode_unquote(self._tuple.password)
    def subdomains(self, value=None):
        """
        Returns a list of subdomains or set the subdomains and returns a
        new :class:`URL` instance.
        :param list value: a list of subdomains
        """
        if value is not None:
            return URL._mutate(self, host='.'.join(value))
        return self.host().split('.')
    def subdomain(self, index, value=None):
        """
        Return a subdomain or set a new value and return a new :class:`URL`
        instance.
        :param integer index: 0-indexed subdomain
        :param string value: New subdomain
        """
        if value is not None:
            subdomains = self.subdomains()
            subdomains[index] = value
            return URL._mutate(self, host='.'.join(subdomains))
        return self.subdomains()[index]
    def scheme(self, value=None):
        """
        Return or set the scheme.
        :param string value: the new scheme to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, scheme=value)
        return self._tuple.scheme
    def path(self, value=None):
        """
        Return or set the path
        :param string value: the new path to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            if not value.startswith('/'):
                value = '/' + value
            encoded_value = unicode_quote(value)
            return URL._mutate(self, path=encoded_value)
        return self._tuple.path
    def query(self, value=None):
        """
        Return or set the query string
        :param string value: the new query string to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, query=value)
        return self._tuple.query
    def port(self, value=None):
        """
        Return or set the port
        :param string value: the new port to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, port=value)
        return self._tuple.port
    def fragment(self, value=None):
        """
        Return or set the fragment (hash)
        :param string value: the new fragment to use
        :returns: string or new :class:`URL` instance
        """
        if value:
            return URL._mutate(self, fragment=value)
        return unicode_unquote(self._tuple.fragment)
    # ====
    # Path
    # ====
    def path_segment(self, index, value=None, default=None):
        """
        Return the path segment at the given index
        :param integer index:
        :param string value: the new segment value
        :param string default: the default value to return if no path segment exists with the given index
        """
        if value is not None:
            segments = list(self.path_segments())
            segments[index] = value
            new_path = '/' + '/'.join(segments)
            if self._tuple.path.endswith('/'):
                new_path += '/'
            return URL._mutate(self, path=new_path)
        try:
            return self.path_segments()[index]
        except IndexError:
            return default
    def path_segments(self, value=None):
        """
        Return the path segments
        :param list value: the new path segments to use
        """
        if value is not None:
            encoded_values = map(unicode_quote, value)
            new_path = '/' + '/'.join(encoded_values)
            return URL._mutate(self, path=new_path)
        parts = self._tuple.path.split('/')
        segments = parts[1:]
        if self._tuple.path.endswith('/'):
            segments.pop()
        segments = map(unicode_unquote, segments)
        return tuple(segments)
    def add_path_segment(self, value):
        """
        Add a new path segment to the end of the current string
        :param string value: the new path segment to use
        Example::
            >>> u = URL('http://example.com/foo/')
            >>> u.add_path_segment('bar').as_string()
            u'http://example.com/foo/bar'
        """
        segments = self.path_segments() + (to_unicode(value),)
        return self.path_segments(segments)
    # ============
    # Query params
    # ============
    def has_query_param(self, key):
        """
        Test if a given query parameter is present
        :param string key: key to test for
        """
        return self.query_param(key) is not None
    def has_query_params(self, keys):
        """
        Test if a given set of query parameters are present
        :param list keys: keys to test for
        """
        return all([self.has_query_param(k) for k in keys])
    def query_param(self, key, value=None, default=None, as_list=False):
        """
        Return or set a query parameter for the given key
        The value can be a list.
        :param string key: key to look for
        :param string default: value to return if ``key`` isn't found
        :param boolean as_list: whether to return the values as a list
        :param string value: the new query parameter to use
        """
        parse_result = self.query_params()
        if value is not None:
            # Need to ensure all strings are unicode
            if isinstance(value, (list, tuple)):
                value = list(map(to_unicode, value))
            else:
                value = to_unicode(value)
            parse_result[to_unicode(key)] = value
            return URL._mutate(
                self, query=unicode_urlencode(parse_result, doseq=True))
        try:
            result = parse_result[key]
        except KeyError:
            return default
        if as_list:
            return result
        return result[0] if len(result) == 1 else result
    def append_query_param(self, key, value):
        """
        Append a query parameter
        :param string key: The query param key
        :param string value: The new value
        """
        values = self.query_param(key, as_list=True, default=[])
        values.append(value)
        return self.query_param(key, values)
    def query_params(self, value=None):
        """
        Return or set a dictionary of query params
        :param dict value: new dictionary of values
        """
        if value is not None:
            # (fixed: the urlencoded string was previously computed twice,
            # with the first result bound to an unused local)
            return URL._mutate(self, query=unicode_urlencode(value, doseq=True))
        query = '' if self._tuple.query is None else self._tuple.query
        # In Python 2.6, urlparse needs a bytestring so we encode and then
        # decode the result.
        if not six.PY3:
            result = parse_qs(to_utf8(query), True)
            return dict_to_unicode(result)
        return parse_qs(query, True)
    def remove_query_param(self, key, value=None):
        """
        Remove a query param from a URL
        Set the value parameter if removing from a list.
        :param string key: The key to delete
        :param string value: The value of the param to delete (of more than one)
        """
        parse_result = self.query_params()
        if value is not None:
            index = parse_result[key].index(value)
            del parse_result[key][index]
        else:
            del parse_result[key]
        return URL._mutate(self, query=unicode_urlencode(parse_result, doseq=True))
    @classmethod
    def _mutate(cls, url, **kwargs):
        # Copy-on-write helper: rebuild a URL from the current components
        # with `kwargs` overriding selected fields.
        args = url._tuple._asdict()
        args.update(kwargs)
        return cls(**args)
    @classmethod
    def from_string(cls, url_str):
        """
        Factory method to create a new instance based on a passed string
        This method is deprecated now
        """
        return cls(url_str)
| |
# -*- coding: utf-8 -*-
"""
Script to calculate the MTF from a real image.
Based on /afs/EssentialMed/Dev/MTF.py

NOTE(review): this file uses Python 2 print statements throughout -- it must
be run with a Python 2 interpreter.
"""
import matplotlib.pylab as plt
import numpy as np
import os
plt.ion()
# SETUP
SelectStartPointManually = False
SelectEdgeManually = False
PolynomialOrder = 5
# Images
ImagePath = '/afs/psi.ch/project/EssentialMed/Images'
ImageDir = '11-MTF'
Camera = 'iPhone'
# Camera = 'tiscam'
# Camera = 'Elphel'
if Camera == 'iPhone':
    # use iPhone images
    # NOTE(review): only the last of these four assignments takes effect;
    # the earlier ones are immediately overwritten (kept here as a quick way
    # to switch test images by reordering).
    ImageFile = 'iPhone_with_xray_film.jpg'
    ImageFile = 'iPhone_with_xray_film_hdr.jpg'
    ImageFile = 'iPhone_with_xray_film_window.jpg'
    ImageFile = 'iPhone_with_xray_film_window_hdr.jpg'
elif Camera == 'tiscam':
    # 'The imaging source' camera images from different objectives
    Objective = 9 # 3,6 or 9
    if Objective == 3:
        ObjectiveDir = 3.6
        ImageFile = 'shot0099.png' # visually the best one
    elif Objective == 6:
        ObjectiveDir = 6.0
        ImageFile = 'shot0364.png' # visually the best one
    elif Objective == 9:
        ObjectiveDir = 9.6
        ImageFile = 'shot0072.png' # visually the best one
    # Directory names encode the objective, e.g. 'tiscam_9.6'
    Camera = Camera + '_' + str(ObjectiveDir)
elif Camera == 'Elphel':
    # Elphel images
    ImageFile = 'image.jpg'
else:
    print 'I do not know what to do, exiting'
    exit()
def rgb2gray(rgb):
    '''
    convert an image from rgb to grayscale
    http://stackoverflow.com/a/12201744/323100

    Uses the ITU-R 601 luma weights. The blue coefficient was previously
    0.144 (a typo carried over from the linked answer, making the weights
    sum to 1.03); the correct value is 0.114.
    '''
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
# NOTE(review): debugging dead-end. This loads the image, displays its raw
# 2D FFT (a complex array -- presumably only the real part is rendered by
# imshow; verify) and then exits unconditionally, so everything below the
# exit() call is unreachable when this block is active.
ImageToLoad = os.path.join(ImagePath, ImageDir, Camera, ImageFile)
ImageRGB = plt.imread(ImageToLoad)
Image = rgb2gray(ImageRGB)
plt.imshow(np.fft.fft2(Image))
# plt.imshow(Image)
plt.ioff()
plt.show()
exit()
def MTF(edgespreadfunction):
    '''
    Modulation transfer function of a knife-edge profile.

    Differentiates the edge spread function (the pixel values across an
    edge, ideally a knife-edge target) to get the line spread function,
    then returns the magnitude of its FFT.
    Background reading: http://is.gd/uSC5Ve
    '''
    return np.abs(np.fft.fft(np.diff(edgespreadfunction)))
def LSF(edgespreadfunction):
    '''
    Compute the line spread function (LSF).
    The LSF is the (absolute value of the) derivative of the edge spread
    function, i.e. of the values along an edge, ideally a knife-edge test
    target. See an explanation here: http://is.gd/uSC5Ve

    (The previous docstring was a copy-paste of MTF's and described the
    wrong quantity.)
    '''
    return np.abs(np.diff(edgespreadfunction))
def polynomialfit(data, order):
    '''
    Fit a polynomial of the given degree to `data` and evaluate it at the
    same (integer) sample positions, returning the smoothed values.
    '''
    abscissa = np.arange(len(data))
    coeffs = np.polyfit(abscissa, data, order)
    return np.polyval(coeffs, abscissa)
# Main measurement pass: load the image, extract one horizontal and one
# vertical profile, locate the knife edge on the vertical profile, and plot
# ESF / LSF / MTF around it.
ImageToLoad = os.path.join(ImagePath, ImageDir, Camera, ImageFile)
print 'reading', ImageToLoad
# Read the image and convert it to grayscale rightaway
ImageRGB = plt.imread(ImageToLoad)
Image = rgb2gray(ImageRGB)
# NOTE(review): shape[0] is the number of rows and shape[1] the number of
# columns; the names Width/Height look swapped relative to that -- verify
# against how Horizon/Vertigo index the array below.
ImageWidth = Image.shape[0]
ImageHeight = Image.shape[1]
print 'The image we loaded is', ImageWidth, 'by', ImageHeight, \
    'pixels big. That is', round(ImageWidth * ImageHeight / 1e6, 3), 'MPx.'
plt.subplot(221)
plt.imshow(ImageRGB, origin='lower')
plt.title('Pick point for drawing\n horizontal and vertical profile')
if SelectStartPointManually:
    PickPoint = plt.ginput(1)
else:
    # Hard-coded pick points per camera; 'tiscam' was renamed to
    # 'tiscam_<objective>' above, hence the prefix comparison.
    if Camera == 'iPhone':
        PickPoint = [[1500, 1000]]
    elif Camera[:6] == 'tiscam':
        # Select middle of image...
        PickPoint = [[ImageHeight / 2, ImageWidth / 2]]
    elif Camera == 'Elphel':
        PickPoint = [[ImageHeight / 2, ImageWidth / 2]]
plt.title('Original image')
# ginput returns (x, y): x indexes columns (vertical line), y rows (horizontal)
Horizon = int(PickPoint[0][1])
Vertigo = int(PickPoint[0][0])
if SelectStartPointManually:
    print 'You selected horizontal line', Horizon, 'and vertical line', Vertigo
else:
    print 'I selected horizontal line', Horizon, 'and vertical line', Vertigo
plt.hlines(Horizon, 0, ImageHeight, 'r')
plt.vlines(Vertigo, 0, ImageWidth, 'b')
plt.draw()
plt.subplot(223)
HorizontalProfile = Image[Horizon, :]
plt.plot(HorizontalProfile, 'r')
plt.title('Horizontal Profile')
# plt.xlim(0, ImageHeight)
# plt.ylim(0, 256)
plt.subplot(222)
VerticalProfile = Image[:, Vertigo]
plt.plot(VerticalProfile, range(ImageWidth), 'b')
# plt.xlim(0, 256)
# plt.ylim(0, ImageWidth)
plt.title('Vertical Profile')
plt.draw()
print 'The horizontal profile (red) goes from', min(HorizontalProfile), 'to',\
    max(HorizontalProfile)
print 'The vertical profile (blue) goes from', min(VerticalProfile), 'to',\
    max(VerticalProfile)
# Set range of the region we want to look at to 'Edgerange', about 10% of Image
# width
EdgeRange = int(round(Image.shape[0] * .05 / 10) * 10)
plt.figure(figsize=(16, 9))
plt.subplot(311)
plt.plot(VerticalProfile)
if SelectEdgeManually:
    plt.title('Select approximate middle of knife edge')
    EdgePosition = plt.ginput(1)
    plt.title('Vertical Profile\n(zoom reguion selected manually, width = ' +
              str(EdgeRange) + ' px, approx. 5% of image)')
else:
    # Automatic edge detection: the edge sits where the gradient (LSF) peaks
    EdgePosition = [[LSF(VerticalProfile).argmax(), np.nan]]
    plt.title('Vertical Profile\n(zoom region selected automatically, width ' +
              '= ' + str(EdgeRange) + ' px, approx. 5% of image)')
plt.axvspan(EdgePosition[0][0] - EdgeRange, EdgePosition[0][0] + EdgeRange,
            facecolor='r', alpha=0.5)
plt.subplot(312)
plt.plot(LSF(VerticalProfile))
plt.axvspan(EdgePosition[0][0] - EdgeRange, EdgePosition[0][0] + EdgeRange,
            facecolor='r', alpha=0.5)
plt.title('LSF')
# plt.subplot(413)
# plt.plot(MTF(VerticalProfile))
# plt.title('MTF')
plt.subplot(3, 3, 7)
plt.plot(VerticalProfile)
plt.xlim(EdgePosition[0][0] - EdgeRange, EdgePosition[0][0] + EdgeRange)
plt.title('Zoomed Edge')
plt.subplot(3, 3, 8)
plt.plot(LSF(VerticalProfile))
plt.xlim(EdgePosition[0][0] - EdgeRange, EdgePosition[0][0] + EdgeRange)
plt.title('Zoomed LSF')
plt.subplot(3, 3, 9)
# Only the first half of the MTF is plotted (the spectrum is symmetric)
plt.plot(MTF(VerticalProfile), alpha=0.5)
plt.plot(polynomialfit(MTF(VerticalProfile), PolynomialOrder), linewidth=5)
plt.xlim(0, len(MTF(VerticalProfile)) / 2)
plt.title('MTF with polynomial fit of order ' + str(PolynomialOrder) +
          '\nwith a minimum at :' +
          str(round(min(polynomialfit(MTF(VerticalProfile), PolynomialOrder)),
                    3)))
plt.ioff()
plt.show()
| |
''' Distutils / setuptools helpers
'''
import os
import sys
from os.path import join as pjoin, split as psplit, splitext, dirname, exists
import tempfile
import shutil
from distutils.version import LooseVersion
from distutils.command.install_scripts import install_scripts
from distutils.errors import CompileError, LinkError
from distutils import log
# Template for a Windows .bat shim: it reads the "#!" first line of the
# wrapped script {FNAME} and re-invokes the script with the interpreter
# named there (see install_scripts_bat below).
BAT_TEMPLATE = \
r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
REM quote exe in case of spaces in path name
set py_exe="%py_exe%"
call %py_exe% %pyscript% %*
"""
# Path of file to which to write C conditional vars from build-time checks
CONFIG_H = pjoin('build', 'config.h')
# File name (no directory) to which to write Python vars from build-time checks
CONFIG_PY = '__config__.py'
# Directory to which to write libraries for building
LIB_DIR_TMP = pjoin('build', 'extra_libs')
class install_scripts_bat(install_scripts):
    """ Make scripts executable on Windows

    On Unix, bare (extensionless) scripts run via their ``#!`` shebang line.
    Windows ignores shebangs, so after the normal install step this command
    drops a ``bare_script_name.bat`` wrapper next to each installed script;
    the wrapper re-invokes the script with the interpreter named on its
    first line.

    Notes
    -----
    See discussion at
    http://matthew-brett.github.com/pydagogue/installing_scripts.html and
    example at git://github.com/matthew-brett/myscripter.git for more
    background.
    """
    def run(self):
        install_scripts.run(self)
        if os.name != "nt":
            return
        for script_path in self.get_outputs():
            # Only wrap scripts whose shebang names a python interpreter
            with open(script_path, 'rt') as handle:
                shebang = handle.readline()
            if not (shebang.startswith('#!') and
                    'python' in shebang.lower()):
                log.info("No #!python executable found, skipping .bat "
                         "wrapper")
                continue
            folder, script_name = psplit(script_path)
            base_name = splitext(script_name)[0]
            wrapper_path = pjoin(folder, base_name + '.bat')
            wrapper_text = BAT_TEMPLATE.replace('{FNAME}', script_name)
            log.info("Making %s wrapper for %s" % (wrapper_path, script_path))
            if self.dry_run:
                continue
            with open(wrapper_path, 'wt') as handle:
                handle.write(wrapper_text)
def add_flag_checking(build_ext_class, flag_defines, top_package_dir=''):
    """ Override input `build_ext_class` to check compiler `flag_defines`
    Parameters
    ----------
    build_ext_class : class
        Class implementing ``distutils.command.build_ext.build_ext`` interface,
        with a ``build_extensions`` method.
    flag_defines : sequence
        A sequence of elements, where the elements are sequences of length 4
        consisting of (``compile_flags``, ``link_flags``, ``code``,
        ``defvar``). ``compile_flags`` is a sequence of compiler flags;
        ``link_flags`` is a sequence of linker flags. We
        check ``compile_flags`` to see whether a C source string ``code`` will
        compile, and ``link_flags`` to see whether the resulting object file
        will link. If both compile and link works, we add ``compile_flags`` to
        ``extra_compile_args`` and ``link_flags`` to ``extra_link_args`` of
        each extension when we build the extensions. If ``defvar`` is not
        None, it is the name of C variable to be defined in ``build/config.h``
        with 1 if the combination of (``compile_flags``, ``link_flags``,
        ``code``) will compile and link, 0 otherwise. If None, do not write
        variable.
    top_package_dir : str
        String giving name of top-level package, for writing Python file
        containing configuration variables. If empty, do not write this file.
        Variables written are the same as the Cython variables generated via
        the `flag_defines` setting.
    Returns
    -------
    checker_class : class
        A class with similar interface to
        ``distutils.command.build_ext.build_ext``, that adds all working
        ``compile_flags`` values to the ``extra_compile_args`` and working
        ``link_flags`` to ``extra_link_args`` attributes of extensions, before
        compiling.
    """
    class Checker(build_ext_class):
        # Snapshot of the flag spec captured from the enclosing call
        flag_defs = tuple(flag_defines)
        def can_compile_link(self, compile_flags, link_flags, code):
            """Return True if `code` compiles and links with the given flags.

            Works in a throwaway temp directory so test.c and the object
            files never pollute the source tree; the finally clause always
            restores the cwd and removes the temp dir.
            """
            cc = self.compiler
            fname = 'test.c'
            cwd = os.getcwd()
            tmpdir = tempfile.mkdtemp()
            try:
                os.chdir(tmpdir)
                with open(fname, 'wt') as fobj:
                    fobj.write(code)
                try:
                    objects = cc.compile([fname],
                                         extra_postargs=compile_flags)
                except CompileError:
                    return False
                try:
                    # Link shared lib rather then executable to avoid
                    # http://bugs.python.org/issue4431 with MSVC 10+
                    # (TypeError is also treated as a link failure)
                    cc.link_shared_lib(objects, "testlib",
                                       extra_postargs=link_flags)
                except (LinkError, TypeError):
                    return False
            finally:
                os.chdir(cwd)
                shutil.rmtree(tmpdir)
            return True
        def build_extensions(self):
            """ Hook into extension building to check compiler flags """
            def_vars = []
            good_compile_flags = []
            good_link_flags = []
            config_dir = dirname(CONFIG_H)
            for compile_flags, link_flags, code, def_var in self.flag_defs:
                compile_flags = list(compile_flags)
                link_flags = list(link_flags)
                flags_good = self.can_compile_link(compile_flags,
                                                   link_flags,
                                                   code)
                if def_var:
                    def_vars.append((def_var, flags_good))
                if flags_good:
                    good_compile_flags += compile_flags
                    good_link_flags += link_flags
                else:
                    log.warn("Flags {0} omitted because of compile or link "
                             "error".format(compile_flags + link_flags))
            if def_vars:  # write config.h file
                if not exists(config_dir):
                    self.mkpath(config_dir)
                with open(CONFIG_H, 'wt') as fobj:
                    fobj.write('/* Automatically generated; do not edit\n')
                    fobj.write('   C defines from build-time checks */\n')
                    for v_name, v_value in def_vars:
                        fobj.write('int {0} = {1};\n'.format(
                            v_name, 1 if v_value else 0))
            if def_vars and top_package_dir:  # write __config__.py file
                # For in-place builds the file goes straight into the source
                # package; otherwise into the build_lib staging area.
                config_py_dir = (top_package_dir if self.inplace else
                                 pjoin(self.build_lib, top_package_dir))
                if not exists(config_py_dir):
                    self.mkpath(config_py_dir)
                config_py = pjoin(config_py_dir, CONFIG_PY)
                with open(config_py, 'wt') as fobj:
                    fobj.write('# Automatically generated; do not edit\n')
                    fobj.write('# Variables from compile checks\n')
                    for v_name, v_value in def_vars:
                        fobj.write('{0} = {1}\n'.format(v_name, v_value))
            if def_vars or good_compile_flags or good_link_flags:
                for ext in self.extensions:
                    ext.extra_compile_args += good_compile_flags
                    ext.extra_link_args += good_link_flags
                    if def_vars:
                        ext.include_dirs.append(config_dir)
            build_ext_class.build_extensions(self)
    return Checker
def get_pkg_version(pkg_name):
    """ Return package version for `pkg_name` if installed

    Returns
    -------
    pkg_version : str or None
        Return None if package not importable. Return 'unknown' if standard
        ``__version__`` string not present. Otherwise return version string.
    """
    try:
        pkg = __import__(pkg_name)
    except ImportError:
        return None
    # Missing __version__ attribute falls back to the 'unknown' marker
    return getattr(pkg, '__version__', 'unknown')
def version_error_msg(pkg_name, found_ver, min_ver):
    """ Return informative error message for version or None
    """
    if found_ver is None:
        return 'We need package {0}, but not importable'.format(pkg_name)
    if found_ver == 'unknown':
        return 'We need {0} version {1}, but cannot get version'.format(
            pkg_name, min_ver)
    # Version comparison last: only reached with a real version string
    if LooseVersion(found_ver) < LooseVersion(min_ver):
        return 'We need {0} version {1}, but found version {2}'.format(
            pkg_name, found_ver, min_ver)
    return None
class SetupDependency(object):
    """ SetupDependency class

    Describes one package the build needs, and how to enforce it.

    Parameters
    ----------
    import_name : str
        Name with which required package should be ``import``ed.
    min_ver : str
        Distutils version string giving minimum version for package.
    req_type : {'install_requires', 'setup_requires'}, optional
        Setuptools dependency type.
    heavy : {False, True}, optional
        If True, and package is already installed (importable), then do not add
        to the setuptools dependency lists. This prevents setuptools
        reinstalling big packages when the package was installed without using
        setuptools, or this is an upgrade, and we want to avoid the pip default
        behavior of upgrading all dependencies.
    install_name : str, optional
        Name identifying package to install from pypi etc, if different from
        `import_name`.
    """
    def __init__(self, import_name,
                 min_ver,
                 req_type='install_requires',
                 heavy=False,
                 install_name=None):
        self.import_name = import_name
        self.min_ver = min_ver
        self.req_type = req_type
        self.heavy = heavy
        if install_name is None:
            install_name = import_name
        self.install_name = install_name
    def check_fill(self, setuptools_kwargs):
        """ Process this dependency, maybe filling `setuptools_kwargs`

        Run checks on this dependency. If not using setuptools, then raise
        error for unmet dependencies. If using setuptools, add missing or
        not-heavy dependencies to `setuptools_kwargs`.

        Parameters
        ----------
        setuptools_kwargs : dict
            Dictionary of setuptools keyword arguments that may be modified
            in-place while checking dependencies.
        """
        found_ver = get_pkg_version(self.import_name)
        ver_err_msg = version_error_msg(self.import_name,
                                        found_ver,
                                        self.min_ver)
        if 'setuptools' not in sys.modules:
            # Plain distutils build: unmet dependencies are fatal
            if ver_err_msg is not None:
                raise RuntimeError(ver_err_msg)
            return
        # Heavy dependency already satisfied: leave setuptools alone so it
        # does not try to reinstall/upgrade a big package.
        if self.heavy and ver_err_msg is None:
            return
        requirement = '{0}>={1}'.format(self.import_name, self.min_ver)
        existing = setuptools_kwargs.get(self.req_type, [])
        setuptools_kwargs[self.req_type] = existing + [requirement]
class Bunch(object):
    """Expose the entries of a mapping as instance attributes.

    Keys starting with a double underscore (module dunders such as
    ``__builtins__``) are skipped.
    """
    def __init__(self, vars):
        for name in vars:
            if not name.startswith('__'):
                setattr(self, name, vars[name])
def read_vars_from(ver_file):
    """ Read variables from Python text file

    Parameters
    ----------
    ver_file : str
        Filename of file to read

    Returns
    -------
    info_vars : Bunch instance
        Bunch object where variables read from `ver_file` appear as
        attributes
    """
    # exec-ing the file text keeps this compatible with Python 2 and 3
    namespace = {}
    with open(ver_file, 'rt') as stream:
        source = stream.read()
    exec(source, namespace)
    return Bunch(namespace)
def make_np_ext_builder(build_ext_class):
    """ Override input `build_ext_class` to add numpy includes to extension

    This is useful to delay call of ``np.get_include`` until the extension is
    being built.

    Parameters
    ----------
    build_ext_class : class
        Class implementing ``distutils.command.build_ext.build_ext`` interface,
        with a ``build_extensions`` method.

    Returns
    -------
    np_build_ext_class : class
        A class with similar interface to
        ``distutils.command.build_ext.build_ext``, that adds libraries in
        ``np.get_include()`` to include directories of extension.
    """
    class NpExtBuilder(build_ext_class):
        def build_extensions(self):
            """ Hook into extension building to add np include dirs """
            # Deferred import: numpy need not be importable when setup.py
            # is first parsed, only when extensions actually build.
            import numpy as np
            np_include = np.get_include()
            for extension in self.extensions:
                extension.include_dirs.append(np_include)
            build_ext_class.build_extensions(self)
    return NpExtBuilder
| |
from test.support import verbose, TestFailed
import locale
import sys
import test.support as support
import unittest
# Largest Py_ssize_t the interpreter supports; used by the overflow tests below
maxsize = support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on unicode strings as well
def testformat(formatstr, args, output=None, limit=None, overflowok=False):
    """Apply ``formatstr % args`` and check the result.

    If `output` is given, the result must match it exactly (raises
    AssertionError on mismatch) unless `limit` is also given, in which case
    the lengths must match but only the first `limit` characters are
    compared -- useful for float formats where rounding makes the tail
    unreliable.  NOTE: the `limit` branch only *prints* a mismatch, it does
    not raise.  An OverflowError is tolerated when `overflowok` is True.
    """
    if verbose:
        if output:
            print("{!a} % {!a} =? {!a} ...".format(formatstr, args, output),
                  end=' ')
        else:
            print("{!a} % {!a} works? ...".format(formatstr, args), end=' ')
    try:
        result = formatstr % args
    except OverflowError:
        if not overflowok:
            raise
        if verbose:
            print('overflow (this is fine)')
    else:
        if output and limit is None and result != output:
            if verbose:
                print('no')
            raise AssertionError("%r %% %r == %r != %r" %
                                 (formatstr, args, result, output))
        # when 'limit' is specified, it determines how many characters
        # must match exactly; lengths must always match.
        # ex: limit=5, '12345678' matches '12345___'
        # (mainly for floating point format tests for which an exact match
        # can't be guaranteed due to rounding and representation errors)
        elif output and limit is not None and (
                len(result)!=len(output) or result[:limit]!=output[:limit]):
            if verbose:
                print('no')
            print("%s %% %s == %s != %s" % \
                  (repr(formatstr), repr(args), repr(result), repr(output)))
        else:
            if verbose:
                print('yes')
class FormatTest(unittest.TestCase):
    """Tests for printf-style (%-operator) string formatting."""

    def test_format(self):
        """Exercise %-formatting across a large table of cases.

        Covers: huge precisions (overflow expected on some platforms),
        arbitrary-precision ints under %d/%x/%o with width, precision, sign
        and zero-fill flags, float coercion of ints and vice versa, %r/%a of
        printable and non-printable characters, alternate float forms, and
        the exceptions raised for malformed format strings.
        """
        testformat("%.1d", (1,), "1")
        testformat("%.*d", (sys.maxsize,1), overflowok=True)  # expect overflow
        testformat("%.100d", (1,), '00000000000000000000000000000000000000'
                   '000000000000000000000000000000000000000000000000000000'
                   '00000001', overflowok=True)
        testformat("%#.117x", (1,), '0x00000000000000000000000000000000000'
                   '000000000000000000000000000000000000000000000000000000'
                   '0000000000000000000000000001',
                   overflowok=True)
        testformat("%#.118x", (1,), '0x00000000000000000000000000000000000'
                   '000000000000000000000000000000000000000000000000000000'
                   '00000000000000000000000000001',
                   overflowok=True)
        testformat("%f", (1.0,), "1.000000")
        # these are trying to test the limits of the internal magic-number-length
        # formatting buffer, if that number changes then these tests are less
        # effective
        testformat("%#.*g", (109, -1.e+49/3.))
        testformat("%#.*g", (110, -1.e+49/3.))
        testformat("%#.*g", (110, -1.e+100/3.))
        # test some ridiculously large precision, expect overflow
        testformat('%12.*f', (123456, 1.0))
        # check for internal overflow validation on length of precision
        # these tests should no longer cause overflow in Python
        # 2.7/3.1 and later.
        testformat("%#.*g", (110, -1.e+100/3.))
        testformat("%#.*G", (110, -1.e+100/3.))
        testformat("%#.*f", (110, -1.e+100/3.))
        testformat("%#.*F", (110, -1.e+100/3.))
        # Formatting of integers. Overflow is not ok
        testformat("%x", 10, "a")
        testformat("%x", 100000000000, "174876e800")
        testformat("%o", 10, "12")
        testformat("%o", 100000000000, "1351035564000")
        testformat("%d", 10, "10")
        testformat("%d", 100000000000, "100000000000")
        # Arbitrary-precision decimal: width/precision/sign/zero-fill flags.
        big = 123456789012345678901234567890
        testformat("%d", big, "123456789012345678901234567890")
        testformat("%d", -big, "-123456789012345678901234567890")
        testformat("%5d", -big, "-123456789012345678901234567890")
        testformat("%31d", -big, "-123456789012345678901234567890")
        testformat("%32d", -big, " -123456789012345678901234567890")
        testformat("%-32d", -big, "-123456789012345678901234567890 ")
        testformat("%032d", -big, "-0123456789012345678901234567890")
        testformat("%-032d", -big, "-123456789012345678901234567890 ")
        testformat("%034d", -big, "-000123456789012345678901234567890")
        testformat("%034d", big, "0000123456789012345678901234567890")
        testformat("%0+34d", big, "+000123456789012345678901234567890")
        testformat("%+34d", big, "   +123456789012345678901234567890")
        testformat("%34d", big, "    123456789012345678901234567890")
        testformat("%.2d", big, "123456789012345678901234567890")
        testformat("%.30d", big, "123456789012345678901234567890")
        testformat("%.31d", big, "0123456789012345678901234567890")
        testformat("%32.31d", big, " 0123456789012345678901234567890")
        testformat("%d", float(big), "123456________________________", 6)
        # Same flag matrix in hexadecimal.
        big = 0x1234567890abcdef12345  # 21 hex digits
        testformat("%x", big, "1234567890abcdef12345")
        testformat("%x", -big, "-1234567890abcdef12345")
        testformat("%5x", -big, "-1234567890abcdef12345")
        testformat("%22x", -big, "-1234567890abcdef12345")
        testformat("%23x", -big, " -1234567890abcdef12345")
        testformat("%-23x", -big, "-1234567890abcdef12345 ")
        testformat("%023x", -big, "-01234567890abcdef12345")
        testformat("%-023x", -big, "-1234567890abcdef12345 ")
        testformat("%025x", -big, "-0001234567890abcdef12345")
        testformat("%025x", big, "00001234567890abcdef12345")
        testformat("%0+25x", big, "+0001234567890abcdef12345")
        testformat("%+25x", big, "  +1234567890abcdef12345")
        testformat("%25x", big, "    1234567890abcdef12345")
        testformat("%.2x", big, "1234567890abcdef12345")
        testformat("%.21x", big, "1234567890abcdef12345")
        testformat("%.22x", big, "01234567890abcdef12345")
        testformat("%23.22x", big, " 01234567890abcdef12345")
        testformat("%-23.22x", big, "01234567890abcdef12345 ")
        testformat("%X", big, "1234567890ABCDEF12345")
        testformat("%#X", big, "0X1234567890ABCDEF12345")
        testformat("%#x", big, "0x1234567890abcdef12345")
        testformat("%#x", -big, "-0x1234567890abcdef12345")
        testformat("%#.23x", -big, "-0x001234567890abcdef12345")
        testformat("%#+.23x", big, "+0x001234567890abcdef12345")
        testformat("%# .23x", big, " 0x001234567890abcdef12345")
        testformat("%#+.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
        testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        # next one gets two leading zeroes from precision, and another from the
        # 0 flag and the width
        testformat("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
        # same, except no 0 flag
        testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        testformat("%x", float(big), "123456_______________", 6)
        # Same flag matrix in octal.
        big = 0o12345670123456701234567012345670  # 32 octal digits
        testformat("%o", big, "12345670123456701234567012345670")
        testformat("%o", -big, "-12345670123456701234567012345670")
        testformat("%5o", -big, "-12345670123456701234567012345670")
        testformat("%33o", -big, "-12345670123456701234567012345670")
        testformat("%34o", -big, " -12345670123456701234567012345670")
        testformat("%-34o", -big, "-12345670123456701234567012345670 ")
        testformat("%034o", -big, "-012345670123456701234567012345670")
        testformat("%-034o", -big, "-12345670123456701234567012345670 ")
        testformat("%036o", -big, "-00012345670123456701234567012345670")
        testformat("%036o", big, "000012345670123456701234567012345670")
        testformat("%0+36o", big, "+00012345670123456701234567012345670")
        testformat("%+36o", big, "  +12345670123456701234567012345670")
        testformat("%36o", big, "   12345670123456701234567012345670")
        testformat("%.2o", big, "12345670123456701234567012345670")
        testformat("%.32o", big, "12345670123456701234567012345670")
        testformat("%.33o", big, "012345670123456701234567012345670")
        testformat("%34.33o", big, " 012345670123456701234567012345670")
        testformat("%-34.33o", big, "012345670123456701234567012345670 ")
        testformat("%o", big, "12345670123456701234567012345670")
        testformat("%#o", big, "0o12345670123456701234567012345670")
        testformat("%#o", -big, "-0o12345670123456701234567012345670")
        testformat("%#.34o", -big, "-0o0012345670123456701234567012345670")
        testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%# .34o", big, " 0o0012345670123456701234567012345670")
        testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#-+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#-+37.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#+37.34o", big, "+0o0012345670123456701234567012345670")
        # next one gets one leading zero from precision
        testformat("%.33o", big, "012345670123456701234567012345670")
        # base marker shouldn't change that, since "0" is redundant
        testformat("%#.33o", big, "0o012345670123456701234567012345670")
        # but reduce precision, and base marker should add a zero
        testformat("%#.32o", big, "0o12345670123456701234567012345670")
        # one leading zero from precision, and another from "0" flag & width
        testformat("%034.33o", big, "0012345670123456701234567012345670")
        # base marker shouldn't change that
        testformat("%0#34.33o", big, "0o012345670123456701234567012345670")
        testformat("%o", float(big), "123456__________________________", 6)
        # Some small ints (historically exercised both the int and long flavors).
        testformat("%d", 42, "42")
        testformat("%d", -42, "-42")
        testformat("%d", 42, "42")
        testformat("%d", -42, "-42")
        testformat("%d", 42.0, "42")
        testformat("%#x", 1, "0x1")
        testformat("%#x", 1, "0x1")
        testformat("%#X", 1, "0X1")
        testformat("%#X", 1, "0X1")
        testformat("%#x", 1.0, "0x1")
        testformat("%#o", 1, "0o1")
        testformat("%#o", 1, "0o1")
        testformat("%#o", 0, "0o0")
        testformat("%#o", 0, "0o0")
        testformat("%o", 0, "0")
        testformat("%o", 0, "0")
        testformat("%d", 0, "0")
        testformat("%d", 0, "0")
        testformat("%#x", 0, "0x0")
        testformat("%#x", 0, "0x0")
        testformat("%#X", 0, "0X0")
        testformat("%#X", 0, "0X0")
        testformat("%x", 0x42, "42")
        testformat("%x", -0x42, "-42")
        testformat("%x", 0x42, "42")
        testformat("%x", -0x42, "-42")
        testformat("%x", float(0x42), "42")
        testformat("%o", 0o42, "42")
        testformat("%o", -0o42, "-42")
        testformat("%o", 0o42, "42")
        testformat("%o", -0o42, "-42")
        testformat("%o", float(0o42), "42")
        testformat("%r", "\u0378", "'\\u0378'")  # non printable
        testformat("%a", "\u0378", "'\\u0378'")  # non printable
        testformat("%r", "\u0374", "'\u0374'")  # printable
        testformat("%a", "\u0374", "'\\u0374'")  # printable
        # alternate float formatting
        testformat('%g', 1.1, '1.1')
        testformat('%#g', 1.1, '1.10000')

        # Test exception for unknown format characters
        if verbose:
            print('Testing exceptions')

        def test_exc(formatstr, args, exception, excmsg):
            # Assert that formatting raises `exception` with message `excmsg`.
            try:
                testformat(formatstr, args)
            except exception as exc:
                if str(exc) == excmsg:
                    if verbose:
                        print("yes")
                else:
                    if verbose: print('no')
                    print('Unexpected ', exception, ':', repr(str(exc)))
            except:
                if verbose: print('no')
                print('Unexpected exception')
                raise
            else:
                raise TestFailed('did not get expected exception: %s' % excmsg)

        test_exc('abc %b', 1, ValueError,
                 "unsupported format character 'b' (0x62) at index 5")
        #test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
        #         "unsupported format character '?' (0x3000) at index 5")
        test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
        test_exc('%g', '1', TypeError, "a float is required")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")

        # On 32-bit platforms an enormous field width must raise MemoryError
        # rather than crash (crashed 2.2.1 and earlier).
        if maxsize == 2**31-1:
            # crashes 2.2.1 and earlier:
            try:
                "%*d"%(maxsize, -127)
            except MemoryError:
                pass
            else:
                raise TestFailed('"%*d"%(maxsize, -127) should fail')
def test_non_ascii(self):
testformat("\u20ac=%f", (1.0,), "\u20ac=1.000000")
self.assertEqual(format("abc", "\u2007<5"), "abc\u2007\u2007")
self.assertEqual(format(123, "\u2007<5"), "123\u2007\u2007")
self.assertEqual(format(12.3, "\u2007<6"), "12.3\u2007\u2007")
self.assertEqual(format(0j, "\u2007<4"), "0j\u2007\u2007")
self.assertEqual(format(1+2j, "\u2007<8"), "(1+2j)\u2007\u2007")
self.assertEqual(format("abc", "\u2007>5"), "\u2007\u2007abc")
self.assertEqual(format(123, "\u2007>5"), "\u2007\u2007123")
self.assertEqual(format(12.3, "\u2007>6"), "\u2007\u200712.3")
self.assertEqual(format(1+2j, "\u2007>8"), "\u2007\u2007(1+2j)")
self.assertEqual(format(0j, "\u2007>4"), "\u2007\u20070j")
self.assertEqual(format("abc", "\u2007^5"), "\u2007abc\u2007")
self.assertEqual(format(123, "\u2007^5"), "\u2007123\u2007")
self.assertEqual(format(12.3, "\u2007^6"), "\u200712.3\u2007")
self.assertEqual(format(1+2j, "\u2007^8"), "\u2007(1+2j)\u2007")
self.assertEqual(format(0j, "\u2007^4"), "\u20070j\u2007")
def test_locale(self):
try:
oldloc = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, '')
except locale.Error as err:
self.skipTest("Cannot set locale: {}".format(err))
try:
localeconv = locale.localeconv()
sep = localeconv['thousands_sep']
point = localeconv['decimal_point']
text = format(123456789, "n")
self.assertIn(sep, text)
self.assertEqual(text.replace(sep, ''), '123456789')
text = format(1234.5, "n")
self.assertIn(sep, text)
self.assertIn(point, text)
self.assertEqual(text.replace(sep, ''), '1234' + point + '5')
finally:
locale.setlocale(locale.LC_ALL, oldloc)
def test_main():
    """Run the FormatTest suite via the legacy test.support entry point.

    NOTE(review): this appears between FormatTest methods in the listing;
    presumably it is a module-level function (it takes no self) - confirm
    its placement in the original file.
    """
    support.run_unittest(FormatTest)
def test_precision(self):
f = 1.2
self.assertEqual(format(f, ".0f"), "1")
self.assertEqual(format(f, ".3f"), "1.200")
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (sys.maxsize + 1))
self.assertEqual(str(cm.exception), "precision too big")
c = complex(f)
self.assertEqual(format(c, ".0f"), "1+0j")
self.assertEqual(format(c, ".3f"), "1.200+0.000j")
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (sys.maxsize + 1))
self.assertEqual(str(cm.exception), "precision too big")
@support.cpython_only
def test_precision_c_limits(self):
from _testcapi import INT_MAX
f = 1.2
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (INT_MAX + 1))
c = complex(f)
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (INT_MAX + 1))
if __name__ == "__main__":
unittest.main()
| |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
paddle.enable_static()
class TestGumbelSoftmaxOp(OpTest):
    """Hard gumbel_softmax over the last axis of a [20, 10] input.

    With hard=True each sampled distribution is one-hot, so the output sum
    must equal the number of distributions (count_expected).
    """

    def init_attrs(self):
        self.shape = [20, 10]
        self.attrs = {"hard": True, "axis": -1}
        self.count_expected = 20
        self.dtype = "float64"

    def setUp(self):
        self.op_type = "gumbel_softmax"
        self.init_attrs()
        np.random.seed(0)
        x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out_np = np.zeros(self.shape).astype(self.dtype)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x_np)}
        self.outputs = {'Out': out_np}

    def verify_output(self, outs):
        # Reshape the raw fetched output, then check shape and the one-hot sum.
        sampled = np.array(outs[0])
        sampled.shape = self.shape
        self.assertTrue(list(sampled.shape) == self.shape)
        self.assertEqual(sampled.sum(), self.count_expected)

    def test_check_output(self):
        self.check_output_customized(self.verify_output)

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
class TestGumbelSoftmaxOp2(TestGumbelSoftmaxOp):
    """Same [20, 10] input, but sampling one-hot along axis 0."""

    def init_attrs(self):
        self.dtype = "float64"
        self.shape = [20, 10]
        self.attrs = {"hard": True, "axis": 0}
        # One hit per column of the 20x10 input -> 10 ones in total.
        self.count_expected = 10
class TestGumbelSoftmaxOp3(TestGumbelSoftmaxOp):
    """1-D input: a single distribution of 100 classes."""

    def init_attrs(self):
        self.dtype = "float64"
        self.shape = [100]
        self.attrs = {"hard": True, "axis": -1}
        # A single distribution -> exactly one hit.
        self.count_expected = 1
class TestGumbelSoftmaxOp4(TestGumbelSoftmaxOp):
    """3-D input, one-hot along the last axis."""

    def init_attrs(self):
        self.dtype = "float64"
        self.shape = [20, 10, 5]
        self.attrs = {"hard": True, "axis": -1}
        # 20 * 10 distributions of length 5 -> 200 ones.
        self.count_expected = 200
class TestGumbelSoftmaxOp5(TestGumbelSoftmaxOp):
    """3-D input, one-hot along the middle axis (axis=1)."""

    def init_attrs(self):
        self.dtype = "float64"
        self.shape = [20, 10, 5]
        self.attrs = {"hard": True, "axis": 1}
        # 20 * 5 distributions of length 10 -> 100 ones.
        self.count_expected = 100
class TestGumbelSoftmaxOpSampleDistribution(OpTest):
    """Statistical check: hard gumbel_softmax samples follow the softmax probs.

    Every row of the batch shares the same logits; the per-class hit counts
    over the batch are compared to the expected binomial counts via an
    approximate z-statistic.
    """

    def softmax(self, x):
        # Numerically stable softmax along the last axis.
        keep_dims = list(x.shape)[:-1] + [1]
        shifted = x - x.max(axis=-1).reshape(keep_dims)
        exps = np.exp(shifted)
        return exps / exps.sum(axis=-1).reshape(keep_dims)

    def init_attrs(self):
        self.shape = [100, 3]
        self.attrs = {"hard": True, "axis": -1}
        # NOTE(review): self.dtype is never assigned in this class; it is
        # presumably supplied by the OpTest base class - confirm.
        self.counts = np.zeros(self.shape).astype(self.dtype)
        self._cpu_only = True

    def accumulate_output(self, outs):
        # Per-class hit counts over the batch (overwrites, despite the name).
        sampled = np.array(outs).reshape(self.shape)
        self.counts = np.sum(sampled, axis=0)

    def setUp(self):
        self.op_type = "gumbel_softmax"
        self.init_attrs()
        single_x = np.array([0.2, 0.3, 0.5])
        batch_x = np.ones(self.shape) * single_x
        out = np.zeros(self.shape).astype(self.dtype)
        self.probs = self.softmax(single_x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(batch_x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output_customized(self.accumulate_output)
        # Each sample is one-hot, so total hits == batch size.
        self.assertEqual(self.counts.sum(), self.shape[0])

        # Treat each softmax probability as a binomial success probability;
        # the z-statistic of the observed counts is approximately N(0, 1)
        # when the sampler is unbiased.
        expected = self.probs * self.shape[0]
        z = (self.counts - expected) / np.sqrt((expected * (1 - self.probs)))
        # A (lazy) approximate 99% two-sided test:
        # fails with prob alpha ~>= 0.01 if unbiased.
        self.assertLess(np.max(np.abs(z)).item(), 2.58)

    def test_check_grad(self):
        self.check_grad(["X"], "Out")
class TestGumbelSoftmaxOpGrad(unittest.TestCase):
    """Straight-through check: hard and soft gumbel_softmax gradients agree."""

    def init_attrs(self):
        self.shape = [20, 10]
        self.dtype = "float64"

    def setUp(self):
        self.init_attrs()
        np.random.seed(0)
        self.x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)

    def test_dygraph_check(self):
        paddle.disable_static()
        hard_input = paddle.to_tensor(self.x_np, stop_gradient=False)
        soft_input = paddle.to_tensor(self.x_np, stop_gradient=False)
        hard_out = paddle.nn.functional.gumbel_softmax(hard_input, hard=True)
        soft_out = paddle.nn.functional.gumbel_softmax(soft_input, hard=False)
        hard_out.sum().backward()
        soft_out.sum().backward()
        # Hard mode uses the straight-through estimator, so the gradients
        # must match the soft-mode gradients.
        self.assertEqual(
            np.allclose(hard_input.grad.numpy(), soft_input.grad.numpy()), True)
        paddle.enable_static()
class TestGumbelSoftmaxAPI(unittest.TestCase):
    """Static- and dynamic-graph smoke tests for the gumbel_softmax API."""

    def setUp(self):
        self.x_shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32)
        # 2 * 3 * 4 one-hot rows along the last axis.
        self.count_expected = 24
        if paddle.fluid.core.is_compiled_with_cuda():
            self.place = paddle.CUDAPlace(0)
        else:
            self.place = paddle.CPUPlace()

    def test_check_api(self):
        # test static api
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data(name='x', shape=self.x_shape)
            y = paddle.nn.functional.gumbel_softmax(x, hard=True)
            exe = paddle.static.Executor(self.place)
            out = exe.run(feed={'x': self.x}, fetch_list=[y])
            self.assertEqual(np.array(out[0]).sum(), self.count_expected)

        # test dygraph api
        paddle.disable_static()
        result = paddle.nn.functional.gumbel_softmax(
            paddle.to_tensor(self.x), hard=True)
        self.assertEqual(np.array(result).sum(), self.count_expected)
        paddle.enable_static()
class TestGumbelSoftmaxOpError(unittest.TestCase):
    """Invalid inputs and arguments must raise the documented exceptions."""

    def test_errors(self):
        paddle.disable_static()

        def feed_lod_tensor():
            # Raw LoDTensor input is rejected.
            bad = fluid.create_lod_tensor(
                np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace())
            paddle.nn.functional.gumbel_softmax(bad)

        self.assertRaises(ValueError, feed_lod_tensor)

        def feed_ndarray():
            # Raw numpy input is rejected.
            paddle.nn.functional.gumbel_softmax(np.zeros((100, 784)))

        self.assertRaises(ValueError, feed_ndarray)

        def negative_temperature():
            x = paddle.to_tensor([0.2, 0.3, 0.4])
            paddle.nn.functional.gumbel_softmax(x, temperature=-1)

        self.assertRaises(ValueError, negative_temperature)

        def non_integer_axis():
            x = paddle.to_tensor([0.2, 0.3, 0.4])
            paddle.nn.functional.gumbel_softmax(x, axis=1.1)

        self.assertRaises(ValueError, non_integer_axis)

        paddle.enable_static()

        def integer_dtype():
            # int32 inputs are rejected in static mode.
            with paddle.static.program_guard(paddle.static.Program()):
                x_int32 = paddle.fluid.data(
                    name='x_int32', shape=[2, 3], dtype='int32')
                paddle.nn.functional.gumbel_softmax(x_int32)

        self.assertRaises(TypeError, integer_dtype)
if __name__ == '__main__':
unittest.main()
| |
from __future__ import absolute_import, unicode_literals
from time import time
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import Http404, HttpResponse, JsonResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.http import is_safe_url, urlquote
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.vary import vary_on_headers
from django.views.generic import View
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin import messages, signals
from wagtail.wagtailadmin.forms import CopyForm, SearchForm
from wagtail.wagtailadmin.utils import (
send_notification, user_has_any_page_permission, user_passes_test)
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page, PageRevision, UserPagePermissionsProxy
def get_valid_next_url_from_request(request):
    """Return the 'next' URL from POST or GET if it is safe to redirect to.

    Returns the empty string when no 'next' parameter is present or when the
    URL fails the same-host safety check.
    """
    candidate = request.POST.get('next') or request.GET.get('next')
    if candidate and is_safe_url(url=candidate, host=request.get_host()):
        return candidate
    return ''
@user_passes_test(user_has_any_page_permission)
def index(request, parent_page_id=None):
    """Page explorer listing: show the children of the given parent page.

    Falls back to the first root node when no ``parent_page_id`` is given.
    Ordering is controlled by the ``ordering`` GET parameter (whitelisted);
    results are paginated (50 per page) except when sorting by explicit page
    order ('ord'), where all pages must be visible for drag-and-drop.
    """
    if parent_page_id:
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
    else:
        parent_page = Page.get_first_root_node().specific

    pages = parent_page.get_children().prefetch_related('content_type', 'sites_rooted_here')

    # Get page ordering; anything outside the whitelist falls back to the default
    ordering = request.GET.get('ordering', '-latest_revision_created_at')
    if ordering not in [
        'title',
        '-title',
        'content_type',
        '-content_type',
        'live', '-live',
        'latest_revision_created_at',
        '-latest_revision_created_at',
        'ord'
    ]:
        ordering = '-latest_revision_created_at'

    if ordering == 'ord':
        # preserve the native ordering from get_children()
        pass
    elif ordering == 'latest_revision_created_at':
        # order by oldest revision first.
        # Special case NULL entries - these should go at the top of the list.
        # Do this by annotating with Count('latest_revision_created_at'),
        # which returns 0 for these
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('null_position', 'latest_revision_created_at')
    elif ordering == '-latest_revision_created_at':
        # order by newest revision first.
        # Special case NULL entries - these should go at the end of the list.
        pages = pages.annotate(
            null_position=Count('latest_revision_created_at')
        ).order_by('-null_position', '-latest_revision_created_at')
    else:
        pages = pages.order_by(ordering)

    # Don't paginate if sorting by page order - all pages must be shown to
    # allow drag-and-drop reordering
    do_paginate = ordering != 'ord'

    if do_paginate:
        # Retrieve pages in their most specific form.
        # Only do this for paginated listings, as this could potentially be a
        # very expensive operation when performed on a large queryset.
        pages = pages.specific()

    # allow hooks to modify the queryset
    for hook in hooks.get_hooks('construct_explorer_page_queryset'):
        pages = hook(parent_page, pages, request)

    # Pagination
    if do_paginate:
        paginator, pages = paginate(request, pages, per_page=50)

    return render(request, 'wagtailadmin/pages/index.html', {
        'parent_page': parent_page.specific,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'pages': pages,
        'do_paginate': do_paginate,
    })
def add_subpage(request, parent_page_id):
    """Show the choice of page types creatable under the given parent.

    When exactly one type is available, skip the chooser and go straight to
    that type's create form.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    if not parent_page.permissions_for_user(request.user).can_add_subpage():
        raise PermissionDenied

    # (verbose name, app label, model name) for every creatable type,
    # sorted by lower-cased verbose name.
    page_types = sorted(
        (
            (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
            for model in type(parent_page).creatable_subpage_models()
            if model.can_create_at(parent_page)
        ),
        key=lambda entry: entry[0].lower(),
    )

    if len(page_types) == 1:
        # Only one page type is available - no choice for the user to make
        verbose_name, app_label, model_name = page_types[0]
        return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)

    return render(request, 'wagtailadmin/pages/add_subpage.html', {
        'parent_page': parent_page,
        'page_types': page_types,
        'next': get_valid_next_url_from_request(request),
    })
def content_type_use(request, content_type_app_name, content_type_model_name):
    """List all pages of a given page content type, 10 per page."""
    try:
        content_type = ContentType.objects.get_by_natural_key(
            content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    page_class = content_type.model_class()

    # Only Page subclasses are browsable here - anything else is a 404.
    if not issubclass(page_class, Page):
        raise Http404

    paginator, pages = paginate(request, page_class.objects.all(), per_page=10)

    return render(request, 'wagtailadmin/pages/content_type_use.html', {
        'pages': pages,
        'app_name': content_type_app_name,
        'content_type': content_type,
        'page_class': page_class,
    })
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
    """Serve and process the 'create page' form for a new page of the given
    content type under ``parent_page_id``.

    GET renders an empty form; POST validates, saves the page as a child of
    the parent, creates a revision, and optionally publishes
    ('action-publish') or submits for moderation ('action-submit').
    Raises Http404 for unknown/non-Page content types and PermissionDenied
    when the user may not add this type here.
    """
    parent_page = get_object_or_404(Page, id=parent_page_id).specific
    parent_page_perms = parent_page.permissions_for_user(request.user)
    if not parent_page_perms.can_add_subpage():
        raise PermissionDenied

    try:
        content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
    except ContentType.DoesNotExist:
        raise Http404

    # Get class
    page_class = content_type.model_class()

    # Make sure the class is a descendant of Page
    if not issubclass(page_class, Page):
        raise Http404

    # page must be in the list of allowed subpage types for this parent ID
    if page_class not in parent_page.creatable_subpage_models():
        raise PermissionDenied

    if not page_class.can_create_at(parent_page):
        raise PermissionDenied

    # Hooks may short-circuit the whole view by returning a response
    for fn in hooks.get_hooks('before_create_page'):
        result = fn(request, parent_page, page_class)
        if hasattr(result, 'status_code'):
            return result

    page = page_class(owner=request.user)
    edit_handler_class = page_class.get_edit_handler()
    form_class = edit_handler_class.get_form_class(page_class)

    next_url = get_valid_next_url_from_request(request)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent_page)

        if form.is_valid():
            page = form.save(commit=False)

            # Publishing additionally requires publish permission on the parent
            is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
            is_submitting = bool(request.POST.get('action-submit'))

            if not is_publishing:
                page.live = False

            # Save page
            parent_page.add_child(instance=page)

            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )

            # Publish
            if is_publishing:
                revision.publish()

            # Notifications
            if is_publishing:
                if page.go_live_at and page.go_live_at > timezone.now():
                    # go_live_at in the future: scheduled, not yet live
                    messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.get_admin_display_title()), buttons=[
                        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
                    ])
                else:
                    messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=[
                        messages.button(page.url, _('View live'), new_window=True),
                        messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
                    ])
            elif is_submitting:
                messages.success(
                    request,
                    _("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()),
                    buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:view_draft', args=(page.id,)),
                            _('View draft'),
                            new_window=True
                        ),
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ]
                )
                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:
                messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title()))

            # Hooks may replace the success redirect with their own response
            for fn in hooks.get_hooks('after_create_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if is_publishing or is_submitting:
                # we're done here
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            messages.validation_error(
                request, _("The page could not be created due to validation errors"), form
            )
            edit_handler = edit_handler_class(instance=page, form=form)
            has_unsaved_changes = True
    else:
        # Initial GET: let listeners pre-populate the unsaved page instance
        signals.init_new_page.send(sender=create, page=page, parent=parent_page)
        form = form_class(instance=page, parent_page=parent_page)
        edit_handler = edit_handler_class(instance=page, form=form)
        has_unsaved_changes = False

    return render(request, 'wagtailadmin/pages/create.html', {
        'content_type': content_type,
        'page_class': page_class,
        'parent_page': parent_page,
        'edit_handler': edit_handler,
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
    })
def edit(request, page_id):
    """Serve and process the 'edit page' form for an existing page.

    Edits are applied against the latest revision (not necessarily the live
    page).  POST validates and saves a new revision, and optionally
    publishes ('action-publish'), submits for moderation ('action-submit'),
    or reverts to a previous revision (a 'revision' id in the POST data).
    Locked pages cannot be saved.
    """
    latest_revision = get_object_or_404(Page, id=page_id).get_latest_revision()
    page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
    parent = page.get_parent()

    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()

    page_perms = page.permissions_for_user(request.user)
    if not page_perms.can_edit():
        raise PermissionDenied

    # Hooks may short-circuit the whole view by returning a response
    for fn in hooks.get_hooks('before_edit_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result

    edit_handler_class = page_class.get_edit_handler()
    form_class = edit_handler_class.get_form_class(page_class)

    next_url = get_valid_next_url_from_request(request)

    errors_debug = None

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=page,
                          parent_page=parent)

        if form.is_valid() and not page.locked:
            page = form.save(commit=False)

            is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
            is_submitting = bool(request.POST.get('action-submit'))
            is_reverting = bool(request.POST.get('revision'))

            # If a revision ID was passed in the form, get that revision so its
            # date can be referenced in notification messages
            if is_reverting:
                previous_revision = get_object_or_404(page.revisions, id=request.POST.get('revision'))

            # Save revision
            revision = page.save_revision(
                user=request.user,
                submitted_for_moderation=is_submitting,
            )

            # Publish
            if is_publishing:
                revision.publish()
                # Need to reload the page because the URL may have changed, and we
                # need the up-to-date URL for the "View Live" button.
                page = page.specific_class.objects.get(pk=page.pk)

            # Notifications
            if is_publishing:
                if page.go_live_at and page.go_live_at > timezone.now():
                    # Page has been scheduled for publishing in the future
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been scheduled for publishing."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        message = _(
                            "Page '{0}' has been scheduled for publishing."
                        ).format(
                            page.get_admin_display_title()
                        )

                    messages.success(request, message, buttons=[
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page.id,)),
                            _('Edit')
                        )
                    ])
                else:
                    # Page is being published now
                    if is_reverting:
                        message = _(
                            "Revision from {0} of page '{1}' has been published."
                        ).format(
                            previous_revision.created_at.strftime("%d %b %Y %H:%M"),
                            page.get_admin_display_title()
                        )
                    else:
                        message = _(
                            "Page '{0}' has been published."
                        ).format(
                            page.get_admin_display_title()
                        )

                    messages.success(request, message, buttons=[
                        messages.button(
                            page.url,
                            _('View live'),
                            new_window=True
                        ),
                        messages.button(
                            reverse('wagtailadmin_pages:edit', args=(page_id,)),
                            _('Edit')
                        )
                    ])
            elif is_submitting:
                message = _(
                    "Page '{0}' has been submitted for moderation."
                ).format(
                    page.get_admin_display_title()
                )

                messages.success(request, message, buttons=[
                    messages.button(
                        reverse('wagtailadmin_pages:view_draft', args=(page_id,)),
                        _('View draft'),
                        new_window=True
                    ),
                    messages.button(
                        reverse('wagtailadmin_pages:edit', args=(page_id,)),
                        _('Edit')
                    )
                ])

                if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk):
                    messages.error(request, _("Failed to send notifications to moderators"))
            else:  # Saving
                if is_reverting:
                    message = _(
                        "Page '{0}' has been replaced with revision from {1}."
                    ).format(
                        page.get_admin_display_title(),
                        previous_revision.created_at.strftime("%d %b %Y %H:%M")
                    )
                else:
                    message = _(
                        "Page '{0}' has been updated."
                    ).format(
                        page.get_admin_display_title()
                    )

                messages.success(request, message)

            # Hooks may replace the success redirect with their own response
            for fn in hooks.get_hooks('after_edit_page'):
                result = fn(request, page)
                if hasattr(result, 'status_code'):
                    return result

            if is_publishing or is_submitting:
                # we're done here - redirect back to the explorer
                if next_url:
                    # redirect back to 'next' url if present
                    return redirect(next_url)
                # redirect back to the explorer
                return redirect('wagtailadmin_explore', page.get_parent().id)
            else:
                # Just saving - remain on edit page for further edits
                target_url = reverse('wagtailadmin_pages:edit', args=[page.id])
                if next_url:
                    # Ensure the 'next' url is passed through again if present
                    target_url += '?next=%s' % urlquote(next_url)
                return redirect(target_url)
        else:
            if page.locked:
                messages.error(request, _("The page could not be saved as it is locked"))
            else:
                messages.validation_error(
                    request, _("The page could not be saved due to validation errors"), form
                )

            edit_handler = edit_handler_class(instance=page, form=form)
            # Debug dump of form and formset errors for the template
            errors_debug = (
                repr(edit_handler.form.errors) +
                repr([
                    (name, formset.errors)
                    for (name, formset) in edit_handler.form.formsets.items()
                    if formset.errors
                ])
            )
            has_unsaved_changes = True
    else:
        form = form_class(instance=page, parent_page=parent)
        edit_handler = edit_handler_class(instance=page, form=form)
        has_unsaved_changes = False

    # Check for revisions still undergoing moderation and warn
    if latest_revision and latest_revision.submitted_for_moderation:
        buttons = []

        if page.live:
            buttons.append(messages.button(
                reverse('wagtailadmin_pages:revisions_compare', args=(page.id, 'live', latest_revision.id)),
                _('Compare with live version')
            ))

        messages.warning(request, _("This page is currently awaiting moderation"), buttons=buttons)

    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': errors_debug,
        'preview_modes': page.preview_modes,
        'form': form,
        'next': next_url,
        'has_unsaved_changes': has_unsaved_changes,
    })
def delete(request, page_id):
    """Confirm (GET) and perform (POST) deletion of a page.

    Runs the 'before_delete_page'/'after_delete_page' hooks; any hook
    returning a response short-circuits the view.
    """
    page = get_object_or_404(Page, id=page_id)
    if not page.permissions_for_user(request.user).can_delete():
        raise PermissionDenied

    for hook_fn in hooks.get_hooks('before_delete_page'):
        hook_result = hook_fn(request, page)
        if hasattr(hook_result, 'status_code'):
            return hook_result

    next_url = get_valid_next_url_from_request(request)

    if request.method == 'POST':
        # Remember the parent before the page (and its subtree) disappears.
        parent_id = page.get_parent().id
        page.delete()

        messages.success(request, _("Page '{0}' deleted.").format(page.get_admin_display_title()))

        for hook_fn in hooks.get_hooks('after_delete_page'):
            hook_result = hook_fn(request, page)
            if hasattr(hook_result, 'status_code'):
                return hook_result

        return redirect(next_url) if next_url else redirect('wagtailadmin_explore', parent_id)

    return render(request, 'wagtailadmin/pages/confirm_delete.html', {
        'page': page,
        'descendant_count': page.get_descendant_count(),
        'next': next_url,
    })
def view_draft(request, page_id):
    """Serve a preview of the latest draft revision of a page."""
    draft_page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
    permissions = draft_page.permissions_for_user(request.user)
    # Either publish or edit permission is enough to see the draft.
    if not (permissions.can_publish() or permissions.can_edit()):
        raise PermissionDenied
    return draft_page.serve_preview(draft_page.dummy_request(request), draft_page.default_preview_mode)
class PreviewOnEdit(View):
    """Preview support for the page edit view.

    POST stashes the submitted form data in the session; a subsequent GET
    rebuilds the form from that stored data and serves the rendered page
    preview (or an error template if the data does not validate).
    """
    http_method_names = ('post', 'get')
    # Stored preview data older than this is purged on each POST.
    preview_expiration_timeout = 60 * 60 * 24  # seconds
    session_key_prefix = 'wagtail-preview-'
    def remove_old_preview_data(self):
        # Each session entry is a (post_data_dict, timestamp) pair; v[1] is
        # the timestamp recorded in post().
        expiration = time() - self.preview_expiration_timeout
        expired_keys = [
            k for k, v in self.request.session.items()
            if k.startswith(self.session_key_prefix) and v[1] < expiration]
        # Removes the session key gracefully
        for k in expired_keys:
            self.request.session.pop(k)
    @property
    def session_key(self):
        # One preview slot per combination of URL args (the page id here;
        # subclasses may use more args).
        return self.session_key_prefix + ','.join(self.args)
    def get_page(self):
        # Preview against the latest draft revision, not the live page.
        return get_object_or_404(Page,
                                 id=self.args[0]).get_latest_revision_as_page()
    def get_form(self):
        """Build the page form, bound to previously-POSTed data if any."""
        page = self.get_page()
        form_class = page.get_edit_handler().get_form_class(page._meta.model)
        parent_page = page.get_parent().specific
        if self.session_key not in self.request.session:
            # Session key not in session, returning null form
            return form_class(instance=page, parent_page=parent_page)
        post_data_dict, timestamp = self.request.session[self.session_key]
        # convert post_data_dict back into a QueryDict
        post_data = QueryDict('', mutable=True)
        for k, v in post_data_dict.items():
            post_data.setlist(k, v)
        return form_class(post_data, instance=page, parent_page=parent_page)
    def post(self, request, *args, **kwargs):
        """Store the submitted form data and report whether it validates."""
        # TODO: Handle request.FILES.
        # Convert request.POST to a plain dict (rather than a QueryDict) so that it can be
        # stored without data loss in session data
        post_data_dict = dict(request.POST.lists())
        request.session[self.session_key] = post_data_dict, time()
        self.remove_old_preview_data()
        form = self.get_form()
        # The client uses 'is_valid' to decide whether a GET will show the
        # rendered preview or the error template.
        return JsonResponse({'is_valid': form.is_valid()})
    def error_response(self, page):
        # Rendered when the stored form data fails validation on GET.
        return render(self.request, 'wagtailadmin/pages/preview_error.html',
                      {'page': page})
    def get(self, request, *args, **kwargs):
        """Serve the rendered preview built from the stored form data."""
        # Receive the form submission that would typically be posted
        # to the view. If submission is valid, return the rendered page;
        # if not, re-render the edit form
        form = self.get_form()
        page = form.instance
        if form.is_valid():
            # commit=False: a preview must never write the draft to the DB.
            form.save(commit=False)
            preview_mode = request.GET.get('mode', page.default_preview_mode)
            return page.serve_preview(page.dummy_request(request),
                                      preview_mode)
        return self.error_response(page)
class PreviewOnCreate(PreviewOnEdit):
    """Preview support for the page-creation view.

    The page does not exist in the database yet, so an unsaved instance of
    the requested content type is fabricated under the prospective parent.
    """
    def get_page(self):
        """Build an unsaved page instance of the requested type."""
        # URL args: app label, model name, and the prospective parent's id.
        (content_type_app_name, content_type_model_name,
         parent_page_id) = self.args
        try:
            content_type = ContentType.objects.get_by_natural_key(
                content_type_app_name, content_type_model_name)
        except ContentType.DoesNotExist:
            raise Http404
        page = content_type.model_class()()
        parent_page = get_object_or_404(Page, id=parent_page_id).specific
        # We need to populate treebeard's path / depth fields in order to
        # pass validation. We can't make these 100% consistent with the rest
        # of the tree without making actual database changes (such as
        # incrementing the parent's numchild field), but by using treebeard's
        # internal _get_children_path_interval method, we can set a
        # 'realistic' value that will hopefully enable tree traversal
        # operations to at least partially work.
        page.depth = parent_page.depth + 1
        # Puts the page at the maximum possible path
        # for a child of `parent_page`.
        page.path = Page._get_children_path_interval(parent_page.path)[1]
        return page
    def get_form(self):
        """Bind the form as usual, then clean the unsaved instance."""
        form = super(PreviewOnCreate, self).get_form()
        if form.is_valid():
            # Ensures our unsaved page has a suitable url.
            form.instance.set_url_path(form.parent_page)
            form.instance.full_clean()
        return form
def unpublish(request, page_id):
    """Confirm (GET) and perform (POST) unpublishing of a page.

    On POST, optionally unpublishes live descendants the user is allowed
    to unpublish as well.
    """
    page = get_object_or_404(Page, id=page_id).specific
    user_perms = UserPagePermissionsProxy(request.user)
    if not user_perms.for_page(page).can_unpublish():
        raise PermissionDenied

    next_url = get_valid_next_url_from_request(request)

    if request.method == 'POST':
        include_descendants = request.POST.get("include_descendants", False)

        page.unpublish()

        if include_descendants:
            for descendant in page.get_descendants().live().specific():
                # Per-descendant permission check: skip what the user may
                # not unpublish rather than failing the whole request.
                if user_perms.for_page(descendant).can_unpublish():
                    descendant.unpublish()

        messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])

        return redirect(next_url) if next_url else redirect('wagtailadmin_explore', page.get_parent().id)

    return render(request, 'wagtailadmin/pages/confirm_unpublish.html', {
        'page': page,
        'next': next_url,
        'live_descendant_count': page.get_descendants().live().count(),
    })
def move_choose_destination(request, page_to_move_id, viewed_page_id=None):
    """Browse the page tree to choose a destination for a page move."""
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    page_perms = page_to_move.permissions_for_user(request.user)
    if not page_perms.can_move():
        raise PermissionDenied

    if viewed_page_id:
        viewed_page = get_object_or_404(Page, id=viewed_page_id)
    else:
        viewed_page = Page.get_first_root_node()

    viewed_page.can_choose = page_perms.can_move_to(viewed_page)

    child_pages = []
    for child in viewed_page.get_children():
        child.can_choose = page_perms.can_move_to(child)
        # A page can never be moved into itself or one of its descendants,
        # so don't offer to descend into that part of the tree.
        is_self_or_descendant = (child == page_to_move or
                                 child.is_child_of(page_to_move))
        child.can_descend = (not is_self_or_descendant) and child.get_children_count()
        child_pages.append(child)

    # Pagination
    paginator, child_pages = paginate(request, child_pages, per_page=50)

    return render(request, 'wagtailadmin/pages/move_choose_destination.html', {
        'page_to_move': page_to_move,
        'viewed_page': viewed_page,
        'child_pages': child_pages,
    })
def move_confirm(request, page_to_move_id, destination_id):
    """Confirm (GET) and perform (POST) moving a page under a new parent."""
    page_to_move = get_object_or_404(Page, id=page_to_move_id).specific
    destination = get_object_or_404(Page, id=destination_id)
    if not page_to_move.permissions_for_user(request.user).can_move_to(destination):
        raise PermissionDenied

    if request.method != 'POST':
        return render(request, 'wagtailadmin/pages/confirm_move.html', {
            'page_to_move': page_to_move,
            'destination': destination,
        })

    # any invalid moves *should* be caught by the permission check above,
    # so don't bother to catch InvalidMoveToDescendant
    page_to_move.move(destination, pos='last-child')

    messages.success(request, _("Page '{0}' moved.").format(page_to_move.get_admin_display_title()), buttons=[
        messages.button(reverse('wagtailadmin_pages:edit', args=(page_to_move.id,)), _('Edit'))
    ])
    return redirect('wagtailadmin_explore', destination.id)
def set_page_position(request, page_to_move_id):
    """Reorder a page among its siblings (AJAX endpoint; returns empty 200)."""
    page_to_move = get_object_or_404(Page, id=page_to_move_id)
    parent_page = page_to_move.get_parent()
    if not parent_page.permissions_for_user(request.user).can_reorder_children():
        raise PermissionDenied

    if request.method == 'POST':
        # The target position comes in via the query string.
        position = request.GET.get('position', None)

        # Find the sibling currently occupying the requested position, if any.
        position_page = None
        if position is not None:
            try:
                position_page = parent_page.get_children()[int(position)]
            except IndexError:
                pass  # no sibling there: the page goes to the end

        # any invalid moves *should* be caught by the permission check above,
        # so don't bother to catch InvalidMoveToDescendant
        if position_page:
            # Insert to the left when moving up the list, to the right when
            # moving down, so the page lands exactly at the requested index.
            new_index = int(position)
            old_index = list(parent_page.get_children()).index(page_to_move)
            if new_index < old_index:
                page_to_move.move(position_page, pos='left')
            elif new_index > old_index:
                page_to_move.move(position_page, pos='right')
        else:
            # Move page to end
            page_to_move.move(parent_page, pos='last-child')

    return HttpResponse('')
@user_passes_test(user_has_any_page_permission)
def copy(request, page_id):
    """Copy a page, optionally with its subpages, to a chosen parent.

    GET renders the copy form; POST validates it, performs the copy and
    redirects to the destination parent's explorer (or the 'next' url).
    Runs the 'before_copy_page'/'after_copy_page' hooks.
    """
    # Fix: use get_object_or_404 so an unknown id is a 404, consistent with
    # every other view in this module (was Page.objects.get, which raised an
    # unhandled DoesNotExist -> HTTP 500).
    page = get_object_or_404(Page, id=page_id)

    # Parent page defaults to parent of source page
    parent_page = page.get_parent()

    # Check if the user has permission to publish subpages on the parent
    can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()

    # Create the form
    form = CopyForm(request.POST or None, user=request.user, page=page, can_publish=can_publish)

    next_url = get_valid_next_url_from_request(request)

    for fn in hooks.get_hooks('before_copy_page'):
        result = fn(request, page)
        if hasattr(result, 'status_code'):
            return result

    # Check if user is submitting
    if request.method == 'POST':
        # Prefill parent_page in case the form is invalid (as prepopulated value for the form field,
        # because ModelChoiceField seems to not fall back to the user given value)
        # NOTE(review): a bad 'new_parent_page' here still raises
        # DoesNotExist/KeyError (a 500); left unchanged in case callers rely
        # on that — consider get_object_or_404 after confirming.
        parent_page = Page.objects.get(id=request.POST['new_parent_page'])

        if form.is_valid():
            # Receive the parent page (this should never be empty)
            if form.cleaned_data['new_parent_page']:
                parent_page = form.cleaned_data['new_parent_page']

            if not page.permissions_for_user(request.user).can_copy_to(parent_page,
                                                                       form.cleaned_data.get('copy_subpages')):
                raise PermissionDenied

            # Re-check if the user has permission to publish subpages on the new parent
            can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()

            # Copy the page
            new_page = page.copy(
                recursive=form.cleaned_data.get('copy_subpages'),
                to=parent_page,
                update_attrs={
                    'title': form.cleaned_data['new_title'],
                    'slug': form.cleaned_data['new_slug'],
                },
                # Copies stay live only if the user may publish here AND asked to.
                keep_live=(can_publish and form.cleaned_data.get('publish_copies')),
                user=request.user,
            )

            # Give a success message back to the user
            if form.cleaned_data.get('copy_subpages'):
                messages.success(
                    request,
                    _("Page '{0}' and {1} subpages copied.").format(page.get_admin_display_title(), new_page.get_descendants().count())
                )
            else:
                messages.success(request, _("Page '{0}' copied.").format(page.get_admin_display_title()))

            for fn in hooks.get_hooks('after_copy_page'):
                result = fn(request, page, new_page)
                if hasattr(result, 'status_code'):
                    return result

            # Redirect to explore of parent page
            if next_url:
                return redirect(next_url)
            return redirect('wagtailadmin_explore', parent_page.id)

    return render(request, 'wagtailadmin/pages/copy.html', {
        'page': page,
        'form': form,
        'next': next_url,
    })
@vary_on_headers('X-Requested-With')
@user_passes_test(user_has_any_page_permission)
def search(request):
    """Search admin pages by keyword.

    AJAX requests get just the results partial; normal requests get the
    full search page including the form.
    """
    pages = []
    q = None

    if 'q' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            q = form.cleaned_data['q']
            pages = Page.objects.all().prefetch_related('content_type').search(q)
            paginator, pages = paginate(request, pages)
    else:
        form = SearchForm()

    pagination_query_params = ('q=%s' % q) if q else ''

    if request.is_ajax():
        return render(request, "wagtailadmin/pages/search_results.html", {
            'pages': pages,
            'query_string': q,
            'pagination_query_params': pagination_query_params,
        })

    return render(request, "wagtailadmin/pages/search.html", {
        'search_form': form,
        'pages': pages,
        'query_string': q,
        'pagination_query_params': pagination_query_params,
    })
def approve_moderation(request, revision_id):
    """Approve a revision awaiting moderation, publishing it (POST only)."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    page = revision.page
    if not page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied

    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(page.get_admin_display_title()))
        return redirect('wagtailadmin_home')

    if request.method == 'POST':
        revision.approve_moderation()
        messages.success(request, _("Page '{0}' published.").format(page.get_admin_display_title()), buttons=[
            messages.button(page.url, _('View live'), new_window=True),
            messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])
        # Notification failure is reported but does not block the approval.
        if not send_notification(revision.id, 'approved', request.user.pk):
            messages.error(request, _("Failed to send approval notifications"))

    return redirect('wagtailadmin_home')
def reject_moderation(request, revision_id):
    """Reject a revision awaiting moderation (POST only)."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    page = revision.page
    if not page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied

    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(page.get_admin_display_title()))
        return redirect('wagtailadmin_home')

    if request.method == 'POST':
        revision.reject_moderation()
        messages.success(request, _("Page '{0}' rejected for publication.").format(page.get_admin_display_title()), buttons=[
            messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
        ])
        # Notification failure is reported but does not block the rejection.
        if not send_notification(revision.id, 'rejected', request.user.pk):
            messages.error(request, _("Failed to send rejection notifications"))

    return redirect('wagtailadmin_home')
@require_GET
def preview_for_moderation(request, revision_id):
    """Render a preview of a revision that is awaiting moderation."""
    revision = get_object_or_404(PageRevision, id=revision_id)
    if not revision.page.permissions_for_user(request.user).can_publish():
        raise PermissionDenied

    if not revision.submitted_for_moderation:
        messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title()))
        return redirect('wagtailadmin_home')

    page = revision.as_page_object()
    request.revision_id = revision_id

    # pass in the real user request rather than page.dummy_request(), so that request.user
    # and request.revision_id will be picked up by the wagtail user bar
    return page.serve_preview(request, page.default_preview_mode)
@require_POST
def lock(request, page_id):
    """Lock a page against editing, then redirect (POST only)."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_lock():
        raise PermissionDenied

    # Already-locked pages are left untouched (no duplicate message).
    if not page.locked:
        page.locked = True
        page.save()
        messages.success(request, _("Page '{0}' is now locked.").format(page.get_admin_display_title()))

    # Only follow 'next' if it is a safe redirect target.
    redirect_to = request.POST.get('next', None)
    if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()):
        return redirect(redirect_to)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@require_POST
def unlock(request, page_id):
    """Release the edit lock on a page, then redirect (POST only)."""
    page = get_object_or_404(Page, id=page_id).specific
    # Unlocking requires the same permission as locking.
    if not page.permissions_for_user(request.user).can_lock():
        raise PermissionDenied

    if page.locked:
        page.locked = False
        page.save()
        messages.success(request, _("Page '{0}' is now unlocked.").format(page.get_admin_display_title()))

    # Only follow 'next' if it is a safe redirect target.
    redirect_to = request.POST.get('next', None)
    if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()):
        return redirect(redirect_to)
    return redirect('wagtailadmin_explore', page.get_parent().id)
@user_passes_test(user_has_any_page_permission)
def revisions_index(request, page_id):
    """List the revision history of a page."""
    page = get_object_or_404(Page, id=page_id).specific

    # Only creation-date ordering (either direction) is allowed; anything
    # else falls back to newest-first.
    ordering = request.GET.get('ordering', '-created_at')
    if ordering not in ('created_at', '-created_at'):
        ordering = '-created_at'

    paginator, revisions = paginate(request, page.revisions.order_by(ordering))

    return render(request, 'wagtailadmin/pages/revisions/index.html', {
        'page': page,
        'ordering': ordering,
        'pagination_query_params': "ordering=%s" % ordering,
        'revisions': revisions,
    })
def revisions_revert(request, page_id, revision_id):
    """Load a previous revision of a page into the edit view."""
    page = get_object_or_404(Page, id=page_id).specific
    if not page.permissions_for_user(request.user).can_edit():
        raise PermissionDenied

    revision = get_object_or_404(page.revisions, id=revision_id)
    revision_page = revision.as_page_object()

    # Build the edit form around the historical page object.
    content_type = ContentType.objects.get_for_model(page)
    page_class = content_type.model_class()
    edit_handler_class = page_class.get_edit_handler()
    form = edit_handler_class.get_form_class(page_class)(instance=revision_page)
    edit_handler = edit_handler_class(instance=revision_page, form=form)

    user_avatar = render_to_string('wagtailadmin/shared/user_avatar.html', {'user': revision.user})
    messages.warning(request, mark_safe(
        _("You are viewing a previous revision of this page from <b>%(created_at)s</b> by %(user)s") % {
            'created_at': revision.created_at.strftime("%d %b %Y %H:%M"),
            'user': user_avatar,
        }
    ))

    return render(request, 'wagtailadmin/pages/edit.html', {
        'page': page,
        'revision': revision,
        'is_revision': True,
        'content_type': content_type,
        'edit_handler': edit_handler,
        'errors_debug': None,
        'preview_modes': page.preview_modes,
        'form': form,  # Used in unit tests
    })
@user_passes_test(user_has_any_page_permission)
def revisions_view(request, page_id, revision_id):
    """Serve a preview of a specific revision of a page."""
    page = get_object_or_404(Page, id=page_id).specific
    revision_page = get_object_or_404(page.revisions, id=revision_id).as_page_object()
    return revision_page.serve_preview(page.dummy_request(request), page.default_preview_mode)
def revisions_compare(request, page_id, revision_id_a, revision_id_b):
    """Show a field-by-field diff between two versions of a page.

    ``revision_id_a``/``revision_id_b`` are revision pks, or the special
    values 'live' (the live page), 'earliest' (oldest revision; side A
    only) or 'latest' (newest revision; side B only).
    """
    page = get_object_or_404(Page, id=page_id).specific

    revision_a, revision_a_heading = _resolve_comparison_revision(
        page, revision_id_a, 'earliest', _("Earliest"))
    revision_b, revision_b_heading = _resolve_comparison_revision(
        page, revision_id_b, 'latest', _("Latest"))

    comparison = page.get_edit_handler().get_comparison()
    comparison = [comp(revision_a, revision_b) for comp in comparison]
    # Only show fields that actually differ between the two versions.
    comparison = [comp for comp in comparison if comp.has_changed()]

    return render(request, 'wagtailadmin/pages/revisions/compare.html', {
        'page': page,
        'revision_a_heading': revision_a_heading,
        'revision_a': revision_a,
        'revision_b_heading': revision_b_heading,
        'revision_b': revision_b,
        'comparison': comparison,
    })


def _resolve_comparison_revision(page, revision_id, extreme_keyword, extreme_heading):
    """Resolve one side of a revision comparison to (page_object, heading).

    Raises Http404 for 'live' when the page is not live, or for
    ``extreme_keyword`` ('earliest'/'latest') when the page has no revisions.
    """
    if revision_id == 'live':
        if not page.live:
            raise Http404
        return page, _("Live")

    if revision_id == extreme_keyword:
        ordered = page.revisions.order_by('created_at', 'id')
        revision = ordered.first() if extreme_keyword == 'earliest' else ordered.last()
        if not revision:
            raise Http404
        return revision.as_page_object(), extreme_heading

    # Fix: fetch the revision once. The original issued the identical
    # get_object_or_404(page.revisions, id=...) query twice per side — once
    # for the page object and once more just to read created_at.
    revision = get_object_or_404(page.revisions, id=revision_id)
    return revision.as_page_object(), str(revision.created_at)
| |
"""distutils.mwerkscompiler
Contains MWerksCompiler, an implementation of the abstract CCompiler class
for MetroWerks CodeWarrior on the Macintosh. Needs work to support CW on
Windows."""
import sys, os, string
from types import *
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
import distutils.util
import distutils.dir_util
import mkcwproject
class MWerksCompiler (CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'mwerks'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.r']
_exp_extension = '.exp'
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions)
res_extension = '.rsrc'
obj_extension = '.obj' # Not used, really
static_lib_extension = '.lib'
shared_lib_extension = '.slb'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = ''
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
def compile (self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None):
self.__sources = sources
self.__macros = macros
self.__include_dirs = include_dirs
# Don't need extra_preargs and extra_postargs for CW
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None):
# First examine a couple of options for things that aren't implemented yet
if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
if runtime_library_dirs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if extra_preargs or extra_postargs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if len(export_symbols) != 1:
raise DistutilsPlatformError, 'Need exactly one export symbol'
# Next there are various things for which we need absolute pathnames.
# This is because we (usually) create the project in a subdirectory of
# where we are now, and keeping the paths relative is too much work right
# now.
sources = map(self._filename_to_abs, self.__sources)
include_dirs = map(self._filename_to_abs, self.__include_dirs)
if objects:
objects = map(self._filename_to_abs, objects)
else:
objects = []
if build_temp:
build_temp = self._filename_to_abs(build_temp)
else:
build_temp = os.curdir()
if output_dir:
output_filename = os.path.join(output_dir, output_filename)
# The output filename needs special handling: splitting it into dir and
# filename part. Actually I'm not sure this is really needed, but it
# can't hurt.
output_filename = self._filename_to_abs(output_filename)
output_dir, output_filename = os.path.split(output_filename)
# Now we need the short names of a couple of things for putting them
# into the project.
if output_filename[-8:] == '.ppc.slb':
basename = output_filename[:-8]
else:
basename = os.path.strip(output_filename)[0]
projectname = basename + '.mcp'
targetname = basename
xmlname = basename + '.xml'
exportname = basename + '.mcp.exp'
prefixname = 'mwerks_%s_config.h'%basename
# Create the directories we need
distutils.dir_util.mkpath(build_temp, self.verbose, self.dry_run)
distutils.dir_util.mkpath(output_dir, self.verbose, self.dry_run)
# And on to filling in the parameters for the project builder
settings = {}
settings['mac_exportname'] = exportname
settings['mac_outputdir'] = output_dir
settings['mac_dllname'] = output_filename
settings['mac_targetname'] = targetname
settings['sysprefix'] = sys.prefix
settings['mac_sysprefixtype'] = 'Absolute'
sourcefilenames = []
sourcefiledirs = []
for filename in sources + objects:
dirname, filename = os.path.split(filename)
sourcefilenames.append(filename)
if not dirname in sourcefiledirs:
sourcefiledirs.append(dirname)
settings['sources'] = sourcefilenames
settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
if self.dry_run:
print 'CALLING LINKER IN', os.getcwd()
for key, value in settings.items():
print '%20.20s %s'%(key, value)
return
# Build the export file
exportfilename = os.path.join(build_temp, exportname)
if self.verbose:
print '\tCreate export file', exportfilename
fp = open(exportfilename, 'w')
fp.write('%s\n'%export_symbols[0])
fp.close()
# Generate the prefix file, if needed, and put it in the settings
if self.__macros:
prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
fp = open(prefixfilename, 'w')
fp.write('#include "mwerks_plugin_config.h"\n')
for name, value in self.__macros:
if value is None:
fp.write('#define %s\n'%name)
else:
fp.write('#define %s "%s"\n'%(name, value))
fp.close()
settings['prefixname'] = prefixname
# Build the XML file. We need the full pathname (only lateron, really)
# because we pass this pathname to CodeWarrior in an AppleEvent, and CW
# doesn't have a clue about our working directory.
xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
if self.verbose:
print '\tCreate XML file', xmlfilename
xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
xmlbuilder.generate()
xmldata = settings['tmp_projectxmldata']
fp = open(xmlfilename, 'w')
fp.write(xmldata)
fp.close()
# Generate the project. Again a full pathname.
projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
if self.verbose:
print '\tCreate project file', projectfilename
mkcwproject.makeproject(xmlfilename, projectfilename)
# And build it
if self.verbose:
print '\tBuild project'
mkcwproject.buildproject(projectfilename)
def _filename_to_abs(self, filename):
# Some filenames seem to be unix-like. Convert to Mac names.
## if '/' in filename and ':' in filename:
## raise DistutilsPlatformError, 'Filename may be Unix or Mac style: %s'%filename
## if '/' in filename:
## filename = macurl2path(filename)
filename = distutils.util.convert_path(filename)
if not os.path.isabs(filename):
curdir = os.getcwd()
filename = os.path.join(curdir, filename)
return filename
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rohit Agarwalla, Cisco Systems, Inc.
from sqlalchemy.orm import exc
from neutron.db import api as db
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import network_models_v2
# Do NOT remove this import. It is required for all the models to be seen
# by db.initalize() when called from VirtualPhysicalSwitchModelV2.__init__.
from neutron.plugins.cisco.db import nexus_models_v2 # noqa
from neutron.plugins.openvswitch import ovs_models_v2
LOG = logging.getLogger(__name__)
def get_all_qoss(tenant_id):
    """Lists all the qos to tenant associations."""
    LOG.debug(_("get_all_qoss() called"))
    session = db.get_session()
    query = session.query(network_models_v2.QoS).filter_by(tenant_id=tenant_id)
    return query.all()
def get_qos(tenant_id, qos_id):
    """Lists the qos given a tenant_id and qos_id."""
    LOG.debug(_("get_qos() called"))
    session = db.get_session()
    query = (session.query(network_models_v2.QoS).
             filter_by(tenant_id=tenant_id).
             filter_by(qos_id=qos_id))
    try:
        return query.one()
    except exc.NoResultFound:
        raise c_exc.QosNotFound(qos_id=qos_id, tenant_id=tenant_id)
def add_qos(tenant_id, qos_name, qos_desc):
    """Adds a qos to tenant association."""
    LOG.debug(_("add_qos() called"))
    session = db.get_session()
    try:
        # An existing association with this name for the tenant is a duplicate.
        (session.query(network_models_v2.QoS).
         filter_by(tenant_id=tenant_id).
         filter_by(qos_name=qos_name).one())
    except exc.NoResultFound:
        qos = network_models_v2.QoS(qos_id=uuidutils.generate_uuid(),
                                    tenant_id=tenant_id,
                                    qos_name=qos_name,
                                    qos_desc=qos_desc)
        session.add(qos)
        session.flush()
        return qos
    raise c_exc.QosNameAlreadyExists(qos_name=qos_name,
                                     tenant_id=tenant_id)
def remove_qos(tenant_id, qos_id):
    """Removes a qos to tenant association."""
    session = db.get_session()
    try:
        qos = (session.query(network_models_v2.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_id=qos_id).one())
    except exc.NoResultFound:
        # Nothing to remove; treat as a no-op.
        return None
    session.delete(qos)
    session.flush()
    return qos
def update_qos(tenant_id, qos_id, new_qos_name=None):
    """Updates a qos to tenant association."""
    session = db.get_session()
    try:
        qos = (session.query(network_models_v2.QoS).
               filter_by(tenant_id=tenant_id).
               filter_by(qos_id=qos_id).one())
    except exc.NoResultFound:
        raise c_exc.QosNotFound(qos_id=qos_id,
                                tenant_id=tenant_id)
    if new_qos_name:
        qos["qos_name"] = new_qos_name
    session.merge(qos)
    session.flush()
    return qos
def get_all_credentials():
    """Lists all the creds for a tenant."""
    session = db.get_session()
    return session.query(network_models_v2.Credential).all()
def get_credential(credential_id):
    """Lists the creds for given a cred_id."""
    session = db.get_session()
    query = session.query(network_models_v2.Credential).filter_by(
        credential_id=credential_id)
    try:
        return query.one()
    except exc.NoResultFound:
        raise c_exc.CredentialNotFound(credential_id=credential_id)
def get_credential_name(credential_name):
    """Lists the creds for given a cred_name."""
    session = db.get_session()
    query = session.query(network_models_v2.Credential).filter_by(
        credential_name=credential_name)
    try:
        return query.one()
    except exc.NoResultFound:
        raise c_exc.CredentialNameNotFound(credential_name=credential_name)
def add_credential(credential_name, user_name, password, type):
    """Create a credential."""
    # NOTE: the parameter name 'type' shadows the builtin, but it is kept
    # because callers may pass it by keyword.
    session = db.get_session()
    try:
        # An existing credential with this name is a duplicate.
        (session.query(network_models_v2.Credential).
         filter_by(credential_name=credential_name).one())
    except exc.NoResultFound:
        cred = network_models_v2.Credential(
            credential_id=uuidutils.generate_uuid(),
            credential_name=credential_name,
            user_name=user_name,
            password=password,
            type=type)
        session.add(cred)
        session.flush()
        return cred
    raise c_exc.CredentialAlreadyExists(credential_name=credential_name)
def remove_credential(credential_id):
    """Removes a credential."""
    session = db.get_session()
    try:
        cred = (session.query(network_models_v2.Credential).
                filter_by(credential_id=credential_id).one())
    except exc.NoResultFound:
        # Unknown id; treat as a no-op.
        return None
    session.delete(cred)
    session.flush()
    return cred
def update_credential(credential_id,
                      new_user_name=None, new_password=None):
    """Updates a credential for a tenant."""
    session = db.get_session()
    try:
        cred = (session.query(network_models_v2.Credential).
                filter_by(credential_id=credential_id).one())
    except exc.NoResultFound:
        raise c_exc.CredentialNotFound(credential_id=credential_id)
    if new_user_name:
        cred["user_name"] = new_user_name
    if new_password:
        cred["password"] = new_password
    session.merge(cred)
    session.flush()
    return cred
def get_all_n1kv_credentials():
    """Return a query over all credentials of type 'n1kv'."""
    session = db.get_session()
    query = session.query(network_models_v2.Credential)
    return query.filter_by(type='n1kv')
def add_provider_network(network_id, network_type, segmentation_id):
    """Add a network to the provider network table."""
    session = db.get_session()
    existing = (session.query(network_models_v2.ProviderNetwork).
                filter_by(network_id=network_id).first())
    if existing:
        raise c_exc.ProviderNetworkExists(network_id)
    session.add(network_models_v2.ProviderNetwork(
        network_id=network_id,
        network_type=network_type,
        segmentation_id=segmentation_id))
    session.flush()
def remove_provider_network(network_id):
    """Remove network_id from the provider network table.

    :param network_id: Any network id. If it is not in the table, do nothing.
    :return: network_id if it was in the table and successfully removed.
    """
    session = db.get_session()
    pnet = (session.query(network_models_v2.ProviderNetwork).
            filter_by(network_id=network_id).first())
    if pnet is None:
        return None
    session.delete(pnet)
    session.flush()
    return network_id
def is_provider_network(network_id):
    """Return True if network_id is in the provider network table."""
    session = db.get_session()
    query = session.query(network_models_v2.ProviderNetwork)
    if query.filter_by(network_id=network_id).first():
        return True
    # NOTE: falls through to an implicit None (falsy) when not present,
    # preserving the original return contract.
def is_provider_vlan(vlan_id):
    """Check for a VLAN provider network with the specified vlan_id.

    Returns True if the provider network table contains a vlan network
    with the specified vlan_id; implicitly returns None otherwise.
    """
    session = db.get_session()
    match = (session.query(network_models_v2.ProviderNetwork).
             filter_by(network_type=const.NETWORK_TYPE_VLAN,
                       segmentation_id=vlan_id).first())
    if match:
        return True
def get_ovs_vlans():
    """Return the vlan ids of all allocated OVS VLAN allocations."""
    session = db.get_session()
    allocated = (session.query(ovs_models_v2.VlanAllocation.vlan_id).
                 filter_by(allocated=True))
    vlan_ids = []
    for row in allocated:
        vlan_ids.append(row.vlan_id)
    return vlan_ids
class Credential_db_mixin(object):
    """Mixin class for Cisco Credentials as a resource.

    Intended to be mixed into a Neutron DB plugin class that provides
    ``_fields`` and ``_get_collection``; the module-level credential helpers
    (add_credential, get_credential, update_credential, remove_credential)
    do the actual database work.
    """
    def _make_credential_dict(self, credential, fields=None):
        # Convert a Credential DB row into the API-facing dict, filtered
        # down to the requested fields by the base plugin's _fields().
        res = {'credential_id': credential['credential_id'],
               'credential_name': credential['credential_name'],
               'user_name': credential['user_name'],
               'password': credential['password'],
               'type': credential['type']}
        return self._fields(res, fields)
    def create_credential(self, context, credential):
        """Create a credential."""
        # The request body nests the attributes under a 'credential' key.
        c = credential['credential']
        cred = add_credential(c['credential_name'],
                              c['user_name'],
                              c['password'],
                              c['type'])
        return self._make_credential_dict(cred)
    def get_credentials(self, context, filters=None, fields=None):
        """Retrieve a list of credentials."""
        return self._get_collection(context,
                                    network_models_v2.Credential,
                                    self._make_credential_dict,
                                    filters=filters,
                                    fields=fields)
    def get_credential(self, context, id, fields=None):
        """Retrieve the requested credential based on its id."""
        # Calls the module-level get_credential(), not this method.
        credential = get_credential(id)
        return self._make_credential_dict(credential, fields)
    def update_credential(self, context, id, credential):
        """Update a credential based on its id."""
        # Calls the module-level update_credential(); raises
        # CredentialNotFound if the id does not exist.
        c = credential['credential']
        cred = update_credential(id,
                                 c['user_name'],
                                 c['password'])
        return self._make_credential_dict(cred)
    def delete_credential(self, context, id):
        """Delete a credential based on its id."""
        # Returns the deleted row, or None if it did not exist.
        return remove_credential(id)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import functools
import itertools
import sys
import types
from time import time as wallclock
from heat.openstack.common import excutils
from heat.openstack.common import log as logging
from heat.openstack.common.gettextutils import _
logger = logging.getLogger(__name__)
# Whether TaskRunner._sleep actually does an eventlet sleep when called.
ENABLE_SLEEP = True
def task_description(task):
    """
    Return a human-readable string description of a task suitable for logging
    the status of the task.
    """
    name = getattr(task, '__name__', None)
    if name is not None:
        # Bound methods are described together with their instance.
        if isinstance(task, types.MethodType) and hasattr(task, '__self__'):
            return '%s from %s' % (name, task.__self__)
        # Plain functions are described by name alone.
        if isinstance(task, types.FunctionType):
            return str(name)
    # Anything else (callables, partials, instances) falls back to repr().
    return repr(task)
class Timeout(BaseException):
    """
    Exception raised within a task once it has exceeded its allotted
    (wallclock) running time.

    Raising it inside the task lets the task perform any necessary cleanup,
    as well as use a different exception to notify the controlling task if
    appropriate. If the task suppresses the exception altogether, it will be
    cancelled but the controlling task will not be notified of the timeout.
    """
    def __init__(self, task_runner, timeout):
        """Initialise with the TaskRunner and a timeout period in seconds."""
        super(Timeout, self).__init__(_('%s Timed out') % task_runner)
        # Note that we don't attempt to handle leap seconds or large clock
        # jumps here. The latter are assumed to be rare and the former
        # negligible in the context of the timeout. Time zone adjustments,
        # Daylight Savings and the like *are* handled. PEP 418 adds a proper
        # monotonic clock, but only in Python 3.3.
        self._endtime = wallclock() + timeout
    def expired(self):
        # True once the wallclock deadline has passed.
        return wallclock() > self._endtime
class TaskRunner(object):
    """
    Wrapper for a resumable task (co-routine).

    Drives a task function that may return a generator: start() runs the
    first step, step() runs one step at a time, and run_to_completion()
    loops (with optional eventlet sleeps) until the task is finished.
    """
    def __init__(self, task, *args, **kwargs):
        """
        Initialise with a task function, and arguments to be passed to it when
        it is started.
        The task function may be a co-routine that yields control flow between
        steps.
        """
        assert callable(task), "Task is not callable"
        self._task = task
        self._args = args
        self._kwargs = kwargs
        # _runner is None before start(); afterwards it is either the task's
        # generator, or False when the task completed synchronously.
        self._runner = None
        self._done = False
        self._timeout = None
        self.name = task_description(task)
    def __str__(self):
        """Return a human-readable string representation of the task."""
        return 'Task %s' % self.name
    def _sleep(self, wait_time):
        """Sleep for the specified number of seconds."""
        # ENABLE_SLEEP is a module-level switch so tests can turn the
        # eventlet sleep off entirely.
        if ENABLE_SLEEP and wait_time is not None:
            logger.debug('%s sleeping' % str(self))
            eventlet.sleep(wait_time)
    def __call__(self, wait_time=1, timeout=None):
        """
        Start and run the task to completion.
        The task will sleep for `wait_time` seconds between steps. To avoid
        sleeping, pass `None` for `wait_time`.
        """
        self.start(timeout=timeout)
        self.run_to_completion(wait_time=wait_time)
    def start(self, timeout=None):
        """
        Initialise the task and run its first step.
        If a timeout is specified, any attempt to step the task after that
        number of seconds has elapsed will result in a Timeout being
        raised inside the task.
        """
        assert self._runner is None, "Task already started"
        logger.debug('%s starting' % str(self))
        if timeout is not None:
            self._timeout = Timeout(self, timeout)
        result = self._task(*self._args, **self._kwargs)
        if isinstance(result, types.GeneratorType):
            # Resumable task: keep the generator and run its first step.
            self._runner = result
            self.step()
        else:
            # Plain function: it already ran to completion in the call above.
            self._runner = False
            self._done = True
            logger.debug('%s done (not resumable)' % str(self))
    def step(self):
        """
        Run another step of the task, and return True if the task is complete;
        False otherwise.
        """
        if not self.done():
            assert self._runner is not None, "Task not started"
            if self._timeout is not None and self._timeout.expired():
                logger.info('%s timed out' % str(self))
                try:
                    # Deliver the Timeout inside the task so it can clean up
                    # or translate it into another exception.
                    self._runner.throw(self._timeout)
                except StopIteration:
                    self._done = True
                else:
                    # Clean up in case task swallows exception without exiting
                    self.cancel()
            else:
                logger.debug('%s running' % str(self))
                try:
                    next(self._runner)
                except StopIteration:
                    self._done = True
                    logger.debug('%s complete' % str(self))
        return self._done
    def run_to_completion(self, wait_time=1):
        """
        Run the task to completion.
        The task will sleep for `wait_time` seconds between steps. To avoid
        sleeping, pass `None` for `wait_time`.
        """
        while not self.step():
            self._sleep(wait_time)
    def cancel(self):
        """Cancel the task if it is running."""
        if self.started() and not self.done():
            logger.debug('%s cancelled' % str(self))
            self._runner.close()
            self._done = True
    def started(self):
        """Return True if the task has been started."""
        return self._runner is not None
    def done(self):
        """Return True if the task is complete."""
        return self._done
    def __nonzero__(self):
        """Return True if there are steps remaining."""
        # Python 2 truth-value hook: bool(runner) is False once done.
        return not self.done()
def wrappertask(task):
    """
    Decorator for a task that needs to drive a subtask.
    This is essentially a replacement for the Python 3-only "yield from"
    keyword (PEP 380), using the "yield" keyword that is supported in
    Python 2. For example:
    @wrappertask
    def parent_task(self):
        self.setup()
        yield self.child_task()
        self.cleanup()
    """
    @functools.wraps(task)
    def wrapper(*args, **kwargs):
        parent = task(*args, **kwargs)
        subtask = next(parent)
        # Loop ends when next(parent) raises StopIteration, which simply
        # terminates this wrapper generator as well.
        while True:
            try:
                if subtask is not None:
                    subtask_running = True
                    try:
                        step = next(subtask)
                    except StopIteration:
                        subtask_running = False
                    # Relay each value the subtask yields outward, and relay
                    # anything thrown into the wrapper back into the subtask.
                    while subtask_running:
                        try:
                            yield step
                        except GeneratorExit as exit:
                            # NOTE: 'exit' shadows the builtin; kept as-is.
                            # Shut the subtask down before exiting ourselves.
                            subtask.close()
                            raise exit
                        except:
                            try:
                                step = subtask.throw(*sys.exc_info())
                            except StopIteration:
                                subtask_running = False
                        else:
                            try:
                                step = next(subtask)
                            except StopIteration:
                                subtask_running = False
                else:
                    # Parent yielded None: just yield control once.
                    yield
            except GeneratorExit as exit:
                # Wrapper is being closed: close the parent too.
                parent.close()
                raise exit
            except:
                # Deliver exceptions (e.g. subtask failures thrown from the
                # caller) into the parent so it can handle them.
                subtask = parent.throw(*sys.exc_info())
            else:
                subtask = next(parent)
    return wrapper
class DependencyTaskGroup(object):
    """
    A task which manages a group of subtasks that have ordering dependencies.
    """
    def __init__(self, dependencies, task=lambda o: o(),
                 reverse=False, name=None):
        """
        Initialise with the task dependencies and (optionally) a task to run on
        each.
        If no task is supplied, it is assumed that the tasks are stored
        directly in the dependency tree. If a task is supplied, the object
        stored in the dependency tree is passed as an argument.
        """
        # One TaskRunner per dependency node, keyed by the node itself.
        self._runners = dict((o, TaskRunner(task, o)) for o in dependencies)
        self._graph = dependencies.graph(reverse=reverse)
        if name is None:
            name = '(%s) %s' % (getattr(task, '__name__',
                                        task_description(task)),
                                str(dependencies))
        self.name = name
    def __repr__(self):
        """Return a string representation of the task."""
        return '%s(%s)' % (type(self).__name__, self.name)
    def __call__(self):
        """Return a co-routine which runs the task group."""
        try:
            while any(self._runners.itervalues()):
                # Start tasks whose dependencies are all satisfied.
                for k, r in self._ready():
                    r.start()
                yield
                # Step the running tasks; completed keys leave the graph,
                # which unblocks their dependents on the next pass.
                for k, r in self._running():
                    if r.step():
                        del self._graph[k]
        except:
            with excutils.save_and_reraise_exception():
                # On any failure, cancel every runner before re-raising.
                for r in self._runners.itervalues():
                    r.cancel()
    def _ready(self):
        """
        Iterate over all subtasks that are ready to start - i.e. all their
        dependencies have been satisfied but they have not yet been started.
        """
        for k, n in self._graph.iteritems():
            # An empty requirement set means the node is unblocked.
            if not n:
                runner = self._runners[k]
                if not runner.started():
                    yield k, runner
    def _running(self):
        """
        Iterate over all subtasks that are currently running - i.e. they have
        been started but have not yet completed.
        """
        # NOTE: tuple parameter unpacking in the lambda is Python 2-only
        # syntax, consistent with the iteritems/ifilter usage in this file.
        running = lambda (k, r): k in self._graph and r.started()
        return itertools.ifilter(running, self._runners.iteritems())
class PollingTaskGroup(object):
    """
    A task which manages a group of subtasks.
    When the task is started, all of its subtasks are also started. The task
    completes when all subtasks are complete.
    Once started, the subtasks are assumed to be only polling for completion
    of an asynchronous operation, so no attempt is made to give them equal
    scheduling slots.
    """
    def __init__(self, tasks, name=None):
        """Initialise with a list of tasks."""
        self._tasks = list(tasks)
        if name is None:
            name = ', '.join(task_description(t) for t in self._tasks)
        self.name = name
    @staticmethod
    def _args(arg_lists):
        """Return a list containing the positional args for each subtask."""
        # zip() transposes per-parameter lists into per-subtask tuples.
        return zip(*arg_lists)
    @staticmethod
    def _kwargs(kwarg_lists):
        """Return a list containing the keyword args for each subtask."""
        # Pair each keyword name with each of its values, then transpose so
        # each subtask gets one dict of keyword arguments.
        keygroups = (itertools.izip(itertools.repeat(name),
                                    arglist)
                     for name, arglist in kwarg_lists.iteritems())
        return [dict(kwargs) for kwargs in itertools.izip(*keygroups)]
    @classmethod
    def from_task_with_args(cls, task, *arg_lists, **kwarg_lists):
        """
        Return a new PollingTaskGroup where each subtask is identical except
        for the arguments passed to it.
        Each argument to use should be passed as a list (or iterable) of values
        such that one is passed in the corresponding position for each subtask.
        The number of subtasks spawned depends on the length of the argument
        lists. For example:
        PollingTaskGroup.from_task_with_args(my_task,
                                             [1, 2, 3],
                                             alpha=['a', 'b', 'c'])
        will start three TaskRunners that will run:
        my_task(1, alpha='a')
        my_task(2, alpha='b')
        my_task(3, alpha='c')
        respectively.
        If multiple arguments are supplied, each list should be of the same
        length. In the case of any discrepancy, the length of the shortest
        argument list will be used, and any extra arguments discarded.
        """
        args_list = cls._args(arg_lists)
        kwargs_list = cls._kwargs(kwarg_lists)
        # When only one kind of argument list was given, pad the other side
        # with empty args/kwargs of matching length.
        if kwarg_lists and not arg_lists:
            args_list = [[]] * len(kwargs_list)
        elif arg_lists and not kwarg_lists:
            kwargs_list = [{}] * len(args_list)
        task_args = itertools.izip(args_list, kwargs_list)
        tasks = (functools.partial(task, *a, **kwa) for a, kwa in task_args)
        return cls(tasks, name=task_description(task))
    def __repr__(self):
        """Return a string representation of the task group."""
        return '%s(%s)' % (type(self).__name__, self.name)
    def __call__(self):
        """Return a co-routine which runs the task group."""
        runners = [TaskRunner(t) for t in self._tasks]
        try:
            for r in runners:
                r.start()
            while runners:
                yield
                # dropwhile steps runners from the front of the list,
                # discarding each one that completes, and stops stepping at
                # the first runner that is still going; later runners are
                # stepped on subsequent passes.
                runners = list(itertools.dropwhile(lambda r: r.step(),
                                                   runners))
        except:
            with excutils.save_and_reraise_exception():
                # Cancel whatever is still outstanding before re-raising.
                for r in runners:
                    r.cancel()
| |
import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# The nose comparison helpers are optional: only cross-check against them
# when the installed nose actually provides assert_less.
try:
    from nose.tools import assert_less
    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
    pass
# As above, skip the cross-check when nose lacks assert_greater.
try:
    from nose.tools import assert_greater
    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
    pass
def test_assert_less_equal():
    """assert_less_equal accepts <= pairs and rejects a > pair."""
    for smaller, larger in ((0, 1), (1, 1)):
        assert_less_equal(smaller, larger)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    """assert_greater_equal accepts >= pairs and rejects a < pair."""
    for larger, smaller in ((1, 0), (1, 1)):
        assert_greater_equal(larger, smaller)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state is a no-op for estimators without random_state and
    sets the attribute for estimators that have one."""
    lda = LinearDiscriminantAnalysis()
    tree = DecisionTreeClassifier()
    # Linear Discriminant Analysis doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    """assert_raise_message checks both the exception type and its message."""
    def _raise_ValueError(message):
        raise ValueError(message)
    def _no_raise():
        pass
    # Matching type and message: passes silently.
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")
    # Right type, wrong message: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")
    # Wrong expected type: the original ValueError propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")
    # Nothing raised at all: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)
    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager are
    # working as expected
    def _warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
    def _multiple_warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
        warnings.warn("deprecation warning")
    # Check the function directly
    assert_no_warnings(ignore_warnings(_warning_function))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=DeprecationWarning))
    # Only the named category is suppressed; other warnings pass through.
    assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
                                                     category=UserWarning))
    assert_warns(UserWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=DeprecationWarning))
    assert_warns(DeprecationWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=UserWarning))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=(DeprecationWarning,
                                                 UserWarning)))
    # Check the decorator
    @ignore_warnings
    def decorator_no_warning():
        _warning_function()
        _multiple_warning_function()
    @ignore_warnings(category=(DeprecationWarning, UserWarning))
    def decorator_no_warning_multiple():
        _multiple_warning_function()
    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_warning():
        _warning_function()
    @ignore_warnings(category=UserWarning)
    def decorator_no_user_warning():
        _warning_function()
    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_multiple_warning():
        _multiple_warning_function()
    @ignore_warnings(category=UserWarning)
    def decorator_no_user_multiple_warning():
        _multiple_warning_function()
    assert_no_warnings(decorator_no_warning)
    assert_no_warnings(decorator_no_warning_multiple)
    assert_no_warnings(decorator_no_deprecation_warning)
    assert_warns(DeprecationWarning, decorator_no_user_warning)
    assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
    # Check the context manager
    def context_manager_no_warning():
        with ignore_warnings():
            _warning_function()
    def context_manager_no_warning_multiple():
        with ignore_warnings(category=(DeprecationWarning, UserWarning)):
            _multiple_warning_function()
    def context_manager_no_deprecation_warning():
        with ignore_warnings(category=DeprecationWarning):
            _warning_function()
    def context_manager_no_user_warning():
        with ignore_warnings(category=UserWarning):
            _warning_function()
    def context_manager_no_deprecation_multiple_warning():
        with ignore_warnings(category=DeprecationWarning):
            _multiple_warning_function()
    def context_manager_no_user_multiple_warning():
        with ignore_warnings(category=UserWarning):
            _multiple_warning_function()
    assert_no_warnings(context_manager_no_warning)
    assert_no_warnings(context_manager_no_warning_multiple)
    assert_no_warnings(context_manager_no_deprecation_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_warning)
    assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired by numpy 1.7, with an alteration to check
# that the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        """assert_warns works despite externally set filters and passes
        through the wrapped function's return value."""
        def f():
            warnings.warn("yo")
            return 3
        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)
        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
    def test_warn_wrong_warning(self):
        """assert_warns must reject a warning of the wrong category."""
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        # Save the filters so they can be restored whatever happens.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                # Reaching this line means the wrong category was accepted.
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| |
# Copyright 2013 OpenStack Foundation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ML2 Mechanism Driver for Cisco Nexus platforms.
"""
import threading
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
config as conf)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
exceptions as excep)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_db_v2 as nxos_db)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_network_driver)
from neutron.common import constants as n_const
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.i18n import _LW, _LE, _LI
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
LOG = logging.getLogger(__name__)
# Warning template logged when a host id has no matching entry in the
# ml2_cisco switch configuration sections.
HOST_NOT_FOUND = _LW("Host %s not defined in switch configuration section.")
class CiscoNexusCfgMonitor(object):
    """Replay config on communication failure between Openstack to Nexus.

    Works with the owning CiscoNexusMechanismDriver (``mdriver``): the
    driver holds per-switch state (active flag, retry count, nexus type)
    while this class periodically verifies connectivity and replays the
    pending configuration when a switch comes back.
    """
    def __init__(self, driver, mdriver):
        """Initialise with the Nexus driver and the owning mechanism driver.

        All configured switches start out marked inactive until
        check_connections() verifies they are reachable.
        """
        self._driver = driver
        self._mdriver = mdriver
        switch_connections = self._mdriver.get_switch_ips()
        for switch_ip in switch_connections:
            self._mdriver.set_switch_ip_and_active_state(
                switch_ip, False)
    def _configure_nexus_type(self, switch_ip, nexus_type):
        """Record the switch's platform type, once, if it is a known type."""
        if nexus_type not in (const.NEXUS_3K, const.NEXUS_5K,
                              const.NEXUS_7K, const.NEXUS_9K):
            LOG.error(_LE("Received invalid Nexus type %(nexus_type)d "
                      "for switch ip %(switch_ip)s"),
                      {'nexus_type': nexus_type, 'switch_ip': switch_ip})
            return
        # Only record the type the first time it is learned.
        if (self._mdriver.get_switch_nexus_type(switch_ip) ==
            const.NEXUS_TYPE_INVALID):
            self._mdriver.set_switch_nexus_type(switch_ip, nexus_type)
    def replay_config(self, switch_ip):
        """Sends pending config data in OpenStack to Nexus.

        NVE members are re-created first; any failure there aborts the
        replay (flagging the switch inactive when monitoring is enabled).
        Port bindings are then replayed via the mechanism driver.
        """
        LOG.debug("Replaying config for switch ip %(switch_ip)s",
                  {'switch_ip': switch_ip})
        nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
        for x in nve_bindings:
            try:
                self._driver.create_nve_member(switch_ip,
                    const.NVE_INT_NUM, x.vni, x.mcast_group)
            except Exception as e:
                LOG.error(_LE("Failed to configure nve_member for "
                    "switch %(switch_ip)s, vni %(vni)s"
                    "Reason:%(reason)s "),
                    {'switch_ip': switch_ip, 'vni': x.vni,
                     'reason': e})
                # BUG FIX: monitor_timeout is an attribute of the mechanism
                # driver (set in initialize()), not of this monitor object.
                # The previous `self.monitor_timeout` raised AttributeError
                # whenever this error path was taken.
                if self._mdriver.monitor_timeout > 0:
                    self._mdriver.register_switch_as_inactive(switch_ip,
                        'replay create_nve_member')
                return
        try:
            port_bindings = nxos_db.get_nexusport_switch_bindings(switch_ip)
        except excep.NexusPortBindingNotFound:
            LOG.warn(_LW("No port entries found for switch ip "
                     "%(switch_ip)s during replay."),
                     {'switch_ip': switch_ip})
            return
        self._mdriver.configure_switch_entries(switch_ip,
            port_bindings)
    def check_connections(self):
        """Check connection between Openstack to Nexus device.

        For each monitored switch: probe it; on loss of connectivity mark
        it inactive; on recovery record its type, mark it active and replay
        the pending configuration. Replay failures bump a retry counter
        which, once past switch_replay_count, stops further attempts.
        """
        switch_connections = self._mdriver.get_switch_state()
        for switch_ip in switch_connections:
            state = self._mdriver.get_switch_ip_and_active_state(switch_ip)
            retry_count = self._mdriver.get_switch_retry_count(switch_ip)
            cfg_retry = conf.cfg.CONF.ml2_cisco.switch_replay_count
            if retry_count > cfg_retry:
                # Already past the threshold (and warned): skip silently.
                continue
            if retry_count == cfg_retry:
                # Warn exactly once when the threshold is reached.
                LOG.warn(_LW("check_connections() switch "
                         "%(switch_ip)s retry count %(rcnt)d exceeded "
                         "configured threshold %(thld)d"),
                         {'switch_ip': switch_ip,
                          'rcnt': retry_count,
                          'thld': cfg_retry})
                self._mdriver.incr_switch_retry_count(switch_ip)
                continue
            LOG.debug("check_connections() switch "
                      "%(switch_ip)s state %(state)d",
                      {'switch_ip': switch_ip, 'state': state})
            try:
                nexus_type = self._driver.get_nexus_type(switch_ip)
            except Exception:
                if state is True:
                    LOG.error(_LE("Lost connection to switch ip "
                        "%(switch_ip)s"), {'switch_ip': switch_ip})
                    self._mdriver.set_switch_ip_and_active_state(
                        switch_ip, False)
            else:
                if state is False:
                    self._configure_nexus_type(switch_ip, nexus_type)
                    LOG.info(_LI("Re-established connection to switch "
                        "ip %(switch_ip)s"),
                        {'switch_ip': switch_ip})
                    self._mdriver.set_switch_ip_and_active_state(
                        switch_ip, True)
                    self.replay_config(switch_ip)
                    # If replay failed, it stops trying to configure db entries
                    # and sets switch state to False so this caller knows
                    # it failed.
                    if self._mdriver.get_switch_ip_and_active_state(
                        switch_ip) is False:
                        self._mdriver.incr_switch_retry_count(switch_ip)
class CiscoNexusMechanismDriver(api.MechanismDriver):
"""Cisco Nexus ML2 Mechanism Driver."""
    def initialize(self):
        """Set up the mechanism driver from ml2_conf.ini.

        Loads the Nexus switch dictionary, creates the low-level driver and
        the replay monitor, and — when switch_heartbeat_time > 0 — starts
        the periodic monitor thread.
        """
        # Create ML2 device dictionary from ml2_conf.ini entries.
        conf.ML2MechCiscoConfig()
        # Extract configuration parameters from the configuration file.
        self._nexus_switches = conf.ML2MechCiscoConfig.nexus_dict
        LOG.debug("nexus_switches found = %s", self._nexus_switches)
        # Save dynamic switch information
        self._switch_state = {}
        self.driver = nexus_network_driver.CiscoNexusDriver()
        self.monitor = CiscoNexusCfgMonitor(self.driver, self)
        self.timer = None
        self.monitor_timeout = conf.cfg.CONF.ml2_cisco.switch_heartbeat_time
        self.monitor_lock = threading.Lock()
        # Start the monitor thread
        if self.monitor_timeout > 0:
            self._monitor_thread()
    def set_switch_ip_and_active_state(self, switch_ip, state):
        """Record whether the connection to switch_ip is currently active."""
        self._switch_state[switch_ip, '_connect_active'] = state
def get_switch_ip_and_active_state(self, switch_ip):
if (switch_ip, '_connect_active') in self._switch_state:
return self._switch_state[switch_ip, '_connect_active']
else:
return False
    def register_switch_as_inactive(self, switch_ip, func_name):
        """Mark switch_ip inactive and log the failing driver entry point.

        NOTE(review): LOG.exception records the active traceback, so this is
        intended to be called from within an exception handler.
        """
        self.set_switch_ip_and_active_state(switch_ip, False)
        LOG.exception(
            _LE("Nexus Driver cisco_nexus failed in %(func_name)s"),
            {'func_name': func_name})
    def set_switch_nexus_type(self, switch_ip, type):
        """Record the Nexus platform type for switch_ip.

        NOTE: the parameter name 'type' shadows the builtin; kept for
        interface compatibility with existing callers.
        """
        self._switch_state[switch_ip, '_nexus_type'] = type
def get_switch_nexus_type(self, switch_ip):
if (switch_ip, '_nexus_type') in self._switch_state:
return self._switch_state[switch_ip, '_nexus_type']
else:
return -1
    def reset_switch_retry_count(self, switch_ip):
        """Reset the replay retry counter for switch_ip to zero."""
        self._switch_state[switch_ip, '_retry_count'] = 0
def incr_switch_retry_count(self, switch_ip):
if (switch_ip, '_retry_count') in self._switch_state:
self._switch_state[switch_ip, '_retry_count'] += 1
else:
self.reset_switch_retry_count(switch_ip)
def get_switch_retry_count(self, switch_ip):
if (switch_ip, '_retry_count') not in self._switch_state:
self.reset_switch_retry_count(switch_ip)
return self._switch_state[switch_ip, '_retry_count']
def get_switch_state(self):
switch_connections = []
for switch_ip, attr in self._switch_state:
if str(attr) == '_connect_active':
switch_connections.append(switch_ip)
return switch_connections
def is_switch_configurable(self, switch_ip):
if self.monitor_timeout > 0:
return self.get_switch_ip_and_active_state(switch_ip)
else:
return True
    def choose_to_reraise_driver_exception(self, switch_ip, func_name):
        """Decide whether a driver exception should propagate.

        With replay monitoring enabled, the switch is flagged inactive and
        the exception is swallowed (the monitor will retry later); otherwise
        the exception is re-raised to the caller.
        """
        if self.monitor_timeout > 0:
            self.register_switch_as_inactive(switch_ip, func_name)
            return False
        else:
            return True
    def _valid_network_segment(self, segment):
        """Return True if the segment's physical network is managed here.

        An unset managed_physical_network option means every physical
        network is considered managed.
        """
        return (cfg.CONF.ml2_cisco.managed_physical_network is None or
                cfg.CONF.ml2_cisco.managed_physical_network ==
                segment[api.PHYSICAL_NETWORK])
    def _is_supported_deviceowner(self, port):
        """Return True for compute instance ports and DHCP ports."""
        return (port['device_owner'].startswith('compute') or
                port['device_owner'] == n_const.DEVICE_OWNER_DHCP)
    def _is_status_active(self, port):
        """Return True when the port's status is ACTIVE."""
        return port['status'] == n_const.PORT_STATUS_ACTIVE
def _get_switch_info(self, host_id):
host_connections = []
for switch_ip, attr in self._nexus_switches:
if str(attr) == str(host_id):
for port_id in (
self._nexus_switches[switch_ip, attr].split(',')):
if ':' in port_id:
intf_type, port = port_id.split(':')
else:
intf_type, port = 'ethernet', port_id
host_connections.append((switch_ip, intf_type, port))
if not host_connections:
LOG.warn(HOST_NOT_FOUND, host_id)
return host_connections
def get_switch_ips(self):
switch_connections = []
for switch_ip, attr in self._nexus_switches:
if str(attr) == 'username':
switch_connections.append(switch_ip)
return switch_connections
def _get_switch_nve_info(self, host_id):
host_nve_connections = []
for switch_ip, attr in self._nexus_switches:
if str(attr) == str(host_id):
host_nve_connections.append(switch_ip)
if not host_nve_connections:
LOG.warn(HOST_NOT_FOUND, host_id)
return host_nve_connections
    def _configure_nve_db(self, vni, device_id, mcast_group, host_id):
        """Create the nexus NVE database entry.
        Called during update precommit port event.
        """
        host_nve_connections = self._get_switch_nve_info(host_id)
        for switch_ip in host_nve_connections:
            # Only add a binding if one does not already exist for this
            # (vni, switch, device) triple.
            if not nxos_db.get_nve_vni_member_bindings(vni, switch_ip,
                                                       device_id):
                nxos_db.add_nexusnve_binding(vni, switch_ip, device_id,
                                             mcast_group)
    def _configure_nve_member(self, vni, device_id, mcast_group, host_id):
        """Add "member vni" configuration to the NVE interface.
        Called during update postcommit port event.

        Skips switches that are currently unconfigurable (inactive while
        replay monitoring is on); driver failures are either re-raised or
        swallowed per choose_to_reraise_driver_exception().
        """
        host_nve_connections = self._get_switch_nve_info(host_id)
        for switch_ip in host_nve_connections:
            if self.is_switch_configurable(switch_ip) is False:
                self.reset_switch_retry_count(switch_ip)
                continue
            # If configured to set global VXLAN values then
            # If this is the first database entry for this switch_ip
            # then configure the "interface nve" entry on the switch.
            if cfg.CONF.ml2_cisco.vxlan_global_config:
                nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
                if len(nve_bindings) == 1:
                    LOG.debug("Nexus: create NVE interface")
                    # Loopback source interface defaults to '0' when not
                    # configured for this switch.
                    loopback = self._nexus_switches.get(
                        (switch_ip, 'nve_src_intf'), '0')
                    try:
                        self.driver.enable_vxlan_feature(switch_ip,
                            const.NVE_INT_NUM, loopback)
                    except Exception:
                        with excutils.save_and_reraise_exception() as ctxt:
                            ctxt.reraise = (
                                self.choose_to_reraise_driver_exception(
                                    switch_ip, 'enable_vxlan_feature'))
                        continue
            # If this is the first database entry for this (VNI, switch_ip)
            # then configure the "member vni #" entry on the switch.
            member_bindings = nxos_db.get_nve_vni_switch_bindings(vni,
                                                                  switch_ip)
            if len(member_bindings) == 1:
                LOG.debug("Nexus: add member")
                try:
                    self.driver.create_nve_member(switch_ip, const.NVE_INT_NUM,
                                                  vni, mcast_group)
                except Exception:
                    with excutils.save_and_reraise_exception() as ctxt:
                        ctxt.reraise = (
                            self.choose_to_reraise_driver_exception(switch_ip,
                                'create_nve_member'))
    def _delete_nve_db(self, vni, device_id, mcast_group, host_id):
        """Delete the nexus NVE database entry.
        Called during delete precommit port event.
        """
        # Remove every binding recorded for this (vni, device) pair,
        # regardless of host; mcast_group/host_id are unused here.
        rows = nxos_db.get_nve_vni_deviceid_bindings(vni, device_id)
        for row in rows:
            nxos_db.remove_nexusnve_binding(vni, row.switch_ip, device_id)
    def _delete_nve_member(self, vni, device_id, mcast_group, host_id):
        """Remove "member vni" configuration from the NVE interface.
        Called during delete postcommit port event.

        The member is removed only once no bindings remain for the
        (vni, switch) pair; the NVE feature itself is disabled when the
        switch has no NVE bindings left and global config is enabled.
        """
        host_nve_connections = self._get_switch_nve_info(host_id)
        for switch_ip in host_nve_connections:
            if self.is_switch_configurable(switch_ip) is False:
                self.reset_switch_retry_count(switch_ip)
                continue
            try:
                if not nxos_db.get_nve_vni_switch_bindings(vni, switch_ip):
                    self.driver.delete_nve_member(switch_ip,
                        const.NVE_INT_NUM, vni)
                if (cfg.CONF.ml2_cisco.vxlan_global_config and
                    not nxos_db.get_nve_switch_bindings(switch_ip)):
                    self.driver.disable_vxlan_feature(switch_ip)
            except Exception:
                with excutils.save_and_reraise_exception() as ctxt:
                    ctxt.reraise = (
                        self.choose_to_reraise_driver_exception(switch_ip,
                            '(delete_nve_member||disable_vxlan_feature)'))
    def _configure_nxos_db(self, vlan_id, device_id, host_id, vni,
                           is_provider_vlan):
        """Create the nexus database entry.
        Called during update precommit port event.

        Adds one port binding per (switch, interface) connection of the
        host, unless an identical binding already exists (EAFP via
        NexusPortBindingNotFound).
        """
        host_connections = self._get_switch_info(host_id)
        for switch_ip, intf_type, nexus_port in host_connections:
            port_id = '%s:%s' % (intf_type, nexus_port)
            try:
                nxos_db.get_nexusport_binding(port_id, vlan_id, switch_ip,
                                              device_id)
            except excep.NexusPortBindingNotFound:
                nxos_db.add_nexusport_binding(port_id, str(vlan_id), str(vni),
                                              switch_ip, device_id,
                                              is_provider_vlan)
def _configure_port_binding(self, is_provider_vlan, duplicate_type,
                            switch_ip, vlan_id,
                            intf_type, nexus_port, vni):
    """Conditionally calls vlan and port Nexus drivers.

    Depending on duplicate_type, either create + trunk the vlan,
    only create it, or only trunk it on the given interface.
    """
    # This implies VLAN, VNI, and Port are all duplicate.
    # Then there is nothing to configure in Nexus.
    if duplicate_type == const.DUPLICATE_PORT:
        return
    if is_provider_vlan:
        vlan_name = cfg.CONF.ml2_cisco.provider_vlan_name_prefix
        auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
        auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
    else:
        vlan_name = cfg.CONF.ml2_cisco.vlan_name_prefix
        auto_create = True
        auto_trunk = True
    # Truncate the prefix so prefix + vlan_id fits the Nexus name limit.
    vlan_name_max_len = const.NEXUS_MAX_VLAN_NAME_LEN - len(str(vlan_id))
    if len(vlan_name) > vlan_name_max_len:
        vlan_name = vlan_name[:vlan_name_max_len]
        # warning() replaces the deprecated warn() alias.
        LOG.warning(_LW("Nexus: truncating vlan name to %s"), vlan_name)
    vlan_name = vlan_name + str(vlan_id)
    # if type DUPLICATE_VLAN, don't create vlan
    if duplicate_type == const.DUPLICATE_VLAN:
        auto_create = False
    if auto_create and auto_trunk:
        LOG.debug("Nexus: create & trunk vlan %s", vlan_name)
        self.driver.create_and_trunk_vlan(
            switch_ip, vlan_id, vlan_name, intf_type, nexus_port,
            vni)
    elif auto_create:
        LOG.debug("Nexus: create vlan %s", vlan_name)
        self.driver.create_vlan(switch_ip, vlan_id, vlan_name, vni)
    elif auto_trunk:
        LOG.debug("Nexus: trunk vlan %s", vlan_name)
        self.driver.enable_vlan_on_trunk_int(switch_ip, vlan_id,
                                             intf_type, nexus_port)
def _configure_host_entries(self, vlan_id, device_id, host_id, vni,
                            is_provider_vlan):
    """Create a nexus switch entry.

    if needed, create a VLAN in the appropriate switch or port and
    configure the appropriate interfaces for this VLAN.
    Called during update postcommit port event.
    """
    host_connections = self._get_switch_info(host_id)
    # (nexus_port,switch_ip) will be unique in each iteration.
    # But switch_ip will repeat if host has >1 connection to same switch.
    # So track which switch_ips already have vlan created in this loop.
    vlan_already_created = []
    for switch_ip, intf_type, nexus_port in host_connections:
        # Skip switches currently marked unconfigurable; reset their
        # retry count so they are retried once reachable again.
        if self.is_switch_configurable(switch_ip) is False:
            self.reset_switch_retry_count(switch_ip)
            continue
        # The VLAN needs to be created on the switch if no other
        # instance has been placed in this VLAN on a different host
        # attached to this switch. Search the existing bindings in the
        # database. If all the instance_id in the database match the
        # current device_id, then create the VLAN, but only once per
        # switch_ip. Otherwise, just trunk.
        all_bindings = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
        previous_bindings = [row for row in all_bindings
                             if row.instance_id != device_id]
        duplicate_port = [row for row in all_bindings
                          if row.instance_id != device_id and
                          row.port_id == intf_type + ':' + nexus_port]
        if duplicate_port:
            duplicate_type = const.DUPLICATE_PORT
        elif previous_bindings or (switch_ip in vlan_already_created):
            duplicate_type = const.DUPLICATE_VLAN
        else:
            vlan_already_created.append(switch_ip)
            duplicate_type = const.NO_DUPLICATE
        try:
            self._configure_port_binding(is_provider_vlan,
                                         duplicate_type,
                                         switch_ip, vlan_id,
                                         intf_type, nexus_port,
                                         vni)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                # Re-raise only if the per-switch retry policy says so.
                ctxt.reraise = (
                    self.choose_to_reraise_driver_exception(switch_ip,
                        '_configure_port_binding'))
def configure_switch_entries(self, switch_ip, port_bindings):
    """Create a nexus switch entry in Nexus.

    The port_bindings is sorted by vlan_id, vni, port_id.
    When there is a change in vlan_id or vni, then vlan
    data is configured in Nexus device.
    Otherwise we check if there is a change in port_id
    where we configure the port with vlan trunk config.
    Called during switch replay event.
    """
    prev_vlan = -1
    prev_vni = -1
    prev_port = None
    port_bindings.sort(key=lambda x: (x.vlan_id, x.vni, x.port_id))
    for port in port_bindings:
        if ':' in port.port_id:
            intf_type, nexus_port = port.port_id.split(':')
        else:
            # Bare port ids default to ethernet interfaces.
            intf_type, nexus_port = 'ethernet', port.port_id
        # Classify against the previous (sorted) binding so the vlan is
        # only created once per (vlan_id, vni) run, and identical ports
        # are skipped entirely.
        if port.vlan_id == prev_vlan and port.vni == prev_vni:
            duplicate_type = const.DUPLICATE_VLAN
            if port.port_id == prev_port:
                duplicate_type = const.DUPLICATE_PORT
        else:
            duplicate_type = const.NO_DUPLICATE
        try:
            self._configure_port_binding(
                port.is_provider_vlan, duplicate_type,
                switch_ip, port.vlan_id,
                intf_type, nexus_port,
                port.vni)
        except Exception as e:
            # Record the failure per the retry policy, log it, and stop
            # the replay for this switch.
            self.choose_to_reraise_driver_exception(switch_ip,
                'replay _configure_port_binding')
            LOG.error(_LE("Failed to configure port binding "
                          "for switch %(switch_ip)s, vlan %(vlan)s "
                          "vni %(vni)s, port %(port)s, "
                          "reason %(reason)s"),
                      {'switch_ip': switch_ip,
                       'vlan': port.vlan_id,
                       'vni': port.vni,
                       'port': port.port_id,
                       'reason': e})
            break
        prev_vlan = port.vlan_id
        prev_vni = port.vni
        prev_port = port.port_id
def _delete_nxos_db(self, vlan_id, device_id, host_id, vni,
                    is_provider_vlan):
    """Delete the nexus database entry.

    Called during delete precommit port event. Missing bindings are
    not an error: there is simply nothing to remove.
    """
    try:
        bindings = nxos_db.get_nexusvm_bindings(vlan_id, device_id)
    except excep.NexusPortBindingNotFound:
        return
    for binding in bindings:
        nxos_db.remove_nexusport_binding(
            binding.port_id, binding.vlan_id, binding.vni,
            binding.switch_ip, binding.instance_id,
            binding.is_provider_vlan)
def _delete_switch_entry(self, vlan_id, device_id, host_id, vni,
                         is_provider_vlan):
    """Delete the nexus switch entry.

    By accessing the current db entries determine if switch
    configuration can be removed.
    Called during delete postcommit port event.
    """
    host_connections = self._get_switch_info(host_id)
    # (nexus_port,switch_ip) will be unique in each iteration.
    # But switch_ip will repeat if host has >1 connection to same switch.
    # So track which switch_ips already have vlan removed in this loop.
    vlan_already_removed = []
    for switch_ip, intf_type, nexus_port in host_connections:
        # Skip switches currently marked unconfigurable.
        if self.is_switch_configurable(switch_ip) is False:
            self.reset_switch_retry_count(switch_ip)
            continue
        # if there are no remaining db entries using this vlan on this
        # nexus switch port then remove vlan from the switchport trunk.
        port_id = '%s:%s' % (intf_type, nexus_port)
        auto_create = True
        auto_trunk = True
        if is_provider_vlan:
            auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
            auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
        try:
            nxos_db.get_port_vlan_switch_binding(port_id, vlan_id,
                                                 switch_ip)
        except excep.NexusPortBindingNotFound:
            pass
        else:
            # Port/vlan binding is still in use; leave this connection.
            continue
        if auto_trunk:
            try:
                self.driver.disable_vlan_on_trunk_int(switch_ip,
                    vlan_id, intf_type, nexus_port)
            except Exception:
                with excutils.save_and_reraise_exception() as ctxt:
                    # Re-raise only per the per-switch retry policy.
                    ctxt.reraise = (
                        self.choose_to_reraise_driver_exception(
                            switch_ip,
                            'disable_vlan_on_trunk_int'))
                continue
        # if there are no remaining db entries using this vlan on this
        # nexus switch then remove the vlan.
        if auto_create:
            try:
                nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
            except excep.NexusPortBindingNotFound:
                # Do not perform a second time on same switch
                if switch_ip not in vlan_already_removed:
                    try:
                        self.driver.delete_vlan(switch_ip, vlan_id)
                    except Exception:
                        with excutils.save_and_reraise_exception() as ctxt:
                            ctxt.reraise = (
                                self.choose_to_reraise_driver_exception(
                                    switch_ip, 'delete_vlan'))
                    vlan_already_removed.append(switch_ip)
def _is_segment_nexus_vxlan(self, segment):
    """Return True if the segment's network type is nexus VXLAN."""
    network_type = segment[api.NETWORK_TYPE]
    return network_type == const.TYPE_NEXUS_VXLAN
def _get_segments(self, top_segment, bottom_segment):
    """Return a (vlan_segment, vxlan_segment) pair.

    vxlan_segment is None when no nexus VXLAN segment is configured;
    both are None when there is no top segment at all.
    """
    if top_segment is None:
        return None, None
    if self._is_segment_nexus_vxlan(top_segment):
        # VXLAN on top: the VLAN is the dynamically bound bottom segment.
        return bottom_segment, top_segment
    return top_segment, None
def _is_vm_migrating(self, context, vlan_segment, orig_vlan_segment):
    """Return True when the update event represents a VM migration.

    Migration is detected when the current port has no vlan segment
    while the original did, and the host id changed. Previously this
    returned None implicitly in the non-migrating case; return False
    explicitly so callers always receive a boolean (truthiness is
    unchanged for existing callers).
    """
    if not vlan_segment and orig_vlan_segment:
        return (context.current.get(portbindings.HOST_ID) !=
                context.original.get(portbindings.HOST_ID))
    return False
def _log_missing_segment(self):
    """Warn that an event arrived without a segment to process."""
    # warning() replaces the deprecated warn() alias.
    LOG.warning(_LW("Nexus: Segment is None, Event not processed."))
def _is_valid_segment(self, segment):
    """Return True if the segment is a VLAN segment this driver handles.

    Logs a warning and returns False for non-VLAN or unsupported
    segments; a missing segment is also logged and rejected.
    """
    valid_segment = True
    if segment:
        if (segment[api.NETWORK_TYPE] != p_const.TYPE_VLAN or
            not self._valid_network_segment(segment)):
            # warning() replaces the deprecated warn() alias.
            LOG.warning(_LW("Nexus: Segment is an invalid type or not "
                            "supported by this driver. Network type = "
                            "%(network_type)s Physical network = "
                            "%(phy_network)s. Event not processed."),
                        {'network_type': segment[api.NETWORK_TYPE],
                         'phy_network': segment[api.PHYSICAL_NETWORK]})
            valid_segment = False
    else:
        self._log_missing_segment()
        valid_segment = False
    return valid_segment
def _port_action_vlan(self, port, segment, func, vni):
    """Verify configuration and then process event."""
    # Verify segment.
    if not self._is_valid_segment(segment):
        return
    device_id = port.get('device_id')
    host_id = port.get(portbindings.HOST_ID)
    vlan_id = segment.get(api.SEGMENTATION_ID)
    # TODO(rpothier) Add back in provider segment support.
    is_provider = False
    settings = {"vlan_id": vlan_id,
                "device_id": device_id,
                "host_id": host_id,
                "is_provider": is_provider is not None}
    # NOTE(review): "is_provider is not None" is always True since
    # is_provider is hard-coded False above, so it can never be flagged
    # as missing -- presumably intentional while provider support is
    # disabled; confirm when re-enabling provider segments.
    missing_fields = [field for field, value in settings.items()
                      if not value]
    if not missing_fields:
        func(vlan_id, device_id, host_id, vni, is_provider)
    else:
        raise excep.NexusMissingRequiredFields(
            fields=' '.join(missing_fields))
def _port_action_vxlan(self, port, segment, func):
    """Verify configuration and then process event.

    Returns the segment's vni on success; raises
    NexusMissingRequiredFields when any required field is absent.
    """
    # If the segment is None, just log a warning message and return.
    if segment is None:
        self._log_missing_segment()
        return
    device_id = port.get('device_id')
    # The multicast group rides in the physical network field for
    # VXLAN segments in this driver.
    mcast_group = segment.get(api.PHYSICAL_NETWORK)
    host_id = port.get(portbindings.HOST_ID)
    vni = segment.get(api.SEGMENTATION_ID)
    if vni and device_id and mcast_group and host_id:
        func(vni, device_id, mcast_group, host_id)
        return vni
    else:
        # Build a space-separated list of whichever fields are missing.
        fields = "vni " if not vni else ""
        fields += "device_id " if not device_id else ""
        fields += "mcast_group " if not mcast_group else ""
        fields += "host_id" if not host_id else ""
        raise excep.NexusMissingRequiredFields(fields=fields)
def _monitor_thread(self):
    """Run one connection check, then schedule the next run."""
    with self.monitor_lock:
        self.monitor.check_connections()
    # Re-arm a one-shot timer so the check repeats periodically.
    next_run = threading.Timer(self.monitor_timeout, self._monitor_thread)
    self.timer = next_run
    next_run.start()
def _stop_monitor_thread(self):
    """Cancel the pending monitor timer, if any."""
    if self.timer is None:
        return
    self.timer.cancel()
    self.timer = None
@lockutils.synchronized('cisco-nexus-portlock')
def update_port_precommit(self, context):
    """Update port pre-database transaction commit event.

    Maintains the NVE and port-binding database rows; no switch
    configuration happens here.
    """
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment,
        context.bottom_bound_segment)
    orig_vlan_segment, orig_vxlan_segment = self._get_segments(
        context.original_top_bound_segment,
        context.original_bound_segment
        if False else context.original_bottom_bound_segment)
    # if VM migration is occurring then remove previous database entry
    # else process update event.
    if self._is_vm_migrating(context, vlan_segment, orig_vlan_segment):
        vni = self._port_action_vxlan(context.original, orig_vxlan_segment,
            self._delete_nve_db) if orig_vxlan_segment else 0
        self._port_action_vlan(context.original, orig_vlan_segment,
                               self._delete_nxos_db, vni)
    else:
        if (self._is_supported_deviceowner(context.current) and
            self._is_status_active(context.current)):
            vni = self._port_action_vxlan(context.current, vxlan_segment,
                self._configure_nve_db) if vxlan_segment else 0
            self._port_action_vlan(context.current, vlan_segment,
                                   self._configure_nxos_db, vni)
@lockutils.synchronized('cisco-nexus-portlock')
def update_port_postcommit(self, context):
    """Update port non-database commit event.

    Applies (or removes, on migration) the corresponding switch
    configuration for the segments recorded in precommit.
    """
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment,
        context.bottom_bound_segment)
    orig_vlan_segment, orig_vxlan_segment = self._get_segments(
        context.original_top_bound_segment,
        context.original_bottom_bound_segment)
    # if VM migration is occurring then remove previous nexus switch entry
    # else process update event.
    if self._is_vm_migrating(context, vlan_segment, orig_vlan_segment):
        vni = self._port_action_vxlan(context.original, orig_vxlan_segment,
            self._delete_nve_member) if orig_vxlan_segment else 0
        self._port_action_vlan(context.original, orig_vlan_segment,
                               self._delete_switch_entry, vni)
    else:
        if (self._is_supported_deviceowner(context.current) and
            self._is_status_active(context.current)):
            vni = self._port_action_vxlan(context.current, vxlan_segment,
                self._configure_nve_member) if vxlan_segment else 0
            self._port_action_vlan(context.current, vlan_segment,
                                   self._configure_host_entries, vni)
@lockutils.synchronized('cisco-nexus-portlock')
def delete_port_precommit(self, context):
    """Delete port pre-database commit event.

    Removes the NVE and port-binding database rows for the port.
    """
    if self._is_supported_deviceowner(context.current):
        vlan_segment, vxlan_segment = self._get_segments(
            context.top_bound_segment,
            context.bottom_bound_segment)
        vni = self._port_action_vxlan(context.current, vxlan_segment,
            self._delete_nve_db) if vxlan_segment else 0
        self._port_action_vlan(context.current, vlan_segment,
                               self._delete_nxos_db, vni)
@lockutils.synchronized('cisco-nexus-portlock')
def delete_port_postcommit(self, context):
    """Delete port non-database commit event.

    Removes the NVE member and switch vlan/trunk configuration that is
    no longer referenced by any database entry.
    """
    if self._is_supported_deviceowner(context.current):
        vlan_segment, vxlan_segment = self._get_segments(
            context.top_bound_segment,
            context.bottom_bound_segment)
        vni = self._port_action_vxlan(context.current, vxlan_segment,
            self._delete_nve_member) if vxlan_segment else 0
        self._port_action_vlan(context.current, vlan_segment,
                               self._delete_switch_entry, vni)
def bind_port(self, context):
    """Bind a port, allocating a dynamic VLAN under nexus VXLAN segments.

    Only nexus VXLAN segments require action here; for those, a dynamic
    VLAN segment is allocated on the host's configured physical network
    and binding continues with that segment.
    """
    LOG.debug("Attempting to bind port %(port)s on network %(network)s",
              {'port': context.current['id'],
               'network': context.network.current['id']})
    for segment in context.segments_to_bind:
        if self._is_segment_nexus_vxlan(segment):
            # Find physical network setting for this host.
            host_id = context.current.get(portbindings.HOST_ID)
            host_connections = self._get_switch_info(host_id)
            if not host_connections:
                return
            # Use the first switch connection with a physnet configured;
            # the for/else raises when none of them have one.
            for switch_ip, attr2, attr3 in host_connections:
                physnet = self._nexus_switches.get((switch_ip, 'physnet'))
                if physnet:
                    break
            else:
                raise excep.PhysnetNotConfigured(host_id=host_id,
                    host_connections=host_connections)
            # Allocate dynamic vlan segment.
            vlan_segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
                            api.PHYSICAL_NETWORK: physnet}
            context.allocate_dynamic_segment(vlan_segment)
            # Retrieve the dynamically allocated segment.
            # Database has provider_segment dictionary key.
            network_id = context.current['network_id']
            dynamic_segment = ml2_db.get_dynamic_segment(
                db_api.get_session(), network_id, physnet)
            # Have other drivers bind the VLAN dynamic segment.
            if dynamic_segment:
                context.continue_binding(segment[api.ID],
                                         [dynamic_segment])
            else:
                raise excep.NoDynamicSegmentAllocated(
                    network_id=network_id, physnet=physnet)
        else:
            LOG.debug("No binding required for segment ID %(id)s, "
                      "segment %(seg)s, phys net %(physnet)s, and "
                      "network type %(nettype)s",
                      {'id': segment[api.ID],
                       'seg': segment[api.SEGMENTATION_ID],
                       'physnet': segment[api.PHYSICAL_NETWORK],
                       'nettype': segment[api.NETWORK_TYPE]})
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.virt.libvirt import fakelibvirt
from jacket.compute import utils
from jacket.compute.virt.libvirt import host
from jacket.compute.virt.libvirt.volume import volume
# Fixed UUID returned by FakeSecret in these tests.
SECRET_UUID = '2a0a0d6c-babf-454d-b93e-9ac9957b95e0'
class FakeSecret(object):
    """Minimal stand-in for a libvirt secret object."""

    def __init__(self):
        self.uuid = SECRET_UUID

    def getUUIDString(self):
        """Return the secret UUID (libvirt-style accessor)."""
        return self.uuid

    def UUIDString(self):
        """Alias for getUUIDString, mirroring the libvirt API."""
        return self.uuid

    def setValue(self, value):
        """Store the secret value; return 0 like the real libvirt call."""
        self.value = value
        return 0

    def getValue(self, value):
        """Return the stored value; the argument is ignored."""
        return self.value

    def undefine(self):
        """Clear the stored value; return 0 like the real libvirt call."""
        self.value = None
        return 0
class LibvirtVolumeBaseTestCase(test.NoDBTestCase):
    """Contains common setup and helper methods for libvirt volume tests."""

    def setUp(self):
        super(LibvirtVolumeBaseTestCase, self).setUp()
        self.executes = []

        # Record every command line instead of executing anything.
        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            return None, None

        self.stubs.Set(utils, 'execute', fake_execute)
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

        # Minimal driver stand-in exposing only what the volume drivers
        # under test touch: a Host and an empty block-device list.
        class FakeLibvirtDriver(object):
            def __init__(self):
                self._host = host.Host("qemu:///system")

            def _get_all_block_devices(self):
                return []

        self.fake_conn = FakeLibvirtDriver()
        self.connr = {
            'ip': '127.0.0.1',
            'initiator': 'fake_initiator',
            'host': 'fake_host'
        }
        self.disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        self.name = 'volume-00000001'
        self.location = '10.0.2.15:3260'
        self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
        self.vol = {'id': 1, 'name': self.name}
        self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
        self.user = 'foo'

    def _assertFileTypeEquals(self, tree, file_path):
        # Helper: assert the disk XML describes a file-backed disk at
        # file_path.
        self.assertEqual('file', tree.get('type'))
        self.assertEqual(file_path, tree.find('./source').get('file'))
class LibvirtISCSIVolumeBaseTestCase(LibvirtVolumeBaseTestCase):
    """Contains common setup and helper methods for iSCSI volume tests."""

    def iscsi_connection(self, volume, location, iqn, auth=False,
                         transport=None):
        """Build a fake iSCSI connection_info dict for the given volume."""
        dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
        if transport is not None:
            dev_name = 'pci-0000:00:00.0-' + dev_name
        data = {
            'volume_id': volume['id'],
            'target_portal': location,
            'target_iqn': iqn,
            'target_lun': 1,
            'device_path': '/dev/disk/by-path/%s' % dev_name,
            'qos_specs': {
                'total_bytes_sec': '102400',
                'read_iops_sec': '200',
            },
        }
        if auth:
            data['auth_method'] = 'CHAP'
            data['auth_username'] = 'foo'
            data['auth_password'] = 'bar'
        return {
            'driver_volume_type': 'iscsi',
            'data': data,
        }
class LibvirtVolumeTestCase(LibvirtISCSIVolumeBaseTestCase):
    """Tests for the generic libvirt volume driver XML generation."""

    def _assertDiskInfoEquals(self, tree, disk_info):
        # Helper: the generated <disk> element must reflect disk_info.
        self.assertEqual(disk_info['type'], tree.get('device'))
        self.assertEqual(disk_info['bus'], tree.find('./target').get('bus'))
        self.assertEqual(disk_info['dev'], tree.find('./target').get('dev'))

    def _test_libvirt_volume_driver_disk_info(self):
        # Shared body for the disk_info variations below.
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertDiskInfoEquals(tree, self.disk_info)

    def test_libvirt_volume_disk_info_type(self):
        self.disk_info['type'] = 'cdrom'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_dev(self):
        self.disk_info['dev'] = 'hdc'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_disk_info_bus(self):
        self.disk_info['bus'] = 'scsi'
        self._test_libvirt_volume_driver_disk_info()

    def test_libvirt_volume_driver_serial(self):
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual('block', tree.get('type'))
        self.assertEqual('fake_serial', tree.find('./serial').text)
        self.assertIsNone(tree.find('./blockio'))
        self.assertIsNone(tree.find("driver[@discard]"))

    def test_libvirt_volume_driver_blockio(self):
        # Block sizes in the connection data should emit a <blockio> node.
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'logical_block_size': '4096',
                'physical_block_size': '4096',
            },
            'serial': 'fake_serial',
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        blockio = tree.find('./blockio')
        self.assertEqual('4096', blockio.get('logical_block_size'))
        self.assertEqual('4096', blockio.get('physical_block_size'))

    def test_libvirt_volume_driver_iotune(self):
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'qos_specs': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        iotune = tree.find('./iotune')
        # ensure invalid qos_specs is ignored
        self.assertIsNone(iotune)
        specs = {
            'total_bytes_sec': '102400',
            'read_bytes_sec': '51200',
            'write_bytes_sec': '0',
            'total_iops_sec': '0',
            'read_iops_sec': '200',
            'write_iops_sec': '200',
        }
        del connection_info['data']['qos_specs']
        connection_info['data'].update(dict(qos_specs=specs))
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
        self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
        self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
        self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)

    def test_libvirt_volume_driver_readonly(self):
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                "device_path": "/foo",
                'access_mode': 'bar',
            },
        }
        disk_info = {
            "bus": "virtio",
            "dev": "vde",
            "type": "disk",
        }
        # An unrecognized access_mode must raise.
        self.assertRaises(exception.InvalidVolumeAccessMode,
                          libvirt_driver.get_config,
                          connection_info, self.disk_info)
        connection_info['data']['access_mode'] = 'rw'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNone(readonly)
        connection_info['data']['access_mode'] = 'ro'
        conf = libvirt_driver.get_config(connection_info, disk_info)
        tree = conf.format_dom()
        readonly = tree.find('./readonly')
        self.assertIsNotNone(readonly)

    # NOTE(review): the patch target omits the 'jacket.' prefix used by the
    # imports above -- confirm 'compute' is importable under that name.
    @mock.patch('compute.virt.libvirt.host.Host.has_min_version')
    def test_libvirt_volume_driver_discard_true(self, mock_has_min_version):
        # Check the discard attrib is present in driver section
        mock_has_min_version.return_value = True
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'discard': True,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        driver_node = tree.find("driver[@discard]")
        self.assertIsNotNone(driver_node)
        self.assertEqual('unmap', driver_node.attrib['discard'])

    def test_libvirt_volume_driver_discard_false(self):
        # Check the discard attrib is not present in driver section
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'discard': False,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertIsNone(tree.find("driver[@discard]"))

    # NOTE(review): patch target has the same 'jacket.'-prefix question as
    # test_libvirt_volume_driver_discard_true above.
    @mock.patch('compute.virt.libvirt.host.Host.has_min_version')
    def test_libvirt_volume_driver_discard_true_bad_version(
            self, mock_has_min_version):
        # Check the discard attrib is not present in driver section
        mock_has_min_version.return_value = False
        libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
        connection_info = {
            'driver_volume_type': 'fake',
            'data': {
                'device_path': '/foo',
                'discard': True,
            },
            'serial': 'fake_serial',
        }
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertIsNone(tree.find("driver[@discard]"))
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for nodes."""
import abc
import enum
import os
import shlex
import subprocess
import sys
import threading
import time
import traceback
from . import node_base
from typing import Any, Dict, IO, List, Optional, Union
class ProcessStatus(enum.Enum):
  """Status of a node's underlying process or thread."""

  # Use number values so we can sort based on value.
  Finished = 0
  """The node has completed its task and shut down."""

  Running = 1
  """The node is still running."""

  Errored = 2
  """The node has failed."""
class NodeBase(object):
  """A base class for nodes that run a single subprocess."""

  # NOTE(review): @abc.abstractmethod only takes effect with an ABCMeta
  # metaclass (e.g. "class NodeBase(abc.ABC)"); as written here nothing
  # prevents instantiating a subclass that fails to override these methods.
  @abc.abstractmethod
  def __init__(self) -> None:
    # Set by subclasses in start() via _create_process().
    self._process: Optional[subprocess.Popen] = None

  def __del__(self) -> None:
    # If the process isn't stopped by now, stop it here. It is preferable to
    # explicitly call stop().
    self.stop(None)

  @abc.abstractmethod
  def start(self):
    """Start the subprocess.

    Should be overridden by the subclass to construct a command line, call
    self._create_process, and assign the result to self._process.
    """
    pass

  # NOTE(review): env has a mutable default ({}); it is only read here, but
  # when merge_env is False the shared default dict is returned to Popen --
  # consider env=None with a local default.
  def _create_process(self,
                      args: Union[str, List[str]],
                      env: Dict[str, str] = {},
                      merge_env: bool = True,
                      stdout: Union[int, IO[Any], None] = None,
                      stderr: Union[int, IO[Any], None] = None,
                      shell: bool = False, **kwargs) -> subprocess.Popen:
    """A central point to create subprocesses, so that we can debug the
    command-line arguments.

    Args:
      args: An array of strings if shell is False, or a single string is shell
        is True; the command line of the subprocess.
      env: A dictionary of environment variables to pass to the subprocess.
      merge_env: If true, merge env with the parent process environment.
      shell: If true, args must be a single string, which will be executed as a
        shell command.

    Returns:
      The Popen object of the subprocess.
    """
    if merge_env:
      child_env = os.environ.copy()
      child_env.update(env)
    else:
      child_env = env
    # Print arguments formatted as output from bash -x would be.
    # This makes it easy to see the arguments and easy to copy/paste them for
    # debugging in a shell.
    if shell:
      assert isinstance(args, str)
      print('+ ' + args)
    else:
      assert type(args) is list
      print('+ ' + ' '.join([shlex.quote(arg) for arg in args]))
    # stdin is always closed off; the caller controls stdout/stderr.
    return subprocess.Popen(args,
                            env=child_env,
                            stdin=subprocess.DEVNULL,
                            stdout=stdout, stderr=stderr,
                            shell=shell, **kwargs)

  def check_status(self) -> ProcessStatus:
    """Returns the current ProcessStatus of the node."""
    if not self._process:
      raise ValueError('Must have a process to check')
    # poll() refreshes returncode without blocking.
    self._process.poll()
    if self._process.returncode is None:
      return ProcessStatus.Running
    if self._process.returncode == 0:
      return ProcessStatus.Finished
    else:
      return ProcessStatus.Errored

  def stop(self, status: Optional[ProcessStatus]) -> None:
    """Stop the subprocess if it's still running."""
    if self._process:
      # Slightly more polite than kill. Try this first.
      self._process.terminate()
      if self.check_status() == ProcessStatus.Running:
        # If it's not dead yet, wait 1 second.
        time.sleep(1)
        if self.check_status() == ProcessStatus.Running:
          # If it's still not dead, use kill.
          self._process.kill()
          # Wait for the process to die and read its exit code. There is no way
          # to ignore a kill signal, so this will happen quickly. If we don't do
          # this, it can create a zombie process.
          self._process.wait()
class PolitelyWaitOnFinish(node_base.NodeBase):
  """A mixin that makes stop() wait for the subprocess if status is Finished.

  This is as opposed to the base class behavior, in which stop() forces
  the subprocesses of a node to terminate.
  """

  def stop(self, status: Optional[ProcessStatus]) -> None:
    """Wait politely for a finished process, then defer to the base stop()."""
    should_wait = (self._process is not None
                   and status == ProcessStatus.Finished)
    if should_wait:
      try:
        print('Waiting for', self.__class__.__name__)
        self._process.wait(timeout=300)  # 5m timeout
      except subprocess.TimeoutExpired:
        traceback.print_exc()  # print the exception
        # Fall through.
    super().stop(status)
class ThreadedNodeBase(NodeBase):
  """A base class for nodes that run a thread.

  The thread repeats some callback in a background thread.
  """

  def __init__(self, thread_name: str, continue_on_exception: bool,
               sleep_time: float):
    super().__init__()
    self._status = ProcessStatus.Finished
    self._thread_name = thread_name
    self._continue_on_exception = continue_on_exception
    self._sleep_time = sleep_time
    self._thread = threading.Thread(target=self._thread_main, name=thread_name)

  def _thread_main(self) -> None:
    while self._status == ProcessStatus.Running:
      try:
        self._thread_single_pass()
      except Exception:
        # Previously a bare "except:", which would also trap SystemExit and
        # KeyboardInterrupt; catch Exception so those can still unwind.
        print('Exception in', self._thread_name, '-', sys.exc_info())
        if self._continue_on_exception:
          print(self.__class__.__name__+": 'Continuing.'")
        else:
          print(self.__class__.__name__+": 'Quitting.'")
          self._status = ProcessStatus.Errored
          return
      # Wait a little bit before performing the next pass.
      time.sleep(self._sleep_time)

  @abc.abstractmethod
  def _thread_single_pass(self) -> None:
    """Runs a single step of the thread loop.

    This is implemented by subclasses to do whatever it is they do. It will be
    called repeatedly by the base class from the node's background thread. If
    this method raises an exception, the behavior depends on the
    continue_on_exception argument in the constructor. If
    continue_on_exception is true, the thread will continue. Otherwise, an
    exception will stop the thread and therefore the node.
    """
    pass

  def start(self) -> None:
    self._status = ProcessStatus.Running
    self._thread.start()

  def stop(self, status: Optional[ProcessStatus]) -> None:
    # Flip the status first so _thread_main's loop exits, then join.
    self._status = ProcessStatus.Finished
    self._thread.join()

  def check_status(self) -> ProcessStatus:
    return self._status
| |
#!/usr/bin/env python
"""Process management on Windows.
On Windows, we want to create a subprocess such that:
* We can share a pair of named pipes with the subprocess for communication.
* We can share an open file handle with the subprocess.
* We can avoid implicitly sharing (leaking) any other handles.
This would normally be done as follows:
```
subprocess.Popen(
close_fds=True,
startupinfo=subprocess.STARTUPINFO(lpAttributeList={
"handle_list": [ pipe_input, pipe_output, extra_file_handle ]
})
```
However, on Python 3.6, the `subprocess` module doesn't support
the member `handle_list` in `STARTUPINFO.lpAttributeList`.
So for Python 3.6 we implement the `Process` class emulating that behavior.
Once we migrate to Python 3.7, this custom code can be removed.
"""
# win32 api types and class members violate naming conventions.
# pylint: disable=invalid-name
import contextlib
import ctypes
# pylint: disable=g-importing-member
from ctypes.wintypes import BOOL
from ctypes.wintypes import DWORD
from ctypes.wintypes import HANDLE
from ctypes.wintypes import LPBYTE
from ctypes.wintypes import LPCWSTR
from ctypes.wintypes import LPVOID
from ctypes.wintypes import LPWSTR
from ctypes.wintypes import WORD
# pylint: enable=g-importing-member
import os
import subprocess
from typing import List, Optional, NamedTuple
import win32api
import win32con
import win32event
import win32process
from grr_response_client.unprivileged.windows import sandbox
# Handles to the Win32 DLLs exporting the process-creation and SID APIs.
kernel32 = ctypes.WinDLL("kernel32")
advapi32 = ctypes.WinDLL("advapi32")
# Opaque pointer aliases for Win32 SID-related types.
PSID_AND_ATTRIBUTES = LPVOID
PSID = LPVOID
class SECURITY_CAPABILITIES(ctypes.Structure):
  """Mirrors the Win32 SECURITY_CAPABILITIES structure."""
  # Field order and types must match the Win32 ABI exactly.
  _fields_ = [
      ("AppContainerSid", PSID),
      ("Capabilities", PSID_AND_ATTRIBUTES),
      ("CapabilityCount", DWORD),
      ("Reserved", DWORD),
  ]
class SECURITY_ATTRIBUTES(ctypes.Structure):
  """Mirrors the Win32 SECURITY_ATTRIBUTES structure."""
  # Field order and types must match the Win32 ABI exactly.
  _fields_ = [
      ("nLength", DWORD),
      ("lpSecurityDescriptor", LPVOID),
      ("bInheritHandle", BOOL),
  ]
# Pointer type used in the CreateProcessW prototype below.
LPSECURITY_ATTRIBUTES = ctypes.POINTER(SECURITY_ATTRIBUTES)
class STARTUPINFOW(ctypes.Structure):
  """Mirrors the Win32 STARTUPINFOW structure."""
  # Field order and types must match the Win32 ABI exactly.
  _fields_ = [
      ("cb", DWORD),
      ("lpReserved", LPWSTR),
      ("lpDesktop", LPWSTR),
      ("lpTitle", LPWSTR),
      ("dwX", DWORD),
      ("dwY", DWORD),
      ("dwXSize", DWORD),
      ("dwYSize", DWORD),
      ("dwXCountChars", DWORD),
      ("dwYCountChars", DWORD),
      ("dwFillAttribute", DWORD),
      ("dwFlags", DWORD),
      ("wShowWindow", WORD),
      ("cbReserved2", WORD),
      ("lpReserved2", LPBYTE),
      ("hStdInput", HANDLE),
      ("hStdOutput", HANDLE),
      ("hStdError", HANDLE),
  ]
LPSTARTUPINFOW = ctypes.POINTER(STARTUPINFOW)
# The attribute list is opaque to us; treat it as a void pointer.
LPPROC_THREAD_ATTRIBUTE_LIST = LPVOID
class STARTUPINFOEXW(ctypes.Structure):
  """Mirrors the Win32 STARTUPINFOEXW struct.

  Used together with EXTENDED_STARTUPINFO_PRESENT to pass an attribute list
  (handle list, security capabilities) to CreateProcessW.
  """
  _fields_ = [
      ("StartupInfo", STARTUPINFOW),
      ("lpAttributeList", LPPROC_THREAD_ATTRIBUTE_LIST),
  ]
class PROCESS_INFORMATION(ctypes.Structure):
  """Mirrors the Win32 PROCESS_INFORMATION struct filled by CreateProcessW."""
  _fields_ = [
      ("hProcess", HANDLE),
      ("hThread", HANDLE),
      ("dwProcessId", DWORD),
      ("dwThreadId", DWORD),
  ]
LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION)
# Prototype for kernel32!CreateProcessW.
CreateProcessW = kernel32.CreateProcessW
CreateProcessW.argtypes = [
    LPCWSTR,  # lpApplicationName
    LPWSTR,  # lpCommandLine
    LPSECURITY_ATTRIBUTES,  # lpProcessAttributes
    LPSECURITY_ATTRIBUTES,  # lpThreadAttributes
    BOOL,  # bInheritHandles
    DWORD,  # dwCreationFlags
    LPVOID,  # lpEnvironment
    LPCWSTR,  # lpCurrentDirectory
    LPSTARTUPINFOW,  # lpStartupInfo
    LPPROCESS_INFORMATION,  # lpProcessInformation (out)
]
CreateProcessW.restype = BOOL
# ULONG_PTR is pointer-sized: pick the integer type matching the platform's
# pointer width (64-bit vs 32-bit Python).
if ctypes.sizeof(ctypes.c_void_p) == 8:
  ULONG_PTR = ctypes.c_ulonglong
else:
  ULONG_PTR = ctypes.c_ulong
SIZE_T = ULONG_PTR
PSIZE_T = ctypes.POINTER(SIZE_T)
# Prototype for kernel32!InitializeProcThreadAttributeList.  It is called
# twice: first with a NULL list to query the buffer size, then with the
# allocated buffer.
InitializeProcThreadAttributeList = kernel32.InitializeProcThreadAttributeList
InitializeProcThreadAttributeList.argtypes = [
    LPPROC_THREAD_ATTRIBUTE_LIST,  # lpAttributeList (NULL to query size)
    DWORD,  # dwAttributeCount
    DWORD,  # dwFlags (reserved, must be 0)
    PSIZE_T,  # lpSize (in/out)
]
InitializeProcThreadAttributeList.restype = BOOL
DWORD_PTR = ULONG_PTR
PVOID = LPVOID
# Prototype for kernel32!UpdateProcThreadAttribute.
UpdateProcThreadAttribute = kernel32.UpdateProcThreadAttribute
UpdateProcThreadAttribute.argtypes = [
    LPPROC_THREAD_ATTRIBUTE_LIST,  # lpAttributeList
    DWORD,  # dwFlags (reserved, must be 0)
    DWORD_PTR,  # Attribute (a PROC_THREAD_ATTRIBUTE_* key)
    PVOID,  # lpValue
    SIZE_T,  # cbSize
    PVOID,  # lpPreviousValue (NULL here)
    PSIZE_T,  # lpReturnSize (NULL here)
]
UpdateProcThreadAttribute.restype = BOOL
# Prototype for kernel32!DeleteProcThreadAttributeList, which frees a list
# set up by InitializeProcThreadAttributeList.  The API returns void, so
# declare restype explicitly instead of relying on ctypes' default (c_int),
# which would read a garbage return register.
DeleteProcThreadAttributeList = kernel32.DeleteProcThreadAttributeList
DeleteProcThreadAttributeList.argtypes = [LPPROC_THREAD_ATTRIBUTE_LIST]
DeleteProcThreadAttributeList.restype = None
# Prototype for kernel32!GetProcessId; returns 0 on failure.
GetProcessId = kernel32.GetProcessId
GetProcessId.argtypes = [HANDLE]
GetProcessId.restype = DWORD
# Prototype for advapi32!ConvertStringSidToSidW: parses a SID string (the
# sandbox's AppContainer SID) into a binary SID allocated by the system.
ConvertStringSidToSidW = advapi32.ConvertStringSidToSidW
ConvertStringSidToSidW.argtypes = [LPCWSTR, ctypes.POINTER(PSID)]
ConvertStringSidToSidW.restype = BOOL
# NOTE(review): MSDN documents LocalFree, not FreeSid, for buffers returned
# by ConvertStringSidToSid — verify this pairing.
FreeSid = advapi32.FreeSid
FreeSid.argtypes = [PSID]
FreeSid.restype = PVOID
class Error(Exception):
  """Raised when a win32 call in this module fails."""
# Win32 constants (values from the Windows SDK headers).
# dwCreationFlags bit telling CreateProcessW that lpStartupInfo points to a
# STARTUPINFOEXW.
EXTENDED_STARTUPINFO_PRESENT = 0x00080000
# Attribute keys for UpdateProcThreadAttribute.
PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x20002
PROC_THREAD_ATTRIBUTE_SECURITY_CAPABILITIES = 0x20009
class CpuTimes(NamedTuple):
  """CPU usage of a process, in seconds."""
  # User-mode CPU time in seconds.
  cpu_time: float
  # Kernel-mode CPU time in seconds.
  sys_time: float
class Process:
  """A subprocess.
  A pair of pipes is created and shared with the subprocess.
  """
  def __init__(self,
               args: List[str],
               extra_handles: Optional[List[int]] = None):
    """Constructor.
    Args:
      args: Command line to run, in argv format.
      extra_handles: Optional list of extra handles to share with the
        subprocess.
    Raises:
      Error: if a win32 call fails.
    """
    # Stack for resources which are needed by this instance.
    self._exit_stack = contextlib.ExitStack()
    # Stack for resources which are needed only during this method.
    with contextlib.ExitStack() as stack:
      sandbox_obj = self._exit_stack.enter_context(sandbox.CreateSandbox())
      size = SIZE_T()
      # First call with a NULL list only queries the required buffer size
      # (its failure return is expected and ignored).  2 = number of
      # attributes we may set (handle list + security capabilities).
      InitializeProcThreadAttributeList(None, 2, 0, ctypes.byref(size))
      attr_list = ctypes.create_string_buffer(size.value)
      res = InitializeProcThreadAttributeList(attr_list, 2, 0,
                                              ctypes.byref(size))
      if not res:
        raise Error("InitializeProcThreadAttributeList failed.")
      stack.callback(DeleteProcThreadAttributeList, attr_list)
      if extra_handles is None:
        extra_handles = []
      # Handles must be marked inheritable OR'ed with being listed in
      # PROC_THREAD_ATTRIBUTE_HANDLE_LIST for the child to receive them.
      for extra_handle in extra_handles:
        os.set_handle_inheritable(extra_handle, True)
      handle_list_size = len(extra_handles)
      # Contiguous HANDLE array as required by the attribute list API.
      handle_list = (HANDLE * handle_list_size)(
          *[HANDLE(handle) for handle in extra_handles])
      if handle_list:
        # Restrict inheritance to exactly these handles.
        res = UpdateProcThreadAttribute(attr_list, 0,
                                        PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
                                        handle_list, ctypes.sizeof(handle_list),
                                        None, None)
        if not res:
          raise Error("UpdateProcThreadAttribute failed.")
      if sandbox_obj.sid_string is not None:
        # Run the child inside the sandbox's AppContainer, identified by
        # its SID string.
        psid = PSID()
        if not ConvertStringSidToSidW(sandbox_obj.sid_string,
                                      ctypes.byref(psid)):
          raise Error("ConvertStringSidToSidW")
        stack.callback(FreeSid, psid)
        security_capabilities = SECURITY_CAPABILITIES()
        security_capabilities.AppContainerSid = psid
        res = UpdateProcThreadAttribute(
            attr_list, 0, PROC_THREAD_ATTRIBUTE_SECURITY_CAPABILITIES,
            ctypes.byref(security_capabilities),
            ctypes.sizeof(security_capabilities), None, None)
        if not res:
          raise Error("UpdateProcThreadAttribute failed.")
      siex = STARTUPINFOEXW()
      si = siex.StartupInfo
      # cb must cover the extended struct since we pass
      # EXTENDED_STARTUPINFO_PRESENT below.
      si.cb = ctypes.sizeof(siex)
      si.wShowWindow = False
      siex.lpAttributeList = ctypes.cast(attr_list,
                                         LPPROC_THREAD_ATTRIBUTE_LIST)
      if sandbox_obj.desktop_name is not None:
        # Confine the child to the sandbox's dedicated desktop.
        si.lpDesktop = sandbox_obj.desktop_name
      pi = PROCESS_INFORMATION()
      command_line = subprocess.list2cmdline(args)
      res = CreateProcessW(
          None,
          command_line,
          None,
          None,
          True,  # bInheritHandles; actual set limited by the handle list.
          EXTENDED_STARTUPINFO_PRESENT,
          None,
          None,
          ctypes.byref(si),
          ctypes.byref(pi),
      )
      if not res:
        raise Error("CreateProcessW failed.")
      # Keep the process handle for Wait/Stop; close it when the instance's
      # exit stack unwinds.  The thread handle is not needed.
      self._handle = pi.hProcess
      self._exit_stack.callback(win32api.CloseHandle, pi.hProcess)
      win32api.CloseHandle(pi.hThread)
      self.pid = GetProcessId(self._handle)
      if self.pid == 0:
        raise Error("GetProcessId failed.")
  def Stop(self) -> int:
    """Terminates the process and waits for the process to exit.
    Returns:
      The exit code.
    """
    # STILL_ACTIVE (259) signals the process has not exited yet.  (Standard
    # Win32 caveat: a process exiting with code 259 would be mis-detected.)
    exit_code = win32process.GetExitCodeProcess(self._handle)
    if exit_code == win32con.STILL_ACTIVE:
      win32process.TerminateProcess(self._handle, -1)
    return self.Wait()
  def Wait(self) -> int:
    """Waits for the process to exit.
    Returns:
      The exit code.
    Raises:
      Error: on system error.
    """
    res = win32event.WaitForSingleObject(self._handle, win32event.INFINITE)
    if res == win32event.WAIT_FAILED:
      raise Error("WaitForSingleObject failed.")
    exit_code = win32process.GetExitCodeProcess(self._handle)
    # Releases the sandbox and closes the process handle.
    self._exit_stack.close()
    return exit_code
  def GetCpuTimes(self) -> CpuTimes:
    """Returns the user/kernel CPU time consumed so far, in seconds."""
    times = win32process.GetProcessTimes(self._handle)
    # GetProcessTimes reports 100-nanosecond units; divide by 1e7 to get
    # seconds.
    return CpuTimes(
        cpu_time=times["UserTime"] / 10000000.0,
        sys_time=times["KernelTime"] / 10000000.0)
| |
# -*- coding: utf-8 -*-
"""
Public Python API to create CMS contents.
WARNING: None of the functions defined in this module checks for permissions.
You must implement the necessary permission checks in your own code before
calling these methods!
"""
import datetime
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.exceptions import FieldError
from django.core.exceptions import PermissionDenied
from django.core.exceptions import ValidationError
from django.template.defaultfilters import slugify
from django.template.loader import get_template
from django.utils import six
from django.utils.translation import activate
from cms import constants
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models.pagemodel import Page
from cms.models.permissionmodels import (PageUser, PagePermission, GlobalPagePermission,
ACCESS_PAGE_AND_DESCENDANTS)
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.utils import copy_plugins
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_language_list
from cms.utils.permissions import _thread_locals, current_user, has_page_change_permission
from menus.menu_pool import menu_pool
#===============================================================================
# Helpers/Internals
#===============================================================================
def generate_valid_slug(source, parent, language):
    """
    Generate a valid slug for a page from source for the given language.
    Parent is passed so we can make sure the slug is unique for this level in
    the page tree.
    """
    # Collect the slugs already taken among the siblings at this tree level.
    filters = {'language': language}
    if parent:
        filters['page__parent'] = parent
    else:
        filters['page__parent__isnull'] = True
    taken = set(
        Title.objects.filter(**filters).values_list('slug', flat=True))
    base = slugify(source)
    # Append an increasing numeric suffix until the slug is unique.
    candidate = base
    suffix = 1
    while candidate in taken:
        candidate = '%s-%s' % (base, suffix)
        suffix += 1
    return candidate
def _verify_apphook(apphook, namespace):
    """
    Verifies the apphook given is valid and returns the normalized form (name).

    Accepts a CMSApp instance, a CMSApp subclass, or the apphook name as a
    string.  Raises ValidationError when the apphook declares an ``app_name``
    but no ``namespace`` was supplied, and TypeError for unsupported types.
    """
    apphook_pool.discover_apps()
    if isinstance(apphook, CMSApp):
        try:
            # The instance's class must be registered with the apphook pool.
            assert apphook.__class__ in [app.__class__ for app in apphook_pool.apps.values()]
        except AssertionError:
            print(apphook_pool.apps.values())
            raise
        apphook_name = apphook.__class__.__name__
    elif hasattr(apphook, '__module__') and issubclass(apphook, CMSApp):
        # Bug fix: this branch used to ``return apphook.__name__`` early,
        # which skipped the app_name/namespace validation below for apphooks
        # passed as classes.  Fall through like the other branches instead.
        apphook_name = apphook.__name__
    elif isinstance(apphook, six.string_types):
        try:
            assert apphook in apphook_pool.apps
        except AssertionError:
            print(apphook_pool.apps.values())
            raise
        apphook_name = apphook
    else:
        raise TypeError("apphook must be string or CMSApp instance")
    if apphook_pool.apps[apphook_name].app_name and not namespace:
        raise ValidationError('apphook with app_name must define a namespace')
    return apphook_name
def _verify_plugin_type(plugin_type):
    """
    Verifies the given plugin_type is valid and returns a tuple of
    (plugin_model, plugin_type)
    """
    is_plugin_class = (hasattr(plugin_type, '__module__')
                       and issubclass(plugin_type, CMSPluginBase))
    if is_plugin_class:
        # Passed as a CMSPluginBase subclass: normalize to its name.
        plugin_pool.set_plugin_meta()
        assert plugin_type in plugin_pool.plugins.values()
        return plugin_type.model, plugin_type.__name__
    if isinstance(plugin_type, six.string_types):
        # Passed by name: resolve it through the plugin pool.
        try:
            return plugin_pool.get_plugin(plugin_type).model, plugin_type
        except KeyError:
            raise TypeError(
                'plugin_type must be CMSPluginBase subclass or string'
            )
    raise TypeError('plugin_type must be CMSPluginBase subclass or string')
#===============================================================================
# Public API
#===============================================================================
def create_page(title, template, language, menu_title=None, slug=None,
                apphook=None, apphook_namespace=None, redirect=None, meta_description=None,
                created_by='python-api', parent=None,
                publication_date=None, publication_end_date=None,
                in_navigation=False, soft_root=False, reverse_id=None,
                navigation_extenders=None, published=False, site=None,
                login_required=False, limit_visibility_in_menu=constants.VISIBILITY_ALL,
                position="last-child", overwrite_url=None, xframe_options=Page.X_FRAME_OPTIONS_INHERIT):
    """
    Create a CMS Page and its title for the given language.

    Validates all inputs (with assertions), inserts the page into the page
    tree (under ``parent`` when given, honouring ``position``), creates its
    Title, optionally publishes it, and returns the reloaded page.

    WARNING: performs no permission checks (see module docstring).
    See docs/extending_cms/api_reference.rst for more info
    """
    # ugly permissions hack: when a real user object is passed, expose it via
    # thread locals (so downstream code can attribute the change) and keep
    # only the username string for the created_by/changed_by fields.
    if created_by and isinstance(created_by, get_user_model()):
        _thread_locals.user = created_by
        created_by = getattr(created_by, get_user_model().USERNAME_FIELD)
    else:
        _thread_locals.user = None
    # validate template; the inheritance magic value skips the existence check
    if not template == TEMPLATE_INHERITANCE_MAGIC:
        assert template in [tpl[0] for tpl in get_cms_setting('TEMPLATES')]
        get_template(template)
    # validate site (default to the current site)
    if not site:
        site = Site.objects.get_current()
    else:
        assert isinstance(site, Site)
    # validate language:
    assert language in get_language_list(site), get_cms_setting('LANGUAGES').get(site.pk)
    # set default slug:
    if not slug:
        slug = generate_valid_slug(title, parent, language)
    # validate parent; re-fetch it so tree state is current
    if parent:
        assert isinstance(parent, Page)
        parent = Page.objects.get(pk=parent.pk)
    # validate publication date
    if publication_date:
        assert isinstance(publication_date, datetime.date)
    # validate publication end date
    if publication_end_date:
        assert isinstance(publication_end_date, datetime.date)
    # validate the navigation extender against the cms-enabled menus
    if navigation_extenders:
        raw_menus = menu_pool.get_menus_by_attribute("cms_enabled", True)
        menus = [menu[0] for menu in raw_menus]
        assert navigation_extenders in menus
    # validate menu visibility
    accepted_limitations = (constants.VISIBILITY_ALL, constants.VISIBILITY_USERS, constants.VISIBILITY_ANONYMOUS)
    assert limit_visibility_in_menu in accepted_limitations
    # validate position
    assert position in ('last-child', 'first-child', 'left', 'right')
    # child positions attach to the parent itself; sibling positions
    # ('left'/'right') attach to the parent's parent
    if parent:
        if position in ('last-child', 'first-child'):
            parent_id = parent.pk
        else:
            parent_id = parent.parent_id
    else:
        parent_id = None
    # validate and normalize apphook
    if apphook:
        application_urls = _verify_apphook(apphook, apphook_namespace)
    else:
        application_urls = None
    # reverse_id must be unique among this site's drafts
    if reverse_id:
        if Page.objects.drafts().filter(reverse_id=reverse_id, site=site).count():
            raise FieldError('A page with the reverse_id="%s" already exist.' % reverse_id)
    page = Page(
        created_by=created_by,
        changed_by=created_by,
        parent_id=parent_id,
        publication_date=publication_date,
        publication_end_date=publication_end_date,
        in_navigation=in_navigation,
        soft_root=soft_root,
        reverse_id=reverse_id,
        navigation_extenders=navigation_extenders,
        template=template,
        application_urls=application_urls,
        application_namespace=apphook_namespace,
        site=site,
        login_required=login_required,
        limit_visibility_in_menu=limit_visibility_in_menu,
        xframe_options=xframe_options,
    )
    # insert as a tree root first, then move into place under the parent
    page = page.add_root(instance=page)
    if parent:
        page = page.move(target=parent, pos=position)
    create_title(
        language=language,
        title=title,
        menu_title=menu_title,
        slug=slug,
        redirect=redirect,
        meta_description=meta_description,
        page=page,
        overwrite_url=overwrite_url,
    )
    if published:
        page.publish(language)
    # undo the thread-locals permissions hack from above
    del _thread_locals.user
    return page.reload()
def create_title(language, title, page, menu_title=None, slug=None,
                 redirect=None, meta_description=None,
                 parent=None, overwrite_url=None):
    """
    Create a title.
    Parent is only used if slug=None.
    See docs/extending_cms/api_reference.rst for more info
    """
    # Basic sanity checks on the page and the requested language.
    assert isinstance(page, Page)
    assert language in get_language_list(page.site_id)
    # Derive a unique slug when the caller did not supply one.
    effective_slug = slug or generate_valid_slug(title, parent, language)
    title_obj = Title.objects.create(
        language=language,
        title=title,
        menu_title=menu_title,
        slug=effective_slug,
        redirect=redirect,
        meta_description=meta_description,
        page=page
    )
    if overwrite_url:
        # Pin the path to the explicit URL instead of the generated one.
        title_obj.has_url_overwrite = True
        title_obj.path = overwrite_url
        title_obj.save()
    return title_obj
def add_plugin(placeholder, plugin_type, language, position='last-child',
               target=None, **data):
    """
    Add a plugin to a placeholder.

    ``position`` is relative to ``target`` when given ('last-child',
    'first-child', 'left', 'right'); extra keyword arguments initialize the
    concrete plugin model.
    See docs/extending_cms/api_reference.rst for more info
    """
    # validate placeholder
    assert isinstance(placeholder, Placeholder)
    # validate and normalize plugin type
    plugin_model, plugin_type = _verify_plugin_type(plugin_type)
    if target:
        # Compute the numeric position and the parent for the new plugin,
        # rewriting `position` to the treebeard sorted-* variants when the
        # model declares node_order_by.
        if position == 'last-child':
            if CMSPlugin.node_order_by:
                position = 'sorted-child'
            new_pos = CMSPlugin.objects.filter(parent=target).count()
            parent_id = target.pk
        elif position == 'first-child':
            new_pos = 0
            if CMSPlugin.node_order_by:
                position = 'sorted-child'
            parent_id = target.pk
        elif position == 'left':
            new_pos = target.position
            if CMSPlugin.node_order_by:
                position = 'sorted-sibling'
            parent_id = target.parent_id
        elif position == 'right':
            new_pos = target.position + 1
            if CMSPlugin.node_order_by:
                position = 'sorted-sibling'
            parent_id = target.parent_id
        else:
            raise Exception('position not supported: %s' % position)
        # Shift the positions of the plugins that follow the insertion point.
        # NOTE(review): when node_order_by rewrote `position` to
        # 'sorted-child' above, this test is False and the sibling queryset
        # (parent=target.parent_id) is used — confirm that is intended.
        if position == 'last-child' or position == 'first-child':
            qs = CMSPlugin.objects.filter(language=language, parent=target, position__gte=new_pos,
                                          placeholder=placeholder)
        else:
            qs = CMSPlugin.objects.filter(language=language, parent=target.parent_id, position__gte=new_pos,
                                          placeholder=placeholder)
        for pl in qs:
            pl.position += 1
            pl.save()
    else:
        # No target: insert among the placeholder's root-level plugins.
        if position == 'last-child':
            new_pos = CMSPlugin.objects.filter(language=language, parent__isnull=True, placeholder=placeholder).count()
        else:
            new_pos = 0
            for pl in CMSPlugin.objects.filter(language=language, parent__isnull=True, position__gte=new_pos,
                                               placeholder=placeholder):
                pl.position += 1
                pl.save()
        parent_id = None
    plugin_base = CMSPlugin(
        plugin_type=plugin_type,
        placeholder=placeholder,
        position=new_pos,
        language=language,
        parent_id=parent_id,
    )
    # Add as a tree root, then move next to/under the target if given.
    plugin_base = plugin_base.add_root(instance=plugin_base)
    if target:
        plugin_base = plugin_base.move(target, pos=position)
    # Create the concrete plugin row sharing the base plugin's identity.
    plugin = plugin_model(**data)
    plugin_base.set_base_attr(plugin)
    plugin.save()
    return plugin
def create_page_user(created_by, user,
                     can_add_page=True, can_view_page=True,
                     can_change_page=True, can_delete_page=True,
                     can_recover_page=True, can_add_pageuser=True,
                     can_change_pageuser=True, can_delete_pageuser=True,
                     can_add_pagepermission=True,
                     can_change_pagepermission=True,
                     can_delete_pagepermission=True, grant_all=False):
    """
    Creates a page user.

    Copies the identity fields of ``user`` onto a new PageUser owned by
    ``created_by`` and stores the given page permissions.  Note: this marks
    ``user`` as an active staff member and saves it.  Returns ``user``.
    See docs/extending_cms/api_reference.rst for more info
    """
    from cms.admin.forms import save_permissions
    if grant_all:
        # just be lazy: recurse with every permission enabled.  Bug fix: the
        # previous code passed 13 positional values for the 11 permission
        # parameters plus grant_all, which raised TypeError.  Pass exactly 11
        # and leave grant_all at its default (False) so recursion terminates.
        return create_page_user(created_by, user, True, True, True, True,
                                True, True, True, True, True, True, True)
    # validate created_by
    assert isinstance(created_by, get_user_model())
    data = {
        'can_add_page': can_add_page,
        'can_view_page': can_view_page,
        'can_change_page': can_change_page,
        'can_delete_page': can_delete_page,
        'can_recover_page': can_recover_page,
        'can_add_pageuser': can_add_pageuser,
        'can_change_pageuser': can_change_pageuser,
        'can_delete_pageuser': can_delete_pageuser,
        'can_add_pagepermission': can_add_pagepermission,
        'can_change_pagepermission': can_change_pagepermission,
        'can_delete_pagepermission': can_delete_pagepermission,
    }
    user.is_staff = True
    user.is_active = True
    page_user = PageUser(created_by=created_by)
    # Mirror the identity fields of the source user onto the PageUser row.
    for field in [f.name for f in get_user_model()._meta.local_fields]:
        setattr(page_user, field, getattr(user, field))
    user.save()
    page_user.save()
    save_permissions(data, page_user)
    return user
def assign_user_to_page(page, user, grant_on=ACCESS_PAGE_AND_DESCENDANTS,
                        can_add=False, can_change=False, can_delete=False,
                        can_change_advanced_settings=False, can_publish=False,
                        can_change_permissions=False, can_move_page=False,
                        can_recover_page=True, can_view=False,
                        grant_all=False, global_permission=False):
    """
    Assigns given user to page, and gives him requested permissions.
    See docs/extending_cms/api_reference.rst for more info
    """
    # grant_all is honoured only for page-level (not global) permissions.
    grant_all = grant_all and not global_permission
    permission_flags = (
        ('can_add', can_add),
        ('can_change', can_change),
        ('can_delete', can_delete),
        ('can_change_advanced_settings', can_change_advanced_settings),
        ('can_publish', can_publish),
        ('can_change_permissions', can_change_permissions),
        ('can_move_page', can_move_page),
        ('can_view', can_view),
    )
    data = {flag: value or grant_all for flag, value in permission_flags}
    page_permission = PagePermission(page=page, user=user,
                                     grant_on=grant_on, **data)
    page_permission.save()
    if global_permission:
        # Replace the return value with a site-wide permission object.
        page_permission = GlobalPagePermission(
            user=user, can_recover_page=can_recover_page, **data)
        page_permission.save()
        page_permission.sites.add(Site.objects.get_current())
    return page_permission
def publish_page(page, user, language):
    """
    Publish a page. This sets `page.published` to `True` and calls publish()
    which does the actual publishing.
    See docs/extending_cms/api_reference.rst for more info
    """
    page = page.reload()

    class FakeRequest(object):
        # Minimal stand-in carrying only the attribute the permission
        # check reads.
        def __init__(self, user):
            self.user = user

    if not page.has_publish_permission(FakeRequest(user)):
        raise PermissionDenied()
    # Set the current_user so the page's changed_by attribute is stamped
    # correctly; current_user() wants the username string, not the object.
    with current_user(user.get_username()):
        page.publish(language)
    return page.reload()
def publish_pages(include_unpublished=False, language=None, site=None):
    """
    Create published public version of selected drafts.

    Generator yielding ``(page, ok)`` tuples where ``ok`` is False when
    publishing failed for any of the page's languages.

    :param bool include_unpublished: also publish drafts without any
        published title
    :param language: restrict to this language code (all languages if None)
    :param site: restrict to pages of this site (all sites if None)
    """
    qs = Page.objects.drafts()
    if not include_unpublished:
        qs = qs.filter(title_set__published=True).distinct()
    if site:
        qs = qs.filter(site=site)
    output_language = None
    # (Fixed: the loop previously used enumerate() but never used the index.)
    for page in qs:
        add = True
        titles = page.title_set
        if not include_unpublished:
            titles = titles.filter(published=True)
        for lang in titles.values_list("language", flat=True):
            if language is None or lang == language:
                # Remember the first language seen so it can be re-activated
                # below.
                if not output_language:
                    output_language = lang
                if not page.publish(lang):
                    add = False
        # we may need to activate the first (main) language for proper page title rendering
        activate(output_language)
        yield (page, add)
def get_page_draft(page):
    """
    Returns the draft version of a page, regardless if the passed in
    page is a published version or a draft version.
    :param page: The page to get the draft version
    :type page: :class:`cms.models.pagemodel.Page` instance
    :return page: draft version of the page
    :type page: :class:`cms.models.pagemodel.Page` instance
    """
    if not page:
        return None
    # A draft is returned as-is; a public page points at its draft twin.
    return page if page.publisher_is_draft else page.publisher_draft
def copy_plugins_to_language(page, source_language, target_language,
                             only_empty=True):
    """
    Copy the plugins to another language in the same page for all the page
    placeholders.
    By default plugins are copied only if placeholder has no plugin for the
    target language; use ``only_empty=False`` to change this.
    .. warning: This function skips permissions checks
    :param page: the page to copy
    :type page: :class:`cms.models.pagemodel.Page` instance
    :param string source_language: The source language code,
        must be in :setting:`django:LANGUAGES`
    :param string target_language: The source language code,
        must be in :setting:`django:LANGUAGES`
    :param bool only_empty: if False, plugin are copied even if
        plugins exists in the target language (on a placeholder basis).
    :return int: number of copied plugins
    """
    total_copied = 0
    for placeholder in page.get_placeholders():
        # When only_empty, skip placeholders already populated for the
        # target language (the exists() query runs only in that case).
        if only_empty and placeholder.cmsplugin_set.filter(
                language=target_language).exists():
            continue
        source_plugins = list(
            placeholder.cmsplugin_set.filter(
                language=source_language).order_by('path'))
        total_copied += len(copy_plugins.copy_plugins_to(
            source_plugins, placeholder, target_language))
    return total_copied
def can_change_page(request):
    """
    Check whether a user has the permission to change the page.
    This will work across all permission-related setting, with a unified interface
    to permission checking.
    """
    # Global change permission only applies when CMS_PERMISSION is enabled.
    has_global = get_cms_setting('PERMISSION') and has_page_change_permission(request)
    # Per-page change permission on the page currently being served.
    current = request.current_page
    has_page_level = current and current.has_change_permission(request)
    return has_global or has_page_level
| |
#!/usr/bin/env python3
import abc
import re
import os.path
import docutils.parsers.rst
import docutils.parsers.rst.roles
import docutils.parsers.rst.directives
import docutils.parsers.rst.directives.misc
import docutils.parsers.rst.directives.admonitions
import sphinx.addnodes
import sphinx.directives.code
import sphinx.directives.other
import sphinx.roles
from typing import Any, Dict
import mut.tuft.mongodb_conf
# Matches "label <target>" style references: group 1 = label, group 2 = target.
REF_PAT = re.compile(r'(.*)(?:<(.+)>)$', re.M | re.DOTALL)
# Captures an object/method name up to the first '(' or newline.
METHOD_PAT = re.compile(r'^([^(\n]+)')
class Example(docutils.nodes.Admonition, docutils.nodes.Element):
    """Doctree node backing the ``example`` admonition."""
    pass
class Optional(docutils.nodes.Admonition, docutils.nodes.Element):
    """Doctree node backing the ``optional`` admonition."""
    pass
class Related(docutils.nodes.Admonition, docutils.nodes.Element):
    """Doctree node backing the ``related`` admonition."""
    pass
class See(docutils.nodes.Admonition, docutils.nodes.Element):
    """Doctree node backing the ``see`` admonition."""
    pass
class Todo(docutils.nodes.Admonition, docutils.nodes.Element):
    """Doctree node backing the ``todo`` admonition."""
    pass
def parse_ref(text):
    """Split ``"label <target>"`` into ``(label, target)``.

    Plain text without a ``<...>`` part yields ``(text, text)``; an empty
    target falls back to the label.
    """
    matches = REF_PAT.findall(text)
    if matches:
        label, ref = matches[0]
    else:
        label = ref = text
    return label, ref or label
def role(name: str):
    """Decorator registering a function as a local docutils role.

    Bug fix: ``inner`` now returns the decorated function (decorator
    convention); previously it returned None, so each decorated module-level
    name (e.g. ``doc_role``) was rebound to None after registration.
    """
    def inner(f):
        docutils.parsers.rst.roles.register_local_role(name, f)
        return f
    return inner
class RefRole:
    """Callable placeholder role producing no nodes."""

    def __call__(self):
        # Intentionally emits nothing.
        return []
@role('doc')
def doc_role(role, rawsource, text, lineno, inliner):
    """Resolve a :doc: reference into a reference node."""
    label, target = parse_ref(text)
    # Drop a trailing slash from the target path.
    if target.endswith('/'):
        target = target[:-1]
    if not target.startswith('/'):
        # Resolve relative targets against the current document's directory.
        base = os.path.dirname(inliner.document.settings.env.current_input_path)
        target = os.path.normpath('/'.join(('', base, target)))
    ref_node = docutils.nodes.reference(text=label, href=target)
    ref_node.document = inliner.document
    return [ref_node], []
@role('ref')
def ref_role(role, rawsource, text, lineno, inliner):
    """Render a :ref: cross-reference as a plain reference node."""
    label, target = parse_ref(text)
    ref_node = docutils.nodes.reference(text=label, href=target)
    ref_node.document = inliner.document
    return [ref_node], []
# The following roles are accepted but produce no output in this renderer.
@role('mms')
def mms_role(role, rawsource, text, lineno, inliner):
    return [], []
@role('opsmgr')
def opsmgr_role(role, rawsource, text, lineno, inliner):
    return [], []
@role('option')
def products_role(role, rawsource, text, lineno, inliner):
    return [], []
@role('pep')
def indexmarkup_role(role, rawsource, text, lineno, inliner):
    return [], []
@role('rfc')
def rfc_role(role, rawsource, text, lineno, inliner):
    return [], []
@role('term')
def term_role(role, rawsource, text, lineno, inliner):
    """Render a :term: reference pointing at a glossary anchor."""
    label, target = parse_ref(text)
    anchor = ':term:' + docutils.nodes.make_id(target)
    term_node = docutils.nodes.reference(text=label, href=anchor)
    term_node.document = inliner.document
    return [term_node], []
def register_extlink(name, pattern):
    """Register a role that expands its target through *pattern*."""
    @role(name)
    def inner(role, rawsource, text, lineno, inliner):
        label, target = parse_ref(text)
        href = pattern.format(target)
        return [docutils.nodes.reference(text=label, href=href)], []
class BaseDirective(docutils.parsers.rst.Directive, metaclass=abc.ABCMeta):
    """Directive base class delegating node creation to handle()."""

    def run(self):
        results = list(self.handle(*self.arguments))
        for node in results:
            # Attach source position info to nodes that lack it.
            if node.source is None:
                node.source, node.line = self.state_machine.get_source_and_line(
                    self.lineno)
        return results

    @abc.abstractmethod
    def handle(self):
        """Return the list of nodes this directive produces."""
class IndexDirective(BaseDirective):
    """Accepts ``.. index::`` entries but produces no output."""
    required_arguments = 1
    optional_arguments = 0
    has_content = False
    final_argument_whitespace = True
    def handle(self, *_): return []
class DefaultDomainDirective(BaseDirective):
    """Accepts ``.. default-domain::`` but produces no output."""
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    has_content = False
    def handle(self, *_): return []
class TocTreeDirective(BaseDirective):
    """
    Directive to notify Sphinx about the hierarchical structure of the docs.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'hidden': docutils.parsers.rst.directives.flag,
        'titlesonly': docutils.parsers.rst.directives.flag,
        'maxdepth': docutils.parsers.rst.directives.nonnegative_int,
    }

    def handle(self):
        env = self.state.document.settings.env
        current_page = env.current_input_page
        entries = []
        for raw_entry in self.content:
            if not raw_entry:
                continue
            # Build a reference node for this toctree entry.
            entry_title, target = parse_ref(raw_entry)
            if not target.startswith('/'):
                # Resolve relative entries against the current page's
                # directory.
                target = os.path.normpath(
                    '/'.join((os.path.dirname(current_page), target)))
            entries.append(
                docutils.nodes.reference(text=entry_title, href=target))
            # Record the parent/child relation for the table of contents.
            env.register_toc(current_page, target)
        return entries
class NopDirective(BaseDirective):
    """Directive that accepts anything and emits nothing."""
    def handle(self, *_):
        return []
class IncludeDirective(docutils.parsers.rst.directives.misc.Include):
    """Include directive resolving paths through the build environment."""

    def run(self):
        env = self.state.document.settings.env
        arg = self.arguments[0]
        # "<system file>" includes are passed straight to docutils; anything
        # else is resolved relative to the environment first.
        if not (arg.startswith('<') and arg.endswith('>')):
            _, resolved = env.relfn2path(arg)
            self.arguments[0] = resolved
        return docutils.parsers.rst.directives.misc.Include.run(self)
class OnlyDirective(BaseDirective):
    """
    Directive to only include text if the given tag(s) are enabled.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    def handle(self, expr):
        node = sphinx.addnodes.only()
        node.document = self.state.document
        node['expr'] = expr
        # Same as util.nested_parse_with_titles but try to handle nested
        # sections which should be raised higher up the doctree.
        # Save the surrounding title/section state and parse the content in
        # a clean state so nested titles are detected.
        surrounding_title_styles = self.state.memo.title_styles
        surrounding_section_level = self.state.memo.section_level
        self.state.memo.title_styles = []
        self.state.memo.section_level = 0
        try:
            self.state.nested_parse(self.content, self.content_offset,
                                    node, match_titles=1)
            title_styles = self.state.memo.title_styles
            if not surrounding_title_styles or \
               not title_styles or \
               title_styles[0] not in surrounding_title_styles or \
               not self.state.parent:
                # No nested sections so no special handling needed.
                return [node]
            # Calculate the depths of the current and nested sections.
            current_depth = 0
            parent = self.state.parent
            while parent:
                current_depth += 1
                parent = parent.parent
            current_depth -= 2
            title_style = title_styles[0]
            nested_depth = len(surrounding_title_styles)
            if title_style in surrounding_title_styles:
                nested_depth = surrounding_title_styles.index(title_style)
            # Use these depths to determine where the nested sections should
            # be placed in the doctree.
            n_sects_to_raise = current_depth - nested_depth + 1
            parent = self.state.parent
            for i in range(n_sects_to_raise):
                if parent.parent:
                    parent = parent.parent
            # Attach the node higher up the tree instead of returning it.
            parent.append(node)
            # This whole behavior is incredibly naughty. We duplicate it for
            # compatibility reasons, but need to yell at the user.
            return [self.state.document.reporter.warning('Titles within an '
                '"only" directive may yield surprising reordering.')]
        finally:
            # Always restore the surrounding parser state.
            self.state.memo.title_styles = surrounding_title_styles
            self.state.memo.section_level = surrounding_section_level
# Admonition directives: each maps a directive name to its doctree node class.
class SeeAlsoDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    # ".. seealso::" rendered via sphinx's seealso node.
    node_class = sphinx.addnodes.seealso
class ExampleDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    node_class = Example
class OptionalDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    node_class = Optional
class RelatedDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    node_class = Related
class TodoDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    node_class = Todo
class SeeDirective(docutils.parsers.rst.directives.admonitions.BaseAdmonition):
    node_class = See
class VersionChange(BaseDirective):
    """Handles versionadded/versionchanged/deprecated directives.

    The directive name itself (self.name) distinguishes the three variants.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {} # type: Dict[str, Any]
    def handle(self, version, *args):
        node = sphinx.addnodes.versionmodified()
        node.document = self.state.document
        node['version'] = version
        node['type'] = self.name
        messages = []
        if args:
            # Optional trailing argument: a one-line description parsed as
            # inline markup and wrapped in a paragraph.
            text = args[0]
            inodes, new_messages = self.state.inline_text(text, self.lineno+1)
            messages.extend(new_messages)
            para = docutils.nodes.paragraph(text, '', *inodes)
            para.document = self.state.document
            node.append(para)
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        return [node] + messages
def make_object_directive(name, can_call):
    """Create and register a directive describing an API object.

    ``can_call`` controls whether "()" is appended to the anchor name when
    the signature does not already end with ")".
    """
    class RefDirective(BaseDirective):
        has_content = True
        final_argument_whitespace = True
        required_arguments = 1
        optional_arguments = 100
        callable = can_call
        def handle(self, *args):
            # XXX Handle multiple prototypes (sphinx uses comma as a delimiter)
            name = ' '.join(args)
            parsed = METHOD_PAT.findall(name)
            if not parsed:
                return []
            # Anchor id is "<directive-name>:<object-name>[()]".
            suffix = '()' if (self.callable and not parsed[0].endswith(')')) else ''
            mangled_name = '{0}:{1}{2}'.format(self.name, parsed[0], suffix)
            node = sphinx.addnodes.desc()
            node.document = self.state.document
            node_signature = sphinx.addnodes.desc_signature()
            node_signature.document = self.state.document
            node_signature.append(docutils.nodes.literal(text=name))
            node.append(node_signature)
            if self.content:
                self.state.nested_parse(self.content, self.content_offset, node)
            # Emit an invisible target followed by the description body.
            return [docutils.nodes.target(ids=[mangled_name]), node]
    docutils.parsers.rst.directives.register_directive(name, RefDirective)
# Register an object directive plus a companion role for each entry in the
# project configuration.  The decorator argument is evaluated immediately on
# each iteration, so every role is bound to its own directive name (no
# late-binding closure issue).
for directive_info in mut.tuft.mongodb_conf.conf['directives']:
    make_object_directive(directive_info['name'], directive_info['callable'])
    @role(directive_info['name'])
    def r(role, rawsource, text, lineno, inliner):
        # `role` is the role-name string docutils passes to the handler.
        label, ref = parse_ref(text)
        # NOTE(review): abbreviate is computed but never used — the leading
        # '~' is stripped without shortening the label; verify intent.
        abbreviate = False
        if ref.startswith('~'):
            abbreviate = True
            ref = ref[1:]
        ref_name = ':'.join((role, ref))
        # These elements should be code-block'd
        wrapper_node = docutils.nodes.literal()
        wrapper_node.document = inliner.document
        wrapper_node.append(docutils.nodes.reference(text=label, href=ref_name))
        return [wrapper_node], []
# Wire up the remaining directives. Several names share one implementation:
# versionadded/versionchanged/deprecated all map to VersionChange, which
# records the invoking name on the node.
docutils.parsers.rst.directives.register_directive('include', IncludeDirective)
docutils.parsers.rst.directives.register_directive('rst-class', docutils.parsers.rst.directives.misc.Class)
docutils.parsers.rst.directives.register_directive('default-domain', DefaultDomainDirective)
docutils.parsers.rst.directives.register_directive('toctree', TocTreeDirective)
docutils.parsers.rst.directives.register_directive('index', IndexDirective)
docutils.parsers.rst.directives.register_directive('versionadded', VersionChange)
docutils.parsers.rst.directives.register_directive('versionchanged', VersionChange)
docutils.parsers.rst.directives.register_directive('deprecated', VersionChange)
docutils.parsers.rst.directives.register_directive('only', OnlyDirective)
docutils.parsers.rst.directives.register_directive('seealso', SeeAlsoDirective)
docutils.parsers.rst.directives.register_directive('optional', OptionalDirective)
docutils.parsers.rst.directives.register_directive('example', ExampleDirective)
docutils.parsers.rst.directives.register_directive('see', SeeDirective)
docutils.parsers.rst.directives.register_directive('related', RelatedDirective)
docutils.parsers.rst.directives.register_directive('todo', TodoDirective)
# External-link role templates: role name -> URL pattern with a single
# '{0}' placeholder for the role's target text.
EXTLINKS = {
    'hardlink': 'http://docs.mongodb.org/master/{0}',
    'issue': 'https://jira.mongodb.org/browse/{0}',
    'wiki': 'http://www.mongodb.org/display/DOCS/{0}',
    'api': 'https://api.mongodb.org/{0}',
    'manual': 'https://docs.mongodb.org/manual{0}',
    'gettingstarted': 'https://docs.mongodb.org/getting-started{0}',
    'ecosystem': 'https://docs.mongodb.org/ecosystem{0}',
    'meta-driver': 'http://docs.mongodb.org/meta-driver/latest{0}',
    'mms-docs': 'https://docs.cloud.mongodb.com{0}',
    'mms-home': 'https://cloud.mongodb.com{0}',
    'opsmgr': 'https://docs.opsmanager.mongodb.com{0}',
    'about': 'https://www.mongodb.org/about{0}',
    'products': 'https://www.mongodb.com/products{0}'
}
# register_extlink is defined elsewhere in this file.
for name, pattern in EXTLINKS.items():
    register_extlink(name, pattern)
| |
#!/usr/bin/env python
"""Web authentication classes for the GUI."""
import base64
import logging
from werkzeug import utils as werkzeug_utils
from google.oauth2 import id_token
from grr_response_core import config
from grr_response_core.lib import utils
from grr_response_core.lib.registry import MetaclassRegistry
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server.databases import db
from grr_response_server.gui import http_response
from grr_response_server.gui import validate_iap
class BaseWebAuthManager(metaclass=MetaclassRegistry):
  """A class managing web authentication.

  This class is responsible for deciding if the user will have access to the web
  interface and for generating the token that will be passed to the functions
  that deal with data.

  Checks are done using a decorator function.
  """

  def SecurityCheck(self, func, request, *args, **kwargs):
    """A decorator applied to protected web handlers.

    Args:
      func: The wrapped function to call.
      request: The web request.
      *args: Passthrough to wrapped function.
      **kwargs: Passthrough to wrapped function.

    Returns:
      A WSGI http response object.

    This will get called for all requests that get passed through one of our
    handlers that is wrapped in @SecurityCheck.
    """
    # Base implementation is a stub (implicitly returns None); every concrete
    # manager below overrides this method.

  def RedirectBase(self):
    """Return a redirect to the main GRR page."""
    return werkzeug_utils.redirect(config.CONFIG["AdminUI.url"])
class IAPWebAuthManager(BaseWebAuthManager):
  """Auth Manager for Google IAP.

  This extension pulls the x-goog-iap-jwt-assertion header and generates
  a new user for that header via the 'sub' claim. Authorization is now
  delegated to IAP.
  """

  # Header Cloud IAP attaches to every request it proxies.
  IAP_HEADER = "x-goog-iap-jwt-assertion"

  def __init__(self, *args, **kwargs):
    """Reads the required IAP settings from the server config.

    Raises:
      RuntimeError: if either Cloud IAP config option is unset.
    """
    super().__init__(*args, **kwargs)
    if (config.CONFIG["AdminUI.google_cloud_project_id"] is None or
        config.CONFIG["AdminUI.google_cloud_backend_service_id"] is None):
      # Bug fix: the original adjacent string literals were concatenated
      # without separating spaces, yielding "...options arenot set..." and
      # "...google_cloud_project_idor AdminUI...". Both options are required
      # (the check above is an OR over missing values), hence "and".
      raise RuntimeError(
          "The necessary Cloud IAP configuration options are "
          "not set. Please set your AdminUI.google_cloud_project_id "
          "and AdminUI.google_cloud_backend_service_id keys.")
    self.cloud_project_id = config.CONFIG["AdminUI.google_cloud_project_id"]
    self.backend_service_id = config.CONFIG[
        "AdminUI.google_cloud_backend_service_id"]

  def SecurityCheck(self, func, request, *args, **kwargs):
    """Validates the IAP JWT header and calls the wrapped handler.

    Args:
      func: The wrapped handler to call on success.
      request: The web request; request.user is set from the validated JWT.
      *args: Passthrough to the wrapped handler.
      **kwargs: Passthrough to the wrapped handler.

    Returns:
      The wrapped handler's response, or a 401 response when the header is
      missing or fails validation.
    """
    if self.IAP_HEADER not in request.headers:
      return http_response.HttpResponse("Unauthorized", status=401)

    jwt = request.headers.get(self.IAP_HEADER)
    try:
      request.user, _ = validate_iap.ValidateIapJwtFromComputeEngine(
          jwt, self.cloud_project_id, self.backend_service_id)
      return func(request, *args, **kwargs)
    except validate_iap.IAPValidationFailedError as e:
      # Return failure if IAP is not decoded correctly.
      logging.error("IAPWebAuthManager failed with: %s", e)
      return http_response.HttpResponse("Unauthorized", status=401)
class BasicWebAuthManager(BaseWebAuthManager):
  """Manager using basic auth using the config file."""

  def SecurityCheck(self, func, request, *args, **kwargs):
    """Checks HTTP Basic credentials against the GRR user database.

    Args:
      func: The wrapped handler to call on success.
      request: The web request; request.user is set on success.
      *args: Passthrough to the wrapped handler.
      **kwargs: Passthrough to the wrapped handler.

    Returns:
      The wrapped handler's response, or a 401 challenge response.
    """
    request.user = u""
    authorized = False
    try:
      # Default of " " guarantees the split below yields two parts even when
      # the Authorization header is absent.
      auth_type, authorization = request.headers.get("Authorization",
                                                     " ").split(" ", 1)
      if auth_type == "Basic":
        # Credentials are base64("user:password"); password may contain ':'.
        authorization_string = base64.b64decode(authorization).decode("utf-8")
        user, password = authorization_string.split(":", 1)
        try:
          user_obj = data_store.REL_DB.ReadGRRUser(user)
          if user_obj.password.CheckPassword(password):
            authorized = True
            # The password is ok - update the user
            request.user = user
        except db.UnknownGRRUserError:
          # Unknown users simply remain unauthorized.
          pass
    except access_control.UnauthorizedAccess as e:
      logging.warning("UnauthorizedAccess: %s for %s", e, request)
    except (IndexError, KeyError, IOError):
      # Malformed header / bad base64 etc. — treat as unauthorized.
      pass
    if not authorized:
      result = http_response.HttpResponse("Unauthorized", status=401)
      result.headers["WWW-Authenticate"] = "Basic realm='Secure Area'"
      return result
    # Modify this to implement additional checking (e.g. enforce SSL).
    return func(request, *args, **kwargs)
class RemoteUserWebAuthManager(BaseWebAuthManager):
  """Manager that reads remote username from HTTP headers.

  NOTE: This manager should only be used when GRR UI runs behind an
  reverse http proxy (Apache, Nginx, etc). It assumes that
  authentication is done by the reverse http proxy server and the
  authenticated username is passed to GRR via a HTTP header.
  """

  def __init__(self, *args, **kwargs):
    """Caches the relevant AdminUI config values."""
    super().__init__(*args, **kwargs)
    self.remote_user_header = config.CONFIG["AdminUI.remote_user_header"]
    self.remote_email_header = config.CONFIG["AdminUI.remote_email_header"]
    self.trusted_ips = config.CONFIG["AdminUI.remote_user_trusted_ips"]

  def AuthError(self, message):
    """Builds a 403 response carrying the given message."""
    return http_response.HttpResponse(message, status=403)

  def SecurityCheck(self, func, request, *args, **kwargs):
    """Trusts the username header, but only from whitelisted proxy IPs."""
    # Guard 1: the request must originate from a trusted reverse proxy.
    if request.remote_addr not in self.trusted_ips:
      return self.AuthError("Request sent from an IP not in "
                            "AdminUI.remote_user_trusted_ips. "
                            "Source was %s" % request.remote_addr)

    # Guard 2: the username header must be present and non-empty.
    headers = request.headers
    if self.remote_user_header not in headers:
      return self.AuthError("No username header found.")
    username = headers[self.remote_user_header]
    if not username:
      return self.AuthError("Empty username is not allowed.")
    request.user = username

    # Optionally pick up the authenticated e-mail address as well.
    if config.CONFIG["Email.enable_custom_email_address"]:
      if self.remote_email_header in headers:
        request.email = headers[self.remote_email_header]

    return func(request, *args, **kwargs)
class FirebaseWebAuthManager(BaseWebAuthManager):
  """Manager using Firebase auth service."""

  BEARER_PREFIX = "Bearer "
  SECURE_TOKEN_PREFIX = "https://securetoken.google.com/"

  def __init__(self, *args, **kwargs):
    """Refuses to start unless the API router is disabled.

    Raises:
      RuntimeError: if API.DefaultRouter is not DisabledApiCallRouter.
    """
    super().__init__(*args, **kwargs)
    def_router = config.CONFIG["API.DefaultRouter"]
    if def_router != "DisabledApiCallRouter":
      raise RuntimeError("Using FirebaseWebAuthManager with API.DefaultRouter "
                         "being anything but DisabledApiCallRouter means "
                         "risking opening your GRR UI/API to the world. "
                         "Current setting is: %s" % def_router)

  def AuthError(self, message):
    """Builds a 403 response carrying the given message."""
    return http_response.HttpResponse(message, status=403)

  def SecurityCheck(self, func, request, *args, **kwargs):
    """Check if access should be allowed for the request."""
    try:
      auth_header = request.headers.get("Authorization", "")
      if not auth_header.startswith(self.BEARER_PREFIX):
        raise ValueError("JWT token is missing.")
      token = auth_header[len(self.BEARER_PREFIX):]
      # The Firebase project id is the first label of the auth domain.
      auth_domain = config.CONFIG["AdminUI.firebase_auth_domain"]
      project_id = auth_domain.split(".")[0]
      idinfo = id_token.verify_firebase_token(
          token, request, audience=project_id)
      # Defense in depth: also pin the token issuer to this project.
      if idinfo["iss"] != self.SECURE_TOKEN_PREFIX + project_id:
        raise ValueError("Wrong issuer.")
      request.user = idinfo["email"]
    except ValueError as e:
      # For a homepage, just do a pass-through, otherwise JS code responsible
      # for the Firebase auth won't ever get executed. This approach is safe,
      # because wsgiapp.HttpRequest object will raise on any attempt to
      # access uninitialized HttpRequest.user attribute.
      if request.path != "/":
        return self.AuthError("JWT token validation failed: %s" % e)
    return func(request, *args, **kwargs)
class NullWebAuthManager(BaseWebAuthManager):
  """No-op auth manager for testing: every request gets a fixed user."""

  def __init__(self, *args, **kwargs):
    """Starts with the default test username."""
    super().__init__(*args, **kwargs)
    self.username = u"gui_user"

  def SecurityCheck(self, func, request, *args, **kwargs):
    """Stamps the configured username on the request and calls through."""
    request.user = self.username
    return func(request, *args, **kwargs)

  def SetUserName(self, username):
    """Overrides the username applied to subsequent requests."""
    self.username = username
# Module-level singleton holding the active web auth manager instance,
# populated by InitializeWebAuth().
WEBAUTH_MANAGER = None
def SecurityCheck(func):
  """Decorator routing a protected web handler through the auth manager."""
  def Wrapper(request, *args, **kwargs):
    """Delegates the security check to the configured manager."""
    manager = WEBAUTH_MANAGER
    if manager is None:
      raise RuntimeError("Attempt to initialize before WEBAUTH_MANAGER set.")
    return manager.SecurityCheck(func, request, *args, **kwargs)
  return Wrapper
def InitializeWebAuth():
  """Initializes WebAuth.

  Instantiates the manager class named by AdminUI.webauth_manager (looked up
  via the MetaclassRegistry plugin mechanism) and installs it as the global
  WEBAUTH_MANAGER used by the @SecurityCheck decorator.
  """
  global WEBAUTH_MANAGER  # pylint: disable=global-statement
  # pylint: disable=g-bad-name
  WEBAUTH_MANAGER = BaseWebAuthManager.GetPlugin(
      config.CONFIG["AdminUI.webauth_manager"])()
  # pylint: enable=g-bad-name
  logging.info("Using webauth manager %s", WEBAUTH_MANAGER)
@utils.RunOnce
def InitializeWebAuthOnce():
  """Initializes WebAuth once only."""
  # RunOnce makes repeated calls no-ops, so module reloads are safe.
  InitializeWebAuth()
| |
"""
Calculate the probability of a mendelian error given the genotype likelihoods
from a trio."""
import sys
from math import log10
import gzip
nan = float('nan')
class LowGenotypeException(Exception):
    """Raised when genotype likelihoods are too small to normalize."""
def rescale(li):
    """Normalize *li* so its entries sum to 1.

    Raises LowGenotypeException when the sum is effectively zero, since the
    likelihoods would then carry no usable information.
    """
    total = float(sum(li))
    if total < 1e-40:
        raise LowGenotypeException
    return [x / total for x in li]
def mendelian_error(mother, father, child, pls=False):
    """
    Return the probability of a mendelian error given the log10 genotype
    likelihoods. A large value indicates a high probability of a mendelian
    error. Low values mean that the genotype-likelihoods indicate enough
    uncertainty that it could be a genotyping error.
    # everyone is het:
    >>> het = (-2.0, -0.1, -2.0)
    >>> mendelian_error(het, het, het)
    0.047...
    # parents are hom, child is het.
    >>> father = mother = [-0.6, -2.5, -2.5]
    >>> child = [-2.5, -0.6, -2.5]
    >>> mendelian_error(mother, father, child)
    0.987...
    # same as above, but more certainty in the called genotypes:
    >>> child[1] = 1e-6
    >>> mother[0] = father[0] = 1e-6
    >>> mendelian_error(mother, father, child)
    0.996...
    # everyone is confidently homozygous alt
    >>> child = father = mother = [-11.0, -11.0, -0.1]
    >>> mendelian_error(mother, father, child)
    7.55...e-11
    # everyone is less confidently homozygous refs:
    >>> child = father = mother = [-0.1, -2.0, -2.0]
    >>> mendelian_error(mother, father, child)
    0.071...
    mother and fater are homozygous alts
    >>> mother = father = [-3.0, -3.0, -0.1]
    # child is het
    >>> child = [-3., -0.1, -3.]
    >>> mendelian_error(mother, father, child)
    0.998...
    # but when the hom-alt call is close...
    >>> child = [-3., -0.1, -0.15]
    >>> mendelian_error(mother, father, child)
    0.53...
    # mother is hom_ref, dad is het, child is hom_alt
    >>> mother, father, child = (-0.1, -2, -2), (-2, -0.1, -2), (-2, -2, -0.1)
    >>> mendelian_error(mother, father, child)
    0.976...
    # mother is hom_ref, dad is hom_alt, child is hom_ref
    >>> mother, father, child = (-0.1, -2.5, -2.5), (-2.5, -2.5, -0.1), (-0.1, -2.5, -2.5)
    >>> mendelian_error(mother, father, child)
    0.993...
    # same, but child is hom_alt
    >>> mendelian_error(mother, father, (-5, -5, -0.01))
    0.994...
    # child should be het:
    >>> mendelian_error(mother, father, (-3, 0, -3))
    0.75...
    # NOTE: does oddish things if all have very low, equal values.
    >>> mendelian_error([-16.2, -16.2, -16.2], [-14.4, -15.0, -22.6], [-24.9, -21.2, -20.9])
    0.8629...
    >>> mendelian_error([-15.5, -15.8, -19.7], [-11.8, -9.9, -22.9], [-69.7, -55.9, -58.3])
    >>> mendelian_error([-3.4, -0, -2.9], [-0, -1.8, -23.0], [-6.7, 0.0, -10.7])
    0.742...
    >>> mendelian_error([34, 0, 29], [0, 18, 23], [67, 0, 107], pls=True)
    0.74...
    """
    # PL values are phred-scaled (-10 * log10(likelihood)); convert them back
    # to log10 likelihoods before proceeding.
    if pls:
        mother = [m / -10.0 for m in mother]
        father = [f / -10.0 for f in father]
        child = [c / -10.0 for c in child]
    # Convert log10 likelihoods to normalized probabilities over
    # (hom_ref, het, hom_alt). Returns None when any sample's likelihoods
    # are too small to normalize.
    try:
        M = rescale([10.**m for m in mother])
        F = rescale([10.**f for f in father])
        C = rescale([10.**c for c in child])
    except LowGenotypeException:
        return None
    # by ref, and alt, we mean hom_ref, hom_alt
    p_two_ref = M[0] * F[0]
    p_two_het = M[1] * F[1]
    p_two_alt = M[2] * F[2]
    # only 1 of the parents is ...
    p_one_ref = (M[0] + F[0])/2 - p_two_ref
    p_one_het = (M[1] + F[1])/2 - p_two_het
    p_one_alt = (M[2] + F[2])/2 - p_two_alt
    # divide by 2 because parents independent.
    # all options covered because, e.g. p(two_ref) == p(zero_alt)
    assert abs(sum((p_one_ref, p_one_het, p_one_alt, p_two_ref, p_two_het, p_two_alt)) - 1) < 1e-4, \
        abs(sum((p_one_ref, p_one_het, p_one_alt, p_two_ref, p_two_het, p_two_alt)) - 1)
    ##################
    # Non-violations #
    ##################
    # Sum the probabilities of all parent/child genotype combinations that
    # are consistent with Mendelian inheritance.
    # a. everyone is reference
    a = p_two_ref * C[0]
    # b. everyone is hom alt
    b = p_two_alt * C[2]
    # c. 1 het and 1 ref parent. child matches
    c = p_one_het * p_one_ref * (C[0] + C[1])
    # d. 1 het and 1 alt parent. child matches
    d = p_one_het * p_one_alt * (C[1] + C[2])
    # e. both parents hets. (child can be anything)
    e = p_two_het
    # f. one hom ref, one hom alt. child is het
    f = p_one_ref * p_one_alt * C[1]
    p_not_error = a + b + c + d + e + f
    # A Mendelian error is the complement of all consistent configurations.
    return 1.0 - p_not_error
def xopen(f):
    """Open *f* for reading: gzip for .gz paths, stdin for "-", else plain."""
    if f.endswith(".gz"):
        return gzip.open(f)
    if f == "-":
        return sys.stdin
    return open(f)
def main(fh, father, mother, child):
for line in fh:
if line.startswith("##"):
print line,
continue
elif line.startswith("#CHROM"):
print "##INFO=<ID=MEP,Number=1,Type=Float,Description=\"probability of mendelian error\">"
print "##INFO=<ID=MER,Number=1,Type=Float,Description=\"log10 ratio of mendelian error\">"
fields = line.rstrip().split("\t")
samples = fields[9:]
idxs = [9 + samples.index(s) for s in (father, mother, child)]
print line,
continue
fields = line.rstrip().split("\t")
samples = [fields[i].split(":") for i in idxs]
fmt = fields[8].split(":")
if "PL" in fmt:
gli = fmt.index("PL")
opls = [s[gli].split(",") for s in samples]
gls = [[int(p)/-10. for p in pl] for pl in opls]
else:
gli = fmt.index("GL")
ogls = [s[gli].split(",") for s in samples]
gls = [[float(p) for g in gl] for gl in ogls]
for i, gl in enumerate(gls):
while sum(gls[i]) < -50:
gls[i] = [p / 10. for p in gls[i]]
p = mendelian_error(gls[0], gls[1], gls[2])
if p == 1:
mer = 100
elif p == 0:
mer = 0
elif p is None:
mer = None
else:
mer = log10(p / (1.0 - p))
if p < 1 - 1e-5 or p is None:
continue
fields[7] += ";MEP=%.8g" % (nan if p is None else p)
fields[7] += ";MER=%.8g" % (nan if p is None else mer)
print "\t".join(fields)
def test():
    """Monte-Carlo smoke test: score random trios and plot the distribution.

    Prints trios with near-certain errors or near-certain consistency, then
    shows a histogram when pylab is importable. (Python 2 script.)
    """
    from random import randint
    def gen3():
        # Random log10 genotype-likelihood triple in [-7.0, 0.1].
        return [randint(-70, 1) / 10. for i in range(3)]
    ps = []
    for i in xrange(100000):
        a, b, c = gen3(), gen3(), gen3()
        # NOTE(review): mendelian_error may return None; in Python 2 the
        # None > float / None < float comparisons below silently evaluate
        # (None sorts below all numbers) instead of raising.
        ps.append(mendelian_error(a, b, c))
        if ps[-1] > 0.999999:
            print "mendelian error:", tuple(a), tuple(b), tuple(c)
        elif ps[-1] < 0.00001:
            print "expected :", tuple(a), tuple(b), tuple(c)
    try:
        import pylab as pl
        pl.hist(ps, 50)
        pl.show()
    except ImportError:
        # Plotting is optional; the textual output above still stands.
        pass
def _main():
    """Command-line entry: 'test' runs the smoke test, else annotates a VCF."""
    if len(sys.argv) > 1 and sys.argv[1] == "test":
        sys.exit(test())
    elif len(sys.argv) != 5:
        # Wrong arity: show the module docstring and usage, then exit cleanly.
        print __doc__
        print "\nUsage: %s some.vcf father_id mother_id child_id > new.vcf\n" % sys.argv[0]
        sys.exit()
    father, mother, child = sys.argv[2:]
    main(xopen(sys.argv[1]), father, mother, child)
if __name__ == "__main__":
    # Run the docstring doctests first (results go to stderr so they do not
    # pollute the VCF written to stdout), then dispatch on argv.
    import doctest
    sys.stderr.write(str(doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE, verbose=0)) + "\n")
    _main()
| |
import unittest
import cvxpy as cp
import diffcp
import numpy as np
import tensorflow as tf
from cvxpylayers.tensorflow import CvxpyLayer
def numerical_grad(f, params, param_values, delta=1e-6):
    """Central-difference numerical gradient of f w.r.t. cvxpy parameters.

    Flattens param_values into one vector, perturbs each coordinate by
    +/- delta/2, and returns a list of per-parameter gradient arrays shaped
    like the parameters.

    NOTE(review): this relies on `values[a:b].reshape(...)` being a numpy
    view so that the later writes to values[i] propagate into param.value
    read by f() — verify cvxpy does not copy on the .value assignment.
    """
    size = int(sum(np.prod(v.shape) for v in param_values))
    values = np.zeros(size)
    offset = 0
    # Load the tensor values into the flat vector and alias each cvxpy
    # parameter to its slice of that vector.
    for param, value in zip(params, param_values):
        values[offset:offset + param.size] = value.numpy().flatten()
        param.value = values[offset:offset + param.size].reshape(param.shape)
        offset += param.size
    numgrad = np.zeros(values.shape)
    for i in range(values.size):
        old = values[i]
        values[i] = old + 0.5 * delta
        left_soln = f()
        values[i] = old - 0.5 * delta
        right_soln = f()
        # Central difference with total step delta.
        numgrad[i] = (left_soln - right_soln) / delta
        values[i] = old
    # Split the flat gradient back into per-parameter arrays.
    numgrads = []
    offset = 0
    for param in params:
        numgrads.append(
            numgrad[offset:offset + param.size].reshape(param.shape))
        offset += param.size
    return numgrads
class TestCvxpyLayer(unittest.TestCase):
    """End-to-end tests for the TensorFlow CvxpyLayer wrapper.

    Most tests compare tape gradients through the layer against
    numerical_grad() applied to a direct cvxpy solve.
    """
    def test_docstring_example(self):
        """Gradients for the README least-L1 example match finite differences."""
        np.random.seed(0)
        tf.random.set_seed(0)
        n, m = 2, 3
        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        constraints = [x >= 0]
        objective = cp.Minimize(0.5 * cp.pnorm(A @ x - b, p=1))
        problem = cp.Problem(objective, constraints)
        assert problem.is_dpp()
        cvxpylayer = CvxpyLayer(problem, parameters=[A, b], variables=[x])
        A_tf = tf.Variable(tf.random.normal((m, n)))
        b_tf = tf.Variable(tf.random.normal((m,)))
        with tf.GradientTape() as tape:
            # solve the problem, setting the values of A and b to A_tf and b_tf
            solution, = cvxpylayer(A_tf, b_tf)
            summed_solution = tf.math.reduce_sum(solution)
        gradA, gradb = tape.gradient(summed_solution, [A_tf, b_tf])
        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return np.sum(x.value)
        numgradA, numgradb = numerical_grad(f, [A, b], [A_tf, b_tf])
        np.testing.assert_almost_equal(gradA, numgradA, decimal=4)
        np.testing.assert_almost_equal(gradb, numgradb, decimal=4)
    def test_simple_qp(self):
        """QP with inequality constraints: solution and gradients check out."""
        np.random.seed(0)
        tf.random.set_seed(0)
        nx, ncon = 2, 3
        G = cp.Parameter((ncon, nx), name='G')
        h = cp.Parameter(ncon, name='h')
        x = cp.Variable(nx)
        obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
        cons = [G * x <= h]
        problem = cp.Problem(obj, cons)
        cvxlayer = CvxpyLayer(problem, [G, h], [x])
        # Construct h so that x0 is feasible by a margin s0.
        x0 = tf.random.normal((nx, 1))
        s0 = tf.random.normal((ncon, 1))
        G_t = tf.random.normal((ncon, nx))
        h_t = tf.squeeze(tf.matmul(G_t, x0) + s0)
        with tf.GradientTape() as tape:
            tape.watch(G_t)
            tape.watch(h_t)
            soln = cvxlayer(G_t, h_t, solver_args={'eps': 1e-10})
            soln = {x.name(): soln[0]}
        grads = tape.gradient(soln, [G_t, h_t])
        gradG = grads[0]
        gradh = grads[1]
        # Reference: solve the same problem directly with cvxpy.
        G.value = G_t.numpy()
        h.value = h_t.numpy()
        problem.solve(solver=cp.SCS)
        self.assertEqual(len(soln.values()), len(problem.variables()))
        np.testing.assert_almost_equal(
            x.value, list(soln.values())[0], decimal=5)
        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return np.sum(x.value)
        numgradG, numgradh = numerical_grad(f, [G, h], [G_t, h_t])
        np.testing.assert_almost_equal(gradG, numgradG, decimal=3)
        np.testing.assert_almost_equal(gradh, numgradh, decimal=3)
    def test_simple_qp_with_solver_args(self):
        """Same QP as above, exercising solver_args pass-through.

        NOTE(review): currently identical to test_simple_qp line-for-line.
        """
        np.random.seed(0)
        tf.random.set_seed(0)
        nx, ncon = 2, 3
        G = cp.Parameter((ncon, nx), name='G')
        h = cp.Parameter(ncon, name='h')
        x = cp.Variable(nx)
        obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
        cons = [G * x <= h]
        problem = cp.Problem(obj, cons)
        cvxlayer = CvxpyLayer(problem, [G, h], [x])
        x0 = tf.random.normal((nx, 1))
        s0 = tf.random.normal((ncon, 1))
        G_t = tf.random.normal((ncon, nx))
        h_t = tf.squeeze(tf.matmul(G_t, x0) + s0)
        with tf.GradientTape() as tape:
            tape.watch(G_t)
            tape.watch(h_t)
            soln = cvxlayer(G_t, h_t, solver_args={'eps': 1e-10})
            soln = {x.name(): soln[0]}
        grads = tape.gradient(soln, [G_t, h_t])
        gradG = grads[0]
        gradh = grads[1]
        G.value = G_t.numpy()
        h.value = h_t.numpy()
        problem.solve(solver=cp.SCS)
        self.assertEqual(len(soln.values()), len(problem.variables()))
        np.testing.assert_almost_equal(
            x.value, list(soln.values())[0], decimal=5)
        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return np.sum(x.value)
        numgradG, numgradh = numerical_grad(f, [G, h], [G_t, h_t])
        np.testing.assert_almost_equal(gradG, numgradG, decimal=3)
        np.testing.assert_almost_equal(gradh, numgradh, decimal=3)
    def test_simple_qp_batched(self):
        """Batched parameters: per-batch solutions/gradients match solo solves."""
        np.random.seed(0)
        tf.random.set_seed(0)
        nbtch, nx, ncon = 4, 3, 2
        G = cp.Parameter((ncon, nx), name='G')
        h = cp.Parameter(ncon, name='h')
        x = cp.Variable(nx, name='x')
        obj = cp.Minimize(0.5 * cp.sum_squares(x - 1))
        cons = [G * x <= h]
        problem = cp.Problem(obj, cons)
        cvxlayer = CvxpyLayer(problem, [G, h], [x])
        x0 = tf.random.normal((nx, 1))
        s0 = tf.random.normal((ncon, 1))
        G_t = tf.random.normal((nbtch, ncon, nx))
        h_t = tf.squeeze(tf.tensordot(G_t, x0, axes=1) + s0)
        with tf.GradientTape() as tape:
            tape.watch(G_t)
            tape.watch(h_t)
            soln = cvxlayer(G_t, h_t, solver_args={'eps': 1e-10})
            soln = {x.name(): soln[0]}
        grads = tape.gradient(soln, [G_t, h_t])
        gradG = grads[0]
        gradh = grads[1]
        # Unstack the batch and validate each instance independently.
        solns = [tf.squeeze(t).numpy() for t in tf.split(soln['x'], nbtch)]
        Gs = [tf.squeeze(t) for t in tf.split(G_t, nbtch)]
        hs = [tf.squeeze(t) for t in tf.split(h_t, nbtch)]
        gradGs = [tf.squeeze(t).numpy() for t in tf.split(gradG, nbtch)]
        gradhs = [tf.squeeze(t).numpy() for t in tf.split(gradh, nbtch)]
        for soln, G_t, h_t, gG, gh in zip(solns, Gs, hs, gradGs, gradhs):
            G.value = G_t.numpy()
            h.value = h_t.numpy()
            problem.solve(solver=cp.SCS)
            np.testing.assert_almost_equal(x.value, soln, decimal=5)
            def f():
                problem.solve(solver=cp.SCS, eps=1e-10)
                return np.sum(x.value)
            numgradG, numgradh = numerical_grad(f, [G, h], [G_t, h_t])
            np.testing.assert_almost_equal(gG, numgradG, decimal=2)
            np.testing.assert_almost_equal(gh, numgradh, decimal=2)
    def test_logistic_regression(self):
        """Differentiating through an L2-regularized logistic regression fit."""
        np.random.seed(243)
        N, n = 10, 2
        def sigmoid(z):
            return 1 / (1 + np.exp(-z))
        X_np = np.random.randn(N, n)
        a_true = np.random.randn(n, 1)
        y_np = np.round(sigmoid(X_np @ a_true + np.random.randn(N, 1) * 0.5))
        X_tf = tf.Variable(X_np)
        lam_tf = tf.Variable(1.0 * tf.ones(1))
        a = cp.Variable((n, 1))
        X = cp.Parameter((N, n))
        lam = cp.Parameter(1, nonneg=True)
        y = y_np
        # Bernoulli log-likelihood written in DPP form via log_sum_exp.
        log_likelihood = cp.sum(
            cp.multiply(y, X @ a) -
            cp.log_sum_exp(cp.hstack([np.zeros((N, 1)), X @ a]).T, axis=0,
                           keepdims=True).T
        )
        prob = cp.Problem(
            cp.Minimize(-log_likelihood + lam * cp.sum_squares(a)))
        fit_logreg = CvxpyLayer(prob, [X, lam], [a])
        with tf.GradientTape(persistent=True) as tape:
            weights = fit_logreg(X_tf, lam_tf, solver_args={'eps': 1e-8})[0]
            summed = tf.math.reduce_sum(weights)
        grad_X_tf, grad_lam_tf = tape.gradient(summed, [X_tf, lam_tf])
        def f_train():
            prob.solve(solver=cp.SCS, eps=1e-8)
            return np.sum(a.value)
        numgrad_X_tf, numgrad_lam_tf = numerical_grad(
            f_train, [X, lam], [X_tf, lam_tf], delta=1e-6)
        np.testing.assert_allclose(grad_X_tf, numgrad_X_tf, atol=1e-2)
        np.testing.assert_allclose(grad_lam_tf, numgrad_lam_tf, atol=1e-2)
    def test_not_enough_parameters(self):
        """Constructor rejects a parameter list missing a problem parameter."""
        x = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        lam2 = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaisesRegex(ValueError, "The layer's parameters.*"):
            CvxpyLayer(prob, [lam], [x])  # noqa: F841
    def test_not_enough_parameters_at_call_time(self):
        """Calling the layer with too few tensors raises at call time."""
        x = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        lam2 = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1) + lam2 * cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(objective))
        layer = CvxpyLayer(prob, [lam, lam2], [x])
        with self.assertRaisesRegex(
                ValueError,
                'A tensor must be provided for each CVXPY parameter.*'):
            layer(lam)
    def test_non_dpp(self):
        """Non-DPP problems (unsigned parameter product) are rejected."""
        x = cp.Variable(1)
        y = cp.Variable(1)
        lam = cp.Parameter(1)
        objective = lam * cp.norm(x, 1)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaisesRegex(ValueError, 'Problem must be DPP.'):
            CvxpyLayer(prob, [lam], [x, y])  # noqa: F841
    def test_too_many_variables(self):
        """Variables not in the problem are rejected by the constructor."""
        x = cp.Variable(1)
        y = cp.Variable(1)
        lam = cp.Parameter(1, nonneg=True)
        objective = lam * cp.norm(x, 1)
        prob = cp.Problem(cp.Minimize(objective))
        with self.assertRaisesRegex(ValueError, 'Argument `variables`.*'):
            CvxpyLayer(prob, [lam], [x, y])  # noqa: F841
    def test_infeasible(self):
        """An infeasible problem surfaces as a diffcp.SolverError."""
        x = cp.Variable(1)
        param = cp.Parameter(1)
        prob = cp.Problem(cp.Minimize(param), [x >= 1, x <= -1])
        layer = CvxpyLayer(prob, [param], [x])
        param_tf = tf.ones(1)
        with self.assertRaises(diffcp.SolverError):
            layer(param_tf)
    def test_lml(self):
        """Limited multi-label projection layer gradient check."""
        tf.random.set_seed(0)
        k = 2
        x = cp.Parameter(4)
        y = cp.Variable(4)
        obj = -x * y - cp.sum(cp.entr(y)) - cp.sum(cp.entr(1. - y))
        cons = [cp.sum(y) == k]
        problem = cp.Problem(cp.Minimize(obj), cons)
        lml = CvxpyLayer(problem, [x], [y])
        x_tf = tf.Variable([1., -1., -1., -1.], dtype=tf.float64)
        with tf.GradientTape() as tape:
            y_opt = lml(x_tf, solver_args={'eps': 1e-10})[0]
            loss = -tf.math.log(y_opt[1])
        def f():
            problem.solve(solver=cp.SCS, eps=1e-10)
            return -np.log(y.value[1])
        grad = tape.gradient(loss, [x_tf])
        numgrad = numerical_grad(f, [x], [x_tf])
        np.testing.assert_almost_equal(grad, numgrad, decimal=3)
    def test_sdp(self):
        """Semidefinite program with trace constraints: gradient check."""
        tf.random.set_seed(5)
        n = 3
        p = 3
        C = cp.Parameter((n, n))
        A = [cp.Parameter((n, n)) for _ in range(p)]
        b = [cp.Parameter((1, 1)) for _ in range(p)]
        C_tf = tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
        A_tf = [tf.Variable(tf.random.normal((n, n), dtype=tf.float64))
                for _ in range(p)]
        b_tf = [tf.Variable(tf.random.normal((1, 1), dtype=tf.float64))
                for _ in range(p)]
        X = cp.Variable((n, n), symmetric=True)
        constraints = [X >> 0]
        constraints += [
            cp.trace(A[i]@X) == b[i] for i in range(p)
        ]
        problem = cp.Problem(cp.Minimize(
            cp.trace(C @ X) - cp.log_det(X) + cp.sum_squares(X)),
            constraints)
        layer = CvxpyLayer(problem, [C] + A + b, [X])
        values = [C_tf] + A_tf + b_tf
        with tf.GradientTape() as tape:
            soln = layer(*values,
                         solver_args={'eps': 1e-10, 'max_iters': 10000})[0]
            summed = tf.math.reduce_sum(soln)
        grads = tape.gradient(summed, values)
        def f():
            problem.solve(cp.SCS, eps=1e-10, max_iters=10000)
            return np.sum(X.value)
        numgrads = numerical_grad(f, [C] + A + b, values, delta=1e-4)
        for g, ng in zip(grads, numgrads):
            np.testing.assert_allclose(g, ng, atol=1e-1)
    def test_basic_gp(self):
        """Geometric program (gp=True) gradients match finite differences."""
        tf.random.set_seed(243)
        x = cp.Variable(pos=True)
        y = cp.Variable(pos=True)
        z = cp.Variable(pos=True)
        a = cp.Parameter(pos=True, value=2.0)
        b = cp.Parameter(pos=True, value=1.0)
        c = cp.Parameter(value=0.5)
        objective_fn = 1/(x*y*z)
        constraints = [a*(x*y + x*z + y*z) <= b, x >= y**c]
        problem = cp.Problem(cp.Minimize(objective_fn), constraints)
        problem.solve(cp.SCS, gp=True, eps=1e-12)
        layer = CvxpyLayer(
            problem, parameters=[a, b, c], variables=[x, y, z], gp=True)
        a_tf = tf.Variable(2.0, dtype=tf.float64)
        b_tf = tf.Variable(1.0, dtype=tf.float64)
        c_tf = tf.Variable(0.5, dtype=tf.float64)
        with tf.GradientTape() as tape:
            x_tf, y_tf, z_tf = layer(a_tf, b_tf, c_tf)
            summed = x_tf + y_tf + z_tf
        grads = tape.gradient(summed, [a_tf, b_tf, c_tf])
        def f():
            problem.solve(cp.SCS, eps=1e-12, max_iters=10000, gp=True)
            return x.value + y.value + z.value
        numgrads = numerical_grad(f, [a, b, c], [a_tf, b_tf, c_tf])
        for g, ng in zip(grads, numgrads):
            np.testing.assert_allclose(g, ng, atol=1e-2)
    def test_broadcasting(self):
        """A single A broadcast over a batched b matches tf.linalg.lstsq."""
        tf.random.set_seed(243)
        n_batch, m, n = 2, 500, 20
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        x = cp.Variable(n)
        obj = cp.sum_squares(A@x - b) + cp.sum_squares(x)
        prob = cp.Problem(cp.Minimize(obj))
        prob_tf = CvxpyLayer(prob, [A, b], [x])
        A_tf = tf.Variable(tf.random.normal((m, n), dtype=tf.float64))
        b_tf = tf.random.normal([m], dtype=tf.float64)
        # Batch is n_batch copies of the same right-hand side.
        b_tf = tf.Variable(tf.stack([b_tf for _ in range(n_batch)]))
        b_tf_0 = tf.Variable(b_tf[0])
        with tf.GradientTape() as tape:
            x = prob_tf(A_tf, b_tf, solver_args={"eps": 1e-12})[0]
        grad_A_cvxpy, grad_b_cvxpy = tape.gradient(x, [A_tf, b_tf])
        with tf.GradientTape() as tape:
            x_lstsq = tf.linalg.lstsq(A_tf, tf.expand_dims(b_tf_0, 1))
        grad_A_lstsq, grad_b_lstsq = tape.gradient(x_lstsq, [A_tf, b_tf_0])
        grad_A_lstsq = tf.cast(grad_A_lstsq, tf.float64)
        grad_b_lstsq = tf.cast(grad_b_lstsq, tf.float64)
        # A's gradient accumulates over the batch; divide to compare.
        self.assertAlmostEqual(
            tf.linalg.norm(grad_A_cvxpy / n_batch - grad_A_lstsq).numpy(),
            0.0, places=2)
        self.assertAlmostEqual(
            tf.linalg.norm(grad_b_cvxpy[0] - grad_b_lstsq).numpy(), 0.0,
            places=2)
# Standard unittest entry point when run as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile # noqa
from django.core.urlresolvers import reverse
from django.forms.widgets import HiddenInput # noqa
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.TestCase):
    """Tests for the Glance image-creation form (mox-stubbed API calls)."""
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_no_location_or_file(self):
        """Form is invalid when source_type is 'file' but no file is given."""
        # The form populates kernel/ramdisk choices, so stub both listings.
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        self.mox.ReplayAll()
        post = {
            'name': u'Ubuntu 11.10',
            'source_type': u'file',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': 1}
        files = {}
        form = forms.CreateImageForm(post, files)
        self.assertEqual(form.is_valid(), False)
    @override_settings(HORIZON_IMAGES_ALLOW_UPLOAD=False)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_image_upload_disabled(self):
        """With uploads disabled, the file field is hidden and 'file' removed."""
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        self.mox.ReplayAll()
        form = forms.CreateImageForm({})
        self.assertEqual(
            isinstance(form.fields['image_file'].widget, HiddenInput), True)
        source_type_dict = dict(form.fields['source_type'].choices)
        self.assertNotIn('file', source_type_dict)
    def test_create_image_metadata_docker(self):
        """Docker images are stored as raw disk format in a docker container."""
        form_data = {
            'name': u'Docker image',
            'description': u'Docker image test',
            'source_type': u'url',
            'image_url': u'/',
            'disk_format': u'docker',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'is_copying': False
        }
        meta = forms.create_image_metadata(form_data)
        self.assertEqual(meta['disk_format'], 'raw')
        self.assertEqual(meta['container_format'], 'docker')
        self.assertIn('properties', meta)
        # description/architecture move into the properties dict.
        self.assertNotIn('description', meta)
        self.assertNotIn('architecture', meta)
        self.assertEqual(meta['properties']['description'],
                         form_data['description'])
        self.assertEqual(meta['properties']['architecture'],
                         form_data['architecture'])
class UpdateImageFormTests(test.TestCase):
    """Tests for the glance image update form and view."""

    def test_is_format_field_editable(self):
        # the disk format field must not be rendered read-only
        form = forms.UpdateImageForm({})
        disk_format = form.fields['disk_format']
        self.assertFalse(disk_format.widget.attrs.get('readonly', False))

    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_update(self):
        """GET on the update view pre-populates from the glance image."""
        image = self.images.first()
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()

        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.get(url)
        self.assertNoFormErrors(res)
        self.assertEqual(res.context['image'].disk_format,
                         image.disk_format)

    @test.create_stubs({api.glance: ('image_update', 'image_get')})
    def test_image_update_post(self):
        """POST forwards the edited fields to glance.image_update."""
        image = self.images.first()
        data = {
            'name': u'Ubuntu 11.10',
            'image_id': str(image.id),
            'description': u'Login with admin/admin',
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'method': 'UpdateImageForm'}
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        # the expected glance call: note container_format is forced to
        # "bare" and free-form fields are nested under properties
        api.glance.image_update(IsA(http.HttpRequest),
                                image.id,
                                is_public=data['is_public'],
                                protected=data['protected'],
                                disk_format=data['disk_format'],
                                container_format="bare",
                                name=data['name'],
                                min_ram=data['minimum_ram'],
                                min_disk=data['minimum_disk'],
                                properties={'description': data['description'],
                                            'architecture':
                                            data['architecture']},
                                purge_props=False).AndReturn(image)
        self.mox.ReplayAll()

        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        # a successful update redirects away from the form
        self.assertEqual(res.status_code, 302)
class ImageViewTests(test.TestCase):
    """Tests for the image create, detail and update views."""

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_image_create_get(self):
        """GET on the create view renders the create template."""
        # kernel (aki) and ramdisk (ari) choices are fetched for the form
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
                [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
                [self.images.list(), False, False])
        self.mox.ReplayAll()

        url = reverse('horizon:project:images:images:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'project/images/images/create.html')

    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_copy_from(self):
        """is_copying=True maps the image URL to glance's 'copy_from'."""
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': True}
        api_data = {'copy_from': data['image_url']}
        self._test_image_create(data, api_data)

    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_location(self):
        """is_copying=False maps the image URL to glance's 'location'."""
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': False}
        api_data = {'location': data['image_url']}
        self._test_image_create(data, api_data)

    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_upload(self):
        """An uploaded file reaches glance as the 'data' argument."""
        temp_file = tempfile.TemporaryFile()
        temp_file.write('123')
        temp_file.flush()
        temp_file.seek(0)

        data = {'source_type': u'file',
                'image_file': temp_file}
        api_data = {'data': IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_with_kernel_ramdisk(self):
        """Kernel/ramdisk ids may accompany an uploaded image."""
        temp_file = tempfile.TemporaryFile()
        temp_file.write('123')
        temp_file.flush()
        temp_file.seek(0)

        data = {
            'source_type': u'file',
            'image_file': temp_file,
            'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
            'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
        }
        api_data = {'data': IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def _test_image_create(self, extra_form_data, extra_api_data):
        """Shared driver for the create POST tests.

        Callers stub api.glance.image_create; this helper merges the
        extra form/API data, records the expected glance calls, posts
        the create form and asserts a redirect.
        """
        data = {
            'name': u'Ubuntu 11.10',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': True,
            'protected': False,
            'method': 'CreateImageForm'}
        data.update(extra_form_data)
        api_data = {'container_format': 'bare',
                    'disk_format': data['disk_format'],
                    'is_public': True,
                    'protected': False,
                    'min_disk': data['minimum_disk'],
                    'min_ram': data['minimum_ram'],
                    'properties': {
                        'description': data['description'],
                        'architecture': data['architecture']},
                    'name': data['name']}
        api_data.update(extra_api_data)

        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
                [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
                [self.images.list(), False, False])
        api.glance.image_create(
            IsA(http.HttpRequest),
            **api_data).AndReturn(self.images.first())
        self.mox.ReplayAll()

        url = reverse('horizon:project:images:images:create')
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        self.assertEqual(res.status_code, 302)

    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_get(self):
        """Detail view exposes the image and renders its title once."""
        image = self.images.first()
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(self.images.first())
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        self.assertTemplateUsed(res,
                                'project/images/images/detail.html')
        self.assertEqual(res.context['image'].name, image.name)
        self.assertEqual(res.context['image'].protected, image.protected)
        self.assertContains(res, "<h1>Image Details: %s</h1>" % image.name,
                            1, 200)

    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_custom_props_get(self):
        """Custom properties are sorted and rendered; 'description' is
        excluded from the property list."""
        # images.list()[8] is a fixture with 'bar'/'foo' custom props
        image = self.images.list()[8]
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()

        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        image_props = res.context['image_props']

        # Test description property not displayed
        image_keys = [prop[0] for prop in image_props]
        self.assertNotIn(('description'), image_keys)

        # Test custom properties are sorted
        self.assertEqual(image_props[0], ('bar', 'bar', 'bar val'))
        self.assertEqual(image_props[1], ('foo', 'foo', 'foo val'))

        # Test all custom properties appear in template
        self.assertContains(res, '<dt title="bar">bar</dt>')
        self.assertContains(res, '<dd>bar val</dd>')
        self.assertContains(res, '<dt title="foo">foo</dt>')
        self.assertContains(res, '<dd>foo val</dd>')

    @test.create_stubs({api.glance: ('image_get',)})
    def test_protected_image_detail_get(self):
        """A protected image renders with its protected flag intact."""
        # images.list()[2] is a protected image fixture
        image = self.images.list()[2]
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse('horizon:project:images:images:detail',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'project/images/images/detail.html')
        self.assertEqual(res.context['image'].protected, image.protected)

    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_get_with_exception(self):
        """A glance error on the detail view redirects to the index."""
        image = self.images.first()

        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndRaise(self.exceptions.glance)
        self.mox.ReplayAll()

        url = reverse('horizon:project:images:images:detail',
                      args=[image.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)

    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_update_get(self):
        """Update view GET renders with the is_public checkbox checked."""
        image = self.images.first()
        image.disk_format = "ami"
        image.is_public = True
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse('horizon:project:images:images:update',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'project/images/images/_update.html')
        self.assertEqual(res.context['image'].name, image.name)
        # Bug 1076216 - is_public checkbox not being set correctly
        self.assertContains(res, "<input type='checkbox' id='id_public'"
                                 " name='public' checked='checked'>",
                            html=True,
                            msg_prefix="The is_public checkbox is not checked")
class OwnerFilterTests(test.TestCase):
    """Tests for the owner-based fixed filter on the images table."""

    def setUp(self):
        super(OwnerFilterTests, self).setUp()
        self.table = self.mox.CreateMock(horizon_tables.DataTable)
        self.table.request = self.request

    @override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
                                                    'tenant': 'officialtenant',
                                                    'icon': 'fa-check'}])
    def test_filter(self):
        """OwnerFilter matches the reference semantics in _expected()."""
        self.mox.ReplayAll()
        all_images = self.images.list()
        table = self.table
        # NOTE(review): _expected() reads self.filter_tenants, which is
        # only assigned here -- confirm before adding further tests.
        self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS

        filter_ = tables.OwnerFilter()

        images = filter_.filter(table, all_images, 'project')
        self.assertEqual(images, self._expected('project'))

        images = filter_.filter(table, all_images, 'public')
        self.assertEqual(images, self._expected('public'))

        images = filter_.filter(table, all_images, 'shared')
        self.assertEqual(images, self._expected('shared'))

        images = filter_.filter(table, all_images, 'officialtenant')
        self.assertEqual(images, self._expected('officialtenant'))

    def _expected(self, filter_string):
        """Reference implementation of the owner-filter semantics."""
        my_tenant_id = self.request.user.tenant_id
        images = self.images.list()
        # tenants configured via IMAGES_LIST_FILTER_TENANTS
        special = map(lambda t: t['tenant'], self.filter_tenants)

        if filter_string == 'public':
            return filter(lambda im: im.is_public, images)
        if filter_string == 'shared':
            # shared = not public, not mine, not a configured tenant's
            return filter(lambda im: (not im.is_public and
                                      im.owner != my_tenant_id and
                                      im.owner not in special),
                          images)
        if filter_string == 'project':
            filter_string = my_tenant_id
        # fall-through: images owned by the named tenant (or my project)
        return filter(lambda im: im.owner == filter_string, images)
| |
# orm/unitofwork.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from sqlalchemy import util, event
from sqlalchemy.util import topological
from sqlalchemy.orm import attributes, interfaces
from sqlalchemy.orm import util as mapperutil
# Lazily import the session module to avoid a circular import between
# unitofwork and session at module load time.
session = util.importlater("sqlalchemy.orm", "session")
def track_cascade_events(descriptor, prop):
    """Establish event listeners on object attributes which handle
    cascade-on-set/append.

    :param descriptor: the instrumented attribute to listen on.
    :param prop: the relationship property whose cascade rules apply.
    """
    key = prop.key

    def append(state, item, initiator):
        # process "save_update" cascade rules for when
        # an instance is appended to the list of another instance
        sess = session._state_session(state)
        if sess:
            # look the property up on this instance's own mapper
            prop = state.manager.mapper._props[key]
            item_state = attributes.instance_state(item)
            if prop.cascade.save_update and \
                    (prop.cascade_backrefs or key == initiator.key) and \
                    not sess._contains_state(item_state):
                sess._save_or_update_state(item_state)
        return item

    def remove(state, item, initiator):
        sess = session._state_session(state)
        if sess:
            prop = state.manager.mapper._props[key]
            # expunge pending orphans
            item_state = attributes.instance_state(item)
            if prop.cascade.delete_orphan and \
                    item_state in sess._new and \
                    prop.mapper._is_orphan(item_state):
                sess.expunge(item)

    def set_(state, newvalue, oldvalue, initiator):
        # process "save_update" cascade rules for when an instance
        # is attached to another instance
        if oldvalue is newvalue:
            return newvalue

        sess = session._state_session(state)
        if sess:
            prop = state.manager.mapper._props[key]
            if newvalue is not None:
                newvalue_state = attributes.instance_state(newvalue)
                if prop.cascade.save_update and \
                        (prop.cascade_backrefs or key == initiator.key) and \
                        not sess._contains_state(newvalue_state):
                    sess._save_or_update_state(newvalue_state)

            # the replaced value may now be a pending orphan
            if oldvalue is not None and prop.cascade.delete_orphan:
                oldvalue_state = attributes.instance_state(oldvalue)
                if oldvalue_state in sess._new and \
                        prop.mapper._is_orphan(oldvalue_state):
                    sess.expunge(oldvalue)
        return newvalue

    # raw=True: listeners receive InstanceState objects;
    # retval=True: the listener's return value replaces the operand.
    event.listen(descriptor, 'append', append, raw=True, retval=True)
    event.listen(descriptor, 'remove', remove, raw=True, retval=True)
    event.listen(descriptor, 'set', set_, raw=True, retval=True)
class UOWTransaction(object):
    """Tracks the full set of objects, mappers and dependency actions
    involved in a single session flush, and executes them in
    dependency order."""

    def __init__(self, session):
        self.session = session

        # dictionary used by external actors to
        # store arbitrary state information.
        self.attributes = {}

        # dictionary of mappers to sets of
        # DependencyProcessors, which are also
        # set to be part of the sorted flush actions,
        # which have that mapper as a parent.
        self.deps = util.defaultdict(set)

        # dictionary of mappers to sets of InstanceState
        # items pending for flush which have that mapper
        # as a parent.
        self.mappers = util.defaultdict(set)

        # a dictionary of Preprocess objects, which gather
        # additional states impacted by the flush
        # and determine if a flush action is needed
        self.presort_actions = {}

        # dictionary of PostSortRec objects, each
        # one issues work during the flush within
        # a certain ordering.
        self.postsort_actions = {}

        # a set of 2-tuples, each containing two
        # PostSortRec objects where the second
        # is dependent on the first being executed
        # first
        self.dependencies = set()

        # dictionary of InstanceState-> (isdelete, listonly)
        # tuples, indicating if this state is to be deleted
        # or insert/updated, or just refreshed
        self.states = {}

        # tracks InstanceStates which will be receiving
        # a "post update" call.  Keys are mappers,
        # values are a set of states and a set of the
        # columns which should be included in the update.
        self.post_update_states = util.defaultdict(lambda: (set(), set()))

    @property
    def has_work(self):
        # True once any state has been registered for this flush.
        return bool(self.states)

    def is_deleted(self, state):
        """return true if the given state is marked as deleted
        within this uowtransaction."""
        return state in self.states and self.states[state][0]

    def memo(self, key, callable_):
        """Return the value cached in self.attributes under *key*,
        computing and storing it via *callable_* on first access."""
        if key in self.attributes:
            return self.attributes[key]
        else:
            self.attributes[key] = ret = callable_()
            return ret

    def remove_state_actions(self, state):
        """remove pending actions for a state from the uowtransaction."""
        isdelete = self.states[state][0]
        # keep the delete flag but mark the state "listonly", so no
        # insert/update/delete action will select it
        self.states[state] = (isdelete, True)

    def get_attribute_history(self, state, key,
                              passive=attributes.PASSIVE_NO_INITIALIZE):
        """facade to attributes.get_state_history(), including caching of results."""
        hashkey = ("history", state, key)

        # cache the objects, not the states; the strong reference here
        # prevents newly loaded objects from being dereferenced during the
        # flush process
        if hashkey in self.attributes:
            history, state_history, cached_passive = self.attributes[hashkey]
            # if the cached lookup was "passive" and now
            # we want non-passive, do a non-passive lookup and re-cache
            if cached_passive is not attributes.PASSIVE_OFF \
                and passive is attributes.PASSIVE_OFF:
                impl = state.manager[key].impl
                history = impl.get_history(state, state.dict,
                                           attributes.PASSIVE_OFF)
                if history and impl.uses_objects:
                    state_history = history.as_state()
                else:
                    state_history = history
                self.attributes[hashkey] = (history, state_history, passive)
        else:
            impl = state.manager[key].impl
            # TODO: store the history as (state, object) tuples
            # so we don't have to keep converting here
            history = impl.get_history(state, state.dict, passive)
            if history and impl.uses_objects:
                state_history = history.as_state()
            else:
                state_history = history
            self.attributes[hashkey] = (history, state_history, passive)

        return state_history

    def has_dep(self, processor):
        # True if *processor* was registered parent-oriented
        # (fromparent=True) via register_preprocessor().
        return (processor, True) in self.presort_actions

    def register_preprocessor(self, processor, fromparent):
        """Register a Preprocess action for *processor*, keyed on
        (processor, fromparent) so duplicate registrations collapse."""
        key = (processor, fromparent)
        if key not in self.presort_actions:
            self.presort_actions[key] = Preprocess(processor, fromparent)

    def register_object(self, state, isdelete=False,
                        listonly=False, cancel_delete=False,
                        operation=None, prop=None):
        """Add *state* to the flush with the given flags.

        Returns False (warning if *operation* is given) when the
        object is not present in the session; True otherwise.
        """
        if not self.session._contains_state(state):
            if not state.deleted and operation is not None:
                util.warn("Object of type %s not in session, %s operation "
                          "along '%s' will not proceed" %
                          (mapperutil.state_class_str(state), operation, prop))
            return False

        if state not in self.states:
            mapper = state.manager.mapper

            # first state seen for this mapper: let the mapper
            # install its per-mapper flush actions
            if mapper not in self.mappers:
                mapper._per_mapper_flush_actions(self)

            self.mappers[mapper].add(state)
            self.states[state] = (isdelete, listonly)
        else:
            # upgrade an existing listonly registration to active
            if not listonly and (isdelete or cancel_delete):
                self.states[state] = (isdelete, False)
        return True

    def issue_post_update(self, state, post_update_cols):
        """Mark *state* to receive a "post update" including the
        given columns."""
        mapper = state.manager.mapper.base_mapper
        states, cols = self.post_update_states[mapper]
        states.add(state)
        cols.update(post_update_cols)

    @util.memoized_property
    def _mapper_for_dep(self):
        """return a dynamic mapping of (Mapper, DependencyProcessor) to
        True or False, indicating if the DependencyProcessor operates
        on objects of that Mapper.

        The result is stored in the dictionary persistently once
        calculated.
        """
        return util.PopulateDict(
            lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop
        )

    def filter_states_for_dep(self, dep, states):
        """Filter the given list of InstanceStates to those relevant to the
        given DependencyProcessor.
        """
        mapper_for_dep = self._mapper_for_dep
        return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]

    def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
        """Yield registered states throughout *mapper*'s hierarchy whose
        (isdelete, listonly) flags match exactly."""
        checktup = (isdelete, listonly)
        for mapper in mapper.base_mapper.self_and_descendants:
            for state in self.mappers[mapper]:
                if self.states[state] == checktup:
                    yield state

    def _generate_actions(self):
        """Generate the full, unsorted collection of PostSortRecs as
        well as dependency pairs for this UOWTransaction.
        """
        # execute presort_actions, until all states
        # have been processed. a presort_action might
        # add new states to the uow.
        while True:
            ret = False
            for action in list(self.presort_actions.values()):
                if action.execute(self):
                    ret = True
            if not ret:
                break

        # see if the graph of mapper dependencies has cycles.
        self.cycles = cycles = topological.find_cycles(
            self.dependencies,
            self.postsort_actions.values())

        if cycles:
            # if yes, break the per-mapper actions into
            # per-state actions
            convert = dict(
                (rec, set(rec.per_state_flush_actions(self)))
                for rec in cycles
            )

            # rewrite the existing dependencies to point to
            # the per-state actions for those per-mapper actions
            # that were broken up.
            for edge in list(self.dependencies):
                if None in edge or \
                        edge[0].disabled or edge[1].disabled or \
                        cycles.issuperset(edge):
                    self.dependencies.remove(edge)
                elif edge[0] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[0]]:
                        self.dependencies.add((dep, edge[1]))
                elif edge[1] in cycles:
                    self.dependencies.remove(edge)
                    for dep in convert[edge[1]]:
                        self.dependencies.add((edge[0], dep))

        # non-disabled records, minus those replaced by per-state actions
        return set([a for a in self.postsort_actions.values()
                    if not a.disabled
                    ]
                    ).difference(cycles)

    def execute(self):
        """Run all flush actions in dependency order."""
        postsort_actions = self._generate_actions()
        #sort = topological.sort(self.dependencies, postsort_actions)
        #print "--------------"
        #print self.dependencies
        #print list(sort)
        #print "COUNT OF POSTSORT ACTIONS", len(postsort_actions)

        # execute
        if self.cycles:
            # with cycles, execute in sorted subsets so that each record
            # may aggregate compatible peers from its subset
            for set_ in topological.sort_as_subsets(
                    self.dependencies,
                    postsort_actions):
                while set_:
                    n = set_.pop()
                    n.execute_aggregate(self, set_)
        else:
            for rec in topological.sort(
                    self.dependencies,
                    postsort_actions):
                rec.execute(self)

    def finalize_flush_changes(self):
        """mark processed objects as clean / deleted after a successful flush().

        this method is called within the flush() method after the
        execute() method has succeeded and the transaction has been committed.
        """
        for state, (isdelete, listonly) in self.states.iteritems():
            if isdelete:
                self.session._remove_newly_deleted(state)
            else:
                # if listonly:
                #   debug... would like to see how many do this
                self.session._register_newly_persistent(state)
class IterateMappersMixin(object):
    """Mixin supplying iteration over the mappers relevant to a
    record's DependencyProcessor."""

    def _mappers(self, uow):
        """Return an iterator of mappers this record applies to."""
        dep = self.dependency_processor
        if not self.fromparent:
            # child-oriented: the full target-mapper hierarchy
            return dep.mapper.self_and_descendants
        # parent-oriented: restrict the parent hierarchy to mappers
        # the processor actually operates on
        relevant = uow._mapper_for_dep
        return iter(
            mapper
            for mapper in dep.parent.self_and_descendants
            if relevant[(mapper, dep)]
        )
class Preprocess(IterateMappersMixin):
    """Presort action for one DependencyProcessor.

    Gathers flush-bound states, hands them to the processor's presort
    hooks (which may pull more states into the flush), and registers
    the processor's per-property flush actions when changes exist."""

    def __init__(self, dependency_processor, fromparent):
        self.dependency_processor = dependency_processor
        self.fromparent = fromparent
        # states already handed to the presort hooks
        self.processed = set()
        # True once per_property_flush_actions() has been registered
        self.setup_flush_actions = False

    def execute(self, uow):
        """Process any not-yet-seen states.

        Returns True when new work was performed; the caller loops
        until every Preprocess action returns False."""
        delete_states = set()
        save_states = set()

        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper].difference(self.processed):
                (isdelete, listonly) = uow.states[state]
                if not listonly:
                    if isdelete:
                        delete_states.add(state)
                    else:
                        save_states.add(state)

        if delete_states:
            self.dependency_processor.presort_deletes(uow, delete_states)
            self.processed.update(delete_states)
        if save_states:
            self.dependency_processor.presort_saves(uow, save_states)
            self.processed.update(save_states)

        if (delete_states or save_states):
            # register flush actions only once, and only when the
            # processor reports real changes along this property
            if not self.setup_flush_actions and (
                    self.dependency_processor.\
                        prop_has_changes(uow, delete_states, True) or
                    self.dependency_processor.\
                        prop_has_changes(uow, save_states, False)
                ):
                self.dependency_processor.per_property_flush_actions(uow)
                self.setup_flush_actions = True
            return True
        else:
            return False
class PostSortRec(object):
    """Base class for flush actions executed in the sorted phase.

    Records are interned per UOWTransaction: constructing one with the
    same class and arguments returns the already-registered instance.
    """

    # when True, the record is skipped by action generation
    disabled = False

    def __new__(cls, uow, *args):
        key = (cls, ) + args
        try:
            # reuse the record already registered under this key
            return uow.postsort_actions[key]
        except KeyError:
            rec = object.__new__(cls)
            uow.postsort_actions[key] = rec
            return rec

    def execute_aggregate(self, uow, recs):
        """Execute this record; subclasses may batch peers from *recs*."""
        self.execute(uow)

    def __repr__(self):
        attr_summary = ",".join(str(value)
                                for value in self.__dict__.values())
        return "%s(%s)" % (self.__class__.__name__, attr_summary)
class ProcessAll(IterateMappersMixin, PostSortRec):
    """Flush action running a DependencyProcessor's
    process_saves/process_deletes over all applicable states."""

    def __init__(self, uow, dependency_processor, delete, fromparent):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.fromparent = fromparent
        # record the processor under its parent's base mapper so the
        # per-state breakup (SaveUpdateAll/DeleteAll) can locate it
        uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor)

    def execute(self, uow):
        states = self._elements(uow)
        if self.delete:
            self.dependency_processor.process_deletes(uow, states)
        else:
            self.dependency_processor.process_saves(uow, states)

    def per_state_flush_actions(self, uow):
        # this is handled by SaveUpdateAll and DeleteAll,
        # since a ProcessAll should unconditionally be pulled
        # into per-state if either the parent/child mappers
        # are part of a cycle
        return iter([])

    def __repr__(self):
        return "%s(%s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            self.delete
        )

    def _elements(self, uow):
        # yield non-listonly states whose delete flag matches this
        # record's phase
        for mapper in self._mappers(uow):
            for state in uow.mappers[mapper]:
                (isdelete, listonly) = uow.states[state]
                if isdelete == self.delete and not listonly:
                    yield state
class IssuePostUpdate(PostSortRec):
    """Emit a mapper-level "post update" for states registered via
    UOWTransaction.issue_post_update()."""

    def __init__(self, uow, mapper, isdelete):
        self.mapper = mapper
        self.isdelete = isdelete

    def execute(self, uow):
        pending, cols = uow.post_update_states[self.mapper]
        # restrict to states whose delete flag matches this phase
        matching = [state for state in pending
                    if uow.states[state][0] == self.isdelete]
        self.mapper._post_update(matching, uow, cols)
class SaveUpdateAll(PostSortRec):
    """Flush action which INSERTs/UPDATEs all pending non-delete,
    non-listonly states for one base mapper's hierarchy."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        # only created for base mappers; descendants are covered via
        # states_for_mapper_hierarchy()
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        self.mapper._save_obj(
            uow.states_for_mapper_hierarchy(self.mapper, False, False),
            uow
        )

    def per_state_flush_actions(self, uow):
        # break this mapper-level action into per-state records
        # (cycle resolution), including those of dependent processors
        states = list(uow.states_for_mapper_hierarchy(self.mapper, False, False))
        for rec in self.mapper._per_state_flush_actions(
                uow,
                states,
                False):
            yield rec

        for dep in uow.deps[self.mapper]:
            states_for_prop = uow.filter_states_for_dep(dep, states)
            dep.per_state_flush_actions(uow, states_for_prop, False)
class DeleteAll(PostSortRec):
    """Flush action which DELETEs all pending delete-flagged,
    non-listonly states for one base mapper's hierarchy."""

    def __init__(self, uow, mapper):
        self.mapper = mapper
        # only created for base mappers; descendants are covered via
        # states_for_mapper_hierarchy()
        assert mapper is mapper.base_mapper

    def execute(self, uow):
        self.mapper._delete_obj(
            uow.states_for_mapper_hierarchy(self.mapper, True, False),
            uow
        )

    def per_state_flush_actions(self, uow):
        # break this mapper-level action into per-state records
        # (cycle resolution), including those of dependent processors
        states = list(uow.states_for_mapper_hierarchy(self.mapper, True, False))
        for rec in self.mapper._per_state_flush_actions(
                uow,
                states,
                True):
            yield rec

        for dep in uow.deps[self.mapper]:
            states_for_prop = uow.filter_states_for_dep(dep, states)
            dep.per_state_flush_actions(uow, states_for_prop, True)
class ProcessState(PostSortRec):
    """Per-state version of ProcessAll, created when mapper-level
    actions are broken up to resolve a dependency cycle."""

    def __init__(self, uow, dependency_processor, delete, state):
        self.dependency_processor = dependency_processor
        self.delete = delete
        self.state = state

    def execute_aggregate(self, uow, recs):
        # batch every compatible ProcessState still in *recs* into a
        # single process_saves/process_deletes call
        cls_ = self.__class__
        dependency_processor = self.dependency_processor
        delete = self.delete
        our_recs = [r for r in recs
                    if r.__class__ is cls_ and
                    r.dependency_processor is dependency_processor and
                    r.delete is delete]
        recs.difference_update(our_recs)
        states = [self.state] + [r.state for r in our_recs]
        if delete:
            dependency_processor.process_deletes(uow, states)
        else:
            dependency_processor.process_saves(uow, states)

    def __repr__(self):
        return "%s(%s, %s, delete=%s)" % (
            self.__class__.__name__,
            self.dependency_processor,
            mapperutil.state_str(self.state),
            self.delete
        )
class SaveUpdateState(PostSortRec):
    """Per-state version of SaveUpdateAll (cycle resolution)."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # batch compatible records from *recs* into one _save_obj() call
        cls_ = self.__class__
        mapper = self.mapper
        our_recs = [r for r in recs
                    if r.__class__ is cls_ and
                    r.mapper is mapper]
        recs.difference_update(our_recs)
        mapper._save_obj(
            [self.state] +
            [r.state for r in our_recs],
            uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
class DeleteState(PostSortRec):
    """Per-state version of DeleteAll (cycle resolution)."""

    def __init__(self, uow, state, mapper):
        self.state = state
        self.mapper = mapper

    def execute_aggregate(self, uow, recs):
        # batch compatible records; only states still flagged isdelete
        # are actually passed to _delete_obj()
        cls_ = self.__class__
        mapper = self.mapper
        our_recs = [r for r in recs
                    if r.__class__ is cls_ and
                    r.mapper is mapper]
        recs.difference_update(our_recs)
        states = [self.state] + [r.state for r in our_recs]
        mapper._delete_obj(
            [s for s in states if uow.states[s][0]],
            uow)

    def __repr__(self):
        return "%s(%s)" % (
            self.__class__.__name__,
            mapperutil.state_str(self.state)
        )
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.services.protocols.bgp.base import Activity
from ryu.lib import hub
from ryu.lib.packet import bmp
from ryu.lib.packet import bgp
from ryu.services.protocols.bgp import constants as const
import socket
import logging
from calendar import timegm
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
# Module-level logger for the BGP Monitoring Protocol (BMP) client.
LOG = logging.getLogger('bgpspeaker.bmp')
class BMPClient(Activity):
"""A BMP client.
Try to establish BMP session between a configured BMP server.
If BMP session is established, transfer information about peers
(e.g. received and sent open msgs, contents of adj-rib-in, other stats)
"""
def __init__(self, core_service, host, port):
super(BMPClient, self).__init__(name='BMPClient(%s:%s)' % (host, port))
self._core_service = core_service
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_RIB_IN_CHANGED,
lambda _, data: self.on_adj_rib_in_changed(data)
)
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_UP,
lambda _, data: self.on_adj_up(data)
)
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_DOWN,
lambda _, data: self.on_adj_down(data)
)
self._socket = None
self.server_address = (host, port)
self._connect_retry_event = hub.Event()
self._connect_retry_time = 5
def _run(self):
self._connect_retry_event.set()
while True:
self._connect_retry_event.wait()
try:
self._connect_retry_event.clear()
self._connect_tcp(self.server_address,
self._handle_bmp_session)
except socket.error:
self._connect_retry_event.set()
LOG.info('Will try to reconnect to %s after %s secs: %s' %
(self.server_address, self._connect_retry_time,
self._connect_retry_event.is_set()))
self.pause(self._connect_retry_time)
def _send(self, msg):
if not self._socket:
return
assert isinstance(msg, bmp.BMPMessage)
serialized_msg = msg.serialize()
ret = self._socket.send(msg.serialize())
def on_adj_rib_in_changed(self, data):
peer = data['peer']
path = data['received_route']
update_msg = peer._construct_update(path)
msg = self._construct_route_monitoring(peer, path)
self._send(msg)
def on_adj_up(self, data):
peer = data['peer']
msg = self._construct_peer_up_notification(peer)
self._send(msg)
for path in peer._adj_rib_in.itervalues():
update_msg = peer._construct_update(path)
msg = self._construct_route_monitoring(peer, path)
self._send(msg)
def on_adj_down(self, data):
peer = data['peer']
msg = self._construct_peer_down_notification(peer)
self._send(msg)
def _construct_peer_up_notification(self, peer):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_distinguisher = 0
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = self._core_service.router_id
timestamp = peer.state._established_time
local_address = peer.host_bind_ip
local_port = int(peer.host_bind_port)
peer_address, remote_port = peer.protocol._remotename
remote_port = int(remote_port)
sent_open_msg = peer.protocol.sent_open_msg
recv_open_msg = peer.protocol.recv_open_msg
msg = bmp.BMPPeerUpNotification(local_address=local_address,
local_port=local_port,
remote_port=remote_port,
sent_open_message=sent_open_msg,
received_open_message=recv_open_msg,
peer_type=peer_type,
is_post_policy=False,
peer_distinguisher=peer_distinguisher,
peer_address=peer_address,
peer_as=peer_as,
peer_bgp_id=peer_bgp_id,
timestamp=timestamp)
return msg
def _construct_peer_down_notification(self, peer):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = self._core_service.router_id
peer_address, _ = peer.protocol._remotename
return bmp.BMPPeerDownNotification(bmp.BMP_PEER_DOWN_REASON_UNKNOWN,
data=None,
peer_type=peer_type,
is_post_policy=False,
peer_distinguisher=0,
peer_address=peer_address,
peer_as=peer_as,
peer_bgp_id=peer_bgp_id,
timestamp=0)
def _construct_route_monitoring(self, peer, path):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_distinguisher = 0
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = self._core_service.router_id
peer_address, _ = peer.protocol._remotename
bgp_update = peer._construct_update(path)
is_post_policy = not path.filtered
timestamp = timegm(path.timestamp)
msg = bmp.BMPRouteMonitoring(bgp_update=bgp_update,
peer_type=peer_type,
is_post_policy=is_post_policy,
peer_distinguisher=peer_distinguisher,
peer_address=peer_address,
peer_as=peer_as, peer_bgp_id=peer_bgp_id,
timestamp=timestamp)
return msg
def _handle_bmp_session(self, socket):
    """Run one BMP session on `socket`.

    Sends an Initiation message, replays a PeerUp plus per-path
    RouteMonitoring messages for every established peer, then blocks
    reading the socket purely to detect the station closing the
    connection (a zero-length recv), at which point a reconnect is
    scheduled via the retry event.
    """
    self._socket = socket
    # send init message
    init_info = {'type': bmp.BMP_INIT_TYPE_STRING,
                 'value': u'This is Ryu BGP BMP message'}
    init_msg = bmp.BMPInitiation([init_info])
    self._send(init_msg)

    # send peer-up message for each peers
    peer_manager = self._core_service.peer_manager
    for peer in (p for p in peer_manager.iterpeers if p.in_established()):
        msg = self._construct_peer_up_notification(peer)
        self._send(msg)
        for path in peer._adj_rib_in.itervalues():
            # Fix: the original also called peer._construct_update(path)
            # here and discarded the result; _construct_route_monitoring
            # already builds the BGP update internally, so the extra call
            # was pure wasted work.
            msg = self._construct_route_monitoring(peer, path)
            self._send(msg)

    # TODO periodically send stats to bmpstation
    while True:
        # bmpstation shouldn't send any packet to bmpclient.
        # this recv() is only meant to detect socket closed
        ret = self._socket.recv(1)
        if len(ret) == 0:
            LOG.debug('BMP socket is closed. retry connecting..')
            self._socket = None
            self._connect_retry_event.set()
            break
        # silently ignore packets from the bmpstation
| |
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <denis-alexander.engemann@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
import pytest
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    # density() must report the same fill ratio regardless of which
    # sparse container holds the matrix.
    rng = np.random.RandomState(0)
    dense = rng.randint(10, size=(10, 5))
    dense[1, 2] = 0
    dense[5, 3] = 0
    variants = (sparse.csr_matrix(dense),
                sparse.csc_matrix(dense),
                sparse.coo_matrix(dense),
                sparse.lil_matrix(dense))
    for mat in variants:
        assert_equal(density(mat), density(dense))
def test_uniform_weights():
    # With all-ones weights, weighted_mode must agree with stats.mode
    # along every axis.
    rng = np.random.RandomState(0)
    data = rng.randint(10, size=(10, 5))
    ones = np.ones(data.shape)
    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(data, axis)
        got_mode, got_score = weighted_mode(data, ones, axis)
        assert_array_equal(expected_mode, got_mode)
        assert_array_equal(expected_score, got_score)
def test_random_weights():
    # Construct data so every row has a weighted mode of 6 with a score
    # that can be reproduced exactly from the weights.
    expected_mode = 6
    rng = np.random.RandomState(0)
    data = rng.randint(expected_mode, size=(100, 10))
    weights = rng.random_sample(data.shape)
    # Pin the first five columns to the target value and boost their weight.
    data[:, :5] = expected_mode
    weights[:, :5] += 1
    mode, score = weighted_mode(data, weights, axis=1)
    assert_array_equal(mode, expected_mode)
    assert_array_almost_equal(score.ravel(), weights[:, :5].sum(1))
@ignore_warnings  # Test deprecated backport to be removed in 0.21
def test_logsumexp():
    # Adding many tiny numbers in log space must round-trip through exp.
    values = np.array([1e-40] * 1000000)
    log_values = np.log(values)
    assert_almost_equal(np.exp(logsumexp(log_values)), values.sum())
    stacked = np.vstack([values, values])
    log_stacked = np.vstack([log_values, log_values])
    assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=0)),
                              stacked.sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=1)),
                              stacked.sum(axis=1))
def check_randomized_svd_low_rank(dtype):
    # Check that extmath.randomized_svd is consistent with linalg.svd
    # for input of the given dtype, on both dense and sparse matrices.
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # float32 inputs round-trip with less precision than float64.
    decimal = 5 if dtype == np.float32 else 7
    dtype = np.dtype(dtype)
    # generate a matrix X of approximate effective rank `rank` and no noise
    # component (very structured signal):
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0).astype(dtype, copy=False)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    U, s, V = linalg.svd(X, full_matrices=False)
    # Convert the singular values to the specific dtype
    U = U.astype(dtype, copy=False)
    s = s.astype(dtype, copy=False)
    V = V.astype(dtype, copy=False)
    for normalizer in ['auto', 'LU', 'QR']:  # 'none' would not be stable
        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = randomized_svd(
            X, k, power_iteration_normalizer=normalizer, random_state=0)
        # If the input dtype is float, then the output dtype is float of the
        # same bit size (f32 is not upcast to f64)
        # But if the input dtype is int, the output dtype is float64
        if dtype.kind == 'f':
            assert Ua.dtype == dtype
            assert sa.dtype == dtype
            assert Va.dtype == dtype
        else:
            assert Ua.dtype == np.float64
            assert sa.dtype == np.float64
            assert Va.dtype == np.float64
        assert_equal(Ua.shape, (n_samples, k))
        assert_equal(sa.shape, (k,))
        assert_equal(Va.shape, (k, n_features))
        # ensure that the singular values of both methods are equal up to the
        # real rank of the matrix
        assert_almost_equal(s[:k], sa, decimal=decimal)
        # check the singular vectors too (while not checking the sign)
        assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va),
                            decimal=decimal)
        # check the sparse matrix representation
        # NOTE(review): X is rebound here, so from the second normalizer
        # iteration on the "dense" checks above also receive the csr
        # matrix — confirm this is intended.
        X = sparse.csr_matrix(X)
        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = \
            randomized_svd(X, k, power_iteration_normalizer=normalizer,
                           random_state=0)
        if dtype.kind == 'f':
            assert Ua.dtype == dtype
            assert sa.dtype == dtype
            assert Va.dtype == dtype
        else:
            assert Ua.dtype.kind == 'f'
            assert sa.dtype.kind == 'f'
            assert Va.dtype.kind == 'f'
        assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
                         (np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
    # Run the low-rank consistency check for integer and float dtypes.
    check_randomized_svd_low_rank(dtype)
@ignore_warnings  # extmath.norm is deprecated to be removed in 0.21
def test_norm_squared_norm():
    # norm/squared_norm must match numpy on large-magnitude data, and
    # squared_norm must warn on integer input (np.dot overflow risk).
    data = np.random.RandomState(42).randn(50, 63)
    data *= 100  # check stability
    data += 200
    assert_almost_equal(np.linalg.norm(data.ravel()), norm(data))
    assert_almost_equal(norm(data) ** 2, squared_norm(data), decimal=6)
    assert_almost_equal(np.linalg.norm(data),
                        np.sqrt(squared_norm(data)), decimal=6)
    assert_warns_message(
        UserWarning, 'Array type is integer, np.dot may '
        'overflow. Data should be float type to avoid this issue',
        squared_norm, data.astype(int))
@pytest.mark.parametrize('dtype',
                         (np.float32, np.float64))
def test_row_norms(dtype):
    # row_norms must match the explicit per-row formula on dense input
    # and on csr matrices with both int32 and int64 indices.
    X = np.random.RandomState(42).randn(100, 100)
    precision = 4 if dtype is np.float32 else 5
    X = X.astype(dtype)
    expected_sq = (X ** 2).sum(axis=1)
    assert_array_almost_equal(expected_sq, row_norms(X, squared=True),
                              precision)
    assert_array_almost_equal(np.sqrt(expected_sq), row_norms(X), precision)
    for index_dtype in [np.int32, np.int64]:
        X_csr = sparse.csr_matrix(X, dtype=dtype)
        # csr_matrix will use int32 indices by default,
        # up-casting those to int64 when necessary
        if index_dtype is np.int64:
            X_csr.indptr = X_csr.indptr.astype(index_dtype)
            X_csr.indices = X_csr.indices.astype(index_dtype)
        assert X_csr.indices.dtype == index_dtype
        assert X_csr.indptr.dtype == index_dtype
        assert_array_almost_equal(expected_sq,
                                  row_norms(X_csr, squared=True), precision)
        assert_array_almost_equal(np.sqrt(expected_sq), row_norms(X_csr),
                                  precision)
def test_randomized_svd_low_rank_with_noise():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples, n_features, rank, k = 100, 500, 5, 10
    # Matrix of approximate effective rank `rank` with a significant
    # noise component (tail_strength > 0).
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.1,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Reference singular values from the slow exact SVD.
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # Without power iterations the approximation suffers from the noise.
        _, s_no_iter, _ = randomized_svd(X, k, n_iter=0,
                                         power_iteration_normalizer=normalizer,
                                         random_state=0)
        assert_greater(np.abs(s[:k] - s_no_iter).max(), 0.01)
        # The iterated power method gets rid of most of the noise.
        _, s_iter, _ = randomized_svd(X, k,
                                      power_iteration_normalizer=normalizer,
                                      random_state=0)
        assert_almost_equal(s[:k], s_iter, decimal=3)
def test_randomized_svd_infinite_rank():
    # With tail_strength=1.0 the singular values decay slowly but never
    # vanish: the data matrix has no true low-rank structure.
    n_samples, n_features, rank, k = 100, 500, 5, 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Reference singular values from the slow exact SVD.
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # Without power iterations the heavy tail spoils the estimate.
        _, s_fast, _ = randomized_svd(X, k, n_iter=0,
                                      power_iteration_normalizer=normalizer)
        assert_greater(np.abs(s[:k] - s_fast).max(), 0.1)
        # Five power iterations recover most of the leading structure
        # at the requested rank.
        _, s_power, _ = randomized_svd(X, k, n_iter=5,
                                       power_iteration_normalizer=normalizer)
        assert_almost_equal(s[:k], s_power, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Transposing the design matrix should have limited impact on the
    # computed factorization.
    n_samples, n_features, rank, k = 100, 500, 4, 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    results = {}
    for transpose in (False, True, 'auto'):
        results[transpose] = randomized_svd(X, k, n_iter=3,
                                            transpose=transpose,
                                            random_state=0)
    U_ref, s_ref, V_ref = linalg.svd(X, full_matrices=False)
    # Singular values agree with the exact SVD for every transpose mode.
    for transpose in (False, True, 'auto'):
        assert_almost_equal(results[transpose][1], s_ref[:k], decimal=3)
    # Reconstructions agree with the exact truncated reconstruction.
    reference = np.dot(U_ref[:, :k], V_ref[:k, :])
    assert_almost_equal(np.dot(results[False][0], results[False][2]),
                        reference, decimal=2)
    assert_almost_equal(np.dot(results[True][0], results[True][2]),
                        reference, decimal=2)
    # in this case 'auto' is equivalent to transpose
    assert_almost_equal(results[True][1], results['auto'][1])
def test_randomized_svd_power_iteration_normalizer():
    # With power_iteration_normalizer='none' many power iterations
    # diverge on this dataset; 'LU', 'QR' and 'auto' stay stable.
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50

    def frobenius_error(n_iter, normalizer, **kwargs):
        # Reconstruction error of the truncated factorization.
        U, s, V = randomized_svd(X, n_components, n_iter=n_iter,
                                 power_iteration_normalizer=normalizer,
                                 **kwargs)
        residual = X - U.dot(np.diag(s).dot(V))
        return linalg.norm(residual, ord='fro')

    # Un-normalized power iterations blow up as n_iter grows.
    error_2 = frobenius_error(2, 'none')
    error_20 = frobenius_error(20, 'none')
    assert_greater(np.abs(error_2 - error_20), 100)

    for normalizer in ['LU', 'QR', 'auto']:
        error_2 = frobenius_error(2, normalizer, random_state=0)
        for n_iter in [5, 10, 50]:
            error = frobenius_error(n_iter, normalizer, random_state=0)
            assert_greater(15, np.abs(error_2 - error))
def test_randomized_svd_sparse_warnings():
    # randomized_svd must warn for the inefficient lil and dok formats
    # and recommend csr instead.
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(50, 20, effective_rank=10, random_state=rng)
    n_components = 5
    for cls in (sparse.lil_matrix, sparse.dok_matrix):
        # Fix: convert from the dense matrix each time. The original code
        # rebound X (`X = cls(X)`), so the second iteration wrapped the
        # lil matrix instead of the dense one.
        X_sparse = cls(X)
        assert_warns_message(
            sparse.SparseEfficiencyWarning,
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(cls.__name__),
            randomized_svd, X_sparse, n_components, n_iter=1,
            power_iteration_normalizer='none')
def test_svd_flip():
    # svd_flip must yield a valid reconstruction under either sign
    # convention, on a matrix and on its transpose.
    rng = np.random.RandomState(1999)
    n_samples, n_features = 20, 10
    X = rng.randn(n_samples, n_features)

    # Reconstruction with v-based sign decision.
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)

    # Reconstruction of the transpose with u-based sign decision.
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)

    # Both flip conventions reconstruct the transposed input equally well.
    for u_based in (True, False):
        U_f, V_f = svd_flip(U, V, u_based_decision=u_based)
        assert_almost_equal(np.dot(U_f * S, V_f), XT, decimal=6)
def test_randomized_svd_sign_flip():
    # With flip_sign=True the factorization must not depend on the seed,
    # must reconstruct the input, and must stay orthonormal.
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    u_ref, s_ref, v_ref = randomized_svd(a, 2, flip_sign=True,
                                         random_state=41)
    for seed in range(10):
        u, s, v = randomized_svd(a, 2, flip_sign=True, random_state=seed)
        assert_almost_equal(u_ref, u)
        assert_almost_equal(v_ref, v)
        assert_almost_equal(np.dot(u * s, v), a)
        assert_almost_equal(np.dot(u.T, u), np.eye(2))
        assert_almost_equal(np.dot(v.T, v), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
    # Sign flipping must always be decided from u, whether or not the
    # implementation transposes the input internally.
    # See https://github.com/scikit-learn/scikit-learn/issues/5608
    # for more details.

    def max_loading_is_positive(u, v):
        """
        returns bool tuple indicating if the values maximising np.abs
        are positive across all rows for u and across all columns for v.
        """
        u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
        v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
        return u_based, v_based

    mat = np.arange(10 * 8).reshape(10, -1)

    # Default transpose handling.
    u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
    u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
    assert_true(u_based)
    assert_false(v_based)

    # Forced transpose: the decision basis must not change.
    u_t, _, v_t = randomized_svd(mat, 3, flip_sign=True, transpose=True)
    u_based, v_based = max_loading_is_positive(u_t, v_t)
    assert_true(u_based)
    assert_false(v_based)
def test_cartesian():
    # cartesian must enumerate the product in row-major order.
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    expected = np.array([[a, b, c]
                         for a in [1, 2, 3]
                         for b in [4, 5]
                         for c in [6, 7]])
    assert_array_equal(expected, cartesian(axes))
    # A single axis is returned as a column vector.
    single = np.arange(3)
    assert_array_equal(single[:, np.newaxis], cartesian((single,)))
def test_logistic_sigmoid():
    # Compare against the naive formula, then probe extreme inputs where
    # the naive version would lose precision.
    def naive_log_logistic(x):
        return np.log(1 / (1 + np.exp(-x)))

    inputs = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(inputs),
                              naive_log_logistic(inputs))
    extremes = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extremes), [-100, 0])
def test_incremental_variance_update_formulas():
    # Test Youngs and Cramer incremental variance formulas.
    # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
    column = [600, 470, 170, 430, 300]
    A = np.array([column] * 4).T
    split = 2
    head, tail = A[:split, :], A[split:, :]
    # Seed the running statistics from the first chunk.
    old_means = head.mean(axis=0)
    old_variances = head.var(axis=0)
    old_sample_count = np.full(head.shape[1], head.shape[0], dtype=np.int32)
    final_means, final_variances, final_count = _incremental_mean_and_var(
        tail, old_means, old_variances, old_sample_count)
    # The incremental result must match the full-batch statistics.
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])
def test_incremental_mean_and_variance_ignore_nan():
    # NaN entries must be skipped: the NaN-padded matrix has to produce
    # the same statistics as the dense one.
    old_means = np.array([535.] * 4)
    old_variances = np.array([4225.] * 4)
    old_sample_count = np.array([2, 2, 2, 2], dtype=np.int32)
    X = np.array([[170, 170, 170, 170],
                  [430, 430, 430, 430],
                  [300, 300, 300, 300]])
    X_nan = np.array([[170, np.nan, 170, 170],
                      [np.nan, 170, 430, 430],
                      [430, 430, np.nan, 300],
                      [300, 300, 300, np.nan]])
    dense_stats = _incremental_mean_and_var(X, old_means, old_variances,
                                            old_sample_count)
    nan_stats = _incremental_mean_and_var(X_nan, old_means, old_variances,
                                          old_sample_count)
    # Compare means, variances and counts pairwise.
    for nan_stat, dense_stat in zip(nan_stats, dense_stats):
        assert_allclose(nan_stat, dense_stat)
@skip_if_32bit
def test_incremental_variance_numerical_stability():
    # Test Youngs and Cramer incremental variance formulas.
    # Compares three variance algorithms on data crafted to trigger
    # catastrophic cancellation, and checks that _incremental_mean_and_var
    # stays within tolerance while naive approaches do not.

    def np_var(A):
        return A.var(axis=0)

    # Naive one pass variance computation - not numerically stable
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    def one_pass_var(X):
        n = X.shape[0]
        exp_x2 = (X ** 2).sum(axis=0) / n
        expx_2 = (X.sum(axis=0) / n) ** 2
        return exp_x2 - expx_2

    # Two-pass algorithm, stable.
    # We use it as a benchmark. It is not an online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    def two_pass_var(X):
        mean = X.mean(axis=0)
        Y = X.copy()
        return np.mean((Y - mean)**2, axis=0)

    # Naive online implementation
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks for size 1
    def naive_mean_variance_update(x, last_mean, last_variance,
                                   last_sample_count):
        updated_sample_count = (last_sample_count + 1)
        samples_ratio = last_sample_count / float(updated_sample_count)
        updated_mean = x / updated_sample_count + last_mean * samples_ratio
        updated_variance = last_variance * samples_ratio + \
            (x - last_mean) * (x - updated_mean) / updated_sample_count
        return updated_mean, updated_variance, updated_sample_count

    # We want to show a case when one_pass_var has error > 1e-3 while
    # _batch_mean_variance_update has less.
    tol = 200
    n_features = 2
    n_samples = 10000
    # Mix a huge value with a tiny one so float cancellation shows up.
    x1 = np.array(1e8, dtype=np.float64)
    x2 = np.log(1e-5, dtype=np.float64)
    A0 = np.full((n_samples // 2, n_features), x1, dtype=np.float64)
    A1 = np.full((n_samples // 2, n_features), x2, dtype=np.float64)
    A = np.vstack((A0, A1))

    # Older versions of numpy have different precision
    # In some old version, np.var is not stable
    if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
        stable_var = np_var
    else:
        stable_var = two_pass_var

    # Naive one pass var: >tol (=1063)
    assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)

    # Starting point for online algorithms: after A0

    # Naive implementation: >tol (436)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = \
            naive_mean_variance_update(A1[i, :], mean, var, n)
    assert_equal(n, A.shape[0])
    # the mean is also slightly unstable
    assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
    assert_greater(np.abs(stable_var(A) - var).max(), tol)

    # Robust implementation: <tol (177)
    mean, var = A0[0, :], np.zeros(n_features)
    n = np.full(n_features, n_samples // 2, dtype=np.int32)
    for i in range(A1.shape[0]):
        mean, var, n = \
            _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
                                      mean, var, n)
    assert_array_equal(n, A.shape[0])
    assert_array_almost_equal(A.mean(axis=0), mean)
    assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
    # Test that degrees of freedom parameter for calculations are correct.
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    for batch_size in [11, 20, 37]:
        steps = np.arange(0, X.shape[0], batch_size)
        # Make sure the last (possibly partial) batch is included.
        if steps[-1] != X.shape[0]:
            steps = np.hstack([steps, n_samples])

        for i, j in zip(steps[:-1], steps[1:]):
            batch = X[i:j, :]
            if i == 0:
                # Initialize the running statistics from the first batch.
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = np.full(batch.shape[1], batch.shape[0],
                                       dtype=np.int32)
            else:
                # Fold the new batch into the running statistics.
                result = _incremental_mean_and_var(
                    batch, incremental_means, incremental_variances,
                    sample_count)
                (incremental_means, incremental_variances,
                 incremental_count) = result
                sample_count += batch.shape[0]

            # Compare against batch statistics over all rows seen so far.
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances,
                                calculated_variances, 6)
            assert_array_equal(incremental_count, sample_count)
def test_vector_sign_flip():
    # After flipping, the largest-magnitude entry of each row must be
    # positive, and the flip must only multiply rows by +-1.
    data = np.random.RandomState(36).randn(5, 5)
    max_abs_cols = np.argmax(np.abs(data), axis=1)
    flipped = _deterministic_vector_sign_flip(data)
    assert_array_equal(max_abs_cols, np.argmax(flipped, axis=1))
    signs = np.sign(data[range(data.shape[0]), max_abs_cols])
    assert_array_equal(data, flipped * signs[:, np.newaxis])
def test_softmax():
    # softmax must equal exp(X) normalized row-wise.
    rng = np.random.RandomState(0)
    X = rng.randn(3, 5)
    exponentials = np.exp(X)
    row_sums = np.sum(exponentials, axis=1).reshape((-1, 1))
    assert_array_almost_equal(softmax(X), exponentials / row_sums)
def test_stable_cumsum():
    if np_version < (1, 9):
        raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
    assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
    # With zero tolerances the accumulated float error must warn.
    r = np.random.RandomState(0).rand(100000)
    assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
    # The axis parameter is forwarded to np.cumsum.
    A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
    for axis in range(3):
        assert_array_equal(stable_cumsum(A, axis=axis),
                           np.cumsum(A, axis=axis))
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import os
import pickle
from absl.testing import parameterized
import numpy
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import normalization
from tensorflow.python.layers import core as non_keras_core
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
class HasList(training.Model):
  """Model exercising every way of adding layers to a tracked List.

  Layers are added via the List constructor, append, extend, += with a
  plain list, += with the result of List + List, and extend with a List
  built from list concatenation, so the tests can verify each code path
  registers its layers for tracking.
  """

  def __init__(self):
    super(HasList, self).__init__()
    self.layer_list = data_structures.List([core.Dense(3)])
    self.layer_list.append(core.Dense(4))
    self.layer_list.extend(
        [core.Dense(5),
         core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
    self.layer_list += [
        core.Dense(7, bias_regularizer=math_ops.reduce_sum),
        core.Dense(8)
    ]
    self.layer_list += (
        data_structures.List([core.Dense(9)]) + data_structures.List(
            [core.Dense(10)]))
    self.layer_list.extend(
        data_structures.List(
            list([core.Dense(11)]) + [core.Dense(12)]))
    self.layers_with_updates = data_structures.List(
        (normalization.BatchNormalization(),))

  def call(self, x):
    # Chain x through every Dense layer while accumulating the sum of all
    # activations, then divide the batch-normalized output by that sum.
    aggregation = 0.
    for l in self.layer_list:
      x = l(x)
      aggregation += math_ops.reduce_sum(x)
    bn, = self.layers_with_updates
    return bn(x) / aggregation
class ListTests(test.TestCase):
  """Tests for `data_structures.List` and list-attribute tracking."""

  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only("b/120545219")
  def testTracking(self):
    # Every layer added to HasList's List (constructor, append, extend,
    # +=) must show up in model.layers and be checkpointable.
    model = HasList()
    output = model(array_ops.ones([32, 2]))
    self.assertAllEqual([32, 12], output.shape)
    self.assertEqual(11, len(model.layers))
    self.assertEqual(10, len(model.layer_list.layers))
    six.assertCountEqual(
        self,
        model.layers,
        model.layer_list.layers + model.layers_with_updates)
    for index in range(10):
      self.assertEqual(3 + index, model.layer_list.layers[index].units)
    # Only the two List attributes are direct checkpoint dependencies.
    self.assertEqual(2, len(model._checkpoint_dependencies))
    self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
    self.assertIs(model.layers_with_updates,
                  model._checkpoint_dependencies[1].ref)
    self.assertEqual(
        10,
        len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
    self.evaluate([v.initializer for v in model.variables])
    self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
    model.load_weights(save_path)
    self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
                        self.evaluate(model.variables[0]))
    # Variables reached through a plain-list attribute are tracked too.
    v = variables.Variable(1.)
    model.var_list = [v]
    self.assertIn(v, model.variables)
    self.assertIn(v, model.trainable_variables)
    self.assertNotIn(v, model.non_trainable_variables)
    self.assertIn(model.layer_list[0].trainable_weights[0],
                  model.trainable_weights)

  def testSubModelTracking(self):
    # A model stored inside a list attribute of another model is tracked.
    model = training.Model()
    model.v = variables.Variable(1.)
    self.assertIn(model.v, model.trainable_weights)
    model2 = training.Model()
    model2.m = [model]
    self.assertIn(model.v, model2.trainable_weights)

  def testSubSequentialTracking(self):
    # A Sequential wrapped by a subclassed model and also stored in a
    # list attribute contributes its weights.

    class _Subclassed(training.Model):

      def __init__(self, wrapped):
        super(_Subclassed, self).__init__()
        self._wrapped = wrapped

      def call(self, x):
        return self._wrapped(x)

    model = sequential.Sequential()
    layer = core.Dense(1)
    model.add(layer)
    model2 = _Subclassed(model)
    model2(array_ops.ones([1, 2]))
    model2.m = [model]
    self.assertIn(layer.kernel, model2.trainable_weights)

  def testLayerTrackedThroughSequential(self):
    # Layers inside a Sequential held in a list attribute are tracked.

    class AttrDict(dict):

      def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self

    def ffnet(layer_sizes, name):
      ff = sequential.Sequential(name=name)
      for i, width in enumerate(layer_sizes):
        ff.add(core.Dense(
            width,
            activation=("relu" if i < len(layer_sizes)-1 else None)))
      return ff

    class MyModel2(training.Model):

      def __init__(self, config, name="my_model_2"):
        super(MyModel2, self).__init__(name=name)
        self._num_tokens = config.num_tokens
        # list of sub-models
        self._ffnet = [ffnet(config.module_layers + (self._num_tokens,),
                             "ff")]

      def null_input(self):
        return array_ops.zeros([1, self._num_tokens], dtype=dtypes.float32)

      def call(self, input_, module_index=None):
        return self._ffnet[0](input_)

    m2 = MyModel2(AttrDict(
        num_tokens=5,
        module_layers=(50, 30)))
    # Construct
    m2(m2.null_input())
    # Three Dense layers, each with a kernel and a bias.
    self.assertLen(m2.trainable_variables, 6)

  def testJSONSerialization(self):
    # A tracked list must still serialize through the JSON helper.
    obj = tracking.AutoTrackable()
    obj.l = [1]
    json.dumps(obj.l, default=serialization.get_json_type)

  @test_util.run_v1_only("b/120545219")
  def testUpdatesForwarded(self):
    # Batch-norm updates are forwarded in graph mode; eager has none.
    with context.graph_mode():
      model = HasList()
      model_input = array_ops.ones([32, 2])
      model(model_input)
      self.assertGreater(len(model.layers_with_updates[0].updates), 0)
      self.assertEqual(set(model.layers_with_updates[0].updates),
                       set(model.updates))

    with context.eager_mode():
      model = HasList()
      model_input = array_ops.ones([32, 2])
      model(model_input)
      self.assertEqual(0, len(model.updates))

  @test_util.run_in_graph_and_eager_modes
  @test_util.run_v1_only("b/120545219")
  def testLossesForwarded(self):
    # The two regularized Dense layers contribute one loss each.
    model = HasList()
    model_input = array_ops.ones([32, 2])
    model(model_input)
    self.assertEqual(2, len(model.losses))

  def testModelContainersCompareEqual(self):
    # Two empty (thus ==-equal) list attributes must not be conflated.

    class HasEqualContainers(training.Model):

      def __init__(self):
        super(HasEqualContainers, self).__init__()
        self.l1 = []
        self.l2 = []

    model = HasEqualContainers()
    first_layer = HasEqualContainers()
    model.l1.append(first_layer)
    second_layer = HasEqualContainers()
    model.l2.append(second_layer)
    self.assertEqual([first_layer, second_layer], model.layers)

  def testNotTrackable(self):
    # List refuses to hold objects that are not trackable.

    class NotTrackable(object):
      pass

    with self.assertRaises(ValueError):
      data_structures.List([NotTrackable()])

  def testCallNotImplemented(self):
    with self.assertRaisesRegexp(TypeError, "not callable"):
      data_structures.List()(1.)

  def testNoPop(self):
    # pop() would break dependency bookkeeping, so it is not provided.
    with self.assertRaises(AttributeError):
      data_structures.List().pop()

  @test_util.run_in_graph_and_eager_modes
  def testTensorConversion(self):

    class ListToTensor(training.Model):

      def __init__(self):
        super(ListToTensor, self).__init__()
        self.l = [1., 2., 3.]

    self.assertAllEqual(
        [1., 2., 3.],
        self.evaluate(constant_op.constant(ListToTensor().l)))

    # NOTE(review): array_ops.pack is the pre-1.0 name of stack —
    # confirm it is still exported by this TF version.
    self.assertAllEqual(
        [1., 2., 3.],
        self.evaluate(array_ops.pack(ListToTensor().l)))

  def testNesting(self):
    # A List inside another List still exposes the inner variables.
    with context.graph_mode():
      inner = data_structures.List()
      outer = data_structures.List([inner])
      inner.append(non_keras_core.Dense(1))
      inner[0](array_ops.ones([2, 3]))
      self.assertEqual(2, len(outer.variables))
      self.assertIsInstance(
          outer.variables[0],
          resource_variable_ops.ResourceVariable)

  def testNonLayerVariables(self):
    # Plain variables in a List respect the List's trainable flag.
    v = resource_variable_ops.ResourceVariable([1.])
    l = data_structures.List([v])
    self.assertTrue(l.trainable)
    self.assertEqual([], l.layers)
    self.assertEqual([v], l.variables)
    self.assertEqual([v], l.trainable_weights)
    self.assertEqual([], l.non_trainable_variables)
    l.trainable = False
    self.assertEqual([v], l.variables)
    self.assertEqual([], l.trainable_variables)
    self.assertEqual([v], l.non_trainable_variables)
    l.trainable = True
    v2 = resource_variable_ops.ResourceVariable(1., trainable=False)
    l.append(v2)
    self.assertEqual([v, v2], l.weights)
    self.assertEqual([v], l.trainable_weights)
    self.assertEqual([v2], l.non_trainable_weights)

  def testCopy(self):
    # copy() must produce an independent List.
    v1 = resource_variable_ops.ResourceVariable(1.)
    v2 = resource_variable_ops.ResourceVariable(1.)
    v3 = resource_variable_ops.ResourceVariable(1.)
    l1 = data_structures.List([v1, v2])
    l2 = l1.copy()
    l2.append(v3)
    self.assertEqual(list(l1), [v1, v2])
    self.assertEqual(list(l2), [v1, v2, v3])

  def testSlicing(self):
    v1 = resource_variable_ops.ResourceVariable(1.)
    v2 = resource_variable_ops.ResourceVariable(1.)
    v3 = resource_variable_ops.ResourceVariable(1.)
    v4 = resource_variable_ops.ResourceVariable(1.)
    l = data_structures.List([v1, v2, v3, v4])
    self.assertEqual(l[1:], [v2, v3, v4])
    self.assertEqual(l[1:-1], [v2, v3])
    self.assertEqual(l[:-1], [v1, v2, v3])

  def testHash(self):
    # Lists hash by identity, not by contents.
    has_sequences = set([data_structures.List(),
                         data_structures.List()])
    self.assertEqual(2, len(has_sequences))
    self.assertNotIn(data_structures.List(), has_sequences)

  def testIMul_zero(self):
    l = data_structures.List([])
    with self.assertRaisesRegexp(ValueError, "List only supports append"):
      l *= 0

  def testIMul(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures.List([v])
    l *= 2
    self.assertEqual(list(l), [v] * 2)

  def testMul(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures.List([v, v, v])
    self.assertEqual(list(l * 2), [v, v, v] * 2)

  def testRMul(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures.List([v, v, v])
    self.assertEqual(list(2 * l), [v, v, v] * 2)
class ListWrapperTest(test.TestCase):
  """Tests for ListWrapper, the trackable stand-in for plain `list`s."""

  # `list` methods that ListWrapper deliberately does not re-implement.
  IGNORED = ("__new__", "__init__", "__subclasshook__", "__getattribute__")

  def test_overrides_all_list_methods(self):
    # Every callable defined on `list` (other than the ignored set and plain
    # `object` inheritances) must be shadowed so mutations can be tracked.
    not_overridden = []
    for name in dir(list):
      if name in ListWrapperTest.IGNORED:
        continue
      list_method = getattr(list, name)
      if not callable(list_method):
        continue
      object_method = getattr(object, name, None)
      if object_method is not None and object_method == list_method:
        # Skip methods that aren't overridden from object.
        continue
      if list_method == getattr(data_structures.ListWrapper, name):
        not_overridden.append(name)

    if not_overridden:
      self.fail("ListWrapper does not override %s" % (not_overridden))

  def testPickle(self):
    # Round-tripping through pickle preserves the wrapped contents.
    original = data_structures.ListWrapper([1, 2])
    serialized = pickle.dumps(original)
    del original
    deserialized = pickle.loads(serialized)
    self.assertEqual([1, 2], deserialized)

  def testSameStructure(self):
    # nest should treat a ListWrapper as structurally identical to a list.
    l = [1]
    nest.assert_same_structure(l, data_structures.ListWrapper(copy.copy(l)))

  def testFunctionCaching(self):
    # A ListWrapper argument should hit the same concrete-function cache
    # entry as a structurally identical plain list.
    @def_function.function
    def f(list_input):
      return list_input[0] + constant_op.constant(1.)

    first_trace = f.get_concrete_function([constant_op.constant(2.)])
    second_trace = f.get_concrete_function(
        data_structures.ListWrapper([constant_op.constant(3.)]))
    self.assertIs(first_trace, second_trace)

  def testListWrapperBasic(self):
    # ListWrapper, unlike List, compares like the built-in list type (since it
    # is used to automatically replace lists).
    a = tracking.AutoTrackable()
    b = tracking.AutoTrackable()
    self.assertEqual([a, a],
                     [a, a])
    self.assertEqual(data_structures.ListWrapper([a, a]),
                     data_structures.ListWrapper([a, a]))
    self.assertEqual([a, a],
                     data_structures.ListWrapper([a, a]))
    self.assertEqual(data_structures.ListWrapper([a, a]),
                     [a, a])
    self.assertNotEqual([a, a],
                        [b, a])
    self.assertNotEqual(data_structures.ListWrapper([a, a]),
                        data_structures.ListWrapper([b, a]))
    self.assertNotEqual([a, a],
                        data_structures.ListWrapper([b, a]))
    self.assertLess([a], [a, b])
    self.assertLess(data_structures.ListWrapper([a]),
                    data_structures.ListWrapper([a, b]))
    self.assertLessEqual([a], [a, b])
    self.assertLessEqual(data_structures.ListWrapper([a]),
                         data_structures.ListWrapper([a, b]))
    self.assertGreater([a, b], [a])
    self.assertGreater(data_structures.ListWrapper([a, b]),
                       data_structures.ListWrapper([a]))
    self.assertGreaterEqual([a, b], [a])
    self.assertGreaterEqual(data_structures.ListWrapper([a, b]),
                            data_structures.ListWrapper([a]))
    self.assertEqual([a], data_structures.ListWrapper([a]))
    self.assertEqual([a], list(data_structures.List([a])))
    self.assertEqual([a, a], data_structures.ListWrapper([a]) + [a])
    self.assertEqual([a, a], [a] + data_structures.ListWrapper([a]))
    self.assertIsInstance(data_structures.ListWrapper([a]), list)
    self.assertEqual(
        tensor_shape.TensorShape([None, 2]).as_list(),
        (data_structures.ListWrapper([None])
         + tensor_shape.TensorShape([2])).as_list())

  def testAcceptsNonTrackableContent(self):
    # Unlike List, the wrapper tolerates plain non-trackable values.
    l = data_structures.ListWrapper([1, 2, 3])
    self.assertEqual(l, [1, 2, 3])

  def testWrapperChangesList(self):
    # Mutations through the wrapper are visible on the wrapped list.
    l = []
    l_wrapper = data_structures.ListWrapper(l)
    l_wrapper.append(1)
    self.assertEqual([1], l)

  def testListChangesWrapper(self):
    # Mutations of the wrapped list are visible through the wrapper.
    l = []
    l_wrapper = data_structures.ListWrapper(l)
    l.append(1)
    self.assertEqual([1], l_wrapper)

  def testLayerCollectionWithExternalMutation(self):
    # Layers appended to the underlying list still show up via the wrapper.
    l = []
    l_wrapper = data_structures.ListWrapper(l)
    layer = core.Dense(1)
    l.append(layer)
    self.assertEqual([layer], l_wrapper.layers)

  def testNotHashable(self):
    # Mirrors plain lists: wrappers are mutable and therefore unhashable.
    with self.assertRaises(TypeError):
      hash(data_structures.ListWrapper())

  def testDelItem(self):
    # Deleting items succeeds but poisons checkpointing of the wrapper.
    l = data_structures.ListWrapper([1, 2, 3, 4])
    del l[0]
    self.assertEqual(l, [2, 3, 4])
    self.assertUnableToSave(l, "Unable to save .*__delitem__")

  def testDelSlice(self):
    l = data_structures.ListWrapper([1, 2, 3, 4])
    del l[2:3]
    self.assertEqual(l, [1, 2, 4])
    self.assertUnableToSave(l, "Unable to save .*__delslice__")

  def testSetSlice_canSaveForNonTrackableItems(self):
    # Slice assignment of non-trackables leaves the wrapper saveable.
    l = data_structures.ListWrapper([1, 2, 3, 4])
    l[:] = 2, 8, 9, 0
    self.assertEqual(l, [2, 8, 9, 0])
    l._maybe_initialize_trackable()  # pylint: disable=protected-access
    self.assertEqual(len(l._checkpoint_dependencies), 0)  # pylint: disable=protected-access

  def testSetSlice_cannotSaveIfTrackableModified(self):
    # Overwriting tracked values via slice assignment poisons saving.
    v1 = resource_variable_ops.ResourceVariable(1.)
    v2 = resource_variable_ops.ResourceVariable(1.)
    l = data_structures.ListWrapper([1, 2, v1, v2])
    l[:] = 2, 8, 9, v2
    self.assertEqual(l, [2, 8, 9, v2])
    self.assertUnableToSave(l, "Unable to save .*__setslice__")

  def testSetSlice_truncate(self):
    l = data_structures.ListWrapper([1, 2, 3, 4])
    l[:] = []
    self.assertEqual(l, [])

  def testSetSlice_extend(self):
    l = data_structures.ListWrapper([1, 2, 3, 4])
    l[2:] = 1, 2, 3, 4
    self.assertEqual(l, [1, 2, 1, 2, 3, 4])

  def testIMulNegative(self):
    # Negative in-place multiply empties the list and poisons saving.
    l = data_structures.ListWrapper([1, 2, 3, 4])
    l *= -1
    self.assertEqual(l, [1, 2, 3, 4] * -1)
    self.assertUnableToSave(l, "Unable to save")

  def testIMulPositive(self):
    # Positive in-place multiply keeps the wrapper saveable; duplicated
    # variables get fresh dependency slots ("4" and "9" here).
    v = variables.Variable(1.)
    l = data_structures.ListWrapper([1, 2, 3, 4, v])
    self.assertEqual([("4", v)], l._checkpoint_dependencies)
    root = util.Checkpoint(l=l)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    path = root.save(prefix)
    v.assign(5.)
    l *= 2
    self.assertEqual(l, [1, 2, 3, 4, v, 1, 2, 3, 4, v])
    self.assertEqual([("4", v), ("9", v)], l._checkpoint_dependencies)
    root.restore(path)
    self.assertAllClose(1., v.numpy())

  def testSort(self):
    l = data_structures.ListWrapper([1, 2, 3, 4])
    l.sort()
    self.assertEqual(l, [1, 2, 3, 4])
    # Regardless of being a no-op for the input list, we still refuse to save.
    # This is intentional since otherwise we would end up with a hard to debug
    # case for users (e.g. sometimes sort on a ListWrapper is trackable and
    # other times it is not).
    self.assertUnableToSave(l, "Unable to save .*sort")

  def assertUnableToSave(self, l, msg):
    # Helper: computing checkpoint dependencies must raise ValueError
    # matching `msg` once the wrapper has been poisoned by a mutation.
    l._maybe_initialize_trackable()  # pylint: disable=protected-access
    with self.assertRaisesRegexp(ValueError, msg):
      return l._checkpoint_dependencies  # pylint: disable=protected-access
class HasMapping(training.Model):
  """Model whose sub-layers are stored in a tracked Mapping of Lists."""

  def __init__(self):
    super(HasMapping, self).__init__()
    # "output" projects to 7 units; "norm"/"dense" hold parallel stacks.
    self.layer_dict = data_structures.Mapping(output=core.Dense(7))
    self.layer_dict["norm"] = data_structures.List()
    self.layer_dict["dense"] = data_structures.List()
    self.layer_dict["dense"].extend(
        [core.Dense(5),
         core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
    self.layer_dict["norm"].append(
        normalization.BatchNormalization())
    self.layer_dict["norm"].append(
        normalization.BatchNormalization())

  def call(self, x):
    # Apply each dense layer followed by its batch norm, accumulating the
    # running sum of activations to scale the final output.
    aggregation = 0.
    for norm, dense in zip(self.layer_dict["norm"], self.layer_dict["dense"]):
      x = norm(dense(x))
      aggregation += math_ops.reduce_sum(x)
    return self.layer_dict["output"](x) / aggregation
class MappingTests(test.TestCase):
  """Tests for Mapping and the dict wrapper used for attribute tracking."""

  @test_util.run_in_graph_and_eager_modes
  def testTracking(self):
    # Layers reached through the Mapping are tracked and checkpointable.
    model = HasMapping()
    output = model(array_ops.ones([32, 2]))
    self.assertAllEqual([32, 7], output.shape.as_list())
    self.assertEqual(5, len(model.layers))
    six.assertCountEqual(self, model.layers, model.layer_dict.layers)
    self.assertEqual(1, len(model._checkpoint_dependencies))
    self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
    self.evaluate([v.initializer for v in model.variables])
    test_var = model.layer_dict["output"].kernel
    self.evaluate(test_var.assign(array_ops.ones([6, 7])))
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    self.evaluate(test_var.assign(array_ops.zeros([6, 7])))
    model.load_weights(save_path)
    self.assertAllEqual(numpy.ones([6, 7]),
                        self.evaluate(test_var))

  def testJSONSerialization(self):
    # Wrapped dicts should still serialize through the JSON helpers.
    obj = tracking.AutoTrackable()
    obj.d = {"a": 2}
    json.dumps(obj.d, default=serialization.get_json_type)

  def testNoOverwrite(self):
    # Mapping forbids overwriting or deleting tracked entries.
    mapping = data_structures.Mapping()
    original = data_structures.List()
    mapping["a"] = original
    with self.assertRaises(ValueError):
      mapping["a"] = data_structures.List()
    self.assertIs(original, mapping["a"])
    with self.assertRaises(AttributeError):
      del mapping["a"]
    mapping.update(b=data_structures.Mapping())
    with self.assertRaises(ValueError):
      mapping.update({"b": data_structures.Mapping()})

  def testNonStringKeys(self):
    # Mapping keys must be strings so they map to checkpoint names.
    mapping = data_structures.Mapping()
    with self.assertRaises(TypeError):
      mapping[1] = data_structures.List()

  def testLayerCollectionWithExternalMutation(self):
    # Layers inserted into the underlying dict surface via the wrapper.
    d = {}
    root = tracking.AutoTrackable()
    root.wrapper = d
    self.assertEqual([], root.wrapper.layers)
    self.assertEqual([], root.wrapper.trainable_weights)
    layer1 = core.Dense(1)
    layer2 = core.Dense(1)
    d["a"] = layer1
    d["b"] = layer2
    self.assertEqual([layer1, layer2], root.wrapper.layers)
    # The layers have still not created variables
    self.assertEqual([], root.wrapper.trainable_weights)

  def testHashing(self):
    # Mapping objects hash by identity.
    has_mappings = set([data_structures.Mapping(),
                        data_structures.Mapping()])
    self.assertEqual(2, len(has_mappings))
    self.assertNotIn(data_structures.Mapping(), has_mappings)
    # In contrast to Mapping, dict wrappers are not hashable
    a = tracking.AutoTrackable()
    a.d = {}
    self.assertEqual({}, a.d)
    self.assertFalse({} != a.d)  # pylint: disable=g-explicit-bool-comparison
    self.assertNotEqual({1: 2}, a.d)
    with self.assertRaisesRegexp(TypeError, "unhashable"):
      set([a.d])

  def testDictWrapperBadKeys(self):
    # Trackable values under non-string keys block checkpointing.
    a = tracking.AutoTrackable()
    a.d = {}
    a.d[1] = data_structures.List()
    model = training.Model()
    model.sub = a
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    with self.assertRaisesRegexp(ValueError, "non-string key"):
      model.save_weights(save_path)

  def testDictWrapperNoDependency(self):
    # NoDependency opts the dict out of tracking, so anything goes.
    a = tracking.AutoTrackable()
    a.d = data_structures.NoDependency({})
    a.d[1] = [3]
    self.assertEqual([a], util.list_objects(a))
    model = training.Model()
    model.sub = a
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    model.load_weights(save_path)

  def testNonStringKeyNotTrackableValue(self):
    # Non-string keys are fine as long as the value is not tracked.
    a = tracking.AutoTrackable()
    a.d = {}
    a.d["a"] = [3]
    a.d[1] = data_structures.NoDependency([3])
    self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
    model = training.Model()
    model.sub = a
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    model.load_weights(save_path)

  def testNonAppendNotTrackable(self):
    # Non-append mutations (deleting or overwriting values) are OK when the
    # values aren't tracked.
    a = tracking.AutoTrackable()
    a.d = {}
    a.d["a"] = [3]
    a.d[1] = 3
    a.d[1] = 2
    self.assertEqual(2, a.d[1])
    del a.d[1]
    a.d[2] = data_structures.NoDependency(tracking.AutoTrackable())
    second = tracking.AutoTrackable()
    a.d[2] = data_structures.NoDependency(second)
    self.assertIs(second, a.d[2])
    self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
    model = training.Model()
    model.sub = a
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    model.load_weights(save_path)

  def testPopNoSave(self):
    # Popping a tracked value poisons checkpointing of the dict.
    model = training.Model()
    model.d = {}
    model.d["a"] = []
    model.d.pop("a")
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    with self.assertRaisesRegexp(ValueError, "Unable to save"):
      model.save_weights(save_path)

  def testExternalModificationNoSave(self):
    # Mutating the original dict (not the wrapper) also blocks saving.
    model = training.Model()
    external_reference = {}
    model.d = external_reference
    external_reference["a"] = []
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
      model.save_weights(save_path)

  def testOverwriteCanStillSave(self):
    # Overwriting through the wrapper keeps dependencies consistent.
    model = training.Model()
    model.d = {}
    model.d["a"] = {}
    model.d["a"] = {}
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)

  def testIter(self):
    model = training.Model()
    model.d = {1: 3}
    model.d[1] = 3
    self.assertEqual([1], list(model.d))
    new_dict = {}
    # This update() is super tricky. If the dict wrapper subclasses dict,
    # CPython will access its storage directly instead of calling any
    # methods/properties on the object. So the options are either not to
    # subclass dict (in which case update will call normal iter methods, but the
    # object won't pass isinstance checks) or to subclass dict and keep that
    # storage updated (no shadowing all its methods like ListWrapper).
    new_dict.update(model.d)
    self.assertEqual({1: 3}, new_dict)

  def testListShallowCopy(self):
    root = tracking.AutoTrackable()
    orig_list = [[1.]]
    root.a = orig_list
    copied = copy.copy(root.a)
    self.assertAllEqual([[1.]], copied)
    self.assertIsNot(root.a, copied)
    self.assertIs(root.a[0], copied[0])

    # Dirtiness should be inherited
    util.list_objects(root.a)
    orig_list.append(1.)
    with self.assertRaises(ValueError):
      util.list_objects(root.a)
    with self.assertRaises(ValueError):
      util.list_objects(copy.copy(root.a))

  def testListDeepCopy(self):
    root = tracking.AutoTrackable()
    orig_list = [[1.]]
    root.a = orig_list
    copied = copy.deepcopy(root.a)
    self.assertAllEqual([[1.]], copied)
    self.assertIsNot(root.a, copied)
    self.assertIsNot(root.a[0], copied[0])

    # Dirtiness should be inherited
    util.list_objects(root.a)
    orig_list.append(1.)
    with self.assertRaises(ValueError):
      util.list_objects(root.a)
    with self.assertRaises(ValueError):
      util.list_objects(copy.deepcopy(root.a))

  def testDictShallowCopy(self):
    root = tracking.AutoTrackable()
    orig_dict = {"a": [1.]}
    root.a = orig_dict
    copied = copy.copy(root.a)
    self.assertAllEqual([1.], copied["a"])
    self.assertIsNot(root.a, copied)
    self.assertIs(root.a["a"], copied["a"])

    # The wrapper's own copy() method behaves like copy.copy().
    copied = root.a.copy()
    self.assertAllEqual([1.], copied["a"])
    self.assertIsNot(root.a, copied)
    self.assertIs(root.a["a"], copied["a"])

    # Dirtiness should be inherited
    util.list_objects(root.a)
    orig_dict["b"] = []
    with self.assertRaises(ValueError):
      util.list_objects(root.a)
    with self.assertRaises(ValueError):
      util.list_objects(copy.copy(root.a))

  def testDictDeepCopy(self):
    root = tracking.AutoTrackable()
    orig_dict = {"a": [1.]}
    root.a = orig_dict
    copied = copy.deepcopy(root.a)
    self.assertAllEqual([1.], copied["a"])
    self.assertIsNot(root.a, copied)
    self.assertIsNot(root.a["a"], copied["a"])

    # Dirtiness should be inherited
    util.list_objects(root.a)
    orig_dict["b"] = []
    with self.assertRaises(ValueError):
      util.list_objects(root.a)
    with self.assertRaises(ValueError):
      util.list_objects(copy.deepcopy(root.a))

  def testShallowCopyTrackable(self):
    # Shallow-copying a trackable shares sub-objects but re-tracks them.
    original = tracking.AutoTrackable()
    original_sub = tracking.AutoTrackable()
    original.a = [[1.]]
    original.b = {"a": original_sub}
    shallow_copied = copy.copy(original)
    self.assertIs(original_sub, shallow_copied.b["a"])
    self.assertIsNot(original, shallow_copied)
    self.assertEqual([[1.]], shallow_copied.a)
    shallow_deps = util.list_objects(shallow_copied)
    self.assertIn(shallow_copied.a, shallow_deps)
    self.assertIn(shallow_copied.b, shallow_deps)
    self.assertIn(shallow_copied.b["a"], shallow_deps)

  def testDeepCopyTrackable(self):
    # Deep-copying duplicates sub-objects and their dependency graph.
    original = tracking.AutoTrackable()
    original_sub = tracking.AutoTrackable()
    original.a = [[1.]]
    original.b = {"a": original_sub}
    self.assertIsInstance(original.b, dict)
    deep_copied = copy.deepcopy(original)
    self.assertIsInstance(deep_copied.b, dict)
    self.assertIsNot(original, deep_copied)
    self.assertIsNot(original_sub, deep_copied.b["a"])
    self.assertEqual([[1.]], deep_copied.a)
    self.assertIsInstance(deep_copied.b["a"], tracking.AutoTrackable)
    deps = util.list_objects(deep_copied)
    self.assertIn(deep_copied.a, deps)
    self.assertIn(deep_copied.b, deps)
    self.assertIn(deep_copied.b["a"], deps)
    self.assertNotIn(original_sub, deps)

  def testConstructableFromSequence(self):
    # The wrapper accepts the same constructor arguments as dict.
    result = data_structures._DictWrapper([(1, 2), (3, 4)])
    self.assertIsInstance(result, dict)
    self.assertEqual({1: 2, 3: 4}, result)

  def testPickle(self):
    original = data_structures._DictWrapper(dict(a=1, b=2))
    serialized = pickle.dumps(original)
    del original
    deserialized = pickle.loads(serialized)
    self.assertEqual(dict(a=1, b=2), deserialized)

  def testListAddOrder(self):
    # Addition between wrappers and plain lists works in either order.
    self.assertEqual([1., 2.],
                     data_structures.ListWrapper([1.])
                     + data_structures.ListWrapper([2.]))
    self.assertEqual([1., 2.],
                     data_structures.ListWrapper([1.])
                     + [2.])
    self.assertEqual([1., 2.],
                     [1.]
                     + data_structures.ListWrapper([2.]))

  def testSameStructure(self):
    d = {1: "a"}
    nest.assert_same_structure(d, data_structures._DictWrapper(d.copy()))

  def testFunctionCaching(self):
    # A _DictWrapper argument should reuse the trace of an equivalent dict.
    @def_function.function
    def f(dict_input):
      return dict_input["x"] + constant_op.constant(1.)

    first_trace = f.get_concrete_function({"x": constant_op.constant(2.)})
    second_trace = f.get_concrete_function(
        data_structures._DictWrapper({"x": constant_op.constant(3.)}))
    self.assertIs(first_trace, second_trace)
class HasTuple(training.Model):
  """Model whose sub-layers are held in plain tuples (auto-wrapped)."""

  def __init__(self):
    super(HasTuple, self).__init__()
    self.layer_list = (
        core.Dense(3), core.Dense(4),
        core.Dense(5, kernel_regularizer=math_ops.reduce_sum))
    # Single batch-norm layer; kept in a tuple to exercise update tracking.
    self.layers_with_updates = (normalization.BatchNormalization(),)

  def call(self, x):
    # Chain the dense layers, accumulating the activation sum to scale the
    # batch-normalized output.
    aggregation = 0.
    for l in self.layer_list:
      x = l(x)
      aggregation += math_ops.reduce_sum(x)
    bn, = self.layers_with_updates
    return bn(x) / aggregation
class TupleTests(test.TestCase, parameterized.TestCase):
  """Tests for _TupleWrapper, the trackable stand-in for tuples."""

  @test_util.run_in_graph_and_eager_modes
  def testTracking(self):
    # Layers reached through tuple attributes are tracked and saveable.
    model = HasTuple()
    output = model(array_ops.ones([32, 2]))
    self.assertAllEqual([32, 5], output.shape.as_list())
    self.assertLen(model.layers, 4)
    self.assertLen(model.layer_list.layers, 3)
    six.assertCountEqual(
        self,
        model.layers,
        tuple(model.layer_list.layers) + model.layers_with_updates)
    self.assertEqual(3, model.layer_list.layers[0].units)
    self.assertEqual(4, model.layer_list.layers[1].units)
    self.assertEqual(5, model.layer_list.layers[2].units)
    self.assertLen(model._checkpoint_dependencies, 2)
    self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
    self.assertIs(model.layers_with_updates,
                  model._checkpoint_dependencies[1].ref)
    self.assertLen(
        model._checkpoint_dependencies[0].ref._checkpoint_dependencies, 3)
    self.evaluate([v.initializer for v in model.variables])
    self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
    save_path = os.path.join(self.get_temp_dir(), "ckpt")
    model.save_weights(save_path)
    self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
    model.load_weights(save_path)
    self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
                        self.evaluate(model.variables[0]))
    v = variables.Variable(1.)
    model.var_list = (v,)
    self.assertIn(id(v), [id(obj) for obj in model.variables])
    self.assertIn(id(v), [id(obj) for obj in model.trainable_variables])
    self.assertNotIn(id(v), [id(obj) for obj in model.non_trainable_variables])
    self.assertIn(id(model.layer_list[0].trainable_weights[0]),
                  [id(obj) for obj in model.trainable_weights])

  @parameterized.named_parameters(
      ("Module", module.Module),
      ("Model", training.Model),
  )
  def testSubModelTracking(self, module_subclass):
    # Variables of a sub-module held in a tuple surface on the parent.
    model = module_subclass()
    model.v = variables.Variable(1.)
    self.assertIn(model.v, model.trainable_variables)
    model2 = module_subclass()
    model2.m = (model,)
    self.assertIn(model.v, model2.trainable_variables)

  def testSubSequentialTracking(self):

    class _Subclassed(training.Model):

      def __init__(self, wrapped):
        super(_Subclassed, self).__init__()
        self._wrapped = wrapped

      def call(self, x):
        return self._wrapped(x)

    model = sequential.Sequential()
    layer = core.Dense(1)
    model.add(layer)
    model2 = _Subclassed(model)
    model2(array_ops.ones([1, 2]))
    model2.m = (model,)
    self.assertIn(layer.kernel, model2.trainable_weights)

  def testJSONSerialization(self):
    obj = tracking.AutoTrackable()
    obj.l = (1,)
    json.dumps(obj.l, default=serialization.get_json_type)

  def testUpdatesForwarded(self):
    # Graph mode: BatchNorm's updates must be forwarded to the model.
    with ops.Graph().as_default():
      model = HasTuple()
      model_input = array_ops.ones([32, 2])
      model(model_input)
      self.assertNotEmpty(model.layers_with_updates[0].updates)
      self.assertEqual(set(model.layers_with_updates[0].updates),
                       set(model.updates))

    # Eager mode: updates run inline, so none are collected.
    model = HasTuple()
    model_input = array_ops.ones([32, 2])
    model(model_input)
    self.assertEmpty(model.updates)

  @test_util.run_in_graph_and_eager_modes
  def testLossesForwarded(self):
    # The kernel regularizer on one Dense layer produces exactly one loss.
    model = HasTuple()
    model_input = array_ops.ones([32, 2])
    model(model_input)
    self.assertLen(model.losses, 1)

  def testModelContainersCompareEqual(self):

    class HasEqualContainers(training.Model):

      def __init__(self):
        super(HasEqualContainers, self).__init__()
        self.l1 = ()
        self.l2 = ()

    # Wrapped tuples compare (and hash) like the plain tuples they wrap.
    model = HasEqualContainers()
    first_layer = HasEqualContainers()
    model.l1 = (first_layer,)
    second_layer = HasEqualContainers()
    model.l2 = (second_layer,)
    self.assertEqual((first_layer,), model.l1)
    d = {model.l1: 1, model.l2: 2}
    self.assertEqual(1, d[model.l1])
    self.assertEqual(1, d[(first_layer,)])
    self.assertEqual(2, d[model.l2])
    self.assertEqual(2, d[(second_layer,)])
    self.assertEqual([first_layer, second_layer], model.layers)

  @test_util.run_in_graph_and_eager_modes
  def testTensorConversion(self):

    class TupleToTensor(training.Model):

      def __init__(self):
        super(TupleToTensor, self).__init__()
        self.l = (1., 2., 3.)

    self.assertAllEqual(
        (1., 2., 3.),
        self.evaluate(constant_op.constant(TupleToTensor().l)))
    self.assertAllEqual(
        (1., 2., 3.),
        self.evaluate(array_ops.pack(TupleToTensor().l)))

  def testNonLayerVariables(self):
    v = resource_variable_ops.ResourceVariable([1.])
    l = data_structures._TupleWrapper((v,))
    self.assertEqual([], l.layers)
    self.assertEqual([v], l.variables)
    self.assertEqual([v], l.trainable_weights)
    self.assertEqual([], l.non_trainable_variables)

  def testCopy(self):
    v1 = resource_variable_ops.ResourceVariable(1.)
    v2 = resource_variable_ops.ResourceVariable(1.)
    l1 = data_structures._TupleWrapper((v1, v2))
    l2 = copy.copy(l1)
    self.assertEqual(l1, (v1, v2))
    self.assertEqual(l2, (v1, v2))
    self.assertIs(l1[0], l2[0])
    l2_deep = copy.deepcopy(l1)
    self.assertIsNot(l1[0], l2_deep[0])
    # Tuples are immutable, so no append.
    with self.assertRaises(AttributeError):
      l2.append(v1)

  def testSlicing(self):
    v1 = resource_variable_ops.ResourceVariable(1.)
    v2 = resource_variable_ops.ResourceVariable(1.)
    v3 = resource_variable_ops.ResourceVariable(1.)
    v4 = resource_variable_ops.ResourceVariable(1.)
    l = data_structures._TupleWrapper((v1, v2, v3, v4))
    self.assertEqual(l[1:], (v2, v3, v4))
    self.assertEqual(l[1:-1], (v2, v3))
    self.assertEqual(l[:-1], (v1, v2, v3))

  def testHash(self):
    # Unlike ListWrapper, tuple wrappers are hashable and compare by value.
    has_sequences = set([data_structures._TupleWrapper(),
                         data_structures._TupleWrapper()])
    self.assertLen(has_sequences, 1)
    self.assertIn(data_structures._TupleWrapper(), has_sequences)

  def testIMul_zero(self):
    l = data_structures._TupleWrapper((1,))
    l *= 0
    self.assertEqual((), l)

  def testIMul(self):
    # Note: tuple behavior differs from list behavior. Lists are mutated by
    # imul/iadd, tuples assign a new object to the left hand side of the
    # expression.
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures._TupleWrapper((v,))
    original = l
    l *= 2
    self.assertEqual(l, (v,) * 2)
    self.assertNotEqual(original, (v,) * 2)

  def testIAdd(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures._TupleWrapper((v,))
    original = l
    l += (1,)
    self.assertEqual(l, (v, 1))
    self.assertNotEqual(original, (v, 1))
    self.assertEqual(original, (v,))

  def testMul(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures._TupleWrapper((v, v, v))
    self.assertEqual(l * 2, (v, v, v) * 2)

  def testRMul(self):
    v = resource_variable_ops.ResourceVariable(1.)
    l = data_structures._TupleWrapper((v, v, v))
    self.assertEqual(2 * l, (v, v, v) * 2)

  def testPickle(self):
    original = data_structures._TupleWrapper((1, 2))
    serialized = pickle.dumps(original)
    del original
    deserialized = pickle.loads(serialized)
    self.assertEqual((1, 2), deserialized)

  def testNamedTuple(self):
    # Named fields produce named checkpoint dependencies.
    named = collections.namedtuple("Named", ("x", "y"))
    v = variables.Variable(2)
    nt = named(x=v, y=2)
    m = module.Module()
    m.nt = nt
    self.assertIs(v, m.nt.x)
    self.assertIs(v, m.nt[0])
    self.assertIs(
        v, m._checkpoint_dependencies[0].ref._checkpoint_dependencies[0].ref)
    self.assertEqual(2, m.nt.y)

  def testNamedSubclassing(self):
    named = collections.namedtuple("Named", ("x", "y"))
    v = variables.Variable(2)

    class NamedSubclass(named):

      def __new__(cls, x, y):
        del y  # unused
        return super(NamedSubclass, cls).__new__(cls, x, 3)

      @property
      def summed(self):
        return self.x + self.y

    nt = NamedSubclass(x=v, y=2)
    m = module.Module()
    m.nt = nt
    self.assertEqual(3, m.nt.y)
    self.assertIs(v, m.nt.x)
    self.assertIs(
        v, m._checkpoint_dependencies[0].ref._checkpoint_dependencies[0].ref)
    self.assertEqual("x", m.nt._checkpoint_dependencies[0].name)
    self.assertEqual("0", m.nt._checkpoint_dependencies[1].name)
    self.assertEqual(5, self.evaluate(m.nt.summed))

  def testUnnamedSubclassing(self):
    v = variables.Variable(2)

    class UnnamedSubclass(tuple):

      @property
      def summed(self):
        return self[0] + self[1]

    unt = UnnamedSubclass([v, 2])
    m = module.Module()
    m.unt = unt
    self.assertEqual("0", m.unt._checkpoint_dependencies[0].name)
    self.assertLen(m.unt._checkpoint_dependencies, 1)
    self.assertEqual(4, self.evaluate(m.unt.summed))
    nest.assert_same_structure(
        [m.unt], nest.map_structure(lambda x: x, [m.unt]))

  def testNamedtupleSubclassWithCustomNew(self):
    # A namedtuple subclass whose __new__ signature differs can't be
    # reconstructed for saving, so checkpointing must fail loudly.

    class SubclassWithDifferentArgs(collections.namedtuple("A", ["x"])):

      def __new__(cls):
        return super(SubclassWithDifferentArgs, cls).__new__(cls, [])

    nt = SubclassWithDifferentArgs()
    m = module.Module()
    m.nt = nt
    m.nt.x.append(variables.Variable(1.))
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    ckpt = util.Checkpoint(m=m)
    with self.assertRaises(ValueError):
      ckpt.save(prefix)

  def testSameStructure(self):
    t = (variables.Variable(1.),)
    m = module.Module()
    m.t = t
    nest.assert_same_structure(t, m.t)
    nest.assert_same_structure(m.t, t)

    nt_type = collections.namedtuple("nt", ["x", "y"])
    nt = nt_type(x=1, y=2)
    m.nt = nt
    nest.assert_same_structure(m.nt, nt)
    with self.assertRaises(TypeError):  # pylint: disable=g-error-prone-assert-raises
      nest.assert_same_structure(m.nt, m.t)

  def testFlatten(self):
    t = data_structures._TupleWrapper((1, data_structures._TupleWrapper((2,))))
    self.assertEqual([1, 2], nest.flatten(t))
    self.assertEqual(
        nest.flatten_with_tuple_paths((1, (2,))),
        nest.flatten_with_tuple_paths(t))
    self.assertEqual((3, (4,)),
                     nest.pack_sequence_as(t, [3, 4]))
    nt_type = collections.namedtuple("nt", ["x", "y"])
    nt = nt_type(1., 2.)
    wrapped_nt = data_structures._TupleWrapper(nt)
    self.assertEqual(
        nest.flatten_with_tuple_paths(nt),
        nest.flatten_with_tuple_paths(wrapped_nt))
    self.assertEqual((3, 4,),
                     nest.pack_sequence_as(wrapped_nt, [3, 4]))
    self.assertEqual(3, nest.pack_sequence_as(wrapped_nt, [3, 4]).x)

  def testFunctionCaching(self):
    # A _TupleWrapper argument should reuse the trace of an equivalent tuple.
    @def_function.function
    def f(tuple_input):
      return tuple_input[0] + constant_op.constant(1.)

    first_trace = f.get_concrete_function((constant_op.constant(2.),))
    second_trace = f.get_concrete_function(
        data_structures._TupleWrapper((constant_op.constant(3.),)))
    self.assertIs(first_trace, second_trace)

  def testPythonMapImpl(self):
    t = data_structures._TupleWrapper((1, data_structures._TupleWrapper((2,))))
    self.assertEqual(
        (4, (5,)),
        nest.map_structure_up_to((None, (None,)), lambda x: x + 3, t,
                                 check_types=True))
    nest.assert_shallow_structure((None, None), t)

  def testDatasetMap(self):
    dataset = dataset_ops.Dataset.from_tensor_slices(
        constant_op.constant([1, 2, 3]))
    dataset = dataset.map(lambda x: data_structures._TupleWrapper((x,)))
    for index, element in enumerate(dataset):
      self.assertEqual((index + 1,), self.evaluate(element))

  def testDatasetMapNamed(self):
    nt_type = collections.namedtuple("A", ["x"])
    dataset = dataset_ops.Dataset.from_tensor_slices(
        constant_op.constant([1, 2, 3]))
    dataset = dataset.map(lambda x: data_structures._TupleWrapper(nt_type(x)))
    for index, element in enumerate(dataset):
      self.assertEqual((index + 1,), self.evaluate(element))

  def testLoopAssignedModule(self):
    # A module containing itself in a tuple must not recurse infinitely.
    m = module.Module()
    m.s = (m,)
    self.assertLen(m._checkpoint_dependencies, 1)
    self.assertIs(m.s, m._checkpoint_dependencies[0].ref)
    # Bug fix: was assertIs("s", ...), which compares string *identity* and
    # only passed because CPython happens to intern short string literals.
    # assertEqual tests the intended value equality.
    self.assertEqual("s", m._checkpoint_dependencies[0].name)
    self.assertEqual((), m.trainable_variables)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  test.main()
| |
from TASSELpy.net.maizegenetics.taxa.distance.DistanceMatrix import DistanceMatrix
from TASSELpy.net.maizegenetics.util.ProgressListener import ProgressListener
from TASSELpy.net.maizegenetics.util.BitSet import BitSet
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.net.maizegenetics.dna.snp.GenotypeTable import GenotypeTable
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload,javaStaticOverload
from TASSELpy.utils.primativeArray import javaPrimativeArray, meta_long_array
from TASSELpy.utils.helper import make_sig
import numpy as np
# Fully qualified JVM class paths, keyed by short name; used below when
# constructing Java method signatures for the overload decorators.
java_imports = {'BitSet':'net/maizegenetics/util/BitSet',
                'GenotypeTable':'net/maizegenetics/dna/snp/GenotypeTable',
                'IBSDistanceMatrix':'net/maizegenetics/analysis/distance/IBSDistanceMatrix',
                'ProgressListener':'net/maizegenetics/util/ProgressListener',
                'String':'java/lang/String'}
class IBSDistanceMatrix(DistanceMatrix):
"""
This class calculates an identity by state matrix. It is scaled so only
    non-missing comparisons are used. It conducts bit-level calculations of IBS for genotypes.
Only the two most common alleles are used in the distance calculations.
Please note that when heterozygous genotypes are used, Het to Het distance is 0.5 NOT 0.0. The default
along the identity diagonal is 0 (isTrueIBS = false), but changing isTrueIBS = true will calculate
the identity.
The distance estimates become wildly inaccurate when too few sites are used to calculate
distance. The minSiteComp parameter can be used to control the minimum number of sites
used for a calculation. If there are insufficient sites in the estimate, then Double.NaN
is returned.
"""
_java_name = java_imports['IBSDistanceMatrix']
    # Overload dispatch: each (signature, types) pair maps one Java
    # constructor signature to the Python argument types that select it.
    # The decorator does all the work; the Python body is an empty stub.
    @javaConstructorOverload(java_imports['IBSDistanceMatrix'],
        (make_sig([java_imports['GenotypeTable']],'void'),(GenotypeTable,)),
        (make_sig([java_imports['GenotypeTable'],java_imports['ProgressListener']],
                  'void'),(GenotypeTable,ProgressListener)),
        (make_sig([java_imports['GenotypeTable'],'int',
                   java_imports['ProgressListener']],
                  'void'),(GenotypeTable,metaInteger,ProgressListener)),
        (make_sig([java_imports['GenotypeTable'],'int','boolean',
                   java_imports['ProgressListener']],
                  'void'),(GenotypeTable,metaInteger,metaBoolean,ProgressListener)))
    def __init__(self,*args,**kwargs):
        """
        Compute observed distances for all taxa. Missing sites are ignored

        Signatures:

        IBSDistanceMatrix(GenotypeTable theAlignment)
        IBSDistanceMatrix(GenotypeTable theAlignment, ProgressListener listener)
        IBSDistanceMatrix(GenotypeTable theAlignment, int minSiteComp, ProgressListener listener)
        IBSDistanceMatrix(GenotypeTable theAlignment, int minSiteComp, boolean trueIBS,
                          ProgressListener listener)

        Arguments:

        IBSDistanceMatrix(GenotypeTable theAlignment)
            theAlignment -- Alignment used to compute distances
        IBSDistanceMatrix(GenotypeTable theAlignment, ProgressListener listener)
            theAlignment -- Alignment used to compute distances
            listener -- Listener to track progress in calculations
        IBSDistanceMatrix(GenotypeTable theAlignment, int minSiteComp, ProgressListener listener)
            theAlignment -- Alignment used to compute distances
            minSiteComp -- Minimum number of sites needed to estimate distance
            listener -- Listener to track progress in calculations
        IBSDistanceMatrix(GenotypeTable theAlignment, int minSiteComp, boolean trueIBS,
                          ProgressListener listener)
            theAlignment -- Alignment used to compute distances
            minSiteComp -- Minimum number of sites needed to estimate distance
            trueIBS -- Estimate diagonal distance based IBS (default = false, i=i=0.0)
            listener -- Listener to track progress in calculations
        """
        # Construction is forwarded to the JVM by the decorator above.
        pass
## Compute distance for a pair of taxa
# @param theTBA input alignment
# @param taxon1 index of taxon1
# @param taxon2 index of taxon2
# @return Array of {distance, number of sites used in comparison}
@javaStaticOverload(java_imports['IBSDistanceMatrix'],"computeHetBitDistances",
(make_sig([java_imports['GenotypeTable'],'int','int'],'double[]'), # First
(GenotypeTable,metaInteger,metaInteger),
lambda x: javaPrimativeArray.make_array_from_obj('double',x)),
(make_sig([java_imports['GenotypeTable'],'int','int','int','boolean'], # Second
'double[]'),
(GenotypeTable,metaInteger,metaInteger,metaInteger,metaBoolean),
lambda x: javaPrimativeArray.make_array_from_obj('double',x)),
(make_sig([java_imports['GenotypeTable'],'int','int','int','int','int', # Third
java_imports['BitSet']], 'double[]'),
(GenotypeTable,metaInteger,metaInteger,metaInteger,metaInteger,metaInteger,
BitSet),
lambda x: javaPrimativeArray.make_array_from_obj('double',x)),
(make_sig(['long[]','long[]','long[]','long[]','int'], 'double[]'), # Fourth
(meta_long_array,
meta_long_array,
meta_long_array,
meta_long_array,metaInteger),
lambda x: javaPrimativeArray.make_array_from_obj('double',x)),
(make_sig(['long[]','long[]','long[]','long[]','int','int','int'], # Fifth
'double[]'),
(meta_long_array,
meta_long_array,
meta_long_array,
meta_long_array,metaInteger,
metaInteger,metaInteger),
lambda x: javaPrimativeArray.make_array_from_obj('double',x)))
def computeHetBitDistances(*args):
"""
Compute distance for a pair of taxa
Signatures:
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2)
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2,
int minSitesCompared, boolean isTrueIBS)
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2,
int minSitesCompared, int firstWord, int lastWord, BitSet maskBadSet)
static double[] computeHetBitDistances(long[] iMj, long[] iMn, long[] jMj, long[] jMn,
int minSitesCompared)
static double[] computeHetBitDistances(long[] iMj, long[] iMn, long[] jMj, long[] jMn,
int minSitesCompared, int firstWord, int lastWord)
Arguments:
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2)
theTBA -- input alignment
taxon1 -- index of taxon 1
taxon2 -- index of taxon 2
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2,
int minSitesCompared, boolean isTrueIBS)
theTBA -- input alignment
taxon1 -- index of taxon 1
taxon2 -- index of taxon 2
minSitesCompared -- Minimum number of sites needed to estimate distance
isTrueIBS -- estimate diagonal distance based IBS (default = False, i=i=0.0)
static double[] computeHetBitDistances(GenotypeTable theTBA, int taxon1, int taxon2,
int minSitesCompared, int firstWord, int lastWord, BitSet maskBadSet)
theTBA -- input alignment
taxon1 -- index of taxon 1
taxon2 -- index of taxon 2
minSitesCompared -- Minimum number of sites needed to estimate distance
firstWord -- Starting word for calculating distance site=(firstWord*64)
lastWord -- Ending word for calculating distance inclusive site=(lastWord*64+63)
maskBadSet -- Optional mask for sites (those set to 1 are kept)
static double[] computeHetBitDistances(long[] iMj, long[] iMn, long[] jMj, long[] jMn,
int minSitesCompared)
iMj -- Vector of major alleles for taxon i
iMn -- Vector of minor alleles for taxon i
jMj -- Vector of major alleles for taxon j
jMn -- Vector of minor alleles for taxon j
minSitesCompared -- Minimum number of sites needed to estimate distance
static double[] computeHetBitDistances(long[] iMj, long[] iMn, long[] jMj, long[] jMn,
int minSitesCompared, int firstWord, int lastWord)
iMj -- Vector of major alleles for taxon i
iMn -- Vector of minor alleles for taxon i
jMj -- Vector of major alleles for taxon j
jMn -- Vector of minor alleles for taxon j
minSitesCompared -- Minimum number of sites needed to estimate distance
firstWord -- Starting word for calculating distance site=(firstWord*64)
lastWord -- Ending word for calculating distance inclusive site=(lastWord*64+63)
Returns:
Array of {distance, number of sites used in comparison}
"""
pass
## Gets the average number of sites used in calculating the distance matrix
# @return Average number of sites used in calculating the distance matrix
@javaOverload("getAverageTotalSites",
(make_sig([],'double'),(),np.float64))
def getAverageTotalSites(self, *args):
"""
Gets average number of sites used in calculating the distance matrix
Signatures:
double getAverageTotalSits()
Returns:
Average number of sites used in calculating the distance matrix
"""
pass
## Gets string representation of this matrix with 'd' displayed digits
# @param d The number of digits to display
# @return String representation of this matrix
@javaOverload("toString",
(make_sig([],java_imports['String']),(),None),
(make_sig(['int'],java_imports['String']),(metaInteger,),None))
def toString(self, *args):
"""
Gets string representation of this matrix with 'd' displayed digits
Signatures:
String toString()
String toString(int d)
Arguments:
String toString(int d)
d -- The number of digits to display
Returns:
String representation of this matrix
"""
pass
## Returns whether true IBS is calculated for the diagonal
# @return Whether true IBS is calculated for the diagonal
@javaOverload("isTrueIBS",
(make_sig([],'boolean'),(),None))
def isTrueIBS(self, *args):
"""
Returns whether true IBS is calculated for the diagonal
Signatures:
boolean IsTrueIBS()
Returns:
Whether true IBS is calculated for the diagonal
"""
pass
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DependentHostedNumberOrderList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, signing_document_sid):
        """
        Initialize the DependentHostedNumberOrderList

        :param Version version: Version that contains the resource
        :param signing_document_sid: LOA document sid.

        :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
        """
        super(DependentHostedNumberOrderList, self).__init__(version)

        # Path Solution
        self._solution = {'signing_document_sid': signing_document_sid}
        self._uri = '/AuthorizationDocuments/{signing_document_sid}/DependentHostedNumberOrders'.format(**self._solution)

    def stream(self, status=values.unset, phone_number=values.unset,
               incoming_phone_number_sid=values.unset, friendly_name=values.unset,
               unique_name=values.unset, limit=None, page_size=None):
        """
        Stream DependentHostedNumberOrderInstance records lazily from the API.

        Records are fetched page by page only as the generator is consumed,
        which keeps memory usage low even for large result sets.

        :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
        :param unicode phone_number: An E164 formatted phone number.
        :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
        :param unicode friendly_name: A human readable description of this resource.
        :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
        :param int limit: Maximum number of records to yield (default: no limit).
        :param int page_size: Records fetched per request (default 50). When only
                              a limit is given, an efficient page size of
                              min(limit, 1000) is chosen automatically.

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
        """
        bounds = self._version.read_limits(limit, page_size)

        first_page = self.page(
            status=status,
            phone_number=phone_number,
            incoming_phone_number_sid=incoming_phone_number_sid,
            friendly_name=friendly_name,
            unique_name=unique_name,
            page_size=bounds['page_size'],
        )

        return self._version.stream(first_page, bounds['limit'], bounds['page_limit'])

    def list(self, status=values.unset, phone_number=values.unset,
             incoming_phone_number_sid=values.unset, friendly_name=values.unset,
             unique_name=values.unset, limit=None, page_size=None):
        """
        Eagerly fetch DependentHostedNumberOrderInstance records as a list.

        Unlike stream(), all matching records (up to `limit`) are loaded into
        memory before this call returns.

        :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
        :param unicode phone_number: An E164 formatted phone number.
        :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
        :param unicode friendly_name: A human readable description of this resource.
        :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
        :param int limit: Maximum number of records to return (default: no limit).
        :param int page_size: Records fetched per request (default 50). When only
                              a limit is given, an efficient page size of
                              min(limit, 1000) is chosen automatically.

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
        """
        records = self.stream(
            status=status,
            phone_number=phone_number,
            incoming_phone_number_sid=incoming_phone_number_sid,
            friendly_name=friendly_name,
            unique_name=unique_name,
            limit=limit,
            page_size=page_size,
        )
        return [record for record in records]

    def page(self, status=values.unset, phone_number=values.unset,
             incoming_phone_number_sid=values.unset, friendly_name=values.unset,
             unique_name=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of DependentHostedNumberOrderInstance records.
        The request is executed immediately.

        :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
        :param unicode phone_number: An E164 formatted phone number.
        :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
        :param unicode friendly_name: A human readable description of this resource.
        :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of DependentHostedNumberOrderInstance
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
        """
        # values.of drops unset entries so only supplied filters are sent.
        query = values.of({
            'Status': status,
            'PhoneNumber': phone_number,
            'IncomingPhoneNumberSid': incoming_phone_number_sid,
            'FriendlyName': friendly_name,
            'UniqueName': unique_name,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page('GET', self._uri, params=query)

        return DependentHostedNumberOrderPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of DependentHostedNumberOrderInstance records
        from an API-generated URL. The request is executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of DependentHostedNumberOrderInstance
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
        """
        response = self._version.domain.twilio.request('GET', target_url)
        return DependentHostedNumberOrderPage(self._version, response, self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderList>'
class DependentHostedNumberOrderPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, response, solution):
        """
        Initialize the DependentHostedNumberOrderPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param signing_document_sid: LOA document sid.

        :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderPage
        """
        super(DependentHostedNumberOrderPage, self).__init__(version, response)
        # Path solution carried over from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a DependentHostedNumberOrderInstance from one record's payload.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
        """
        document_sid = self._solution['signing_document_sid']
        return DependentHostedNumberOrderInstance(
            self._version,
            payload,
            signing_document_sid=document_sid,
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderPage>'
class DependentHostedNumberOrderInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    class Status(object):
        RECEIVED = "received"
        PENDING_VERIFICATION = "pending-verification"
        VERIFIED = "verified"
        PENDING_LOA = "pending-loa"
        CARRIER_PROCESSING = "carrier-processing"
        TESTING = "testing"
        COMPLETED = "completed"
        FAILED = "failed"
        ACTION_REQUIRED = "action-required"

    class VerificationType(object):
        PHONE_CALL = "phone-call"
        PHONE_BILL = "phone-bill"

    def __init__(self, version, payload, signing_document_sid):
        """
        Initialize the DependentHostedNumberOrderInstance

        :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
        :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance
        """
        super(DependentHostedNumberOrderInstance, self).__init__(version)

        # Most payload fields are stored verbatim; dates and integer counters
        # are deserialized explicitly below.
        plain_keys = (
            'sid', 'account_sid', 'incoming_phone_number_sid', 'address_sid',
            'signing_document_sid', 'phone_number', 'capabilities',
            'friendly_name', 'unique_name', 'status', 'failure_reason',
            'email', 'cc_emails', 'verification_type',
            'verification_document_sid', 'extension', 'verification_code',
            'verification_call_sids',
        )
        marshaled = {key: payload.get(key) for key in plain_keys}
        marshaled['date_created'] = deserialize.iso8601_datetime(payload.get('date_created'))
        marshaled['date_updated'] = deserialize.iso8601_datetime(payload.get('date_updated'))
        marshaled['verification_attempts'] = deserialize.integer(payload.get('verification_attempts'))
        marshaled['call_delay'] = deserialize.integer(payload.get('call_delay'))
        self._properties = marshaled

        # Context
        self._context = None
        self._solution = {'signing_document_sid': signing_document_sid}

    @property
    def sid(self):
        """HostedNumberOrder sid. :rtype: unicode"""
        return self._properties['sid']

    @property
    def account_sid(self):
        """Account sid. :rtype: unicode"""
        return self._properties['account_sid']

    @property
    def incoming_phone_number_sid(self):
        """IncomingPhoneNumber sid. :rtype: unicode"""
        return self._properties['incoming_phone_number_sid']

    @property
    def address_sid(self):
        """Address sid. :rtype: unicode"""
        return self._properties['address_sid']

    @property
    def signing_document_sid(self):
        """LOA document sid. :rtype: unicode"""
        return self._properties['signing_document_sid']

    @property
    def phone_number(self):
        """An E164 formatted phone number. :rtype: unicode"""
        return self._properties['phone_number']

    @property
    def capabilities(self):
        """A mapping of phone number capabilities. :rtype: unicode"""
        return self._properties['capabilities']

    @property
    def friendly_name(self):
        """A human readable description of this resource. :rtype: unicode"""
        return self._properties['friendly_name']

    @property
    def unique_name(self):
        """A unique, developer assigned name of this HostedNumberOrder. :rtype: unicode"""
        return self._properties['unique_name']

    @property
    def status(self):
        """The Status of this HostedNumberOrder. :rtype: DependentHostedNumberOrderInstance.Status"""
        return self._properties['status']

    @property
    def failure_reason(self):
        """Why a hosted_number_order reached status "action-required". :rtype: unicode"""
        return self._properties['failure_reason']

    @property
    def date_created(self):
        """The date this HostedNumberOrder was created. :rtype: datetime"""
        return self._properties['date_created']

    @property
    def date_updated(self):
        """The date this HostedNumberOrder was updated. :rtype: datetime"""
        return self._properties['date_updated']

    @property
    def verification_attempts(self):
        """The number of attempts made to verify ownership of the phone number. :rtype: unicode"""
        return self._properties['verification_attempts']

    @property
    def email(self):
        """Email. :rtype: unicode"""
        return self._properties['email']

    @property
    def cc_emails(self):
        """A list of emails. :rtype: unicode"""
        return self._properties['cc_emails']

    @property
    def verification_type(self):
        """The method used for verifying ownership of the number to be hosted. :rtype: DependentHostedNumberOrderInstance.VerificationType"""
        return self._properties['verification_type']

    @property
    def verification_document_sid(self):
        """Verification Document Sid. :rtype: unicode"""
        return self._properties['verification_document_sid']

    @property
    def extension(self):
        """Phone extension to use for ownership verification call. :rtype: unicode"""
        return self._properties['extension']

    @property
    def call_delay(self):
        """Seconds (0-30) to delay ownership verification call by. :rtype: unicode"""
        return self._properties['call_delay']

    @property
    def verification_code(self):
        """The digits passed during the ownership verification call. :rtype: unicode"""
        return self._properties['verification_code']

    @property
    def verification_call_sids(self):
        """List of IDs for ownership verification calls. :rtype: unicode"""
        return self._properties['verification_call_sids']

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview.HostedNumbers.DependentHostedNumberOrderInstance>'
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import cpe_utils
class TestBasic(unittest.TestCase):
    """Test the basic functionality of cpe_utils: parsing (CPE 2.2 and 2.3
    strings), wildcard matching, human-readable formatting, JSON
    serialization, object equality, and wildcard detection.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_cpe_parsing(self):
        # Every component of a CPE 2.2 URI should be parsed into its own attribute.
        cpe_str = "cpe:/part:vendor:product:version:update:edition"
        cpe_obj = cpe_utils.CPE(cpe_str)
        self.assertEqual(cpe_obj.part, "part")
        self.assertEqual(cpe_obj.vendor, "vendor")
        self.assertEqual(cpe_obj.product, "product")
        self.assertEqual(cpe_obj.version, "version")
        self.assertEqual(cpe_obj.update, "update")
        self.assertEqual(cpe_obj.edition, "edition")
        # see issue #5
        # TODO Test vendor
        # TODO Test product
        # TODO Test version
        # TODO Test update
        # TODO Test edition
    def test_matches(self):
        # Each row: [candidate, target, expected result of candidate.matches(target)].
        # Empty components and the * / ? wildcards are expected to match.
        tests = [
            ["cpe:/a:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/X:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:X:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:X:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:X:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:X:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:sp3:X", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vandor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:ndor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:dor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:or:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:r:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vbndo:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vand:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:ven:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:ve:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:v:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vbndor:produc:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:produ:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vcndor:prod:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vindor:pro:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vondor:pr:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vundor:p:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vondor::1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.0:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product::sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:sp:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:s:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1::x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:sp3:x8", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:sp3:x", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:sp3:", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vndor:poduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vedor:prduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:venor:prouct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendr:prodct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendo:produt:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:produc:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:space:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:space:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.10:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.11:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.12:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.13:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.14:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.15:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.16:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.17:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.18:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.19:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
            ["cpe:/a:vendor:product:1.1:sp3:*", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:*:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:*:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:*:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:*:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/*:vendor:product:1.1:sp3:x8?", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:sp3:x?6", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:sp3:?86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:sp?:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:s?3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.1:?p3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1.?:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:1?1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:product:?.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:produc?:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:produ?t:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:prod?ct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:pro?uct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:pr?duct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:p?oduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendor:?roduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vendo?:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:vend?r:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:ven?or:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:ve?dor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:v?ndor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/a:?endor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
            ["cpe:/?:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
        ]
        # enumerate replaces the manual counter; numbering starts at 1 so the
        # failure message identifies the row.
        for count, test_info in enumerate(tests, 1):
            cpe_str1, cpe_str2, match_result = test_info
            cpe1 = cpe_utils.CPE(cpe_str1)
            cpe2 = cpe_utils.CPE(cpe_str2)
            self.assertTrue(cpe1.matches(cpe2) == match_result, "[{}] {}.match({}) was not {}".format(
                count,
                cpe_str1,
                cpe_str2,
                match_result
            ))
    def test_cpe_parsing_23(self):
        # CPE 2.3 formatted strings should parse into the same attributes.
        cpe_str = "cpe:2.3:o:vendor:product:version:update:edition"
        cpe_obj = cpe_utils.CPE(cpe_str)
        self.assertEqual(cpe_obj.part, "o")
        self.assertEqual(cpe_obj.vendor, "vendor")
        self.assertEqual(cpe_obj.product, "product")
        self.assertEqual(cpe_obj.version, "version")
        self.assertEqual(cpe_obj.update, "update")
        self.assertEqual(cpe_obj.edition, "edition")
        # see issue #5
        # TODO Test vendor
        # TODO Test product
        # TODO Test version
        # TODO Test update
        # TODO Test edition
    def test_cpe_exception(self):
        # A malformed CPE string must raise CPEException.
        with self.assertRaises(cpe_utils.CPEException):
            cpe_utils.CPE("cpe:::::")
    def test_human(self):
        # Each row: [cpe string, expected human-readable rendering].
        tests = [
            ["cpe:/a:vendor:product:1.1:sp3:x86", "Vendor Product 1.1 SP3 x86"],
            ["cpe:/a:vendor_name:product:1.1:sp3:x86", "Vendor Name Product 1.1 SP3 x86"],
            ["cpe:/a:vendor:product::sp3:x86", "Vendor Product SP3 x86"],
            ["cpe:/a:vendor:::sp3:x86", "Vendor SP3 x86"],
            ["cpe:/a:vendor::::", "Vendor"],
            ["cpe:/a::::sp3:x86", "SP3 x86"],
            ["cpe:/a:vendor:product:1.1::", "Vendor Product 1.1"],
            ["cpe:/a:::::", ""],
            ["cpe:/a::product:::", "Product"],
            ["cpe:/a:::1.1::", "1.1"],
            ["cpe:/a::::sp3:", "SP3"],
            ["cpe:/a:::::x86", "x86"],
            ["cpe:/a:vendor:product:::", "Vendor Product"],
            ["cpe:/a:vendor:product:1.1:sp3:", "Vendor Product 1.1 SP3"],
            ["cpe:/a:vendor_name::::x86", "Vendor Name x86"],
            ["cpe:/a:vendor_name:::sp3:", "Vendor Name SP3"],
            ["cpe:/a:vendor_name:product:1.1::", "Vendor Name Product 1.1"],
            ["cpe:/a:vendor_name::::", "Vendor Name"],
            ["cpe:/a:vendor::::x86", "Vendor x86"],
            ["cpe:/a:vendor:::sp3:", "Vendor SP3"],
        ]
        for cpe_string, correct_human in tests:
            cpe = cpe_utils.CPE(cpe_string)
            self.assertEqual(cpe.human(), correct_human, "{!r} was not {!r} (for cpe {})".format(
                cpe.human(),
                correct_human,
                cpe_string
            ))
    def test_to_json(self):
        # Each row: [cpe string, expected dict form of the JSON output].
        tests = [
            ["cpe:/a:vendor:product:1.1:sp3:x86",{
                "part": "a",
                "vendor": "vendor",
                "product": "product",
                "version": "1.1",
                "update": "sp3",
                "edition": "x86"
            }],
            ["cpe:/a::product:1.1:sp3:x86",{
                "part": "a",
                "vendor": "",
                "product": "product",
                "version": "1.1",
                "update": "sp3",
                "edition": "x86"
            }],
            ["cpe:/a:vendor::1.1:sp3:x86",{
                "part": "a",
                "vendor": "vendor",
                "product": "",
                "version": "1.1",
                "update": "sp3",
                "edition": "x86"
            }],
            ["cpe:/a:vendor:product::sp3:x86",{
                "part": "a",
                "vendor": "vendor",
                "product": "product",
                "version": "",
                "update": "sp3",
                "edition": "x86"
            }],
            ["cpe:/a:vendor:product:1.1::x86",{
                "part": "a",
                "vendor": "vendor",
                "product": "product",
                "version": "1.1",
                "update": "",
                "edition": "x86"
            }],
            ["cpe:/a:vendor:product:1.1:sp3",{
                "part": "a",
                "vendor": "vendor",
                "product": "product",
                "version": "1.1",
                "update": "sp3",
                "edition": ""
            }],
        ]
        for cpe_string, correct_dict in tests:
            cpe = cpe_utils.CPE(cpe_string)
            # Compare the parsed JSON, not raw strings: comparing against
            # json.dumps(correct_dict) would make the test depend on dict key
            # ordering in the serialized output.
            self.assertEqual(json.loads(cpe.to_json()), correct_dict, "{!r} was not {!r} (for cpe {})".format(
                cpe.to_json(),
                correct_dict,
                cpe_string
            ))
    def test_cpe_obj_equals(self):
        orig_cpe = "cpe:/o:vendor:product:version:update:edition"
        cpe_obj1 = cpe_utils.CPE(orig_cpe)
        cpe_obj2 = cpe_utils.CPE(orig_cpe)
        # Positive case: two objects parsed from the same string are equal.
        # (Previously cpe_obj2 was constructed but never asserted against.)
        self.assertTrue(cpe_obj1 == cpe_obj2, "{} should equal {}".format(
            orig_cpe,
            orig_cpe
        ))
        false_cpes = [
            "cpe:/a:vendor:product:version:update:edition",
            "cpe:/o:vendor1:product:version:update:edition",
            "cpe:/o:vendor:product1:version:update:edition",
            "cpe:/o:vendor:product:version1:update:edition",
            "cpe:/o:vendor:product:version:update1:edition",
            "cpe:/o:vendor:product:version:update:edition1",
        ]
        for false_cpe in false_cpes:
            false_cpe_obj = cpe_utils.CPE(false_cpe)
            self.assertFalse(cpe_obj1 == false_cpe_obj, "{} is not equal to {}".format(
                false_cpe,
                orig_cpe
            ))
    def test_has_wildcards(self):
        # Every field should be able to carry a * or ? wildcard.
        cpe_tests = [
            "cpe:/*:vendor:product:version:update:edition",
            "cpe:/?:vendor:product:version:update:edition",
            "cpe:/o:v*ndor:product:version:update:edition",
            "cpe:/o:v?ndor:product:version:update:edition",
            "cpe:/o:vendor:pr*duct:version:update:edition",
            "cpe:/o:vendor:pr?duct:version:update:edition",
            "cpe:/o:vendor:product:vers*on:update:edition",
            "cpe:/o:vendor:product:vers?on:update:edition",
            "cpe:/o:vendor:product:version:upda*e:edition",
            "cpe:/o:vendor:product:version:upda?e:edition",
            "cpe:/o:vendor:product:version:update:ed*tion",
            "cpe:/o:vendor:product:version:update:ed?tion",
        ]
        for cpe_str in cpe_tests:
            cpe_obj = cpe_utils.CPE(cpe_str)
            self.assertTrue(cpe_obj.has_wildcards())
        no_wildcards = cpe_utils.CPE("cpe:/o:vendor:product:version:update:edition")
        self.assertFalse(no_wildcards.has_wildcards())
if __name__ == "__main__":
unittest.main()
| |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import sys
import itertools
import operator
import genutil
from genutil import Scalar, Vec2, Vec3, Vec4, Uint, UVec2, UVec3, UVec4, CaseGroup
# Templates
ARTIHMETIC_CASE_TEMPLATE = """
case ${{NAME}}
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values
{
${{VALUES}}
}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
out0 = ${{EXPR}};
${OUTPUT}
}
""
end
""".strip()
FUNCTIONS_CASE_TEMPLATE = """
case ${{NAME}}
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values
{
${{VALUES}}
}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
${{OUTTYPE}} func (${{OUTTYPE}} a)
{
return a * ${{OUTTYPE}}(2);
}
void main()
{
${SETUP}
out0 = func(in0);
${OUTPUT}
}
""
end
""".strip()
ARRAY_CASE_TEMPLATE = """
case ${{NAME}}
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values
{
${{VALUES}}
}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
${{ARRAYTYPE}}[] x = ${{ARRAYTYPE}}[] (${{ARRAYVALUES}});
out0 = ${{EXPR}};
${OUTPUT}
}
""
end
""".strip()
STRUCT_CASE_TEMPLATE = """
case ${{NAME}}
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values
{
${{VALUES}}
}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
struct {
${{OUTTYPE}} val;
} x;
x.val = ${{STRUCTVALUE}};
out0 = ${{EXPR}};
${OUTPUT}
}
""
end
""".strip()
INVALID_CASE_TEMPLATE = """
case ${{NAME}}
expect compile_fail
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values
{
${{VALUES}}
}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
out0 = in0 + ${{OPERAND}};
${OUTPUT}
}
""
end
""".strip()
INVALID_ARRAY_CASE_TEMPLATE = """
case ${{NAME}}
expect compile_fail
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values {}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
${{EXPR}}
${OUTPUT}
}
""
end
""".strip()
INVALID_STRUCT_CASE_TEMPLATE = """
case ${{NAME}}
expect compile_fail
version 310 es
require extension { "GL_EXT_shader_implicit_conversions" } in { vertex, fragment }
values {}
both ""
#version 310 es
precision highp float;
${DECLARATIONS}
void main()
{
${SETUP}
struct { ${{INTYPE}} value; } a;
struct { ${{OUTTYPE}} value; } b;
a = ${{INVALUE}};
b = a;
${OUTPUT}
}
""
end
""".strip()
# Input values
IN_ISCALAR = [ 2, 1, 1, 3, 5 ]
IN_USCALAR = [ 1, 3, 4, 7, 11 ]
IN_IVECTOR = [
( 1, 2, 3, 4),
( 2, 1, 2, 6),
( 3, 7, 2, 5),
]
IN_UVECTOR = [
( 2, 3, 5, 8),
( 4, 6, 2, 9),
( 1, 13, 7, 4),
]
IN_VALUES = {
"int": [Scalar(x) for x in IN_ISCALAR],
"uint": [Scalar(x) for x in IN_USCALAR],
"ivec2": [Vec2(x[0], x[1]) for x in IN_IVECTOR],
"uvec2": [Vec2(x[0], x[1]) for x in IN_UVECTOR],
"ivec3": [Vec3(x[0], x[1], x[2]) for x in IN_IVECTOR],
"uvec3": [Vec3(x[0], x[1], x[2]) for x in IN_UVECTOR],
"ivec4": [Vec4(x[0], x[1], x[2], x[3]) for x in IN_IVECTOR],
"uvec4": [Vec4(x[0], x[1], x[2], x[3]) for x in IN_UVECTOR],
"float": [Scalar(x).toFloat() for x in IN_ISCALAR],
"vec2": [Vec2(x[0], x[1]).toFloat() for x in IN_IVECTOR],
"vec3": [Vec3(x[0], x[1], x[2]).toFloat() for x in IN_IVECTOR],
"vec4": [Vec4(x[0], x[1], x[2], x[3]).toFloat() for x in IN_IVECTOR],
}
VALID_CONVERSIONS = {
"int": ["float", "uint"],
"uint": ["float"],
"ivec2": ["uvec2", "vec2"],
"uvec2": ["vec2"],
"ivec3": ["uvec3", "vec3"],
"uvec3": ["vec3"],
"ivec4": ["uvec4", "vec4"],
"uvec4": ["vec4"]
}
SCALAR_TO_VECTOR_CONVERSIONS = {
"int": ["vec2", "vec3", "vec4", "uvec2", "uvec3", "uvec4"],
"uint": ["vec2", "vec3", "vec4"]
}
VALID_ASSIGNMENTS = {
"int": ["ivec2", "ivec3", "ivec4"],
"uint": ["uvec2", "uvec3", "uvec4"],
"ivec2": ["int", "float"],
"ivec3": ["int", "float"],
"ivec4": ["int", "float"],
"uvec2": ["uint", "float"],
"uvec3": ["uint", "float"],
"uvec4": ["uint", "float"],
"float": ["vec2", "vec3", "vec4"],
"vec2": ["float"],
"vec3": ["float"],
"vec4": ["float"]
}
IN_TYPE_ORDER = [
"int", "uint",
"ivec2", "uvec2", "ivec3",
"uvec3", "ivec4", "uvec4",
"float",
"vec2", "vec3", "vec4"
]
def isScalarTypeName (name):
return name in ["float", "int", "uint"]
def isVec2TypeName (name):
return name in ["vec2", "ivec2", "uvec2"]
def isVec3TypeName (name):
return name in ["vec3", "ivec3", "uvec3"]
def isVec4TypeName (name):
return name in ["vec4", "ivec4", "uvec4"]
# Utilities
def scalarToVector(a, b):
if isinstance(a, Scalar) and isinstance(b, Vec2):
a = a.toVec2()
elif isinstance(a, Scalar) and isinstance(b, Vec3):
a = a.toVec3()
elif isinstance(a, Scalar) and isinstance(b, Vec4):
a = a.toVec4()
return a
def isUintTypeName (type_name):
return type_name in ["uint", "uvec2", "uvec3", "uvec4"]
def convLiteral (type, value):
if isUintTypeName(type):
return int(value)
else:
return value
def valueToStr(value_type, value):
if isinstance(value, Scalar):
return str(value)
else:
assert isinstance(value, genutil.Vec)
out = value_type + "("
out += ", ".join([str(convLiteral(value_type, x)) for x in value.getScalars()])
out += ")"
return out
def valuesToStr(prefix, value_type, values):
def gen_value_strs(value_list, value_type):
for value in value_list:
yield valueToStr(value_type, value)
return "%s = [ %s ];" % (prefix, " | ".join(gen_value_strs(values, value_type)))
# Test cases
class ArithmeticCase(genutil.ShaderCase):
def __init__(self, name, op, in_type, out_type, reverse=False):
self.op_func = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.div,
}
self.name = name
self.op = op
self.in_type = in_type
self.out_type = out_type
self.reverse = reverse
def __str__(self):
params = {
"NAME": self.name,
"EXPR": self.get_expr(),
"VALUES": self.gen_values(),
}
return genutil.fillTemplate(ARTIHMETIC_CASE_TEMPLATE, params)
def apply(self, a, b):
assert(self.op in self.op_func)
a = scalarToVector(a, b)
if self.reverse:
b, a = a, b
return self.op_func[self.op](a, b)
def get_expr(self):
expr = ["in0", self.op, str(self.get_operand())]
if self.reverse:
expr.reverse()
return " ".join(expr)
def get_operand(self):
operands = {
"float": Scalar(2.0),
"vec2": Vec2(1.0, 2.0),
"vec3": Vec3(1.0, 2.0, 3.0),
"vec4": Vec4(1.0, 2.0, 3.0, 4.0),
"uint": Uint(2),
"uvec2": UVec2(1, 2),
"uvec3": UVec3(1, 2, 3),
"uvec4": UVec4(1, 2, 3, 4),
}
assert self.out_type in operands
return operands[self.out_type]
def gen_values(self):
in_values = IN_VALUES[self.in_type]
y = self.get_operand()
out_values = [self.apply(x, y) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
class ComparisonsCase(ArithmeticCase):
def __init__(self, name, op, in_type, out_type, reverse=False):
super(ComparisonsCase, self).__init__(name, op, in_type, out_type, reverse)
self.op_func = {
"==": operator.eq,
"!=": operator.ne,
"<": operator.lt,
">": operator.gt,
"<=": operator.le,
">=": operator.ge,
}
def apply(self, a, b):
assert(self.op in self.op_func)
if isinstance(a, Scalar) and isinstance(b, Scalar):
a, b = float(a), float(b)
if self.reverse:
b, a = a, b
return Scalar(self.op_func[self.op](a, b))
def gen_values(self):
in_values = IN_VALUES[self.in_type]
y = self.get_operand()
out_values = [self.apply(x, y) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output bool out0", "bool", out_values))
return "\n".join(out)
class ParenthesizedCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type, reverse=False, input_in_parens=False):
self.name = name
self.in_type = in_type
self.out_type = out_type
self.reverse = reverse
self.input_in_parens = input_in_parens
def __str__(self):
params = {
"NAME": self.name,
"EXPR": self.get_expr(),
"VALUES": self.gen_values(),
}
return genutil.fillTemplate(ARTIHMETIC_CASE_TEMPLATE, params)
def apply(self, a):
b, c = self.get_operand(0), self.get_operand(1)
a = scalarToVector(a, b)
if self.input_in_parens:
return b*(a+c)
else:
return a*(b+c)
def get_expr(self):
def make_paren_expr():
out = [
"in0" if self.input_in_parens else self.get_operand(0),
"+",
self.get_operand(1)
]
return "(%s)" % (" ".join([str(x) for x in out]))
expr = [
"in0" if not self.input_in_parens else self.get_operand(0),
"*",
make_paren_expr()
]
if self.reverse:
expr.reverse()
return " ".join([str(x) for x in expr])
def get_operand(self, ndx=0):
return IN_VALUES[self.out_type][ndx]
def gen_values(self):
in_values = IN_VALUES[self.in_type]
out_values = [self.apply(x) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
class FunctionsCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type):
self.name = name
self.in_type = in_type
self.out_type = out_type
def __str__(self):
params = {
"NAME": self.name,
"OUTTYPE": self.out_type,
"VALUES": self.gen_values(),
}
return genutil.fillTemplate(FUNCTIONS_CASE_TEMPLATE, params)
def apply(self, a):
if isUintTypeName(self.out_type):
return a.toUint() * Uint(2)
else:
return a.toFloat() * Scalar(2.0)
def gen_values(self):
in_values = IN_VALUES[self.in_type]
out_values = [self.apply(x) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
class ArrayCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type, reverse=False):
self.name = name
self.in_type = in_type
self.out_type = out_type
self.reverse = reverse
def __str__(self):
params = {
"NAME": self.name,
"VALUES": self.gen_values(),
"ARRAYTYPE": self.out_type,
"ARRAYVALUES": self.gen_array_values(),
"EXPR": self.get_expr(),
}
return genutil.fillTemplate(ARRAY_CASE_TEMPLATE, params)
def apply(self, a):
b = IN_VALUES[self.out_type][1]
a = scalarToVector(a, b)
return a + b
def get_expr(self):
if not self.reverse:
return "in0 + x[1]"
else:
return "x[1] + in0"
def gen_values(self):
in_values = IN_VALUES[self.in_type]
out_values = [self.apply(x) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
def gen_array_values(self):
out = [valueToStr(self.out_type, x) for x in IN_VALUES[self.out_type]]
return ", ".join(out)
class ArrayUnpackCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type):
self.name = name
self.in_type = in_type
self.out_type = out_type
def __str__(self):
params = {
"NAME": self.name,
"VALUES": self.gen_values(),
"ARRAYTYPE": "float",
"ARRAYVALUES": self.gen_array_values(),
"EXPR": self.get_expr(),
}
return genutil.fillTemplate(ARRAY_CASE_TEMPLATE, params)
def apply(self, a):
if isinstance(a, Scalar) and isVec2TypeName(self.out_type):
a = a.toVec2()
elif isinstance(a, Scalar) and isVec3TypeName(self.out_type):
a = a.toVec3()
elif isinstance(a, Scalar) and isVec4TypeName(self.out_type):
a = a.toVec4()
b = IN_VALUES["float"]
out = [Scalar(x)+y for x, y in zip(a.getScalars(), b)]
if self.out_type == "float":
return out[0].toFloat()
elif self.out_type == "uint":
return out[0].toUint()
elif self.out_type == "vec2":
return Vec2(out[0], out[1]).toFloat()
elif self.out_type == "uvec2":
return Vec2(out[0], out[1]).toUint()
elif self.out_type == "vec3":
return Vec3(out[0], out[1], out[2]).toFloat()
elif self.out_type == "uvec3":
return Vec3(out[0], out[1], out[2]).toUint()
elif self.out_type == "vec4":
return Vec4(out[0], out[1], out[2], out[3]).toFloat()
elif self.out_type == "uvec4":
return Vec4(out[0], out[1], out[2], out[3]).toUint()
def get_expr(self):
def num_scalars(typename):
return IN_VALUES[typename][0].getNumScalars()
def gen_sums():
in_scalars = num_scalars(self.in_type)
out_scalars = num_scalars(self.out_type)
for ndx in range(out_scalars):
if in_scalars > 1:
yield "in0[%i] + x[%i]" % (ndx, ndx)
else:
yield "in0 + x[%i]" % (ndx)
return "%s(%s)" % (self.out_type, ", ".join(gen_sums()))
def gen_values(self):
in_values = IN_VALUES[self.in_type]
out_values = [self.apply(x) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
def gen_array_values(self):
out = [valueToStr(self.out_type, x) for x in IN_VALUES["float"]]
return ", ".join(out)
class StructCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type, reverse=False):
self.name = name
self.in_type = in_type
self.out_type = out_type
self.reverse = reverse
def __str__(self):
params = {
"NAME": self.name,
"VALUES": self.gen_values(),
"OUTTYPE": self.out_type,
"STRUCTVALUE": self.get_struct_value(),
"EXPR": self.get_expr(),
}
return genutil.fillTemplate(STRUCT_CASE_TEMPLATE, params)
def apply(self, a):
if isinstance(a, Scalar) and isVec2TypeName(self.out_type):
a = a.toVec2()
elif isinstance(a, Scalar) and isVec3TypeName(self.out_type):
a = a.toVec3()
elif isinstance(a, Scalar) and isVec4TypeName(self.out_type):
a = a.toVec4()
b = IN_VALUES[self.out_type][0]
return a + b
def get_expr(self):
if not self.reverse:
return "in0 + x.val"
else:
return "x.val + in0"
def gen_values(self):
in_values = IN_VALUES[self.in_type]
out_values = [self.apply(x) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
def get_struct_value(self):
return valueToStr(self.out_type, IN_VALUES[self.out_type][0])
class InvalidCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type):
self.name = name
self.in_type = in_type
self.out_type = out_type
def __str__(self):
params = {
"NAME": self.name,
"OPERAND": str(self.get_operand()),
"VALUES": self.gen_values(),
}
return genutil.fillTemplate(INVALID_CASE_TEMPLATE, params)
def apply(self, a, b):
return b
def get_operand(self):
return IN_VALUES[self.out_type][0]
def gen_values(self):
in_values = IN_VALUES[self.in_type]
y = self.get_operand()
out_values = [self.apply(x, y) for x in in_values]
out = []
out.append(valuesToStr("input %s in0" % (self.in_type), self.in_type, in_values))
out.append(valuesToStr("output %s out0" % (self.out_type), self.out_type, out_values))
return "\n".join(out)
class InvalidArrayCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type):
self.name = name
self.in_type = in_type
self.out_type = out_type
def __str__(self):
params = {
"NAME": self.name,
"EXPR": self.gen_expr(),
}
return genutil.fillTemplate(INVALID_ARRAY_CASE_TEMPLATE, params)
def gen_expr(self):
in_values = [valueToStr(self.out_type, x) for x in IN_VALUES[self.in_type]]
out = "%s a[] = %s[] (%s);" % (self.out_type, self.in_type, ", ".join(in_values))
return out
class InvalidStructCase(genutil.ShaderCase):
def __init__(self, name, in_type, out_type):
self.name = name
self.in_type = in_type
self.out_type = out_type
def __str__(self):
params = {
"NAME": self.name,
"INTYPE": self.in_type,
"OUTTYPE": self.out_type,
"INVALUE": self.get_value(),
}
return genutil.fillTemplate(INVALID_STRUCT_CASE_TEMPLATE, params)
def get_value(self):
return valueToStr(self.in_type, IN_VALUES[self.in_type][0])
# Case file generation
def genConversionPairs(order=IN_TYPE_ORDER, scalar_to_vector=True, additional={}):
def gen_order(conversions):
key_set = set(conversions.iterkeys())
for typename in order:
if typename in key_set:
yield typename
conversions = {}
for in_type in VALID_CONVERSIONS:
conversions[in_type] = [] + VALID_CONVERSIONS[in_type]
if in_type in SCALAR_TO_VECTOR_CONVERSIONS and scalar_to_vector:
conversions[in_type] += SCALAR_TO_VECTOR_CONVERSIONS[in_type]
for key in additional.iterkeys():
value = conversions.get(key, [])
conversions[key] = value + additional[key]
for in_type in gen_order(conversions):
for out_type in conversions[in_type]:
yield (in_type, out_type)
def genInvalidConversions():
types = IN_TYPE_ORDER
valid_pairs = set(genConversionPairs(additional=VALID_ASSIGNMENTS))
for pair in itertools.permutations(types, 2):
if pair not in valid_pairs:
yield pair
def genArithmeticCases(reverse=False):
op_names = [
("add", "Addition", "+"),
("sub", "Subtraction", "-"),
("mul", "Multiplication", "*"),
("div", "Division", "/")
]
for name, desc, op in op_names:
casegroup = CaseGroup(name, desc, [])
for in_type, out_type in genConversionPairs():
if op == "-" and isUintTypeName(out_type):
continue # Can't handle at the moment
name = in_type + "_to_" + out_type
casegroup.children.append(ArithmeticCase(name, op, in_type, out_type, reverse))
yield casegroup
def genComparisonCases(reverse=False):
op_names = [
("equal", "Equal", "=="),
("not_equal", "Not equal", "!="),
("less", "Less than", "<"),
("greater", "Greater than", ">"),
("less_or_equal", "Less than or equal", "<="),
("greater_or_equal", "Greater than or equal", ">="),
]
for name, desc, op in op_names:
casegroup = CaseGroup(name, desc, [])
type_order = IN_TYPE_ORDER if name in ["equal", "not_equal"] else ["int", "uint"]
for in_type, out_type in genConversionPairs(order=type_order, scalar_to_vector=False):
name = in_type + "_to_" + out_type
casegroup.children.append(ComparisonsCase(name, op, in_type, out_type, reverse))
yield casegroup
def genParenthesizedCases():
for reverse in [True, False]:
if reverse:
name = "paren_expr_before_literal"
desc = "Parenthesized expression before literal"
else:
name = "literal_before_paren_expr"
desc = "Literal before parenthesized expression"
reversegroup = CaseGroup(name, desc, [])
for input_in_parens in [True, False]:
if input_in_parens:
name = "input_in_parens"
desc = "Input variable in parenthesized expression"
else:
name = "input_outside_parens"
desc = "Input variable outside parenthesized expression"
casegroup = CaseGroup(name, desc, [])
for in_type, out_type in genConversionPairs():
name = in_type + "_to_" + out_type
casegroup.children.append(
ParenthesizedCase(name, in_type, out_type, reverse, input_in_parens)
)
reversegroup.children.append(casegroup)
yield reversegroup
def genArrayCases(reverse=False):
for in_type, out_type in genConversionPairs():
name = in_type + "_to_" + out_type
yield ArrayCase(name, in_type, out_type, reverse)
def genArrayUnpackCases(reverse=False):
for in_type, out_type in genConversionPairs():
name = in_type + "_to_" + out_type
yield ArrayUnpackCase(name, in_type, out_type)
def genFunctionsCases():
for in_type, out_type in genConversionPairs(scalar_to_vector=False):
name = in_type + "_to_" + out_type
yield FunctionsCase(name, in_type, out_type)
def genStructCases(reverse=False):
for in_type, out_type in genConversionPairs():
name = in_type + "_to_" + out_type
yield StructCase(name, in_type, out_type, reverse)
def genInvalidCases(reverse=False):
for in_type, out_type in genInvalidConversions():
name = in_type + "_to_" + out_type
yield InvalidCase(name, in_type, out_type)
def genInvalidArrayCases():
for in_type, out_type in genConversionPairs(scalar_to_vector=False):
name = in_type + "_to_" + out_type
yield InvalidArrayCase(name, in_type, out_type)
def genInvalidStructCases():
for in_type, out_type in genConversionPairs(scalar_to_vector=False):
name = in_type + "_to_" + out_type
yield InvalidStructCase(name, in_type, out_type)
def genAllCases():
yield CaseGroup(
"arithmetic", "Arithmetic operations",
[
CaseGroup("input_before_literal", "Input before literal",
genArithmeticCases(reverse=False)),
CaseGroup("literal_before_input", "Literal before input",
genArithmeticCases(reverse=True)),
]
)
yield CaseGroup(
"comparisons", "Comparisons",
[
CaseGroup("input_before_literal", "Input before literal",
genComparisonCases(reverse=False)),
CaseGroup("literal_before_input", "Literal before input",
genComparisonCases(reverse=True)),
]
)
yield CaseGroup(
"array_subscripts", "Array subscripts",
[
CaseGroup("input_before_subscript", "Input before subscript",
genArrayCases(reverse=False)),
CaseGroup("subscript_before_input", "Subscript before input",
genArrayCases(reverse=True)),
# CaseGroup("unpack", "Unpack array and repack as value",
# genArrayUnpackCases()),
]
)
yield CaseGroup("functions", "Function calls",
genFunctionsCases())
yield CaseGroup("struct_fields", "Struct field selectors",
[
CaseGroup("input_before_field", "Input before field",
genStructCases(reverse=False)),
CaseGroup("field_before_input", "Field before input",
genStructCases(reverse=True)),
]
)
yield CaseGroup("parenthesized_expressions", "Parenthesized expressions",
genParenthesizedCases())
yield CaseGroup(
"invalid", "Invalid conversions",
[
CaseGroup("variables", "Single variables",
genInvalidCases()),
CaseGroup("arrays", "Arrays",
genInvalidArrayCases()),
CaseGroup("structs", "Structs",
genInvalidStructCases()),
]
)
if __name__ == "__main__":
print("Generating shader case files.")
genutil.writeAllCases("implicit_conversions.test", genAllCases())
| |
import re
import urlparse
from time import sleep
from datetime import datetime
from calaccess_campaign_browser.management.commands import ScrapeCommand
from calaccess_campaign_browser.models import (
Filer,
Election,
Proposition,
PropositionFiler
)
class Command(ScrapeCommand):
"""
Scrape propositions and ballot measures.
"""
help = "Scrape links between filers and propositions from \
the CAL-ACCESS site"
def build_results(self):
self.header("Scraping propositions")
# Build the link list from the 2013 page because otherwise the
# other years are hidden under the "Historical" link.
url = urlparse.urljoin(
self.base_url,
'Campaign/Measures/list.aspx?session=2013'
)
soup = self.get(url)
# Filter links for uniqueness.
links = soup.findAll('a', href=re.compile(r'^.*\?session=\d+'))
links = list(set([link['href'] for link in links]))
results = []
for link in links:
link = urlparse.urljoin(self.base_url, link)
data = self.scrape_year_page(link)
# Parse the year from the URL
data['year'] = int(re.match(r'.+session=(\d+)', link).group(1))
# Add it to the list
results.append(data)
sleep(0.5)
# Pass it out
return results
def scrape_year_page(self, url):
"""
Scrape data from a CAL-ACCESS page that publishes the list
of propositions in a particular election year.
"""
# Get the URL of the year page
soup = self.get(url)
# Loop through all the tables on the page
data_dict = {}
table_list = soup.findAll(
'table',
{'id': re.compile(r'ListElections1__[a-z0-9]+')}
)
for table in table_list:
# Pull the title
election_title = table.select('caption span')[0].text
# Pull the date
election_date = re.match(
r'[A-Z]+ \d{1,2}, \d{4}',
election_title
).group(0)
# Pull the type
election_type = election_title.replace(election_date, '').strip()
election_type = self.parse_election_name(election_type)
# Get a list of the propositions in this table
prop_links = table.findAll('a')
# Log what we're up to
if self.verbosity > 2:
msg = " Scraped: %s %s (%s props)"
msg = msg % (
election_date,
election_type,
len(prop_links),
)
self.log(msg)
# Scrape them one by one
prop_list = [
self.scrape_prop_page(
urlparse.urljoin(
self.base_url,
'/Campaign/Measures/%s' % link['href'],
)
) for link in prop_links
]
# Add the data to our data dict
data_dict["%s|%s" % (election_date, election_type)] = {
'type': election_type,
'props': prop_list,
}
# Pass the data back out
return data_dict
def scrape_prop_page(self, url):
"""
Scrape data from a proposition detail page
"""
# Pull the page
soup = self.get(url)
# Create a data dictionary to put the good stuff in
data_dict = {}
# Add the title and id out of the page
data_dict['name'] = soup.find('span', id='measureName').text
data_dict['description'] = ''
# If there is a " - " separating a name from a description
# split it out below.
if ' - ' in data_dict['name']:
split = data_dict['name'].split(" - ", 1)
data_dict['name'], data_dict['description'] = split
data_dict['name'] = data_dict['name'].strip()
data_dict['name'] = data_dict['name'].replace(
"PROPOSITION",
""
).strip()
data_dict['name'] = data_dict['name'].replace("PROP", "").strip()
data_dict['description'] = data_dict['description'].strip()
data_dict['id'] = re.match(r'.+id=(\d+)', url).group(1)
data_dict['committees'] = []
# Loop through all the tables on the page
# which contain the committees on each side of the measure
for table in soup.findAll('table', cellpadding='4'):
# Pull the data box
data = table.findAll('span', {'class': 'txt7'})
# The URL
url = table.find('a', {'class': 'sublink2'})
# The name
name = url.text
# ID sometimes refers to xref_filer_id rather than filer_id_raw
id_ = data[0].text
# Does the committee support or oppose the measure?
support = data[1].text.strip() == 'SUPPORT'
# Put together a a data dictionary and add it to the list
data_dict['committees'].append({
'name': name,
'id': id_,
'support': support
})
if self.verbosity > 2:
msg = " Scraped: %s %s (%s committees)"
msg = msg % (
data_dict['name'],
data_dict['description'],
len(data_dict['committees'])
)
self.log(msg)
# Pass the data out
return data_dict
def process_results(self, results):
"""
Add the data to the database.
"""
for d in results:
for datekey, election_dict in d.items():
# The years as extracted from the urls are actually not always
# right, so get it from the date.
date = datekey.split("|")[0].strip()
if date == 'year':
continue
date = datetime.strptime(date, '%B %d, %Y').date()
# Loop through the propositions
for prop in election_dict['props']:
# Get or create it
prop_obj, c = Proposition.objects.get_or_create(
name=prop['name'],
description=prop['description'],
id_raw=prop['id']
)
# Log it
if self.verbosity > 2:
if c:
self.log(' Created %s' % prop_obj)
# Set the election if we have it
try:
election = Election.objects.get(
year=date.year,
election_type=election_dict['type']
)
# Set the election date since we have it here
if not election.date:
election.date = date
election.save()
# Can't figure out to connect ambiguous elections...
except (
Election.MultipleObjectsReturned,
Election.DoesNotExist
):
# Hardcode in some ones we've looked up
if prop['id'] in [
'1316044',
'1316047',
'1316048',
'1316060',
'1316061',
'1316062'
]:
election = Election.objects.get(
year=2009,
election_type='SPECIAL_RUNOFF'
)
if not election.date:
election.date = datetime(2009, 5, 19)
election.save()
# Put the other ones in with a null link
else:
election = None
# Make the connection
if election:
prop_obj.election = election
prop_obj.save()
# Now loop through the committees
for committee in prop['committees']:
# This filer_id could mean a lot of things, so try
filer_id = committee['id']
try:
filer = Filer.objects.get(filer_id_raw=filer_id)
except Filer.DoesNotExist:
try:
filer = Filer.objects.get(
xref_filer_id=filer_id
)
except Filer.DoesNotExist:
msg = ' Could not find filer for %s (%s)'
msg = msg % (
committee['name'],
committee['id'],
)
self.warn(msg)
continue
# Set the position
if committee['support']:
position = 'SUPPORT'
else:
position = 'OPPOSE'
# Associate the filer with the prop
pf, c = PropositionFiler.objects.get_or_create(
proposition=prop_obj,
filer=filer,
position=position
)
# Log it
if self.verbosity > 2:
self.log(' Linked %s' % pf)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._database_extensions_operations import build_create_or_update_request_initial, build_get_request, build_list_by_database_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DatabaseExtensionsOperations:
    """DatabaseExtensionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.sql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client and (de)serializers injected by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        extension_name: str,
        **kwargs: Any
    ) -> None:
        """Gets a database extension. This will return resource not found as it is not supported.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param extension_name: The name of the extension.
        :type extension_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map well-known HTTP error codes to specific azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build and normalize the HTTP request for this ARM endpoint.
        request = build_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            extension_name=extension_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions/{extensionName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        extension_name: str,
        parameters: "_models.DatabaseExtensions",
        **kwargs: Any
    ) -> Optional["_models.ImportExportExtensionsOperationResult"]:
        # Initial request of the long-running create-or-update operation;
        # returns the deserialized body on 200, None on 202 (operation accepted).
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ImportExportExtensionsOperationResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the request body from the model object.
        _json = self._serialize.body(parameters, 'DatabaseExtensions')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            database_name=database_name,
            extension_name=extension_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200: operation completed synchronously; 202: accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ImportExportExtensionsOperationResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions/{extensionName}'}  # type: ignore

    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        extension_name: str,
        parameters: "_models.DatabaseExtensions",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ImportExportExtensionsOperationResult"]:
        """Perform a database extension operation, like polybase import.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :param extension_name: The name of the extension.
        :type extension_name: str
        :param parameters: The database import request parameters.
        :type parameters: ~azure.mgmt.sql.models.DatabaseExtensions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either
         ImportExportExtensionsOperationResult or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.ImportExportExtensionsOperationResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImportExportExtensionsOperationResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial request. cls=lambda keeps the
            # raw PipelineResponse so the poller can inspect headers/status.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                database_name=database_name,
                extension_name=extension_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the result model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('ImportExportExtensionsOperationResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Resolve the polling strategy: default ARM polling, no polling,
        # or a caller-supplied polling method.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an existing operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions/{extensionName}'}  # type: ignore

    @distributed_trace
    def list_by_database(
        self,
        resource_group_name: str,
        server_name: str,
        database_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ImportExportExtensionsOperationListResult"]:
        """List database extension. This will return an empty list as it is not supported.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param database_name: The name of the database.
        :type database_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ImportExportExtensionsOperationListResult or the
         result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ImportExportExtensionsOperationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImportExportExtensionsOperationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's templated URL; subsequent pages
            # use the nextLink returned by the service verbatim.
            if not next_link:
                request = build_list_by_database_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    database_name=database_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_database.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_database_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    database_name=database_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (nextLink, items) to the pager.
            deserialized = self._deserialize("ImportExportExtensionsOperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/extensions'}  # type: ignore
| |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import itertools
import logging
import os
import re
import shutil
import socket
import subprocess
import zope.interface
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import dvsni
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
    State of Configurator: This code has been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
help="Path to the Apache 'a2enmod' binary.")
add("init-script", default=constants.CLI_DEFAULTS["init_script"],
help="Path to the Apache init script (used for server "
"reload/restart).")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Add name_server association dict
self.assoc = dict()
# Outstanding challenges
self._chall_out = set()
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
    def prepare(self):
        """Prepare the authenticator/installer.

        Validates the existing Apache configuration, builds the Augeas
        parser, determines the Apache version, and collects all vhosts.

        :raises .errors.NoInstallationError: If Apache configs cannot be found
        :raises .errors.MisconfigurationError: If Apache is misconfigured
        :raises .errors.NotSupportedError: If Apache version is not supported
        :raises .errors.PluginError: If there is any other error

        """
        # Make sure configuration is valid
        self.config_test()
        self.parser = parser.ApacheParser(
            self.aug, self.conf("server-root"), self.conf("ctl"))
        # Check for errors in parsing files with Augeas
        self.check_parsing_errors("httpd.aug")
        # Set Version
        if self.version is None:
            self.version = self.get_version()
        # Versions older than 2.2 are rejected outright
        if self.version < (2, 2):
            raise errors.NotSupportedError(
                "Apache Version %s not supported.", str(self.version))
        # Get all of the available vhosts
        self.vhosts = self.get_virtual_hosts()
        # Install the SSL options config file (temp_install is a
        # module-level helper defined elsewhere in this module)
        temp_install(self.mod_ssl_conf)
    def deploy_cert(self, domain, cert_path, key_path, chain_path=None):
        """Deploys certificate to specified virtual host.

        Currently tries to find the last directives to deploy the cert in
        the VHost associated with the given domain. If it can't find the
        directives, it searches the "included" confs. The function verifies that
        it has located the three directives and finally modifies them to point
        to the correct destination. After the certificate is installed, the
        VirtualHost is enabled if it isn't already.

        .. todo:: Might be nice to remove chain directive if none exists
                  This shouldn't happen within letsencrypt though

        :param str domain: domain to deploy the certificate for
        :param str cert_path: filesystem path to the certificate file
        :param str key_path: filesystem path to the private key file
        :param str chain_path: filesystem path to the certificate chain
            file, or None if no chain should be installed

        :raises errors.PluginError: When unable to deploy certificate due to
            a lack of directives

        """
        vhost = self.choose_vhost(domain)
        # This is done first so that ssl module is enabled and cert_path,
        # cert_key... can all be parsed appropriately
        self.prepare_server_https("443")
        path = {}
        path["cert_path"] = self.parser.find_dir(
            "SSLCertificateFile", None, vhost.path)
        path["cert_key"] = self.parser.find_dir(
            "SSLCertificateKeyFile", None, vhost.path)
        # Only include if a certificate chain is specified
        if chain_path is not None:
            path["chain_path"] = self.parser.find_dir(
                "SSLCertificateChainFile", None, vhost.path)
        if not path["cert_path"] or not path["cert_key"]:
            # Throw some can't find all of the directives error"
            logger.warn(
                "Cannot find a cert or key directive in %s. "
                "VirtualHost was not modified", vhost.path)
            # Presumably break here so that the virtualhost is not modified
            raise errors.PluginError(
                "Unable to find cert and/or key directives")
        logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
        # Assign the final directives; order is maintained in find_dir
        self.aug.set(path["cert_path"][-1], cert_path)
        self.aug.set(path["cert_key"][-1], key_path)
        if chain_path is not None:
            if not path["chain_path"]:
                # No existing chain directive; add a fresh one
                self.parser.add_dir(
                    vhost.path, "SSLCertificateChainFile", chain_path)
            else:
                self.aug.set(path["chain_path"][-1], chain_path)
        # Save notes about the transaction that took place
        self.save_notes += ("Changed vhost at %s with addresses of %s\n"
                            "\tSSLCertificateFile %s\n"
                            "\tSSLCertificateKeyFile %s\n" %
                            (vhost.filep,
                             ", ".join(str(addr) for addr in vhost.addrs),
                             cert_path, key_path))
        if chain_path is not None:
            self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
        # Make sure vhost is enabled
        if not vhost.enabled:
            self.enable_site(vhost)
def choose_vhost(self, target_name):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
:param str target_name: domain name
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available or chosen
"""
# Allows for domain names to be associated with a virtual host
if target_name in self.assoc:
return self.assoc[target_name]
# Try to find a reasonable vhost
vhost = self._find_best_vhost(target_name)
if vhost is not None:
if not vhost.ssl:
vhost = self.make_vhost_ssl(vhost)
self.assoc[target_name] = vhost
return vhost
return self._choose_vhost_from_list(target_name)
    def _choose_vhost_from_list(self, target_name):
        """Select a vhost for target_name from the full vhost list.

        Asks the user to pick via display_ops; if the chosen vhost is not
        an SSL vhost, attempts to upgrade it, refusing when the proposed
        HTTPS addresses would conflict with an enabled vhost.

        :param str target_name: domain name

        :returns: ssl vhost chosen for the name
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        :raises .errors.PluginError: If no vhost is selected or the
            selection cannot be made SSL without a conflict

        """
        vhost = display_ops.select_vhost(target_name, self.vhosts)
        if vhost is None:
            logger.error(
                "No vhost exists with servername or alias of: %s. "
                "No vhost was selected. Please specify servernames "
                "in the Apache config", target_name)
            raise errors.PluginError("No vhost selected")
        elif not vhost.ssl:
            addrs = self._get_proposed_addrs(vhost, "443")
            # TODO: Conflicts is too conservative
            if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
                vhost = self.make_vhost_ssl(vhost)
            else:
                logger.error(
                    "The selected vhost would conflict with other HTTPS "
                    "VirtualHosts within Apache. Please select another "
                    "vhost or add ServerNames to your configuration.")
                raise errors.PluginError(
                    "VirtualHost not able to be selected.")
        # Remember the choice so later calls reuse it
        self.assoc[target_name] = vhost
        return vhost
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 4 - Servername SSL
# Points 3 - Address name with SSL
# Points 2 - Servername no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if target_name in vhost.get_names():
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 2
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
reasonable_vhosts = self._non_default_vhosts()
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
for vhost in self.vhosts:
all_names.update(vhost.get_names())
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
    def _add_servernames(self, host):
        """Helper function for get_virtual_hosts().

        Populates ``host.aliases`` from every ServerAlias directive found
        in the vhost and ``host.name`` from its final ServerName.

        :param host: In progress vhost whose names will be added
        :type host: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        # Take the final ServerName as each overrides the previous
        servername_match = self.parser.find_dir(
            "ServerName", None, start=host.path, exclude=False)
        serveralias_match = self.parser.find_dir(
            "ServerAlias", None, start=host.path, exclude=False)
        for alias in serveralias_match:
            host.aliases.add(self.parser.get_arg(alias))
        if servername_match:
            # Get last ServerName as each overwrites the previous
            host.name = self.parser.get_arg(servername_match[-1])
    def _create_vhost(self, path):
        """Used by get_virtual_hosts to create vhost objects

        :param str path: Augeas path to virtual host

        :returns: newly created vhost
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        addrs = set()
        # Each "arg" node under the VirtualHost is one listening address
        args = self.aug.match(path + "/arg")
        for arg in args:
            addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
        is_ssl = False
        # The vhost counts as SSL when it contains "SSLEngine on"
        if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
            is_ssl = True
        # get_file_path is a module-level helper (defined elsewhere in this
        # module) -- presumably maps an Augeas path to a filesystem path
        filename = get_file_path(path)
        is_enabled = self.is_site_enabled(filename)
        vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled)
        self._add_servernames(vhost)
        return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
    def add_name_vhost(self, addr):
        """Adds NameVirtualHost directive for given address.

        :param addr: Address that will be added as NameVirtualHost directive
        :type addr: :class:`~letsencrypt_apache.obj.Addr`

        """
        loc = parser.get_aug_path(self.parser.loc["name"])
        # Port 443 directives go inside an <IfModule mod_ssl.c> block --
        # presumably so the config stays valid with mod_ssl disabled
        if addr.get_port() == "443":
            path = self.parser.add_dir_to_ifmodssl(
                loc, "NameVirtualHost", [str(addr)])
        else:
            path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
        # Record the change both in the debug log and the save notes
        msg = ("Setting %s to be NameBasedVirtualHost\n"
               "\tDirective added to %s\n" % (addr, path))
        logger.debug(msg)
        self.save_notes += msg
    def prepare_server_https(self, port, temp=False):
        """Prepare the server for HTTPS.

        Make sure that the ssl_module is loaded and that the server
        is appropriately listening on port.

        :param str port: Port to listen on
        :param bool temp: forwarded to enable_mod -- presumably marks the
            module enablement as temporary; TODO confirm against enable_mod

        """
        if "ssl_module" not in self.parser.modules:
            self.enable_mod("ssl", temp=temp)
        # Check for Listen <port>
        # Note: This could be made to also look for ip:443 combo
        if not self.parser.find_dir("Listen", port):
            logger.debug("No Listen %s directive found. Setting the "
                         "Apache Server to Listen on port %s", port, port)
            if port == "443":
                args = [port]
            else:
                # Non-standard ports should specify https protocol
                args = [port, "https"]
            self.parser.add_dir_to_ifmodssl(
                parser.get_aug_path(
                    self.parser.loc["listen"]), "Listen", args)
            self.save_notes += "Added Listen %s directive to %s\n" % (
                port, self.parser.loc["listen"])
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
    def make_vhost_ssl(self, nonssl_vhost):  # pylint: disable=too-many-locals
        """Makes an ssl_vhost version of a nonssl_vhost.

        Duplicates vhost and adds default ssl options
        New vhost will reside as (nonssl_vhost.path) +
        ``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``

        .. note:: This function saves the configuration

        :param nonssl_vhost: Valid VH that doesn't have SSLEngine on
        :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        :returns: SSL vhost
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        :raises .errors.PluginError: If more than one virtual host is in
            the file or if plugin is unable to write/read vhost files.

        """
        avail_fp = nonssl_vhost.filep
        ssl_fp = self._get_ssl_vhost_path(avail_fp)
        # Write the new file: the old vhost wrapped in <IfModule mod_ssl.c>
        self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
        # Reload augeas to take into account the new vhost
        self.aug.load()
        # Get Vhost augeas path for new vhost
        vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                              (ssl_fp, parser.case_i("VirtualHost")))
        if len(vh_p) != 1:
            logger.error("Error: should only be one vhost in %s", avail_fp)
            raise errors.PluginError("Only one vhost per file is allowed")
        else:
            # This simplifies the process
            vh_p = vh_p[0]
        # Update Addresses (rewrite every address to port 443)
        self._update_ssl_vhosts_addrs(vh_p)
        # Add directives (placeholder cert/key paths + shared SSL options)
        self._add_dummy_ssl_directives(vh_p)
        # Log actions and create save notes
        logger.info("Created an SSL vhost at %s", ssl_fp)
        self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
        self.save()
        # We know the length is one because of the assertion above
        # Create the Vhost object
        ssl_vhost = self._create_vhost(vh_p)
        self.vhosts.append(ssl_vhost)
        # NOTE: Searches through Augeas seem to ruin changes to directives
        #       The configuration must also be saved before being searched
        #       for the new directives; For these reasons... this is tacked
        #       on after fully creating the new vhost
        # Now check if addresses need to be added as NameBasedVhost addrs
        # This is for compliance with versions of Apache < 2.4
        self._add_name_vhost_if_necessary(ssl_vhost)
        return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
    def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
        """Copies over existing Vhost with IfModule mod_ssl.c> skeleton.

        :param str avail_fp: Pointer to the original available non-ssl vhost
        :param str ssl_fp: Full path where the new ssl_vhost will reside.

        A new file is created on the filesystem.

        :raises .errors.PluginError: If either file cannot be read/written.

        """
        # First register the creation so that it is properly removed if
        # configuration is rolled back
        self.reverter.register_file_creation(False, ssl_fp)
        try:
            with open(avail_fp, "r") as orig_file:
                with open(ssl_fp, "w") as new_file:
                    # Wrap the copied vhost in <IfModule mod_ssl.c> so it is
                    # only active when mod_ssl is loaded
                    new_file.write("<IfModule mod_ssl.c>\n")
                    for line in orig_file:
                        new_file.write(line)
                    new_file.write("</IfModule>\n")
        except IOError:
            logger.fatal("Error writing/reading to file in make_vhost_ssl")
            raise errors.PluginError("Unable to write/read in make_vhost_ssl")
    def _update_ssl_vhosts_addrs(self, vh_path):
        """Rewrite each address of the new ssl vhost to bind port 443.

        :param str vh_path: Augeas path to the ssl vhost

        :returns: the updated addresses
        :rtype: set of :class:`~letsencrypt_apache.obj.Addr`

        """
        ssl_addrs = set()
        ssl_addr_p = self.aug.match(vh_path + "/arg")
        for addr in ssl_addr_p:
            old_addr = obj.Addr.fromstring(
                str(self.parser.get_arg(addr)))
            # Same address, but on port 443
            ssl_addr = old_addr.get_addr_obj("443")
            self.aug.set(addr, str(ssl_addr))
            ssl_addrs.add(ssl_addr)
        return ssl_addrs
    def _add_dummy_ssl_directives(self, vh_path):
        """Add placeholder SSL directives to the new ssl vhost.

        The cert/key values are placeholders that deploy_cert() later
        overwrites with the real file paths.

        :param str vh_path: Augeas path to the ssl vhost

        """
        self.parser.add_dir(vh_path, "SSLCertificateFile",
                            "insert_cert_file_path")
        self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
                            "insert_key_file_path")
        # Pull in the shared SSL options configuration file
        self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
    def _add_name_vhost_if_necessary(self, vhost):
        """Add NameVirtualHost Directives if necessary for new vhost.

        NameVirtualHosts was a directive in Apache < 2.4
        https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost

        :param vhost: New virtual host that was recently created.
        :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        need_to_save = False
        # See if the exact address appears in any other vhost
        # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
        for addr in vhost.addrs:
            for test_vh in self.vhosts:
                if (vhost.filep != test_vh.filep and
                        any(test_addr == addr for test_addr in test_vh.addrs) and
                        not self.is_name_vhost(addr)):
                    self.add_name_vhost(addr)
                    logger.info("Enabling NameVirtualHosts on %s", addr)
                    need_to_save = True
        # Persist only when at least one directive was added
        if need_to_save:
            self.save()
############################################################################
# Enhancements
############################################################################
    def supported_enhancements(self):  # pylint: disable=no-self-use
        """Returns currently supported enhancements.

        :rtype: list

        """
        # Only HTTP->HTTPS redirection is implemented (see _enhance_func)
        return ["redirect"]
    def enhance(self, domain, enhancement, options=None):
        """Enhance configuration.

        :param str domain: domain to enhance
        :param str enhancement: enhancement type defined in
            :const:`~letsencrypt.constants.ENHANCEMENTS`
        :param options: options for the enhancement
            See :const:`~letsencrypt.constants.ENHANCEMENTS`
            documentation for appropriate parameter.

        :raises .errors.PluginError: If Enhancement is not supported, or if
            there is any other problem with the enhancement.

        """
        try:
            # Look up the handler registered for this enhancement type
            func = self._enhance_func[enhancement]
        except KeyError:
            raise errors.PluginError(
                "Unsupported enhancement: {0}".format(enhancement))
        try:
            func(self.choose_vhost(domain), options)
        except errors.PluginError:
            # Log the failure, then let the caller see the original error
            logger.warn("Failed %s for %s", enhancement, domain)
            raise
def _enable_redirect(self, ssl_vhost, unused_options):
    """Redirect all equivalent HTTP traffic to ssl_vhost.

    .. todo:: This enhancement should be rewritten and will
       unfortunately require lots of debugging by hand.

    Adds Redirect directive to the port 80 equivalent of ssl_vhost
    First the function attempts to find the vhost with equivalent
    ip addresses that serves on non-ssl ports
    The function then adds the directive

    .. note:: This function saves the configuration

    :param ssl_vhost: Destination of traffic, an ssl enabled vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :param unused_options: Not currently used
    :type unused_options: Not Available

    :raises .errors.PluginError: If no viable HTTP host can be created or
        used for the redirect.

    """
    # mod_rewrite must be loaded for the RewriteRule-based redirect.
    if "rewrite_module" not in self.parser.modules:
        self.enable_mod("rewrite")
    general_vh = self._get_http_vhost(ssl_vhost)
    if general_vh is None:
        # Add virtual_server with redirect
        logger.debug("Did not find http version of ssl virtual host "
                     "attempting to create")
        redirect_addrs = self._get_proposed_addrs(ssl_vhost)
        # Refuse to create a new vhost whose addresses would clash with
        # an already-enabled vhost.
        for vhost in self.vhosts:
            if vhost.enabled and vhost.conflicts(redirect_addrs):
                raise errors.PluginError(
                    "Unable to find corresponding HTTP vhost; "
                    "Unable to create one as intended addresses conflict; "
                    "Current configuration does not support automated "
                    "redirection")
        self._create_redirect_vhost(ssl_vhost)
    else:
        # Check if redirection already exists
        self._verify_no_redirects(general_vh)
        # Add directives to server
        # Note: These are not immediately searchable in sites-enabled
        # even with save() and load()
        self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
        self.parser.add_dir(general_vh.path, "RewriteRule",
                            constants.REWRITE_HTTPS_ARGS)
        self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
                            (general_vh.filep, ssl_vhost.filep))
        self.save()
        logger.info("Redirecting vhost in %s to ssl vhost in %s",
                    general_vh.filep, ssl_vhost.filep)
def _verify_no_redirects(self, vhost):
    """Checks to see if existing redirect is in place.

    Checks to see if virtualhost already contains a rewrite or redirect.
    Returns normally when no redirection is found; raises otherwise.

    :param vhost: vhost to check
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises errors.PluginError: When another redirection exists

    """
    rewrite_path = self.parser.find_dir(
        "RewriteRule", None, start=vhost.path)
    redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path)
    if redirect_path:
        # "Existing Redirect directive for virtualhost"
        raise errors.PluginError("Existing Redirect present on HTTP vhost.")
    if rewrite_path:
        # An existing rewrite is only acceptable when it is exactly the
        # rule this plugin writes; anything else is unknown.
        if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS):
            raise errors.PluginError("Unknown Existing RewriteRule")
        # builtin zip() replaces the Python-2-only itertools.izip;
        # semantics are identical for this finite pairwise walk.
        for match, arg in zip(rewrite_path, constants.REWRITE_HTTPS_ARGS):
            if self.aug.get(match) != arg:
                raise errors.PluginError("Unknown Existing RewriteRule")
        raise errors.PluginError(
            "Let's Encrypt has already enabled redirection")
def _create_redirect_vhost(self, ssl_vhost):
    """Creates an http_vhost specifically to redirect for the ssl_vhost.

    :param ssl_vhost: ssl vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    text = self._get_redirect_config_str(ssl_vhost)
    redirect_filepath = self._write_out_redirect(ssl_vhost, text)
    # Reload Augeas so the freshly written file becomes visible.
    self.aug.load()
    # Make a new vhost data structure and add it to the lists
    new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
    self.vhosts.append(new_vhost)
    # Finally create documentation for the change
    self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
                        "ssl vhost %s\n" %
                        (new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
    """Build the configuration text for a port-80 redirect-only vhost."""
    # get servernames and serveraliases
    servername = ""
    serveralias = ""
    if ssl_vhost.name is not None:
        servername = "ServerName " + ssl_vhost.name
    if ssl_vhost.aliases:
        serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)

    addr_str = " ".join(
        str(addr) for addr in self._get_proposed_addrs(ssl_vhost))
    rewrite_rule = " ".join(constants.REWRITE_HTTPS_ARGS)

    template = ("<VirtualHost %s>\n"
                "%s \n"
                "%s \n"
                "ServerSignature Off\n"
                "\n"
                "RewriteEngine On\n"
                "RewriteRule %s\n"
                "\n"
                "ErrorLog /var/log/apache2/redirect.error.log\n"
                "LogLevel warn\n"
                "</VirtualHost>\n")
    return template % (addr_str, servername, serveralias, rewrite_rule)
def _write_out_redirect(self, ssl_vhost, text):
    """Write the redirect configuration into sites-available.

    :param ssl_vhost: vhost the redirect file is being created for
    :param str text: full configuration text to write

    :returns: path of the file that was written
    :rtype: str

    """
    # This is the default name
    redirect_filename = "le-redirect.conf"
    # See if a more appropriate name can be applied
    if ssl_vhost.name is not None:
        # make sure servername doesn't exceed filename length restriction
        if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
            redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
    redirect_filepath = os.path.join(
        self.parser.root, "sites-available", redirect_filename)
    # Register the new file that will be created
    # Note: always register the creation before writing to ensure file will
    # be removed in case of unexpected program exit
    self.reverter.register_file_creation(False, redirect_filepath)
    # Write out file
    with open(redirect_filepath, "w") as redirect_file:
        redirect_file.write(text)
    logger.info("Created redirect file: %s", redirect_filename)
    return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
    """Find appropriate HTTP vhost for ssl_vhost."""
    # Walk the known vhosts in order and return the first non-SSL one
    # that serves the same server as the SSL vhost.
    for candidate in self.vhosts:
        if candidate.ssl:
            continue
        if candidate.same_server(ssl_vhost):
            return candidate
    return None
def _get_proposed_addrs(self, vhost, port="80"):  # pylint: disable=no-self-use
    """Return all addrs of vhost with the port replaced with the specified.

    :param obj.VirtualHost vhost: Original Vhost
    :param str port: Desired port for new addresses

    :returns: `set` of :class:`~obj.Addr`

    """
    return set(addr.get_addr_obj(port) for addr in vhost.addrs)
def get_all_certs_keys(self):
    """Find all existing keys, certs from configuration.

    Retrieve all certs and keys set in VirtualHosts on the Apache server

    :returns: set of tuples with form (cert, key, path), where
        cert - str path to certificate file
        key - str path to associated key file
        path - File path to configuration file.
    :rtype: set

    """
    c_k = set()
    for vhost in self.vhosts:
        if vhost.ssl:
            cert_path = self.parser.find_dir(
                "SSLCertificateFile", None,
                start=vhost.path, exclude=False)
            key_path = self.parser.find_dir(
                "SSLCertificateKeyFile", None,
                start=vhost.path, exclude=False)
            if cert_path and key_path:
                # Use the last directive of each kind found in the vhost.
                cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
                key = os.path.abspath(self.parser.get_arg(key_path[-1]))
                c_k.add((cert, key, get_file_path(cert_path[-1])))
            else:
                logger.warning(
                    "Invalid VirtualHost configuration - %s", vhost.filep)
    return c_k
def is_site_enabled(self, avail_fp):
    """Checks to see if the given site is enabled.

    .. todo:: fix hardcoded sites-enabled, check os.path.samefile

    :param str avail_fp: Complete file path of available site

    :returns: Success
    :rtype: bool

    """
    enabled_dir = os.path.join(self.parser.root, "sites-enabled")
    # A site is enabled iff some entry in sites-enabled resolves
    # (following symlinks) to the available file path.
    return any(
        os.path.realpath(os.path.join(enabled_dir, name)) == avail_fp
        for name in os.listdir(enabled_dir))
def enable_site(self, vhost):
    """Enables an available site, Apache restart required.

    .. note:: Does not make sure that the site correctly works or that all
       modules are enabled appropriately.

    .. todo:: This function should number subdomains before the domain vhost

    .. todo:: Make sure link is not broken...

    :param vhost: vhost to enable
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.NotSupportedError: If filesystem layout is not
        supported.

    """
    if self.is_site_enabled(vhost.filep):
        return
    if "/sites-available/" in vhost.filep:
        enabled_path = ("%s/sites-enabled/%s" %
                        (self.parser.root, os.path.basename(vhost.filep)))
        # Register the creation first so the symlink is removed on rollback.
        self.reverter.register_file_creation(False, enabled_path)
        os.symlink(vhost.filep, enabled_path)
        vhost.enabled = True
        logger.info("Enabling available site: %s", vhost.filep)
        self.save_notes += "Enabled site %s\n" % vhost.filep
    else:
        raise errors.NotSupportedError(
            "Unsupported filesystem layout. "
            "sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
    """Enables module in Apache.

    Both enables and restarts Apache so module is active.

    :param str mod_name: Name of the module to enable. (e.g. 'ssl')
    :param bool temp: Whether or not this is a temporary action.

    :raises .errors.NotSupportedError: If the filesystem layout is not
        supported.
    :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
        run.

    """
    # Support Debian specific setup
    avail_path = os.path.join(self.parser.root, "mods-available")
    enabled_path = os.path.join(self.parser.root, "mods-enabled")
    if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
        raise errors.NotSupportedError(
            "Unsupported directory layout. You may try to enable mod %s "
            "and try again." % mod_name)
    deps = _get_mod_deps(mod_name)
    # Enable all dependencies
    for dep in deps:
        if (dep + "_module") not in self.parser.modules:
            self._enable_mod_debian(dep, temp)
            self._add_parser_mod(dep)
            note = "Enabled dependency of %s module - %s" % (mod_name, dep)
            if not temp:
                self.save_notes += note + os.linesep
            logger.debug(note)
    # Enable actual module
    self._enable_mod_debian(mod_name, temp)
    self._add_parser_mod(mod_name)
    if not temp:
        self.save_notes += "Enabled %s module in Apache\n" % mod_name
    logger.info("Enabled Apache %s module", mod_name)
    # Modules can enable additional config files. Variables may be defined
    # within these new configuration sections.
    # Restart is not necessary as DUMP_RUN_CFG uses latest config.
    self.parser.update_runtime_variables(self.conf("ctl"))
def _add_parser_mod(self, mod_name):
    """Shortcut for updating parser modules."""
    # Apache exposes a loaded module both as "<name>_module" and as its
    # source file name "mod_<name>.c"; track both spellings.
    self.parser.modules.update(
        (mod_name + "_module", "mod_" + mod_name + ".c"))
def _enable_mod_debian(self, mod_name, temp):
    """Assumes mods-available, mods-enabled layout.

    Registers an a2dismod undo command before running a2enmod so the
    change can be reverted.

    :param str mod_name: module to enable
    :param bool temp: whether the undo registration is temporary
    """
    # Generate reversal command.
    # Try to be safe here... check that we can probably reverse before
    # applying enmod command
    if not le_util.exe_exists(self.conf("dismod")):
        raise errors.MisconfigurationError(
            "Unable to find a2dismod, please make sure a2enmod and "
            "a2dismod are configured correctly for letsencrypt.")
    self.reverter.register_undo_command(
        temp, [self.conf("dismod"), mod_name])
    le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
    """Restarts apache server.

    .. todo:: This function will be converted to using reload

    :raises .errors.MisconfigurationError: If unable to restart due
        to a configuration problem, or if the restart subprocess
        cannot be run.

    """
    init_script = self.conf("init-script")
    return apache_restart(init_script)
def config_test(self):  # pylint: disable=no-self-use
    """Check the configuration of Apache for errors.

    Runs ``apachectl configtest`` and converts a subprocess failure
    into a configuration error.

    :raises .errors.MisconfigurationError: If config_test fails

    """
    try:
        le_util.run_script([self.conf("ctl"), "configtest"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def get_version(self):
    """Return version of Apache Server.

    Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))

    :returns: version
    :rtype: tuple

    :raises .PluginError: if unable to find Apache version

    """
    try:
        stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
    except errors.SubprocessError:
        raise errors.PluginError(
            "Unable to run %s -v" % self.conf("ctl"))

    # The banner must contain exactly one "Apache/X.Y.Z" token.
    matches = re.findall(r"Apache/([0-9\.]*)", stdout, re.IGNORECASE)
    if len(matches) != 1:
        raise errors.PluginError("Unable to find Apache version")

    return tuple(int(part) for part in matches[0].split("."))
def more_info(self):
    """Human-readable string to help understand the module"""
    template = (
        "Configures Apache to authenticate and install HTTPS.{0}"
        "Server root: {root}{0}"
        "Version: {version}")
    return template.format(
        os.linesep, root=self.parser.loc["root"],
        version=".".join(str(i) for i in self.version))
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
    """Return list of challenge preferences."""
    # DVSNI is the only challenge type this configurator can satisfy.
    return [challenges.DVSNI]
def perform(self, achalls):
    """Perform the configuration related challenge.

    This function currently assumes all challenges will be fulfilled.
    If this turns out not to be the case in the future. Cleanup and
    outstanding challenges will have to be designed better.

    :param achalls: annotated challenges to fulfill
    :returns: list of challenge responses, parallel to ``achalls``
    """
    self._chall_out.update(achalls)
    responses = [None] * len(achalls)
    apache_dvsni = dvsni.ApacheDvsni(self)
    for i, achall in enumerate(achalls):
        if isinstance(achall, achallenges.DVSNI):
            # Currently also have dvsni hold associated index
            # of the challenge. This helps to put all of the responses back
            # together when they are all complete.
            apache_dvsni.add_chall(achall, i)
    sni_response = apache_dvsni.perform()
    if sni_response:
        # Must restart in order to activate the challenges.
        # Handled here because we may be able to load up other challenge
        # types
        self.restart()
        # Go through all of the challenges and assign them to the proper
        # place in the responses return value. All responses must be in the
        # same order as the original challenges.
        for i, resp in enumerate(sni_response):
            responses[apache_dvsni.indices[i]] = resp
    return responses
def cleanup(self, achalls):
    """Revert all challenges."""
    self._chall_out.difference_update(achalls)

    # Only tear down the challenge config once nothing is outstanding.
    if self._chall_out:
        return
    self.revert_challenge_config()
    self.restart()
    self.parser.init_modules()
def _get_mod_deps(mod_name):
    """Get known module dependencies.

    .. note:: This does not need to be accurate in order for the client to
       run. This simply keeps things clean if the user decides to revert
       changes.

    .. warning:: If all deps are not included, it may cause incorrect parsing
       behavior, due to enable_mod's shortcut for updating the parser's
       currently defined modules (`.ApacheConfigurator._add_parser_mod`)
       This would only present a major problem in extremely atypical
       configs that use ifmod for the missing deps.

    """
    known_deps = {"ssl": ["setenvif", "mime", "socache_shmcb"]}
    return known_deps.get(mod_name, [])
def apache_restart(apache_init_script):
    """Restarts the Apache Server.

    :param str apache_init_script: Path to the Apache init script.

    .. todo:: Try to use reload instead. (This caused timing problems before)

    .. todo:: On failure, this should be a recovery_routine call with another
       restart. This will confuse and inhibit developers from testing code
       though. This change should happen after
       the ApacheConfigurator has been thoroughly tested. The function will
       need to be moved into the class again. Perhaps
       this version can live on... for testing purposes.

    :raises .errors.MisconfigurationError: If unable to restart due to a
        configuration problem, or if the restart subprocess cannot be run.

    """
    try:
        proc = subprocess.Popen([apache_init_script, "restart"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    except (OSError, ValueError):
        # Popen itself failed (bad path or argument), not the restart.
        logger.fatal(
            "Unable to restart the Apache process with %s", apache_init_script)
        raise errors.MisconfigurationError(
            "Unable to restart Apache process with %s" % apache_init_script)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Enter recovery routine...
        logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr)
        raise errors.MisconfigurationError(
            "Error while restarting Apache:\n%s\n%s" % (stdout, stderr))
def get_file_path(vhost_path):
    """Get file path from augeas_vhost_path.

    Takes in Augeas path and returns the file name

    :param str vhost_path: Augeas virtual host path

    :returns: filename of vhost
    :rtype: str

    """
    # Strip off the leading "/files" prefix of Augeas paths.
    path = vhost_path[6:]

    # Repeatedly cut off trailing section components (case-insensitively)
    # until only the plain file path remains.
    stripped = True
    while stripped:
        stripped = False
        for marker in ("/ifmodule", "/virtualhost"):
            idx = path.lower().find(marker)
            if idx != -1:
                path = path[:idx]
                stripped = True
                break
    return path
def temp_install(options_ssl):
    """Temporary install for convenience.

    Copies the packaged mod_ssl options file into place when missing.

    :param str options_ssl: destination path for the options-ssl.conf file
    """
    # WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
    # THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
    # AND TAKEN OUT BEFORE RELEASE, INSTEAD
    # SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.
    # Check to make sure options-ssl.conf is installed
    if not os.path.isfile(options_ssl):
        shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
PyRoc.py
Created by Marcel Caraciolo on 2009-11-16.
Copyright (c) 2009 Federal University of Pernambuco. All rights reserved.
IMPORTANT:
Based on the original code by Eithon Cadag (http://www.eithoncadag.com/files/pyroc.txt)
Python Module for calculating the area under the receive operating characteristic curve, given a dataset.
0.1 - First Release
0.2 - Updated the code by adding new metrics for analysis with the confusion matrix.
"""
from __future__ import print_function
import random
import math
try:
    import pylab
except ImportError:
    # matplotlib is optional: AUC and metric computations still work,
    # only the plot_* functions require pylab.  Catching ImportError
    # (instead of a bare except) avoids hiding unrelated failures.
    print("error: can't import pylab module, you must install the module:\n")
    print(" matplotlib to plot charts!'\n")
def random_mixture_model(pos_mu=.6, pos_sigma=.1, neg_mu=.4, neg_sigma=.1, size=200):
    """Return a synthetic dataset of (class, score) tuples.

    Half the examples are positives (class 1) drawn from N(pos_mu, pos_sigma),
    half negatives (class 0) drawn from N(neg_mu, neg_sigma).

    Parameters:
        pos_mu, pos_sigma: mean / std-dev of the positive score distribution
        neg_mu, neg_sigma: mean / std-dev of the negative score distribution
        size: total number of examples (size // 2 per class)
    """
    # range() is iterated directly; the original list(range(int(size/2)))
    # built a throwaway list for no benefit.
    half = size // 2
    pos = [(1, random.gauss(pos_mu, pos_sigma)) for _ in range(half)]
    neg = [(0, random.gauss(neg_mu, neg_sigma)) for _ in range(half)]
    return pos + neg
def plot_multiple_rocs_separate(rocList, title='', labels=None, equal_aspect=True):
    """ Plot multiples ROC curves as separate at the same painting area.

    Parameters:
        rocList: list of ROCData instances (a 4x4 subplot grid is used,
            so at most 16 curves fit)
        title: figure title
        labels: optional list of per-curve labels, parallel to rocList
        equal_aspect: keep a 1:1 aspect ratio for every subplot
    """
    pylab.clf()
    pylab.title(title)
    for ix, r in enumerate(rocList):
        # One small, unlabeled subplot per curve.
        ax = pylab.subplot(4, 4, ix + 1)
        pylab.ylim((0, 1))
        pylab.xlim((0, 1))
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        if equal_aspect:
            cax = pylab.gca()
            cax.set_aspect('equal')
        if not labels:
            labels = ['' for x in rocList]
        pylab.text(0.2, 0.1, labels[ix], fontsize=8)
        pylab.plot([x[0] for x in r.derived_points], [y[1] for y in r.derived_points], 'r-', linewidth=2)
    pylab.show()
def _remove_duplicate_styles(rocList):
    """ Checks for duplicate linestyles and replaces duplicates with a random one."""
    pref_styles = ['cx-', 'mx-', 'yx-', 'gx-', 'bx-', 'rx-']
    points = 'ov^>+xd'
    colors = 'bgrcmy'
    lines = ['-', '-.', ':']

    seen = []
    for roc in rocList:
        if roc.linestyle not in seen:
            seen.append(roc.linestyle)
            continue
        # Duplicate style: first exhaust the preferred styles (popped from
        # the end of the list), then fall back to random combinations of
        # colour + point marker + line type.
        while True:
            if pref_styles:
                candidate = pref_styles.pop()
                if candidate not in seen:
                    roc.linestyle = candidate
                    seen.append(candidate)
                    break
            else:
                candidate = ''.join(
                    random.sample(colors, 1) +
                    random.sample(points, 1) +
                    random.sample(lines, 1))
                if candidate not in seen:
                    roc.linestyle = candidate
                    seen.append(candidate)
                    break
def plot_multiple_roc(rocList, title='', labels=None, include_baseline=False, equal_aspect=True):
    """ Plots multiple ROC curves on the same chart.

    Parameters:
        rocList: the list of ROCData objects
        title: The tile of the chart
        labels: The labels of each ROC curve
        include_baseline: if it's True include the random baseline
        equal_aspect: keep equal aspect for all roc curves
    """
    pylab.clf()
    pylab.ylim((0, 1))
    pylab.xlim((0, 1))
    pylab.xticks(pylab.arange(0, 1.1, .1))
    pylab.yticks(pylab.arange(0, 1.1, .1))
    pylab.grid(True)
    if equal_aspect:
        cax = pylab.gca()
        cax.set_aspect('equal')
    pylab.xlabel("1 - Specificity")
    pylab.ylabel("Sensitivity")
    pylab.title(title)
    if not labels:
        labels = ['' for x in rocList]
    # Make sure no two curves share a linestyle before drawing.
    _remove_duplicate_styles(rocList)
    for ix, r in enumerate(rocList):
        pylab.plot([x[0] for x in r.derived_points], [y[1] for y in r.derived_points], r.linestyle, linewidth=1, label=labels[ix])
    if include_baseline:
        pylab.plot([0.0, 1.0], [0.0, 1.0], 'k-', label='random')
    if labels:
        pylab.legend(loc='lower right')
    pylab.show()
def load_decision_function(path):
    """ Function to load the decision function (DataSet)

    Parameters:
        path: The dataset file path.  Each non-empty line must contain
            exactly two whitespace-separated fields: "<class> <score>".

    Return:
        model_data: list of (int class, float score) tuples
    """
    model_data = []
    # "with" guarantees the handle is closed even if a line fails to
    # parse (the original left the file open on error).
    with open(path, 'r') as file_handler:
        for line in file_handler:
            fields = line.strip().split()
            if len(fields) == 0:
                continue  # skip blank lines
            f_class, f_value = fields
            model_data.append((int(f_class), float(f_value)))
    return model_data
class ROCData(object):
    """ Class that generates an ROC Curve for the data.
    Data is in the following format: a list l of tuples t
    where:
        t[0] = 1 for positive class and t[0] = 0 for negative class
        t[1] = score
        t[2] = label
    """

    def __init__(self, data, linestyle='rx-'):
        """ Constructor takes the data and the line style for plotting the ROC Curve.

        Parameters:
            data: The data a list l of tuples t (l = [t_0,t_1,...t_n]) where:
                t[0] = 1 for positive class and 0 for negative class
                t[1] = a score
                t[2] = any label (optional)
            linestyle: The matplotlib style string for plots.

        Note: The ROCData is still usable w/o matplotlib. The AUC is still available,
        but plots cannot be generated.
        """
        # Sort descending by score.  The key-based sort replaces the original
        # Python-2-only cmp-based sort and is behaviorally identical
        # (both are stable for tied scores).
        self.data = sorted(data, key=lambda t: t[1], reverse=True)
        self.linestyle = linestyle
        self.auc()  # Seed initial points with default full ROC

    def auc(self, fpnum=0):
        """ Uses the trapezoidal rule to calculate the area under the curve. If fpnum is
        supplied, it will calculate a partial AUC, up to the number of false positives
        in fpnum (the partial AUC is scaled to between 0 and 1).

        It assumes that the positive class is expected to have the higher of the scores.

        Parameters:
            fpnum: The cumulative FP count (fps); 0 computes the full AUC.

        Return:
            The (partial) area under the ROC curve.  Side effect: refreshes
            self.derived_points with (fpr, tpr, fp_count) tuples.
        """
        fps_count = 0
        relevant_pauc = []
        current_index = 0
        max_n = len([x for x in self.data if x[0] == 0])

        if fpnum == 0:
            relevant_pauc = [x for x in self.data]
        else:
            # BUGFIX: the original clamped fpnum in a separate elif branch
            # and then skipped collecting any points, so any fpnum > max_n
            # silently yielded an AUC of 0.  Clamp and fall through instead.
            if fpnum > max_n:
                fpnum = max_n
            # Find the upper limit of the data that does not exceed n FPs
            while fps_count < fpnum:
                relevant_pauc.append(self.data[current_index])
                if self.data[current_index][0] == 0:
                    fps_count += 1
                current_index += 1

        total_n = len([x for x in relevant_pauc if x[0] == 0])
        total_p = len(relevant_pauc) - total_n

        # Convert to points in a ROC: a point is emitted each time the
        # score changes, so tied scores advance both counts first.
        previous_df = -1000000.0
        current_index = 0
        points = []
        tp_count, fp_count = 0.0, 0.0
        tpr, fpr = 0, 0
        while current_index < len(relevant_pauc):
            df = relevant_pauc[current_index][1]
            if previous_df != df:
                points.append((fpr, tpr, fp_count))
            if relevant_pauc[current_index][0] == 0:
                fp_count += 1
            elif relevant_pauc[current_index][0] == 1:
                tp_count += 1
            fpr = fp_count / total_n
            tpr = tp_count / total_p
            previous_df = df
            current_index += 1
        points.append((fpr, tpr, fp_count))  # Add last point
        points.sort(key=lambda i: (i[0], i[1]))
        self.derived_points = points

        return self._trapezoidal_rule(points)

    def _trapezoidal_rule(self, curve_pts):
        """ Method to calculate the area under the ROC curve"""
        cum_area = 0.0
        for ix, cur_pt in enumerate(curve_pts[0:-1]):
            next_pt = curve_pts[ix + 1]
            cum_area += ((cur_pt[1] + next_pt[1]) / 2.0) * (next_pt[0] - cur_pt[0])
        return cum_area

    def calculateStandardError(self, fpnum=0):
        """ Returns the standard error associated with the curve.

        Parameters:
            fpnum: The cumulative FP count (fps)

        Return:
            the standard error (Hanley & McNeil approximation).
        """
        area = self.auc(fpnum)
        # real positive cases
        Na = len([x for x in self.data if x[0] == 1])
        # real negative cases
        Nn = len([x for x in self.data if x[0] == 0])
        Q1 = area / (2.0 - area)
        Q2 = 2 * area * area / (1.0 + area)
        return math.sqrt((area * (1.0 - area) + (Na - 1.0) * (Q1 - area * area) +
                          (Nn - 1.0) * (Q2 - area * area)) / (Na * Nn))

    def plot(self, title='', include_baseline=False, equal_aspect=True):
        """ Method that generates a plot of the ROC curve

        Parameters:
            title: Title of the chart
            include_baseline: Add the baseline plot line if it's True
            equal_aspect: Aspects to be equal for all plot
        """
        pylab.clf()
        pylab.plot([x[0] for x in self.derived_points],
                   [y[1] for y in self.derived_points], self.linestyle)
        if include_baseline:
            pylab.plot([0.0, 1.0], [0.0, 1.0], 'k-.')
        pylab.ylim((0, 1))
        pylab.xlim((0, 1))
        pylab.xticks(pylab.arange(0, 1.1, .1))
        pylab.yticks(pylab.arange(0, 1.1, .1))
        pylab.grid(True)
        if equal_aspect:
            cax = pylab.gca()
            cax.set_aspect('equal')
        pylab.xlabel('1 - Specificity')
        pylab.ylabel('Sensitivity')
        pylab.title(title)
        pylab.show()

    def confusion_matrix(self, threshold, do_print=False):
        """ Returns the confusion matrix (in dictionary form) for a given threshold
        where all elements > threshold are considered 1 , all else 0.

        Parameters:
            threshold: threshold to check the decision function
            do_print: if it's True show the confusion matrix in the screen

        Return:
            the dictionary with the TP, FP, FN, TN
        """
        # Note: scores exactly equal to the threshold count as positive.
        pos_points = [x for x in self.data if x[1] >= threshold]
        neg_points = [x for x in self.data if x[1] < threshold]
        tp, fp, fn, tn = self._calculate_counts(pos_points, neg_points)
        if do_print:
            print(" Actual class")
            print(" +(1) -(0)")
            print("+(1) %i %i Predicted" % (tp, fp))
            print("-(0) %i %i class" % (fn, tn))
        return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn}

    def evaluateMetrics(self, matrix, metric=None, do_print=False):
        """ Returns the metrics evaluated from the confusion matrix.

        Parameters:
            matrix: the confusion matrix
            metric: unused placeholder, kept for interface compatibility;
                all metrics are always computed.
            do_print: if it's True show the metrics in the screen

        Return:
            the dictionary with the Accuracy, Sensitivity, Specificity, Efficiency,
            PositivePredictiveValue, NegativePredictiveValue, PhiCoefficient
        """
        accuracy = (matrix['TP'] + matrix['TN']) / float(sum(matrix.values()))
        sensitivity = (matrix['TP']) / float(matrix['TP'] + matrix['FN'])
        specificity = (matrix['TN']) / float(matrix['TN'] + matrix['FP'])
        efficiency = (sensitivity + specificity) / 2.0
        positivePredictiveValue = matrix['TP'] / float(matrix['TP'] + matrix['FP'])
        NegativePredictiveValue = matrix['TN'] / float(matrix['TN'] + matrix['FN'])
        # NOTE(review): the trailing "or 1.0" only replaces the value when
        # the quotient evaluates to 0 (falsy); it does NOT guard against a
        # zero denominator.  Behavior preserved as-is.
        PhiCoefficient = (matrix['TP'] * matrix['TN'] - matrix['FP'] * matrix['FN']) / (
            math.sqrt((matrix['TP'] + matrix['FP']) *
                      (matrix['TP'] + matrix['FN']) *
                      (matrix['TN'] + matrix['FP']) *
                      (matrix['TN'] + matrix['FN']))) or 1.0

        if do_print:
            print('Sensitivity: ', sensitivity)
            print('Specificity: ', specificity)
            print('Efficiency: ', efficiency)
            print('Accuracy: ', accuracy)
            print('PositivePredictiveValue: ', positivePredictiveValue)
            print('NegativePredictiveValue', NegativePredictiveValue)
            print('PhiCoefficient', PhiCoefficient)

        return {'SENS': sensitivity, 'SPEC': specificity, 'ACC': accuracy, 'EFF': efficiency,
                'PPV': positivePredictiveValue, 'NPV': NegativePredictiveValue, 'PHI': PhiCoefficient}

    def _calculate_counts(self, pos_data, neg_data):
        """ Calculates the number of false positives, true positives, false negatives and true negatives """
        tp_count = len([x for x in pos_data if x[0] == 1])
        fp_count = len([x for x in pos_data if x[0] == 0])
        fn_count = len([x for x in neg_data if x[0] == 1])
        tn_count = len([x for x in neg_data if x[0] == 0])
        return tp_count, fp_count, fn_count, tn_count
if __name__ == '__main__':
    # Simple CLI: read a "<class> <score>" dataset file, print the (partial)
    # AUC, its standard error and the derived ROC points; optionally plot.
    print("PyRoC - ROC Curve Generator")
    print("By Marcel Pinheiro Caraciolo (@marcelcaraciolo)")
    print("http://aimotion.bogspot.com\n")
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-f', '--file', dest='origFile', help="Path to a file with the class and decision function. The first column of each row is the class, and the second the decision score.")
    parser.add_option("-n", "--max fp", dest = "fp_n", default=0, help= "Maximum false positives to calculate up to (for partial AUC).")
    parser.add_option("-p","--plot", action="store_true",dest='plotFlag', default=False, help="Plot the ROC curve (matplotlib required)")
    parser.add_option("-t",'--title', dest= 'ptitle' , default='' , help = 'Title of plot.')
    (options, args) = parser.parse_args()
    if (not options.origFile):
        # No input file given: show usage and exit.
        parser.print_help()
        exit()
    df_data = load_decision_function(options.origFile)
    roc = ROCData(df_data)
    roc_n = int(options.fp_n)
    print("ROC AUC: %s" % (str(roc.auc(roc_n)),))
    print('Standard Error: %s' % (str(roc.calculateStandardError(roc_n)),))
    print('')
    # Dump the (fpr, tpr) pairs of the derived curve.
    for pt in roc.derived_points:
        print(pt[0], pt[1])
    if options.plotFlag:
        roc.plot(options.ptitle, True, True)
| |
import httplib, urllib2
from datetime import datetime
from decimal import Decimal
from xml.etree.ElementTree import fromstring
from base64 import b64encode
API_VERSION = 'v4'
class Client:
def __init__(self, token, site_name):
    """Store credentials and precompute the API endpoint pieces."""
    self.base_host = 'subs.pinpayments.com'
    self.base_path = '/api/%s/%s' % (API_VERSION, site_name)
    self.base_url = 'https://%s%s' % (self.base_host, self.base_path)
    # HTTP Basic auth: the API token is the username, "x" the password.
    self.auth = b64encode('%s:x' % token)
    self.url = None
def get_response(self):
    """Return the raw body of the most recent API response."""
    return self.response
def get_url(self):
    """Return the full URL set for the next request (None if unset)."""
    return self.url
def set_url(self, url):
    """Point the client at ``<base_url>/<url>`` for the next query."""
    self.url = '%s/%s' % (self.base_url, url)
def query(self, data=None):
    """Execute a request against the URL set via set_url().

    GET by default; passing ``data`` switches the request to POST with an
    XML content type.  The raw response body is stored in self.response.

    :param data: optional XML request body
    """
    req = urllib2.Request(url=self.get_url())
    req.add_header('User-agent', 'python-spreedly 1.0')
    req.add_header('Authorization', 'Basic %s' % self.auth)
    # Convert to POST if we got some data
    if data:
        req.add_header('Content-Type', 'application/xml')
        req.add_data(data)
    f = urllib2.urlopen(req)
    self.response = f.read()
def get_plans(self):
    """Fetch all subscription plans configured for the site.

    Performs a GET on subscription_plans.xml and converts every
    <subscription-plan> element into a plain dict with parsed types
    (Decimal price, datetimes, booleans, ints).

    :returns: list of plan dicts
    """
    self.set_url('subscription_plans.xml')
    self.query()
    # Parse
    result = []
    tree = fromstring(self.get_response())
    # NOTE(review): getiterator() is deprecated in modern ElementTree in
    # favor of iter(); kept for the old Python this module targets.
    for plan in tree.getiterator('subscription-plan'):
        data = {
            'name': plan.findtext('name'),
            'description': plan.findtext('description'),
            'terms': plan.findtext('terms'),
            'plan_type': plan.findtext('plan-type'),
            'price': Decimal(plan.findtext('price')),
            'enabled': True if plan.findtext('enabled') == 'true' else False,
            'force_recurring': \
                True if plan.findtext('force-recurring') == 'true' else False,
            'force_renew': \
                True if plan.findtext('needs-to-be-renewed') == 'true' else False,
            'duration': int(plan.findtext('duration-quantity')),
            'duration_units': plan.findtext('duration-units'),
            'feature_level': plan.findtext('feature-level'),
            'return_url': plan.findtext('return-url'),
            'version': int(plan.findtext('version')) \
                if plan.findtext('version') else 0,
            'speedly_id': int(plan.findtext('id')),
            'speedly_site_id': int(plan.findtext('site-id')) \
                if plan.findtext('site-id') else 0,
            'created_at': datetime.strptime(
                plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
            ),
            'date_changed': datetime.strptime(
                plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
            ),
            'currency_code': plan.findtext('currency-code'),
        }
        result.append(data)
    return result
def create_subscriber(self, customer_id, screen_name):
    '''
    Creates a subscription

    :param customer_id: integer id for the new subscriber (interpolated
        with %d, so it must be an int)
    :param screen_name: display name for the subscriber

    :returns: dict describing the created subscriber, parsed from the
        first <subscriber> element of the response
    '''
    data = '''
    <subscriber>
    <customer-id>%d</customer-id>
    <screen-name>%s</screen-name>
    </subscriber>
    ''' % (customer_id, screen_name)
    self.set_url('subscribers.xml')
    self.query(data)
    # Parse
    result = []
    tree = fromstring(self.get_response())
    for plan in tree.getiterator('subscriber'):
        data = {
            'customer_id': int(plan.findtext('customer-id')),
            'first_name': plan.findtext('billing-first-name'),
            'last_name': plan.findtext('billing-last-name'),
            'active': True if plan.findtext('active') == 'true' else False,
            'trial_active': \
                True if plan.findtext('on-trial') == 'true' else False,
            'trial_elegible': \
                True if plan.findtext('eligible-for-free-trial') == 'true' \
                else False,
            'lifetime': \
                True if plan.findtext('lifetime-subscription') == 'true' \
                else False,
            'recurring': \
                True if plan.findtext('recurring') == 'true' \
                else False,
            'card_expires_before_next_auto_renew': \
                True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
                else False,
            'token': plan.findtext('token'),
            'name': plan.findtext('subscription-plan-name'),
            'feature_level': plan.findtext('feature-level'),
            'store_credit': Decimal(plan.findtext('store-credit')),
            'created_at': datetime.strptime(
                plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
            ),
            'date_changed': datetime.strptime(
                plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
            ),
            'active_until': datetime.strptime(
                plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
            ) if plan.findtext('active-until') else None,
            'on_trial': True if plan.findtext('on-trial') == 'true' \
                else False,
        }
        result.append(data)
    # NOTE(review): raises IndexError if the response contains no
    # <subscriber> element — TODO confirm whether that can happen.
    return result[0]
def delete_subscriber(self, id):
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers/%d.xml' % (self.base_path, id),
'',
headers
)
response = conn.getresponse()
return response.status
return
def change_subscription(self, subscriber_id, plan_id):
'''
Subscribe a user to some plan
'''
data = '<subscription_plan><id>%d</id></subscription_plan>' % plan_id
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'PUT', '%s/subscribers/%d/change_subscription_plan.xml' % (self.base_path, subscriber_id),
data,
headers
)
response = conn.getresponse()
return response.status
    def subscribe(self, subscriber_id, plan_id, trial=False):
        '''
        Subscribe a user to some plan.

        NOTE(review): when ``trial`` is False no URL is configured before
        ``self.query(data)`` runs, so the request is sent to whatever URL a
        previous call set.  This looks like a missing non-trial endpoint
        (or the method should require ``trial=True``) -- confirm intended.

        :param subscriber_id: Spreedly subscriber id
        :param plan_id: id of the subscription plan
        :param trial: when True, hit the free-trial endpoint
        :returns: parsed dict of the first <subscriber> in the response
        :raises IndexError: if the response carries no <subscriber> node
        '''
        data = '<subscription_plan><id>%d</id></subscription_plan>' % plan_id
        if trial:
            self.set_url('subscribers/%d/subscribe_to_free_trial.xml' % subscriber_id)
        self.query(data)
        # Parse the returned <subscriber> document into a dict.
        result = []
        tree = fromstring(self.get_response())
        for plan in tree.getiterator('subscriber'):
            # The API encodes booleans as the strings 'true'/'false'.
            data = {
                'customer_id': int(plan.findtext('customer-id')),
                'first_name': plan.findtext('billing-first-name'),
                'last_name': plan.findtext('billing-last-name'),
                'active': True if plan.findtext('active') == 'true' else False,
                'trial_active': \
                    True if plan.findtext('on-trial') == 'true' else False,
                'trial_elegible': \
                    True if plan.findtext('eligible-for-free-trial') == 'true' \
                    else False,
                'lifetime': \
                    True if plan.findtext('lifetime-subscription') == 'true' \
                    else False,
                'recurring': \
                    True if plan.findtext('recurring') == 'true' \
                    else False,
                'card_expires_before_next_auto_renew': \
                    True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
                    else False,
                'token': plan.findtext('token'),
                'name': plan.findtext('subscription-plan-name'),
                'feature_level': plan.findtext('feature-level'),
                'store_credit': Decimal(plan.findtext('store-credit')),
                # created-at/updated-at are assumed always present; a
                # missing value raises TypeError from strptime.
                'created_at': datetime.strptime(
                    plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
                ),
                'date_changed': datetime.strptime(
                    plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
                ),
                # active-until may legitimately be absent (e.g. lifetime).
                'active_until': datetime.strptime(
                    plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
                ) if plan.findtext('active-until') else None,
                'on_trial': True if plan.findtext('on-trial') == 'true' \
                    else False,
            }
            result.append(data)
        return result[0]
def complimentary_subscription(self, subscriber_id, duration_quantity, duration_units, feature_level):
'''
Creates a complimentary subscription for the specified feature level
'''
data = '''
<complimentary_subscription>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
<feature_level>%s</feature_level>
</complimentary_subscription>
''' % (duration_quantity, duration_units, feature_level)
self.set_url('subscribers/%d/complimentary_subscriptions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def lifetime_complimentary_subscription(self, subscriber_id, feature_level):
'''
Creates a lifetime complimentary subscription for the specified feature level
'''
data = '''
<lifetime_complimentary_subscription>
<feature_level>%s</feature_level>
</lifetime_complimentary_subscription>
''' % feature_level
self.set_url('subscribers/%d/lifetime_complimentary_subscriptions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def complimentary_time_extension(self, subscriber_id, duration_quantity, duration_units):
'''
Creates a complimentary time extension
'''
data = '''
<complimentary_time_extension>
<duration_quantity>%s</duration_quantity>
<duration_units>%s</duration_units>
</complimentary_time_extension>
''' % (duration_quantity, duration_units)
self.set_url('subscribers/%d/complimentary_time_extensions.xml' % subscriber_id)
self.query(data)
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def add_store_credit(self, subscriber_id, amount):
'''
Adds store credit to a users subscription account
'''
data = '''
<credit>
<amount>%f</amount>
</credit>
''' % amount
self.set_url('subscribers/%d/credits.xml' % subscriber_id)
self.query(data)
return self.get_response()
def cleanup(self):
'''
Removes ALL subscribers. NEVER USE IN PRODUCTION!
'''
if 'test' in self.base_path:
headers = {'Authorization': 'Basic %s' % self.auth}
conn = httplib.HTTPSConnection(self.base_host)
conn.request(
'DELETE', '%s/subscribers.xml' % self.base_path,
'',
headers
)
response = conn.getresponse()
return response.status
return
def get_info(self, subscriber_id):
self.set_url('subscribers/%d.xml' % subscriber_id)
self.query('')
# Parse
result = []
tree = fromstring(self.get_response())
for plan in tree.getiterator('subscriber'):
data = {
'customer_id': int(plan.findtext('customer-id')),
'first_name': plan.findtext('billing-first-name'),
'last_name': plan.findtext('billing-last-name'),
'active': True if plan.findtext('active') == 'true' else False,
'trial_active': \
True if plan.findtext('on-trial') == 'true' else False,
'trial_elegible': \
True if plan.findtext('eligible-for-free-trial') == 'true' \
else False,
'lifetime': \
True if plan.findtext('lifetime-subscription') == 'true' \
else False,
'recurring': \
True if plan.findtext('recurring') == 'true' \
else False,
'card_expires_before_next_auto_renew': \
True if plan.findtext('card-expires-before-next-auto-renew') == 'true' \
else False,
'token': plan.findtext('token'),
'name': plan.findtext('subscription-plan-name'),
'feature_level': plan.findtext('feature-level'),
'store_credit': Decimal(plan.findtext('store-credit')),
'created_at': datetime.strptime(
plan.findtext('created-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'date_changed': datetime.strptime(
plan.findtext('updated-at'), '%Y-%m-%dT%H:%M:%SZ'
),
'active_until': datetime.strptime(
plan.findtext('active-until'), '%Y-%m-%dT%H:%M:%SZ'
) if plan.findtext('active-until') else None,
'on_trial': True if plan.findtext('on-trial') == 'true' \
else False,
}
result.append(data)
return result[0]
def stop_auto_renew(self, subscriber_id):
self.set_url('subscribers/%d/stop_auto_renew.xml' % subscriber_id)
data = '''
<subscriber>
<customer-id>%d</customer-id>
</subscriber>
''' % (subscriber_id)
self.query(data)
return self.get_response()
def allow_free_trial(self, subscriber_id):
self.set_url('subscribers/%d/allow_free_trial.xml' % subscriber_id)
data = '''
<subscriber>
<customer-id>%d</customer-id>
</subscriber>
''' % (subscriber_id)
self.query(data)
return self.get_response()
def get_or_create_subscriber(self, subscriber_id, screen_name):
try:
return self.get_info(subscriber_id)
except urllib2.HTTPError, e:
if e.code == 404:
return self.create_subscriber(subscriber_id, screen_name)
| |
# Copyright 2012 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import copy
import mock
import netaddr
from oslo.config import cfg
from webob import exc
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as qdbapi
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_rpc_base
from neutron.db import model_base
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests.unit import test_agent_ext_plugin
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_api_v2_extension
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)

# Short aliases used throughout these tests.
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class L3TestExtensionManager(object):
    """Minimal extension manager exposing only the L3 extension."""

    def get_resources(self):
        # The test setup never builds the real API router, which is what
        # normally extends the global attribute map, so merge the L3
        # attributes in here before handing out the extension resources.
        attributes.RESOURCE_ATTRIBUTE_MAP.update(l3.RESOURCE_ATTRIBUTE_MAP)
        return l3.L3.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
class L3NatExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
    """Exercise the l3 extension purely at the WSGI/API layer.

    The plugin is a mock; every test asserts both the HTTP result and the
    exact plugin call the API layer is expected to make.
    """
    fmt = 'json'

    def setUp(self):
        super(L3NatExtensionTestCase, self).setUp()
        self._setUpExtension(
            'neutron.extensions.l3.RouterPluginBase', None,
            l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '',
            allow_pagination=True, allow_sorting=True,
            supported_extension_aliases=['router'],
            use_quota=True)

    def test_router_create(self):
        """POST /routers dispatches create_router and returns 201."""
        router_id = _uuid()
        data = {'router': {'name': 'router1', 'admin_state_up': True,
                           'tenant_id': _uuid(),
                           'external_gateway_info': None}}
        return_value = copy.deepcopy(data['router'])
        return_value.update({'status': "ACTIVE", 'id': router_id})

        instance = self.plugin.return_value
        instance.create_router.return_value = return_value
        # get_routers_count is consulted by the quota machinery.
        instance.get_routers_count.return_value = 0
        res = self.api.post(_get_path('routers', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        instance.create_router.assert_called_with(mock.ANY,
                                                  router=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], True)

    def test_router_list(self):
        """GET /routers dispatches get_routers with paging/sort params."""
        router_id = _uuid()
        return_value = [{'name': 'router1', 'admin_state_up': True,
                         'tenant_id': _uuid(), 'id': router_id}]
        instance = self.plugin.return_value
        instance.get_routers.return_value = return_value

        res = self.api.get(_get_path('routers', fmt=self.fmt))

        instance.get_routers.assert_called_with(mock.ANY, fields=mock.ANY,
                                                filters=mock.ANY,
                                                sorts=mock.ANY,
                                                limit=mock.ANY,
                                                marker=mock.ANY,
                                                page_reverse=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('routers', res)
        self.assertEqual(1, len(res['routers']))
        self.assertEqual(router_id, res['routers'][0]['id'])

    def test_router_update(self):
        """PUT /routers/<id> dispatches update_router and echoes result."""
        router_id = _uuid()
        update_data = {'router': {'admin_state_up': False}}
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE", 'id': router_id}

        instance = self.plugin.return_value
        instance.update_router.return_value = return_value

        res = self.api.put(_get_path('routers', id=router_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))

        instance.update_router.assert_called_with(mock.ANY, router_id,
                                                  router=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], False)

    def test_router_get(self):
        """GET /routers/<id> dispatches get_router."""
        router_id = _uuid()
        return_value = {'name': 'router1', 'admin_state_up': False,
                        'tenant_id': _uuid(),
                        'status': "ACTIVE", 'id': router_id}

        instance = self.plugin.return_value
        instance.get_router.return_value = return_value

        res = self.api.get(_get_path('routers', id=router_id,
                                     fmt=self.fmt))

        instance.get_router.assert_called_with(mock.ANY, router_id,
                                               fields=mock.ANY)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('router', res)
        router = res['router']
        self.assertEqual(router['id'], router_id)
        self.assertEqual(router['status'], "ACTIVE")
        self.assertEqual(router['admin_state_up'], False)

    def test_router_delete(self):
        """DELETE /routers/<id> dispatches delete_router, returns 204."""
        router_id = _uuid()

        res = self.api.delete(_get_path('routers', id=router_id))

        instance = self.plugin.return_value
        instance.delete_router.assert_called_with(mock.ANY, router_id)
        self.assertEqual(res.status_int, exc.HTTPNoContent.code)

    def test_router_add_interface(self):
        """PUT add_router_interface action forwards the interface body."""
        router_id = _uuid()
        subnet_id = _uuid()
        port_id = _uuid()

        interface_data = {'subnet_id': subnet_id}
        return_value = copy.deepcopy(interface_data)
        return_value['port_id'] = port_id

        instance = self.plugin.return_value
        instance.add_router_interface.return_value = return_value

        path = _get_path('routers', id=router_id,
                         action="add_router_interface",
                         fmt=self.fmt)
        res = self.api.put(path, self.serialize(interface_data))

        instance.add_router_interface.assert_called_with(mock.ANY, router_id,
                                                         interface_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('port_id', res)
        self.assertEqual(res['port_id'], port_id)
        self.assertEqual(res['subnet_id'], subnet_id)
class L3NatExtensionTestCaseXML(L3NatExtensionTestCase):
    # Re-run the whole extension test case over the XML serializer.
    fmt = 'xml'
# This base plugin class is for tests.
class TestL3NatBasePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          external_net_db.External_net_db_mixin):
    """Core-plugin stub for the L3 tests.

    Layers external-network processing on top of the base DB plugin so the
    L3 code under test has a realistic core plugin to talk to.
    """

    # Advertise native pagination/sorting to the API layer.
    __native_pagination_support = True
    __native_sorting_support = True

    def create_network(self, context, network):
        # Run the base create and the external-net processing in one
        # transaction so a failure rolls back both.
        session = context.session
        with session.begin(subtransactions=True):
            net = super(TestL3NatBasePlugin, self).create_network(context,
                                                                  network)
            self._process_l3_create(context, net, network['network'])
        return net

    def update_network(self, context, id, network):
        # Same transactional pairing as create_network, for updates.
        session = context.session
        with session.begin(subtransactions=True):
            net = super(TestL3NatBasePlugin, self).update_network(context, id,
                                                                  network)
            self._process_l3_update(context, net, network['network'])
        return net

    def delete_network(self, context, id):
        # External-net bookkeeping is removed before the network itself.
        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, id)
            super(TestL3NatBasePlugin, self).delete_network(context, id)

    def delete_port(self, context, id, l3_port_check=True):
        # Mirror real core plugins: let the L3 service plugin veto the
        # deletion (router ports) and detach any floating IPs first.
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if plugin:
            if l3_port_check:
                plugin.prevent_l3_port_deletion(context, id)
            plugin.disassociate_floatingips(context, id)
        return super(TestL3NatBasePlugin, self).delete_port(context, id)
# This plugin class is for tests with plugin that integrates L3.
class TestL3NatIntPlugin(TestL3NatBasePlugin,
                         l3_db.L3_NAT_db_mixin):
    """Monolithic test plugin: core plugin with L3 integrated."""
    supported_extension_aliases = ["external-net", "router"]
# This plugin class is for tests with plugin that integrates L3 and L3 agent
# scheduling.
class TestL3NatIntAgentSchedulingPlugin(TestL3NatIntPlugin,
                                        l3_agentschedulers_db.
                                        L3AgentSchedulerDbMixin):
    """Integrated L3 plugin that also schedules routers to L3 agents."""
    supported_extension_aliases = ["external-net", "router",
                                   "l3_agent_scheduler"]
    # Instantiate the scheduler configured via router_scheduler_driver.
    router_scheduler = importutils.import_object(
        cfg.CONF.router_scheduler_driver)
# This plugin class is for tests with plugin not supporting L3.
class TestNoL3NatPlugin(TestL3NatBasePlugin):
    """Core plugin with NO L3 support (L3 is delegated elsewhere)."""
    # Advertise native pagination/sorting to the API layer.
    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["external-net"]
# A L3 routing service plugin class for tests with plugins that
# delegate away L3 routing functionality
class TestL3NatServicePlugin(common_db_mixin.CommonDbMixin,
                             l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                             l3_db.L3_NAT_db_mixin):
    """Standalone (DVR-capable) L3 service plugin, used with core plugins
    that delegate L3 routing away."""

    supported_extension_aliases = ["router"]

    def __init__(self):
        # Make sure the L3 models are registered against the V2 base.
        qdbapi.register_models(base=model_base.BASEV2)

    def get_plugin_type(self):
        return service_constants.L3_ROUTER_NAT

    def get_plugin_description(self):
        return "L3 Routing Service Plugin for testing"
# A L3 routing with L3 agent scheduling service plugin class for tests with
# plugins that delegate away L3 routing functionality
class TestL3NatAgentSchedulingServicePlugin(TestL3NatServicePlugin,
                                            l3_agentschedulers_db.
                                            L3AgentSchedulerDbMixin):
    """Standalone L3 service plugin that also schedules to L3 agents."""
    supported_extension_aliases = ["router", "l3_agent_scheduler"]
class L3NatTestCaseMixin(object):
    """Builders and context managers for routers and floating IPs.

    Expects to be mixed into a plugin test case that provides self.fmt,
    self.ext_api, self._tenant_id and the generic _create/_show/_update/
    _delete/_list helpers plus the subnet()/port() context managers.
    """

    def _create_router(self, fmt, tenant_id, name=None,
                       admin_state_up=None, set_context=False,
                       arg_list=None, **kwargs):
        # Issue the POST and return the raw response (no status check).
        # admin_state_up may arrive via the positional argument or, like
        # tenant_id and any arg_list entries, via kwargs below.
        data = {'router': {'tenant_id': tenant_id}}
        if name:
            data['router']['name'] = name
        if admin_state_up:
            data['router']['admin_state_up'] = admin_state_up
        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['router'][arg] = kwargs[arg]
        router_req = self.new_create_request('routers', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            router_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        return router_req.get_response(self.ext_api)

    def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None,
                     external_gateway_info=None, set_context=False,
                     arg_list=None, **kwargs):
        # Like _create_router, but deserializes the body for the caller.
        if external_gateway_info:
            arg_list = ('external_gateway_info', ) + (arg_list or ())
        res = self._create_router(fmt, tenant_id, name,
                                  admin_state_up, set_context,
                                  arg_list=arg_list,
                                  external_gateway_info=external_gateway_info,
                                  **kwargs)
        return self.deserialize(fmt, res)

    def _add_external_gateway_to_router(self, router_id, network_id,
                                        expected_code=exc.HTTPOk.code,
                                        neutron_context=None):
        # Point the router's gateway at the given external network.
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        {'network_id': network_id}}},
                            expected_code=expected_code,
                            neutron_context=neutron_context)

    def _remove_external_gateway_from_router(self, router_id, network_id,
                                             expected_code=exc.HTTPOk.code,
                                             external_gw_info=None):
        # Clear (or replace) the router's gateway; default clears to None.
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        external_gw_info}},
                            expected_code=expected_code)

    def _router_interface_action(self, action, router_id, subnet_id, port_id,
                                 expected_code=exc.HTTPOk.code,
                                 expected_body=None,
                                 tenant_id=None):
        """Invoke (add|remove)_router_interface and verify the response."""
        interface_data = {}
        if subnet_id:
            interface_data.update({'subnet_id': subnet_id})
        # 'add' accepts subnet_id OR port_id, never both.
        if port_id and (action != 'add' or not subnet_id):
            interface_data.update({'port_id': port_id})

        req = self.new_action_request('routers', interface_data, router_id,
                                      "%s_router_interface" % action)
        # if tenant_id was specified, create a tenant context for this request
        if tenant_id:
            req.environ['neutron.context'] = context.Context(
                '', tenant_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
        response = self.deserialize(self.fmt, res)
        if expected_body:
            self.assertEqual(response, expected_body)
        return response

    @contextlib.contextmanager
    def router(self, name='router1', admin_state_up=True,
               fmt=None, tenant_id=_uuid(),
               external_gateway_info=None, set_context=False,
               **kwargs):
        # NOTE(review): the tenant_id default is evaluated once at import
        # time, so every router created via the default shares a single
        # tenant id -- presumably intentional for these tests; confirm.
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        yield router
        self._delete('routers', router['router']['id'])

    def _set_net_external(self, net_id):
        # Mark an existing network as external (router:external=True).
        self._update('networks', net_id,
                     {'network': {external_net.EXTERNAL: True}})

    def _create_floatingip(self, fmt, network_id, port_id=None,
                           fixed_ip=None, set_context=False):
        # Issue the POST and return the raw response (no status check).
        data = {'floatingip': {'floating_network_id': network_id,
                               'tenant_id': self._tenant_id}}
        if port_id:
            data['floatingip']['port_id'] = port_id
            if fixed_ip:
                data['floatingip']['fixed_ip_address'] = fixed_ip
        floatingip_req = self.new_create_request('floatingips', data, fmt)
        if set_context and self._tenant_id:
            # create a specific auth context for this request
            floatingip_req.environ['neutron.context'] = context.Context(
                '', self._tenant_id)
        return floatingip_req.get_response(self.ext_api)

    def _make_floatingip(self, fmt, network_id, port_id=None,
                         fixed_ip=None, set_context=False):
        # Create a floating IP, require 201 and return the parsed body.
        res = self._create_floatingip(fmt, network_id, port_id,
                                      fixed_ip, set_context)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        return self.deserialize(fmt, res)

    def _validate_floating_ip(self, fip):
        # The floating IP must be the only one listed and be show-able.
        body = self._list('floatingips')
        self.assertEqual(len(body['floatingips']), 1)
        self.assertEqual(body['floatingips'][0]['id'],
                         fip['floatingip']['id'])

        body = self._show('floatingips', fip['floatingip']['id'])
        self.assertEqual(body['floatingip']['id'],
                         fip['floatingip']['id'])

    @contextlib.contextmanager
    def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None,
                              set_context=False):
        """Yield a floating IP already associated with an internal port.

        Builds the full topology: external subnet, router with gateway,
        internal port, router interface, then the association.
        """
        with self.subnet(cidr='11.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            private_port = None
            if port_id:
                private_port = self._show('ports', port_id)
            with test_db_plugin.optional_ctx(private_port,
                                             self.port) as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    floatingip = None

                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)

                    floatingip = self._make_floatingip(
                        fmt or self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'],
                        fixed_ip=fixed_ip,
                        set_context=False)
                    yield floatingip

                    # Teardown in reverse order of setup.
                    if floatingip:
                        self._delete('floatingips',
                                     floatingip['floatingip']['id'])
                    self._router_interface_action(
                        'remove', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])

    @contextlib.contextmanager
    def floatingip_no_assoc_with_public_sub(
        self, private_sub, fmt=None, set_context=False, public_sub=None):
        """Yield (floatingip, router) with no port association, using the
        caller-supplied public subnet."""
        self._set_net_external(public_sub['subnet']['network_id'])
        with self.router() as r:
            floatingip = None

            self._add_external_gateway_to_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])
            self._router_interface_action('add', r['router']['id'],
                                          private_sub['subnet']['id'],
                                          None)

            floatingip = self._make_floatingip(
                fmt or self.fmt,
                public_sub['subnet']['network_id'],
                set_context=set_context)
            yield floatingip, r

            # Teardown in reverse order of setup.
            if floatingip:
                self._delete('floatingips',
                             floatingip['floatingip']['id'])
            self._router_interface_action('remove', r['router']['id'],
                                          private_sub['subnet']['id'],
                                          None)
            self._remove_external_gateway_from_router(
                r['router']['id'],
                public_sub['subnet']['network_id'])

    @contextlib.contextmanager
    def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False):
        # Same as above, but creates its own public subnet.
        with self.subnet(cidr='12.0.0.0/24') as public_sub:
            with self.floatingip_no_assoc_with_public_sub(
                private_sub, fmt, set_context, public_sub) as (f, r):
                # Yield only the floating ip object
                yield f
class ExtraAttributesMixinTestCase(base.BaseTestCase):
    """Unit tests for ExtraAttributesMixin._extend_extra_router_dict."""

    def setUp(self):
        super(ExtraAttributesMixinTestCase, self).setUp()
        self.mixin = l3_attrs_db.ExtraAttributesMixin()

    def _test__extend_extra_router_dict(
        self, extra_attributes, attributes, expected_attributes):
        # Run the extension hook and compare the extended dict.
        self.mixin._extend_extra_router_dict(
            attributes, {'extra_attributes': extra_attributes})
        self.assertEqual(expected_attributes, attributes)

    def test__extend_extra_router_dict_string_default(self):
        # A stored string value wins over the declared default.
        stored = {'foo_key': 'my_fancy_value'}
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': 'foo_default'}]
        self._test__extend_extra_router_dict(stored, {}, stored)

    def test__extend_extra_router_dict_booleans_false_default(self):
        # A stored True wins over a False default.
        stored = {'foo_key': True}
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': False}]
        self._test__extend_extra_router_dict(stored, {}, stored)

    def test__extend_extra_router_dict_booleans_true_default(self):
        # Test that the default is overridden
        stored = {'foo_key': False}
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': True}]
        self._test__extend_extra_router_dict(stored, {}, stored)

    def test__extend_extra_router_dict_no_extension_attributes(self):
        # With nothing stored, the declared default is filled in.
        self.mixin.extra_attributes = [{'name': "foo_key",
                                        'default': 'foo_value'}]
        self._test__extend_extra_router_dict({}, {}, {'foo_key': 'foo_value'})

    def test__extend_extra_router_dict_none_extension_attributes(self):
        # A None extra_attributes record extends nothing.
        self._test__extend_extra_router_dict(None, {}, {})
class L3NatTestCaseBase(L3NatTestCaseMixin):
def test_router_create(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name='router1', admin_state_up=True,
tenant_id=tenant_id) as router:
for k, v in expected_value:
self.assertEqual(router['router'][k], v)
def test_router_create_call_extensions(self):
self.extension_called = False
def _extend_router_dict_test_attr(*args, **kwargs):
self.extension_called = True
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, [_extend_router_dict_test_attr])
self.assertFalse(self.extension_called)
with self.router():
self.assertTrue(self.extension_called)
def test_router_create_with_gwinfo(self):
with self.subnet() as s:
self._set_net_external(s['subnet']['network_id'])
data = {'router': {'tenant_id': _uuid()}}
data['router']['name'] = 'router1'
data['router']['external_gateway_info'] = {
'network_id': s['subnet']['network_id']}
router_req = self.new_create_request('routers', data, self.fmt)
res = router_req.get_response(self.ext_api)
router = self.deserialize(self.fmt, res)
self.assertEqual(
s['subnet']['network_id'],
router['router']['external_gateway_info']['network_id'])
self._delete('routers', router['router']['id'])
def test_router_list(self):
with contextlib.nested(self.router(),
self.router(),
self.router()
) as routers:
self._test_list_resources('router', routers)
def test_router_list_with_parameters(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
) as (router1, router2):
query_params = 'name=router1'
self._test_list_resources('router', [router1],
query_params=query_params)
query_params = 'name=router2'
self._test_list_resources('router', [router2],
query_params=query_params)
query_params = 'name=router3'
self._test_list_resources('router', [],
query_params=query_params)
def test_router_list_with_sort(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_sort('router', (router3, router2, router1),
[('name', 'desc')])
def test_router_list_with_pagination(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_pagination('router',
(router1, router2, router3),
('name', 'asc'), 2, 2)
def test_router_list_with_pagination_reverse(self):
with contextlib.nested(self.router(name='router1'),
self.router(name='router2'),
self.router(name='router3')
) as (router1, router2, router3):
self._test_list_with_pagination_reverse('router',
(router1, router2,
router3),
('name', 'asc'), 2, 2)
def test_router_update(self):
rname1 = "yourrouter"
rname2 = "nachorouter"
with self.router(name=rname1) as r:
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname1)
body = self._update('routers', r['router']['id'],
{'router': {'name': rname2}})
body = self._show('routers', r['router']['id'])
self.assertEqual(body['router']['name'], rname2)
def test_router_update_gateway(self):
with self.router() as r:
with self.subnet() as s1:
with self.subnet() as s2:
self._set_net_external(s1['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s1['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = (body['router']
['external_gateway_info']['network_id'])
self.assertEqual(net_id, s1['subnet']['network_id'])
self._set_net_external(s2['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s2['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = (body['router']
['external_gateway_info']['network_id'])
self.assertEqual(net_id, s2['subnet']['network_id'])
# Validate that we can clear the gateway with
# an empty dict, in any other case, we fall back
# on None as default value
self._remove_external_gateway_from_router(
r['router']['id'],
s2['subnet']['network_id'],
external_gw_info={})
def test_router_update_gateway_with_existed_floatingip(self):
with self.subnet() as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.floatingip_with_assoc() as fip:
self._add_external_gateway_to_router(
fip['floatingip']['router_id'],
subnet['subnet']['network_id'],
expected_code=exc.HTTPConflict.code)
def test_router_update_gateway_to_empty_with_existed_floatingip(self):
with self.floatingip_with_assoc() as fip:
self._remove_external_gateway_from_router(
fip['floatingip']['router_id'], None,
expected_code=exc.HTTPConflict.code)
    def test_router_add_interface_subnet(self):
        """Add/remove a subnet interface and verify emitted notifications."""
        exp_notifications = ['router.create.start',
                             'router.create.end',
                             'network.create.start',
                             'network.create.end',
                             'subnet.create.start',
                             'subnet.create.end',
                             'router.interface.create',
                             'router.interface.delete']
        fake_notifier.reset()
        with self.router() as r:
            with self.subnet() as s:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     s['subnet']['id'],
                                                     None)
                self.assertIn('port_id', body)
                # fetch port and confirm device_id
                r_port_id = body['port_id']
                body = self._show('ports', r_port_id)
                self.assertEqual(body['port']['device_id'], r['router']['id'])
                body = self._router_interface_action('remove',
                                                     r['router']['id'],
                                                     s['subnet']['id'],
                                                     None)
                # the router port is gone once the interface is removed
                body = self._show('ports', r_port_id,
                                  expected_code=exc.HTTPNotFound.code)
                self.assertEqual(
                    set(exp_notifications),
                    set(n['event_type'] for n in fake_notifier.NOTIFICATIONS))
                for n in fake_notifier.NOTIFICATIONS:
                    if n['event_type'].startswith('router.interface.'):
                        payload = n['payload']['router_interface']
                        self.assertIn('id', payload)
                        self.assertEqual(payload['id'], r['router']['id'])
                        self.assertIn('tenant_id', payload)
                        stid = s['subnet']['tenant_id']
                        # tolerate the subnet tenant_id being deliberately
                        # set to '' in the nsx metadata access case
                        self.assertIn(payload['tenant_id'], [stid, ''])
    def test_router_add_interface_subnet_with_bad_tenant_returns_404(self):
        """A foreign tenant gets 404 adding/removing another's interface."""
        with mock.patch('neutron.context.Context.to_dict') as tdict:
            tenant_id = _uuid()
            admin_context = {'roles': ['admin']}
            tenant_context = {'tenant_id': 'bad_tenant',
                              'roles': []}
            tdict.return_value = admin_context
            with self.router(tenant_id=tenant_id) as r:
                with self.network(tenant_id=tenant_id) as n:
                    with self.subnet(network=n) as s:
                        # as the bad tenant the add must 404
                        tdict.return_value = tenant_context
                        err_code = exc.HTTPNotFound.code
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      s['subnet']['id'],
                                                      None,
                                                      err_code)
                        # as admin the same add succeeds
                        tdict.return_value = admin_context
                        body = self._router_interface_action('add',
                                                             r['router']['id'],
                                                             s['subnet']['id'],
                                                             None)
                        self.assertIn('port_id', body)
                        # removal as the bad tenant also 404s
                        tdict.return_value = tenant_context
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      s['subnet']['id'],
                                                      None,
                                                      err_code)
                        tdict.return_value = admin_context
                        body = self._router_interface_action('remove',
                                                             r['router']['id'],
                                                             s['subnet']['id'],
                                                             None)
def test_router_add_interface_subnet_with_port_from_other_tenant(self):
tenant_id = _uuid()
other_tenant_id = _uuid()
with contextlib.nested(
self.router(tenant_id=tenant_id),
self.network(tenant_id=tenant_id),
self.network(tenant_id=other_tenant_id)) as (r, n1, n2):
with contextlib.nested(
self.subnet(network=n1, cidr='10.0.0.0/24'),
self.subnet(network=n2, cidr='10.1.0.0/24')) as (s1, s2):
body = self._router_interface_action(
'add',
r['router']['id'],
s2['subnet']['id'],
None)
self.assertIn('port_id', body)
self._router_interface_action(
'add',
r['router']['id'],
s1['subnet']['id'],
None,
tenant_id=tenant_id)
self.assertIn('port_id', body)
self._router_interface_action(
'remove',
r['router']['id'],
s1['subnet']['id'],
None,
tenant_id=tenant_id)
body = self._router_interface_action(
'remove',
r['router']['id'],
s2['subnet']['id'],
None)
    def test_router_add_interface_port(self):
        """Adding an interface by port id binds that port to the router."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     None,
                                                     p['port']['id'])
                self.assertIn('port_id', body)
                self.assertEqual(body['port_id'], p['port']['id'])
                # fetch port and confirm device_id
                body = self._show('ports', p['port']['id'])
                self.assertEqual(body['port']['device_id'], r['router']['id'])
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])
def test_router_add_interface_empty_port_and_subnet_ids(self):
with self.router() as r:
self._router_interface_action('add', r['router']['id'],
None, None,
expected_code=exc.
HTTPBadRequest.code)
    def test_router_add_interface_port_bad_tenant_returns_404(self):
        """A foreign tenant gets 404 adding/removing a port interface."""
        with mock.patch('neutron.context.Context.to_dict') as tdict:
            admin_context = {'roles': ['admin']}
            tenant_context = {'tenant_id': 'bad_tenant',
                              'roles': []}
            tdict.return_value = admin_context
            with self.router() as r:
                with self.port(do_delete=False) as p:
                    # add as the bad tenant: 404
                    tdict.return_value = tenant_context
                    err_code = exc.HTTPNotFound.code
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'],
                                                  err_code)
                    # add as admin: succeeds
                    tdict.return_value = admin_context
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    tdict.return_value = tenant_context
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'],
                                                  err_code)
                    tdict.return_value = admin_context
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_add_interface_dup_subnet1_returns_400(self):
        """Adding the same subnet to a router twice is a 400."""
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              expected_code=exc.
                                              HTTPBadRequest.code)
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_add_interface_dup_subnet2_returns_400(self):
        """A second port from an already-attached subnet is rejected (400)."""
        with self.router() as r:
            with self.subnet() as s:
                with self.port(subnet=s, do_delete=False) as p1:
                    with self.port(subnet=s) as p2:
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      None,
                                                      p1['port']['id'])
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      None,
                                                      p2['port']['id'],
                                                      expected_code=exc.
                                                      HTTPBadRequest.code)
                        # clean-up
                        self._router_interface_action('remove',
                                                      r['router']['id'],
                                                      None,
                                                      p1['port']['id'])
    def test_router_add_interface_overlapped_cidr_returns_400(self):
        """Attaching a subnet overlapping an attached CIDR is a 400."""
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s1:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s1['subnet']['id'],
                                              None)
                def try_overlapped_cidr(cidr):
                    # helper: expect 400 attaching a subnet with this cidr
                    with self.subnet(cidr=cidr) as s2:
                        self._router_interface_action('add',
                                                      r['router']['id'],
                                                      s2['subnet']['id'],
                                                      None,
                                                      expected_code=exc.
                                                      HTTPBadRequest.code)
                # another subnet with same cidr
                try_overlapped_cidr('10.0.1.0/24')
                # another subnet with overlapped cidr including s1
                try_overlapped_cidr('10.0.0.0/16')
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s1['subnet']['id'],
                                              None)
def test_router_add_interface_no_data_returns_400(self):
with self.router() as r:
self._router_interface_action('add',
r['router']['id'],
None,
None,
expected_code=exc.
HTTPBadRequest.code)
    def test_router_add_gateway_dup_subnet1_returns_400(self):
        """A net already attached as an internal interface can't be gateway."""
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    expected_code=exc.HTTPBadRequest.code)
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_add_gateway_dup_subnet2_returns_400(self):
        """A net already set as gateway can't also be an internal interface."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              expected_code=exc.
                                              HTTPBadRequest.code)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
    def test_router_add_gateway(self):
        """Gateway set/unset is reflected in external_gateway_info."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)
    def test_router_add_gateway_tenant_ctx(self):
        """A non-admin tenant can set and unset its own router's gateway."""
        with self.router(tenant_id='noadmin',
                         set_context=True) as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                # issue the gateway update in the tenant's own context
                ctx = context.Context('', 'noadmin')
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'],
                    neutron_context=ctx)
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)
    def test_create_router_port_with_device_id_of_other_teants_router(self):
        """Creating a router port pointing at another tenant's router is 409."""
        # NOTE(review): "teants" in the method name is a typo for "tenants";
        # kept because renaming would change the externally visible test id.
        with self.router() as admin_router:
            with self.network(tenant_id='tenant_a',
                              set_context=True) as n:
                with self.subnet(network=n):
                    self._create_port(
                        self.fmt, n['network']['id'],
                        tenant_id='tenant_a',
                        device_id=admin_router['router']['id'],
                        device_owner='network:router_interface',
                        set_context=True,
                        expected_res_status=exc.HTTPConflict.code)
    def test_create_non_router_port_device_id_of_other_teants_router_update(
        self):
        """Flipping such a port's device_owner to router_interface is 409."""
        # This tests that HTTPConflict is raised if we create a non-router
        # port that matches the device_id of another tenants router and then
        # we change the device_owner to be network:router_interface.
        with self.router() as admin_router:
            with self.network(tenant_id='tenant_a',
                              set_context=True) as n:
                with self.subnet(network=n):
                    port_res = self._create_port(
                        self.fmt, n['network']['id'],
                        tenant_id='tenant_a',
                        device_id=admin_router['router']['id'],
                        set_context=True)
                    port = self.deserialize(self.fmt, port_res)
                    neutron_context = context.Context('', 'tenant_a')
                    data = {'port': {'device_owner':
                                     'network:router_interface'}}
                    self._update('ports', port['port']['id'], data,
                                 neutron_context=neutron_context,
                                 expected_code=exc.HTTPConflict.code)
                    self._delete('ports', port['port']['id'])
    def test_update_port_device_id_to_different_tenants_router(self):
        """Pointing a port's device_id at another tenant's router is a 409."""
        with self.router() as admin_router:
            with self.router(tenant_id='tenant_a',
                             set_context=True) as tenant_router:
                with self.network(tenant_id='tenant_a',
                                  set_context=True) as n:
                    with self.subnet(network=n) as s:
                        port = self._router_interface_action(
                            'add', tenant_router['router']['id'],
                            s['subnet']['id'], None, tenant_id='tenant_a')
                        neutron_context = context.Context('', 'tenant_a')
                        data = {'port':
                                {'device_id': admin_router['router']['id']}}
                        self._update('ports', port['port_id'], data,
                                     neutron_context=neutron_context,
                                     expected_code=exc.HTTPConflict.code)
                        self._router_interface_action(
                            'remove', tenant_router['router']['id'],
                            s['subnet']['id'], None, tenant_id='tenant_a')
def test_router_add_gateway_invalid_network_returns_404(self):
with self.router() as r:
self._add_external_gateway_to_router(
r['router']['id'],
"foobar", expected_code=exc.HTTPNotFound.code)
def test_router_add_gateway_net_not_external_returns_400(self):
with self.router() as r:
with self.subnet() as s:
# intentionally do not set net as external
self._add_external_gateway_to_router(
r['router']['id'],
s['subnet']['network_id'],
expected_code=exc.HTTPBadRequest.code)
def test_router_add_gateway_no_subnet_returns_400(self):
with self.router() as r:
with self.network() as n:
self._set_net_external(n['network']['id'])
self._add_external_gateway_to_router(
r['router']['id'],
n['network']['id'], expected_code=exc.HTTPBadRequest.code)
    def test_router_remove_interface_inuse_returns_409(self):
        """Deleting a router that still has an interface attached is a 409.

        NOTE(review): despite the name, this exercises router deletion,
        not interface removal.
        """
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                self._delete('routers', r['router']['id'],
                             expected_code=exc.HTTPConflict.code)
                # remove interface so test can exit without errors
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
    def test_router_remove_interface_wrong_subnet_returns_400(self):
        """Removing with a subnet id that doesn't match the port is a 400."""
        with self.router() as r:
            with self.subnet() as s:
                with self.port(do_delete=False) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # s is an unrelated subnet, so the removal must fail
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  p['port']['id'],
                                                  exc.HTTPBadRequest.code)
                    # remove properly to clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
    def test_router_remove_interface_returns_200(self):
        """Interface removal echoes the body returned by the original add."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                body = self._router_interface_action('add',
                                                     r['router']['id'],
                                                     None,
                                                     p['port']['id'])
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'],
                                              expected_body=body)
    def test_router_remove_interface_wrong_port_returns_404(self):
        """Removing an interface naming an unrelated port is a 404."""
        with self.router() as r:
            with self.subnet():
                with self.port(do_delete=False) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # create another port for testing failure case
                    res = self._create_port(self.fmt, p['port']['network_id'])
                    p2 = self.deserialize(self.fmt, res)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p2['port']['id'],
                                                  exc.HTTPNotFound.code)
                    # remove correct interface to cleanup
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # remove extra port created
                    self._delete('ports', p2['port']['id'])
    def test_router_delete(self):
        """Showing a router after its context exits (deleting it) is a 404."""
        with self.router() as router:
            router_id = router['router']['id']
        # the context manager above deleted the router on exit
        req = self.new_show_request('router', router_id)
        res = req.get_response(self._api_for_resource('router'))
        self.assertEqual(res.status_int, 404)
    def test_router_delete_with_port_existed_returns_409(self):
        """Router deletion fails with 409 while an interface port exists."""
        with self.subnet() as subnet:
            res = self._create_router(self.fmt, _uuid())
            router = self.deserialize(self.fmt, res)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          subnet['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'],
                         exc.HTTPConflict.code)
            # after detaching the interface the delete succeeds
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          subnet['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'])
    def test_router_delete_with_floatingip_existed_returns_409(self):
        """Router deletion fails with 409 while a floating IP routes via it."""
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.subnet(cidr='12.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                res = self._create_router(self.fmt, _uuid())
                r = self.deserialize(self.fmt, res)
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action('add', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
                res = self._create_floatingip(
                    self.fmt, public_sub['subnet']['network_id'],
                    port_id=p['port']['id'])
                self.assertEqual(res.status_int, exc.HTTPCreated.code)
                floatingip = self.deserialize(self.fmt, res)
                self._delete('routers', r['router']['id'],
                             expected_code=exc.HTTPConflict.code)
                # Cleanup
                self._delete('floatingips', floatingip['floatingip']['id'])
                self._router_interface_action('remove', r['router']['id'],
                                              private_sub['subnet']['id'],
                                              None)
                self._delete('routers', r['router']['id'])
def test_router_show(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name='router1', admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
    def test_network_update_external_failure(self):
        """A net in use as a gateway cannot be made non-external (409)."""
        with self.router() as r:
            with self.subnet() as s1:
                self._set_net_external(s1['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])
                self._update('networks', s1['subnet']['network_id'],
                             {'network': {external_net.EXTERNAL: False}},
                             expected_code=exc.HTTPConflict.code)
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s1['subnet']['network_id'])
    def test_network_update_external(self):
        """An external net not in use as a gateway can be made non-external."""
        with self.router() as r:
            with self.network('test_net') as testnet:
                self._set_net_external(testnet['network']['id'])
                with self.subnet() as s1:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'])
                    # testnet is unused by the router, so the flip succeeds
                    self._update('networks', testnet['network']['id'],
                                 {'network': {external_net.EXTERNAL: False}})
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        s1['subnet']['network_id'])
    def test_floatingip_crd_ops(self):
        """Create/read/delete round trip for an associated floating IP."""
        with self.floatingip_with_assoc() as fip:
            self._validate_floating_ip(fip)
        # post-delete, check that it is really gone
        body = self._list('floatingips')
        self.assertEqual(len(body['floatingips']), 0)
        self._show('floatingips', fip['floatingip']['id'],
                   expected_code=exc.HTTPNotFound.code)
    def _test_floatingip_with_assoc_fails(self, plugin_method):
        """Force `plugin_method` to fail and check no garbage port remains.

        :param plugin_method: dotted path of the plugin method to patch so
            that floating IP association raises BadRequest.
        """
        with self.subnet(cidr='200.0.0.0/24') as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action('add', r['router']['id'],
                                                  private_sub['subnet']['id'],
                                                  None)
                    with mock.patch(plugin_method) as pl:
                        pl.side_effect = n_exc.BadRequest(
                            resource='floatingip',
                            msg='fake_error')
                        res = self._create_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            port_id=private_port['port']['id'])
                        self.assertEqual(res.status_int, 400)
                    # the failed create must not leave a floating IP port
                    for p in self._list('ports')['ports']:
                        if (p['device_owner'] ==
                            l3_constants.DEVICE_OWNER_FLOATINGIP):
                            self.fail('garbage port is not deleted')
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  private_sub['subnet']['id'],
                                                  None)
def test_floatingip_with_assoc_fails(self):
self._test_floatingip_with_assoc_fails(
'neutron.db.l3_db.L3_NAT_db_mixin._check_and_get_fip_assoc')
    def test_floatingip_update(
        self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
        """Associating an unassociated fip sets its port and fixed IP."""
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])
                self.assertEqual(body['floatingip']['status'], expected_status)
                port_id = p['port']['id']
                ip_address = p['port']['fixed_ips'][0]['ip_address']
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': port_id}})
                self.assertEqual(body['floatingip']['port_id'], port_id)
                self.assertEqual(body['floatingip']['fixed_ip_address'],
                                 ip_address)
    def test_floatingip_create_different_fixed_ip_same_port(self):
        """This tests that it is possible to delete a port that has
        multiple floating ip addresses associated with it (each floating
        address associated with a unique fixed address).
        """
        with self.router() as r:
            with self.subnet(cidr='11.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                with self.subnet() as private_sub:
                    ip_range = list(netaddr.IPNetwork(
                        private_sub['subnet']['cidr']))
                    # give the port two distinct fixed addresses
                    fixed_ips = [{'ip_address': str(ip_range[-3])},
                                 {'ip_address': str(ip_range[-2])}]
                    self._router_interface_action(
                        'add', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    with self.port(subnet=private_sub,
                                   fixed_ips=fixed_ips) as p:
                        fip1 = self._make_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            p['port']['id'],
                            fixed_ip=str(ip_range[-2]))
                        fip2 = self._make_floatingip(
                            self.fmt,
                            public_sub['subnet']['network_id'],
                            p['port']['id'],
                            fixed_ip=str(ip_range[-3]))
                        # Test that floating ips are assigned successfully.
                        body = self._show('floatingips',
                                          fip1['floatingip']['id'])
                        self.assertEqual(
                            body['floatingip']['port_id'],
                            fip1['floatingip']['port_id'])
                        body = self._show('floatingips',
                                          fip2['floatingip']['id'])
                        self.assertEqual(
                            body['floatingip']['port_id'],
                            fip2['floatingip']['port_id'])
                    # Test that port has been successfully deleted.
                    body = self._show('ports', p['port']['id'],
                                      expected_code=exc.HTTPNotFound.code)
                    for fip in [fip1, fip2]:
                        self._delete('floatingips',
                                     fip['floatingip']['id'])
                    self._router_interface_action(
                        'remove', r['router']['id'],
                        private_sub['subnet']['id'], None)
                    self._remove_external_gateway_from_router(
                        r['router']['id'],
                        public_sub['subnet']['network_id'])
    def test_floatingip_update_different_fixed_ip_same_port(self):
        """A fip can be re-pointed at another fixed IP of the same port."""
        with self.subnet() as s:
            ip_range = list(netaddr.IPNetwork(s['subnet']['cidr']))
            fixed_ips = [{'ip_address': str(ip_range[-3])},
                         {'ip_address': str(ip_range[-2])}]
            with self.port(subnet=s, fixed_ips=fixed_ips) as p:
                with self.floatingip_with_assoc(
                    port_id=p['port']['id'],
                    fixed_ip=str(ip_range[-3])) as fip:
                    body = self._show('floatingips', fip['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['id'],
                                     body['floatingip']['id'])
                    self.assertEqual(fip['floatingip']['port_id'],
                                     body['floatingip']['port_id'])
                    self.assertEqual(str(ip_range[-3]),
                                     body['floatingip']['fixed_ip_address'])
                    self.assertIsNotNone(body['floatingip']['router_id'])
                    # switch the association to the port's other fixed IP
                    body_2 = self._update(
                        'floatingips', fip['floatingip']['id'],
                        {'floatingip': {'port_id': p['port']['id'],
                                        'fixed_ip_address': str(ip_range[-2])}
                         })
                    self.assertEqual(fip['floatingip']['port_id'],
                                     body_2['floatingip']['port_id'])
                    self.assertEqual(str(ip_range[-2]),
                                     body_2['floatingip']['fixed_ip_address'])
    def test_floatingip_update_different_router(self):
        """Re-associating fips across ports tracks the correct router id."""
        # Create subnet with different CIDRs to account for plugins which
        # do not support overlapping IPs
        with contextlib.nested(self.subnet(cidr='10.0.0.0/24'),
                               self.subnet(cidr='10.0.1.0/24')) as (
                s1, s2):
            with contextlib.nested(self.port(subnet=s1),
                                   self.port(subnet=s2)) as (p1, p2):
                private_sub1 = {'subnet':
                                {'id':
                                 p1['port']['fixed_ips'][0]['subnet_id']}}
                private_sub2 = {'subnet':
                                {'id':
                                 p2['port']['fixed_ips'][0]['subnet_id']}}
                with self.subnet(cidr='12.0.0.0/24') as public_sub:
                    with contextlib.nested(
                            self.floatingip_no_assoc_with_public_sub(
                                private_sub1, public_sub=public_sub),
                            self.floatingip_no_assoc_with_public_sub(
                                private_sub2, public_sub=public_sub)) as (
                            (fip1, r1), (fip2, r2)):
                        def assert_no_assoc(fip):
                            # fip must have no port or fixed IP bound
                            body = self._show('floatingips',
                                              fip['floatingip']['id'])
                            self.assertIsNone(body['floatingip']['port_id'])
                            self.assertIsNone(
                                body['floatingip']['fixed_ip_address'])
                        assert_no_assoc(fip1)
                        assert_no_assoc(fip2)
                        def associate_and_assert(fip, port):
                            # associate fip with port, verify the fields,
                            # and return the router id the fip landed on
                            port_id = port['port']['id']
                            ip_address = (port['port']['fixed_ips']
                                          [0]['ip_address'])
                            body = self._update(
                                'floatingips', fip['floatingip']['id'],
                                {'floatingip': {'port_id': port_id}})
                            self.assertEqual(body['floatingip']['port_id'],
                                             port_id)
                            self.assertEqual(
                                body['floatingip']['fixed_ip_address'],
                                ip_address)
                            return body['floatingip']['router_id']
                        fip1_r1_res = associate_and_assert(fip1, p1)
                        self.assertEqual(fip1_r1_res, r1['router']['id'])
                        # The following operation will associate the floating
                        # ip to a different router
                        fip1_r2_res = associate_and_assert(fip1, p2)
                        self.assertEqual(fip1_r2_res, r2['router']['id'])
                        fip2_r1_res = associate_and_assert(fip2, p1)
                        self.assertEqual(fip2_r1_res, r1['router']['id'])
                        # disassociate fip1
                        self._update(
                            'floatingips', fip1['floatingip']['id'],
                            {'floatingip': {'port_id': None}})
                        fip2_r2_res = associate_and_assert(fip2, p2)
                        self.assertEqual(fip2_r2_res, r2['router']['id'])
    def test_floatingip_with_assoc(self):
        """An associated fip shows its port, fixed IP and router."""
        with self.floatingip_with_assoc() as fip:
            body = self._show('floatingips', fip['floatingip']['id'])
            self.assertEqual(body['floatingip']['id'],
                             fip['floatingip']['id'])
            self.assertEqual(body['floatingip']['port_id'],
                             fip['floatingip']['port_id'])
            self.assertIsNotNone(body['floatingip']['fixed_ip_address'])
            self.assertIsNotNone(body['floatingip']['router_id'])
    def test_floatingip_port_delete(self):
        """Deleting the associated port resets the fip's association fields."""
        with self.subnet() as private_sub:
            with self.floatingip_no_assoc(private_sub) as fip:
                with self.port(subnet=private_sub) as p:
                    body = self._update('floatingips', fip['floatingip']['id'],
                                        {'floatingip':
                                         {'port_id': p['port']['id']}})
                # note: once this port goes out of scope, the port will be
                # deleted, which is what we want to test. We want to confirm
                # that the fields are set back to None
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertEqual(body['floatingip']['id'],
                                 fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])
                self.assertIsNone(body['floatingip']['router_id'])
    def test_two_fips_one_port_invalid_return_409(self):
        """A second floating IP on the same internal port is rejected (409)."""
        with self.floatingip_with_assoc() as fip1:
            res = self._create_floatingip(
                self.fmt,
                fip1['floatingip']['floating_network_id'],
                fip1['floatingip']['port_id'])
            self.assertEqual(res.status_int, exc.HTTPConflict.code)
    def test_floating_ip_direct_port_delete_returns_409(self):
        """Directly deleting a floating IP's internal port is a 409."""
        found = False
        with self.floatingip_with_assoc():
            for p in self._list('ports')['ports']:
                if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP:
                    self._delete('ports', p['id'],
                                 expected_code=exc.HTTPConflict.code)
                    found = True
        # guard against the loop silently matching nothing
        self.assertTrue(found)
    def _test_floatingip_with_invalid_create_port(self, plugin_class):
        """Patch create_port to return no fixed IPs; fip creation must 400.

        :param plugin_class: dotted path of the core plugin class whose
            create_port is patched.
        """
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.subnet(cidr='12.0.0.0/24') as public_sub:
                self._set_net_external(public_sub['subnet']['network_id'])
                res = self._create_router(self.fmt, _uuid())
                r = self.deserialize(self.fmt, res)
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    public_sub['subnet']['network_id'])
                self._router_interface_action(
                    'add', r['router']['id'],
                    private_sub['subnet']['id'],
                    None)
                with mock.patch(plugin_class + '.create_port') as createport:
                    # a port without fixed IPs makes the fip unusable
                    createport.return_value = {'fixed_ips': []}
                    res = self._create_floatingip(
                        self.fmt, public_sub['subnet']['network_id'],
                        port_id=p['port']['id'])
                    self.assertEqual(res.status_int,
                                     exc.HTTPBadRequest.code)
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  private_sub
                                                  ['subnet']['id'],
                                                  None)
                    self._delete('routers', r['router']['id'])
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(
'neutron.db.db_base_plugin_v2.NeutronDbPluginV2')
    def test_create_floatingip_no_ext_gateway_return_404(self):
        """Creating a fip when no router has an external gateway is a 404."""
        with self.subnet() as public_sub:
            self._set_net_external(public_sub['subnet']['network_id'])
            with self.port() as private_port:
                with self.router():
                    res = self._create_floatingip(
                        self.fmt,
                        public_sub['subnet']['network_id'],
                        port_id=private_port['port']['id'])
                    # this should be some kind of error
                    self.assertEqual(res.status_int, exc.HTTPNotFound.code)
    def test_create_floating_non_ext_network_returns_400(self):
        """Creating a fip on a non-external network is a 400."""
        with self.subnet() as public_sub:
            # normally we would set the network of public_sub to be
            # external, but the point of this test is to handle when
            # that is not the case
            with self.router():
                res = self._create_floatingip(
                    self.fmt,
                    public_sub['subnet']['network_id'])
                self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_create_floatingip_no_public_subnet_returns_400(self):
        """Creating a fip on a network without any subnet is a 400."""
        with self.network() as public_network:
            with self.port() as private_port:
                with self.router() as r:
                    sid = private_port['port']['fixed_ips'][0]['subnet_id']
                    private_sub = {'subnet': {'id': sid}}
                    self._router_interface_action('add', r['router']['id'],
                                                  private_sub['subnet']['id'],
                                                  None)
                    res = self._create_floatingip(
                        self.fmt,
                        public_network['network']['id'],
                        port_id=private_port['port']['id'])
                    self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
                    # cleanup
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  private_sub['subnet']['id'],
                                                  None)
def test_create_floatingip_invalid_floating_network_id_returns_400(self):
# API-level test - no need to create all objects for l3 plugin
res = self._create_floatingip(self.fmt, 'iamnotanuuid',
uuidutils.generate_uuid(), '192.168.0.1')
self.assertEqual(res.status_int, 400)
def test_create_floatingip_invalid_floating_port_id_returns_400(self):
# API-level test - no need to create all objects for l3 plugin
res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
'iamnotanuuid', '192.168.0.1')
self.assertEqual(res.status_int, 400)
def test_create_floatingip_invalid_fixed_ip_address_returns_400(self):
# API-level test - no need to create all objects for l3 plugin
res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
uuidutils.generate_uuid(), 'iamnotnanip')
self.assertEqual(res.status_int, 400)
    def test_floatingip_list_with_sort(self):
        """Floating IP list honors descending sort on floating_ip_address."""
        with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                               self.subnet(cidr="11.0.0.0/24"),
                               self.subnet(cidr="12.0.0.0/24")
                               ) as (s1, s2, s3):
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            try:
                self._test_list_with_sort('floatingip', (fp3, fp2, fp1),
                                          [('floating_ip_address', 'desc')])
            finally:
                self._delete('floatingips', fp1['floatingip']['id'])
                self._delete('floatingips', fp2['floatingip']['id'])
                self._delete('floatingips', fp3['floatingip']['id'])
def test_floatingip_list_with_port_id(self):
with self.floatingip_with_assoc() as fip:
port_id = fip['floatingip']['port_id']
res = self._list('floatingips',
query_params="port_id=%s" % port_id)
self.assertEqual(len(res['floatingips']), 1)
res = self._list('floatingips', query_params="port_id=aaa")
self.assertEqual(len(res['floatingips']), 0)
    def test_floatingip_list_with_pagination(self):
        """Floating IPs can be listed page by page, sorted by address."""
        with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                               self.subnet(cidr="11.0.0.0/24"),
                               self.subnet(cidr="12.0.0.0/24")
                               ) as (s1, s2, s3):
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            try:
                self._test_list_with_pagination(
                    'floatingip', (fp1, fp2, fp3),
                    ('floating_ip_address', 'asc'), 2, 2)
            finally:
                self._delete('floatingips', fp1['floatingip']['id'])
                self._delete('floatingips', fp2['floatingip']['id'])
                self._delete('floatingips', fp3['floatingip']['id'])
    def test_floatingip_list_with_pagination_reverse(self):
        """Floating IPs can be paged in reverse, sorted by address."""
        with contextlib.nested(self.subnet(cidr="10.0.0.0/24"),
                               self.subnet(cidr="11.0.0.0/24"),
                               self.subnet(cidr="12.0.0.0/24")
                               ) as (s1, s2, s3):
            network_id1 = s1['subnet']['network_id']
            network_id2 = s2['subnet']['network_id']
            network_id3 = s3['subnet']['network_id']
            self._set_net_external(network_id1)
            self._set_net_external(network_id2)
            self._set_net_external(network_id3)
            fp1 = self._make_floatingip(self.fmt, network_id1)
            fp2 = self._make_floatingip(self.fmt, network_id2)
            fp3 = self._make_floatingip(self.fmt, network_id3)
            try:
                self._test_list_with_pagination_reverse(
                    'floatingip', (fp1, fp2, fp3),
                    ('floating_ip_address', 'asc'), 2, 2)
            finally:
                self._delete('floatingips', fp1['floatingip']['id'])
                self._delete('floatingips', fp2['floatingip']['id'])
                self._delete('floatingips', fp3['floatingip']['id'])
def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
    """Removing (by subnet id) a router interface a floating IP relies on must 409."""
    found = False
    with self.floatingip_with_assoc():
        for p in self._list('ports')['ports']:
            if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                subnet_id = p['fixed_ips'][0]['subnet_id']
                router_id = p['device_id']
                # the interface removal must be rejected while the
                # floating IP association depends on it
                self._router_interface_action(
                    'remove', router_id, subnet_id, None,
                    expected_code=exc.HTTPConflict.code)
                found = True
                break
    # guard against the loop silently matching no router interface port
    self.assertTrue(found)
def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
    """Removing (by port id) a router interface a floating IP relies on must 409."""
    found = False
    with self.floatingip_with_assoc():
        for p in self._list('ports')['ports']:
            if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
                router_id = p['device_id']
                self._router_interface_action(
                    'remove', router_id, None, p['id'],
                    expected_code=exc.HTTPConflict.code)
                found = True
                break
    # guard against the loop silently matching no router interface port
    self.assertTrue(found)
def test_router_delete_subnet_inuse_returns_409(self):
    """A subnet attached to a router as an interface cannot be deleted (409)."""
    with self.router() as r:
        with self.subnet() as s:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          s['subnet']['id'],
                                          None)
            # subnet cannot be delete as it's attached to a router
            self._delete('subnets', s['subnet']['id'],
                         expected_code=exc.HTTPConflict.code)
            # remove interface so test can exit without errors
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          s['subnet']['id'],
                                          None)
def test_delete_ext_net_with_disassociated_floating_ips(self):
    """Create an unassociated floating IP on an external net, then let the
    network context exit trigger deletion (subnet uses do_delete=False so
    the network teardown path is exercised)."""
    with self.network() as net:
        net_id = net['network']['id']
        self._set_net_external(net_id)
        with self.subnet(network=net, do_delete=False):
            self._make_floatingip(self.fmt, net_id)
class L3AgentDbTestCaseBase(L3NatTestCaseMixin):
    """Unit tests for methods called by the L3 agent."""

    def test_l3_agent_routers_query_interfaces(self):
        """get_sync_data exposes the router's interface port and its subnet."""
        with self.router() as r:
            with self.port(do_delete=False) as p:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])

                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), None)
                self.assertEqual(1, len(routers))
                interfaces = routers[0][l3_constants.INTERFACE_KEY]
                self.assertEqual(1, len(interfaces))
                subnet_id = interfaces[0]['subnet']['id']
                wanted_subnetid = p['port']['fixed_ips'][0]['subnet_id']
                self.assertEqual(wanted_subnetid, subnet_id)
                # clean-up
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              None,
                                              p['port']['id'])

    def test_l3_agent_routers_query_ignore_interfaces_with_moreThanOneIp(self):
        """Interface count stays 1 after the port is given multiple fixed IPs."""
        with self.router() as r:
            with self.subnet(cidr='9.0.1.0/24') as subnet:
                with self.port(subnet=subnet,
                               do_delete=False,
                               fixed_ips=[{'ip_address': '9.0.1.3'}]) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # give the interface port a second and third address
                    port = {'port': {'fixed_ips':
                                     [{'ip_address': '9.0.1.4',
                                       'subnet_id': subnet['subnet']['id']},
                                      {'ip_address': '9.0.1.5',
                                       'subnet_id': subnet['subnet']['id']}]}}
                    ctx = context.get_admin_context()
                    self.core_plugin.update_port(ctx, p['port']['id'], port)
                    routers = self.plugin.get_sync_data(ctx, None)
                    self.assertEqual(1, len(routers))
                    interfaces = routers[0].get(l3_constants.INTERFACE_KEY, [])
                    self.assertEqual(1, len(interfaces))
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_l3_agent_routers_query_gateway(self):
        """get_sync_data exposes the gateway port with its subnet."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                routers = self.plugin.get_sync_data(
                    context.get_admin_context(), [r['router']['id']])
                self.assertEqual(1, len(routers))
                gw_port = routers[0]['gw_port']
                self.assertEqual(s['subnet']['id'], gw_port['subnet']['id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])

    def test_l3_agent_routers_query_floatingips(self):
        """get_sync_data exposes associated floating IPs on the router."""
        with self.floatingip_with_assoc() as fip:
            routers = self.plugin.get_sync_data(
                context.get_admin_context(), [fip['floatingip']['router_id']])
            self.assertEqual(1, len(routers))
            floatingips = routers[0][l3_constants.FLOATINGIP_KEY]
            self.assertEqual(1, len(floatingips))
            self.assertEqual(floatingips[0]['id'],
                             fip['floatingip']['id'])
            self.assertEqual(floatingips[0]['port_id'],
                             fip['floatingip']['port_id'])
            self.assertIsNotNone(floatingips[0]['fixed_ip_address'])
            self.assertIsNotNone(floatingips[0]['router_id'])

    def _test_notify_op_agent(self, target_func, *args):
        """Run target_func with the L3 RPC notifier mocked; restore it after."""
        l3_rpc_agent_api_str = (
            'neutron.api.rpc.agentnotifiers.l3_rpc_agent_api.L3AgentNotifyAPI')
        plugin = manager.NeutronManager.get_service_plugins()[
            service_constants.L3_ROUTER_NAT]
        oldNotify = plugin.l3_rpc_notifier
        try:
            with mock.patch(l3_rpc_agent_api_str) as notifyApi:
                plugin.l3_rpc_notifier = notifyApi
                # pass the mock as the final positional argument
                kargs = [item for item in args]
                kargs.append(notifyApi)
                target_func(*kargs)
        except Exception:
            plugin.l3_rpc_notifier = oldNotify
            raise
        else:
            plugin.l3_rpc_notifier = oldNotify

    def _test_router_gateway_op_agent(self, notifyApi):
        """Adding and removing a gateway each notify the agent once."""
        with self.router() as r:
            with self.subnet() as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                self.assertEqual(
                    2, notifyApi.routers_updated.call_count)

    def test_router_gateway_op_agent(self):
        self._test_notify_op_agent(self._test_router_gateway_op_agent)

    def _test_interfaces_op_agent(self, r, notifyApi):
        """Adding and removing an interface each notify the agent once."""
        with self.port(do_delete=False) as p:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
            # clean-up
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          None,
                                          p['port']['id'])
        self.assertEqual(2, notifyApi.routers_updated.call_count)

    def test_interfaces_op_agent(self):
        with self.router() as r:
            self._test_notify_op_agent(
                self._test_interfaces_op_agent, r)

    def _test_floatingips_op_agent(self, notifyApi):
        """A full floating IP lifecycle produces six agent notifications."""
        with self.floatingip_with_assoc():
            pass
        # add gateway, add interface, associate, deletion of floatingip,
        # delete gateway, delete interface
        self.assertEqual(6, notifyApi.routers_updated.call_count)

    def test_floatingips_op_agent(self):
        self._test_notify_op_agent(self._test_floatingips_op_agent)
class L3BaseForIntTests(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base for tests where the core plugin itself provides L3 routing."""

    # subclasses set this False to exercise real router rescheduling
    mock_rescheduling = True

    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
        if not plugin:
            plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin'
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or L3TestExtensionManager()
        if self.mock_rescheduling:
            mock.patch('%s._check_router_needs_rescheduling' % plugin,
                       new=lambda *a: False).start()
        super(L3BaseForIntTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)

        self.setup_notification_driver()
class L3BaseForSepTests(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base for tests where L3 routing lives in a separate service plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        # the plugin without L3 support
        if not plugin:
            plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.test_l3_plugin.'
                     'TestL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        if not ext_mgr:
            ext_mgr = L3TestExtensionManager()
        super(L3BaseForSepTests, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
                                             service_plugins=service_plugins)

        self.setup_notification_driver()
class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests,
                                        L3NatTestCaseMixin,
                                        test_agent_ext_plugin.
                                        AgentDBTestMixIn):

    """Unit tests for core plugin with L3 routing and scheduling integrated."""

    def setUp(self, plugin='neutron.tests.unit.test_l3_plugin.'
                           'TestL3NatIntAgentSchedulingPlugin',
              ext_mgr=None, service_plugins=None):
        # exercise real rescheduling (see L3BaseForIntTests.mock_rescheduling)
        self.mock_rescheduling = False
        super(L3NatDBIntAgentSchedulingTestCase, self).setUp(
            plugin, ext_mgr, service_plugins)
        self.adminContext = context.get_admin_context()

    def _assert_router_on_agent(self, router_id, agent_host):
        """Assert the router is hosted by exactly one agent on agent_host."""
        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        agents = plugin.list_l3_agents_hosting_router(
            self.adminContext, router_id)['agents']
        self.assertEqual(len(agents), 1)
        self.assertEqual(agents[0]['host'], agent_host)

    def test_update_gateway_agent_exists_supporting_network(self):
        """Changing the gateway net moves the router to the agent serving it."""
        with contextlib.nested(self.router(),
                               self.subnet(),
                               self.subnet()) as (r, s1, s2):
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
            self._register_one_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            self._register_one_l3_agent(
                host='host2', internal_only=False,
                ext_net_id=s2['subnet']['network_id'])
            l3_rpc.sync_routers(self.adminContext,
                                host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

            self._remove_external_gateway_from_router(
                r['router']['id'],
                s2['subnet']['network_id'])

    def test_update_gateway_agent_exists_supporting_multiple_network(self):
        """An agent serving any ext net (ext_net_id='') can take the router."""
        with contextlib.nested(self.router(),
                               self.subnet(),
                               self.subnet()) as (r, s1, s2):
            self._set_net_external(s1['subnet']['network_id'])
            l3_rpc = l3_rpc_base.L3RpcCallbackMixin()
            self._register_one_l3_agent(
                host='host1',
                ext_net_id=s1['subnet']['network_id'])
            self._register_one_l3_agent(
                host='host2', internal_only=False,
                ext_net_id='', ext_bridge='')
            l3_rpc.sync_routers(self.adminContext,
                                host='host1')
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._add_external_gateway_to_router(
                r['router']['id'],
                s1['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host1')

            self._set_net_external(s2['subnet']['network_id'])
            self._add_external_gateway_to_router(
                r['router']['id'],
                s2['subnet']['network_id'])
            self._assert_router_on_agent(r['router']['id'], 'host2')

            self._remove_external_gateway_from_router(
                r['router']['id'],
                s2['subnet']['network_id'])

    def test_router_update_gateway_no_eligible_l3_agent(self):
        """Setting a gateway with no registered eligible agent returns 400."""
        with self.router() as r:
            with self.subnet() as s1:
                with self.subnet() as s2:
                    self._set_net_external(s1['subnet']['network_id'])
                    self._set_net_external(s2['subnet']['network_id'])
                    self._add_external_gateway_to_router(
                        r['router']['id'],
                        s1['subnet']['network_id'],
                        expected_code=exc.HTTPBadRequest.code)
class L3AgentDbIntTestCase(L3BaseForIntTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for
    the case where core plugin implements L3 routing.
    """

    def setUp(self):
        self.core_plugin = TestL3NatIntPlugin()
        # core plugin is also plugin providing L3 routing
        self.plugin = self.core_plugin
        super(L3AgentDbIntTestCase, self).setUp()
class L3AgentDbSepTestCase(L3BaseForSepTests, L3AgentDbTestCaseBase):

    """Unit tests for methods called by the L3 agent for the
    case where separate service plugin implements L3 routing.
    """

    def setUp(self):
        self.core_plugin = TestNoL3NatPlugin()
        # here L3 routing comes from a separate service plugin, not the
        # core plugin (the original comment was copy-pasted from the
        # integrated case)
        self.plugin = TestL3NatServicePlugin()
        super(L3AgentDbSepTestCase, self).setUp()
class L3NatDBIntTestCase(L3BaseForIntTests, L3NatTestCaseBase):

    """Unit tests for core plugin with L3 routing integrated."""
    pass
class L3NatDBSepTestCase(L3BaseForSepTests, L3NatTestCaseBase):

    """Unit tests for a separate L3 routing service plugin."""
    pass
class L3NatDBIntTestCaseXML(L3NatDBIntTestCase):
    # rerun the full integrated suite with the XML request/response format
    fmt = 'xml'
class L3NatDBSepTestCaseXML(L3NatDBSepTestCase):
    # rerun the full separate-plugin suite with the XML format
    fmt = 'xml'
| |
# Python implementation of the MySQL client-server protocol
# https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html
import hashlib
import socket
import ssl
import struct
import sys
import os
import stat
import getpass
try:
from ConfigParser import RawConfigParser
except ImportError:
from configparser import RawConfigParser
from cymysql.charset import charset_by_name, encoding_by_charset
from cymysql.cursors import Cursor
from cymysql.constants import CLIENT, COMMAND
from cymysql.converters import decoders, encoders, escape_item
from cymysql.err import Warning, Error, \
InterfaceError, DataError, DatabaseError, OperationalError, \
IntegrityError, InternalError, NotSupportedError, ProgrammingError
from cymysql.packet import MysqlPacket, MySQLResult
# True when running under Python 3; selects byte/str handling paths below
PYTHON3 = sys.version_info[0] > 2
# fall back to the OS login name when the caller does not pass user=
DEFAULT_USER = getpass.getuser()
DEFAULT_CHARSET = 'utf8mb4'
def sha_new(*args, **kwargs):
    """Return a SHA-1 hash object (MySQL native auth is SHA-1 based)."""
    return hashlib.sha1(*args, **kwargs)
def sha256_new(*args, **kwargs):
    """Return a SHA-256 hash object (used by caching_sha2_password)."""
    return hashlib.sha256(*args, **kwargs)
def byte2int(b):
    """Return the integer value of *b*: an int already, or a 1-byte string."""
    return b if isinstance(b, int) else ord(b)
def int2byte(i):
    """Return a single byte (bytes on py3, str on py2) for integer 0..255."""
    # test the interpreter version directly instead of the module-level
    # PYTHON3 flag; the condition is identical
    if sys.version_info[0] > 2:
        return bytes([i])
    return chr(i)
def pack_int24(n):
    """Pack *n* as a 3-byte little-endian integer (MySQL packet length)."""
    octets = [(n >> shift) & 0xFF for shift in (0, 8, 16)]
    if sys.version_info[0] > 2:
        return bytes(octets)
    return ''.join(chr(o) for o in octets)
# number of bytes of the server's auth challenge used when scrambling
# the password (both native and caching_sha2 schemes below use it)
SCRAMBLE_LENGTH = 20
def _xor(data1, data2):
result = b''
for i in range(len(data1)):
j = i % len(data2)
x = struct.unpack('B', data1[i:i+1])[0] ^ struct.unpack('B', data2[j:j+1])[0]
result += struct.pack('B', x)
return result
def _mysql_native_password_scramble(password, message):
    """mysql_native_password reply: SHA1(pwd) XOR SHA1(nonce + SHA1(SHA1(pwd))).

    Returns b'' for a missing/empty password.
    """
    if not password:
        return b''
    stage1 = sha_new(password).digest()
    stage2 = sha_new(stage1).digest()
    reply = sha_new(message[:SCRAMBLE_LENGTH] + stage2).digest()
    return _xor(reply, stage1)
def _caching_sha2_password_scramble(password, nonce):
    """caching_sha2_password reply: XOR(SHA256(pwd), SHA256(SHA256(SHA256(pwd)) + nonce)).

    Returns b'' for a missing/empty password.
    """
    if not password:
        return b''
    stage1 = sha256_new(password).digest()
    inner = sha256_new(stage1).digest()
    reply = sha256_new(inner + nonce[:SCRAMBLE_LENGTH]).digest()
    return _xor(stage1, reply)
class Connection(object):
    """
    Representation of a socket with a mysql server.

    The proper way to get an instance of this class is to call
    connect()."""
def errorhandler(connection, cursor, errorclass, errorvalue):
    """DB-API errorhandler hook: record the error, then raise it.

    The (class, value) tuple is appended to cursor.messages, or to
    connection.messages when no cursor is involved.  Note the first
    parameter plays the role of `self`.
    """
    err = errorclass, errorvalue
    if cursor:
        cursor.messages.append(err)
    else:
        connection.messages.append(err)
    del cursor
    del connection

    if not issubclass(errorclass, Error):
        # wrap non-DB-API exception types in the generic Error
        raise Error(errorclass, errorvalue)
    elif isinstance(errorvalue, errorclass):
        # already an exception instance - re-raise as-is
        raise errorvalue
    else:
        raise errorclass(*errorvalue)
def __init__(self, host="localhost", user=None, passwd="",
             db=None, port=3306, unix_socket=None,
             charset='', sql_mode=None,
             read_default_file=None, use_unicode=None,
             client_flag=0, cursorclass=Cursor, init_command=None,
             connect_timeout=None, ssl=None, read_default_group=None,
             compress=None, named_pipe=None,
             conv=decoders, encoders=encoders):
    """
    Establish a connection to the MySQL database. Accepts several
    arguments:

    host: Host where the database server is located
    user: Username to log in as
    passwd: Password to use.
    db: Database to use, None to not use a particular one.
    port: MySQL port to use, default is usually OK.
    unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
    charset: Charset you want to use.
    sql_mode: Default SQL_MODE to use.
    read_default_file: Specifies my.cnf file to read these parameters from under the [client] section.
    conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.
    use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.
    client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
    cursorclass: Custom cursor class to use.
    init_command: Initial SQL statement to run when connection is established.
    connect_timeout: Timeout before throwing an exception when connecting.
    ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.
    read_default_group: Group to read from in the configuration file.
    compress; Not supported
    named_pipe: Not supported
    """
    if use_unicode is None and sys.version_info[0] > 2:
        use_unicode = True

    if compress or named_pipe:
        raise NotImplementedError("compress and named_pipe arguments are not supported")

    if ssl and ('capath' in ssl or 'cipher' in ssl):
        raise NotImplementedError('ssl options capath and cipher are not supported')

    self.socket = None
    self.ssl = False
    if ssl:
        self.ssl = True
        client_flag |= CLIENT.SSL
        # NOTE: self.key/self.cert/self.ca exist only when an ssl dict is
        # given; _request_authentication reads them only on the SSL path
        for k in ('key', 'cert', 'ca'):
            v = None
            if k in ssl:
                v = ssl[k]
            setattr(self, k, v)

    if read_default_group and not read_default_file:
        if sys.platform.startswith("win"):
            read_default_file = "c:\\my.ini"
        else:
            # first existing config file wins
            for f in ('~/.my.cnf', '/etc/my.cnf', '/etc/mysql/my.cnf'):
                if os.path.isfile(os.path.expanduser(f)):
                    read_default_file = f
                    break

    if read_default_file:
        if not read_default_group:
            read_default_group = "client"

        cfg = RawConfigParser()
        cfg.read(os.path.expanduser(read_default_file))

        def _config(key, default):
            # fall back to the passed-in value on any lookup error
            try:
                return cfg.get(read_default_group, key)
            except:
                return default

        # NOTE(review): values read from my.cnf are strings; `port` is not
        # cast to int, so the `port == 3306` check below will not match a
        # config-supplied port -- verify intended behavior
        user = _config("user", user)
        passwd = _config("password", passwd)
        host = _config("host", host)
        db = _config("db", db)
        unix_socket = _config("socket", unix_socket)
        port = _config("port", port)
        charset = _config("default-character-set", charset)

    if (
        host == 'localhost' and port == 3306
        and not sys.platform.startswith('win')
        and (unix_socket is None or not os.path.exists(unix_socket))
    ):
        # probe well-known socket paths so "localhost" uses a unix
        # socket, like the C client does
        for f in (
            '/var/lib/mysql/mysql.sock',
            '/var/run/mysql/mysql.sock',
            '/var/run/mysql.sock',
            '/var/mysql/mysql.sock'
        ):
            if os.path.exists(f) and stat.S_ISSOCK(os.stat(f).st_mode):
                unix_socket = f
                break

    self.host = host
    self.port = port
    self.user = user or DEFAULT_USER
    self.password = passwd
    self.db = db
    self.unix_socket = unix_socket
    self.conv = conv
    self.encoders = encoders
    if charset:
        self.charset = charset
        self.use_unicode = True
    else:
        self.charset = DEFAULT_CHARSET
        self.use_unicode = False

    if use_unicode is not None:
        self.use_unicode = use_unicode

    self.encoding = encoding_by_charset(self.charset)

    client_flag |= CLIENT.CAPABILITIES
    client_flag |= CLIENT.MULTI_STATEMENTS
    if self.db:
        client_flag |= CLIENT.CONNECT_WITH_DB
    # self.client_flag |= CLIENT.CLIENT_DEPRECATE_EOF
    self.client_flag = client_flag

    self.cursorclass = cursorclass
    self.connect_timeout = connect_timeout

    self._connect()

    self.messages = []
    self.set_charset(charset)
    self._result = None

    # NOTE(review): this overwrites the host_info that _connect() just
    # set; it probably belongs before the _connect() call -- verify
    self.host_info = "Not connected"

    self.autocommit(False)

    if sql_mode is not None:
        c = self.cursor()
        c.execute("SET sql_mode=%s", (sql_mode,))

    if init_command is not None:
        c = self.cursor()
        c.execute(init_command)
        self.commit()
def close(self):
    ''' Send the quit message and close the socket '''
    if self.socket is None:
        raise Error("Already closed")
    # packet: payload length 1 (3 bytes) + sequence 0 + COM_QUIT byte
    send_data = b'\x01\x00\x00\x00' + int2byte(COMMAND.COM_QUIT)
    self.socket.sendall(send_data)
    self.socket.close()
    self.socket = None
def autocommit(self, value):
    ''' Set whether or not to commit after every execute() '''
    if value:
        q = "SET AUTOCOMMIT = 1"
    else:
        q = "SET AUTOCOMMIT = 0"
    try:
        self._execute_command(COMMAND.COM_QUERY, q)
        self.read_packet()
    except:
        # route all failures through the DB-API errorhandler hook
        exc, value, tb = sys.exc_info()
        self.errorhandler(None, exc, value)
def commit(self):
    ''' Commit changes to stable storage '''
    try:
        self._execute_command(COMMAND.COM_QUERY, "COMMIT")
        self.read_packet()
    except:
        exc, value, tb = sys.exc_info()
        self.errorhandler(None, exc, value)
def rollback(self):
    ''' Roll back the current transaction '''
    try:
        self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
        self.read_packet()
    except:
        exc, value, tb = sys.exc_info()
        self.errorhandler(None, exc, value)
def escape(self, obj):
    '''Escape a value into SQL-literal form using the registered encoders.'''
    return escape_item(obj, self.charset, self.encoders)
def literal(self, obj):
    '''Alias for escape(); kept for MySQLdb API compatibility.'''
    return escape_item(obj, self.charset, self.encoders)
def cursor(self, cursor=None):
    """Return a new cursor; `cursor` overrides the connection's default class."""
    cursor_class = cursor if cursor else self.cursorclass
    return cursor_class(self)
def __enter__(self):
    ''' Context manager that returns a Cursor '''
    return self.cursor()
def __exit__(self, exc, value, traceback):
    ''' On successful exit, commit. On exception, rollback. '''
    if exc:
        self.rollback()
    else:
        self.commit()
def __del__(self):
    # close the raw socket on garbage collection; unlike close(), no
    # COM_QUIT is sent here
    if hasattr(self, 'socket') and self.socket:
        self.socket.close()
        self.socket = None
def _is_connect(self):
    """Return True while the connection holds an open socket."""
    return bool(self.socket)
# The following methods are INTERNAL USE ONLY (called from Cursor)
def query(self, sql):
    """Send a COM_QUERY and attach a fresh result set (internal)."""
    self._execute_command(COMMAND.COM_QUERY, sql)
    self._result = MySQLResult(self)
def next_result(self):
    """Advance to the next result set of a multi-statement query (internal)."""
    self._result = MySQLResult(self)
def affected_rows(self):
    """Return the row count affected by the last statement, or 0 if there
    is no current result.

    Bug fix: the original evaluated ``self._result._affected_rows`` but
    never returned it, so the method always returned None whenever a
    result existed.
    """
    if self._result:
        return self._result._affected_rows
    else:
        return 0
def kill(self, thread_id):
    """Ask the server to kill the connection with the given thread id.

    Returns True on an OK packet, False when the command failed.
    """
    arg = struct.pack('<I', thread_id)
    try:
        self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
        pkt = self.read_packet()
        return pkt.is_ok_packet()
    except:
        exc, value, tb = sys.exc_info()
        self.errorhandler(None, exc, value)
    return False
def ping(self, reconnect=True):
    ''' Check if the server is alive '''
    try:
        self._execute_command(COMMAND.COM_PING, "")
    except:
        if reconnect:
            # transparently reconnect once, then retry the ping without
            # allowing a second reconnect
            self._connect()
            return self.ping(False)
        else:
            exc, value, tb = sys.exc_info()
            self.errorhandler(None, exc, value)
            return
    pkt = self.read_packet()
    return pkt.is_ok_packet()
def set_charset(self, charset):
    """Issue SET NAMES and remember the new connection charset.

    A falsy charset is a no-op.
    """
    try:
        if charset:
            self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" %
                                  self.escape(charset))
            self.read_packet()
            self.charset = charset
    except:
        exc, value, tb = sys.exc_info()
        self.errorhandler(None, exc, value)
def _connect(self):
    """Open the TCP or unix-domain socket and run the MySQL handshake."""
    sock = None
    try:
        if self.unix_socket and (self.host == 'localhost' or self.host == '127.0.0.1'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            # apply the connect timeout only while connecting, then
            # restore the socket's previous timeout
            t = sock.gettimeout()
            sock.settimeout(self.connect_timeout)
            sock.connect(self.unix_socket)
            sock.settimeout(t)
            self.host_info = "Localhost via UNIX socket"
        else:
            sock = socket.create_connection((self.host, self.port), self.connect_timeout)
            self.host_info = "socket %s:%d" % (self.host, self.port)
    except socket.error as e:
        if sock:
            sock.close()
        raise OperationalError(
            2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e.args[0])
        )
    self.socket = sock
    self._get_server_information()
    self._request_authentication()
def read_packet(self):
    """Read an entire "mysql packet" in its entirety from the network
    and return a MysqlPacket type that represents the results."""
    return MysqlPacket(self)
def insert_id(self):
    """Return the auto-increment id of the last inserted row (0 if none)."""
    return self._result.insert_id if self._result else 0
def _execute_command(self, command, sql):
    """Send one command packet (command byte + payload) to the server."""
    if not self.socket:
        self.errorhandler(None, InterfaceError, (-1, 'socket not found'))

    # encode text payloads (py3 str / py2 unicode) with the connection encoding
    if (
        (PYTHON3 and isinstance(sql, str)) or
        (not PYTHON3 and isinstance(sql, unicode))
    ):
        sql = sql.encode(self.encoding)

    if len(sql) + 1 > 0xffffff:
        raise ValueError('Sending query packet is too large')

    # '<i' packs len+1 into 4 bytes: a 3-byte payload length followed by
    # a 0x00 sequence byte (valid because the length is < 0xffffff)
    prelude = struct.pack('<i', len(sql)+1) + int2byte(command)
    self.socket.sendall(prelude + sql)
def _scramble(self):
    """Build the auth response bytes for the negotiated auth plugin."""
    if self.auth_plugin_name in ('', 'mysql_native_password'):
        data = _mysql_native_password_scramble(
            self.password.encode(self.encoding), self.salt
        )
    elif self.auth_plugin_name == 'caching_sha2_password':
        data = _caching_sha2_password_scramble(
            self.password.encode(self.encoding), self.salt
        )
    elif self.auth_plugin_name == 'mysql_clear_password':
        # cleartext password, NUL-terminated
        data = self.password.encode(self.encoding) + b'\x00'
    else:
        raise NotImplementedError(
            "%s authentication plugin is not implemented" % (self.auth_plugin_name, )
        )
    return data
def _request_authentication(self):
    """Send the HandshakeResponse and complete the authentication exchange."""
    if self.user is None:
        raise ValueError("Did not specify a username")
    next_packet = 1
    charset_id = charset_by_name(self.charset).id
    user = self.user.encode(self.encoding)

    # capability flags + max packet size + charset + 23 reserved bytes
    data_init = (
        struct.pack('<i', self.client_flag) +
        struct.pack("<I", 1) +
        int2byte(charset_id) + int2byte(0)*23
    )

    if self.ssl and self.server_capabilities & CLIENT.SSL:
        # SSLRequest packet, then upgrade the socket to TLS before
        # sending credentials
        data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
        self.socket.sendall(data)
        next_packet += 1
        self.socket = ssl.wrap_socket(self.socket, keyfile=self.key,
                                      certfile=self.cert,
                                      ca_certs=self.ca)

    data = data_init + user + int2byte(0)

    authresp = self._scramble()
    if self.server_capabilities & CLIENT.SECURE_CONNECTION:
        # length-prefixed auth response
        data += int2byte(len(authresp)) + authresp
    else:
        # legacy: NUL-terminated auth response
        data += authresp + int2byte(0)

    if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
        data += self.db.encode(self.encoding) + int2byte(0)

    if self.server_capabilities & CLIENT.PLUGIN_AUTH:
        data += self.auth_plugin_name.encode(self.encoding) + int2byte(0)

    data = pack_int24(len(data)) + int2byte(next_packet) + data
    next_packet += 2
    self.socket.sendall(data)

    auth_packet = self.read_packet()
    if auth_packet.is_eof_packet():
        # AuthSwitchRequest
        # https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
        self.auth_plugin_name, self.salt = auth_packet.read_auth_switch_request()
        data = self._scramble()
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        next_packet += 2
        self.socket.sendall(data)
        auth_packet = self.read_packet()

    if self.auth_plugin_name == 'caching_sha2_password':
        self._caching_sha2_authentication2(auth_packet, next_packet)
def _caching_sha2_authentication2(self, auth_packet, next_packet):
    """Handle the caching_sha2_password continuation after the first reply.

    https://dev.mysql.com/doc/dev/mysql-server/latest/page_caching_sha2_authentication_exchanges.html
    """
    if auth_packet.get_all_data() == b'\x01\x03':  # fast_auth_success
        self.read_packet()
        return

    # perform_full_authentication
    assert auth_packet.get_all_data() == b'\x01\x04'
    if self.ssl or self.unix_socket:
        # channel is already protected: send the cleartext password
        data = self.password.encode(self.encoding) + b'\x00'
    else:
        # request_public_key
        data = b'\x02'
        data = pack_int24(len(data)) + int2byte(next_packet) + data
        next_packet += 2
        self.socket.sendall(data)
        response = self.read_packet()
        public_pem = response.get_all_data()[1:]
        # NOTE: RSA encryption of the password requires the third-party
        # pycrypto/pycryptodome package (imported lazily on this path)
        from Crypto.PublicKey import RSA
        from Crypto.Cipher import PKCS1_OAEP
        key = RSA.importKey(public_pem)
        cipher = PKCS1_OAEP.new(key)
        password = self.password.encode(self.encoding) + b'\x00'
        data = cipher.encrypt(_xor(password, self.salt))
    data = pack_int24(len(data)) + int2byte(next_packet) + data
    next_packet += 2
    self.socket.sendall(data)
    self.read_packet()
# _mysql support
def thread_id(self):
    """Return the server-side thread id of this connection."""
    return self.server_thread_id[0]
def character_set_name(self):
    """Return the connection's current character set name."""
    return self.charset
def get_host_info(self):
    """Return a human-readable description of the connection endpoint."""
    return self.host_info
def get_proto_info(self):
    """Return the protocol version announced in the server handshake."""
    return self.protocol_version
def _get_server_information(self):
    """Parse the server's initial Handshake packet.

    https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
    """
    i = 0
    packet = self.read_packet()
    data = packet.get_all_data()

    self.protocol_version = byte2int(data[i:i+1])
    i += 1

    # NUL-terminated server version string
    str_end = data.find(int2byte(0), i)
    self.server_version = data[i:str_end].decode('utf-8')
    i = str_end + 1

    self.server_thread_id = struct.unpack('<I', data[i:i+4])
    i += 4

    # first 8 bytes of the auth challenge (plus 1 filler byte)
    self.salt = data[i:i+8]
    i += 9

    # lower 16 bits of the capability flags
    self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
    i += 2

    self.server_status = None
    self.auth_plugin_name = ''
    if len(data) > i:
        # Drop server_language and server_charset now.
        # character_set(1) only the lower 8 bits
        # self.server_language = byte2int(data[i:i+1])
        # self.server_charset = charset_by_id(self.server_language).name
        i += 1

        self.server_status = struct.unpack('<H', data[i:i+2])[0]
        i += 2

        # upper 16 bits of the capability flags
        self.server_capabilities |= (struct.unpack('<H', data[i:i+2])[0]) << 16
        i += 2

        salt_len = byte2int(data[i:i+1])
        i += 1
        i += 10  # reserverd

        if salt_len:
            # remainder of the auth challenge, excluding its trailing NUL
            rest_salt_len = max(13, salt_len-8)
            self.salt += data[i:i+rest_salt_len-1]
            i += rest_salt_len
        # NUL-terminated auth plugin name
        self.auth_plugin_name = data[i:data.find(int2byte(0), i)].decode('utf-8')
def get_server_info(self):
    """Return the server version string from the handshake."""
    return self.server_version
# DB-API 2.0 optional extension: expose the module-level exception
# classes as attributes of the connection class as well.
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from docx import Document
from itertools import groupby
from operator import itemgetter
from xml.dom.minidom import parse
import sys
import re
import os
import shutil
import glob
import tempfile
import operator
import getopt
import pdb
import string
import zipfile
def parseslidecontent(pptxfile, words, booknum, verbose=False):
    """Extract slide and speaker-notes text from a PPTX into `words`.

    Keys are "book:page" strings, values the concatenated ascii text.
    "Course Roadmap" slides get an empty entry and their notes are
    skipped.  Returns the updated `words` dict.
    """
    skippages = []
    tmpd = tempfile.mkdtemp()
    # a .pptx is a zip archive; unpack it so the XML can be parsed
    zipfile.ZipFile(pptxfile).extractall(path=tmpd, pwd=None)

    # Parse slide content first
    path = tmpd + '/ppt/slides/'
    for infile in glob.glob(os.path.join(path, '*.xml')):
        #parse each XML notes file from the notes folder.
        dom = parse(infile)
        noteslist = dom.getElementsByTagName('a:t')
        # the page number is embedded in the filename digits
        page = int(re.sub(r'\D', "", infile.split("/")[-1]))
        text = ''
        for node in noteslist:
            xmlTag = node.toxml()
            xmlData = xmlTag.replace('<a:t>', '').replace('</a:t>', '')
            text += " " + xmlData
        # Convert to ascii to simplify
        text = text.encode('ascii', 'ignore')
        if "Course Roadmap" in text:
            if verbose:
                print "Skipping page %d:%d, \"Course Roadmap\" slide."%(booknum,page)
            skippages.append(page)
            words[str(booknum) + ":" + str(page)] = ''
        else:
            words[str(booknum) + ":" + str(page)] = text

    # Next, parse notes content, skipping pages previously identified
    path = tmpd + '/ppt/notesSlides/'
    for infile in glob.glob(os.path.join(path, '*.xml')):
        #parse each XML notes file from the notes folder.
        dom = parse(infile)
        noteslist = dom.getElementsByTagName('a:t')
        # The page number is part of the filename
        page = int(re.sub(r'\D', "", infile.split("/")[-1]))
        if page in skippages:
            # Skip this page previously identified with slide text
            continue
        # Create dictionary entry without content
        text = ''
        for node in noteslist:
            xmlTag = node.toxml()
            xmlData = xmlTag.replace('<a:t>', '').replace('</a:t>', '')
            #concatenate the xmlData to the text for the particular slideNumber index.
            text += " " + xmlData
        # Convert to ascii to simplify
        text = text.encode('ascii', 'ignore')
        words[str(booknum) + ":" + str(page)] += " " + text

    # Remove all the files created with unzip
    shutil.rmtree(tmpd)

    # Remove double-spaces which happens in the content occasionally
    for page in words:
        # strip commas/parens, drop sentence-period artifacts, squeeze spaces
        words[page] = ''.join(ch for ch in words[page] if ch not in set([',','(',')']))
        words[page] = re.sub('\. ', " ", words[page])
        words[page] = ' '.join(words[page].split())
    return words
# Validate the contents of the concordance file
def checkconcordance(concordancefile):
    """Syntax-check every expression in the concordance file.

    Lines are "term;expression"; each expression is eval()'d to surface
    syntax/name errors up front.  Returns 0 when clean, 1 when any line
    failed.  NOTE: using eval() means the concordance file is trusted
    input by design.
    """
    # Declared empty here, just for validating concordance rules
    page = ""
    cspage = ""
    booknum = 0
    pagenum = 0
    wordlist = ""
    cswordlist = ""
    ret=0
    lineno=0
    for line in open(concordancefile):
        expression = None
        lineno+=1
        # skip comment and blank lines
        if line[0] == "#" or line == "\n" or line == "\r\n" or line.isspace(): continue
        try:
            key,expression = line.strip().split(";")
        except ValueError:
            # Explicit search term, continue
            continue
        if expression != None:
            try:
                eval(expression)
            except Exception, e:
                ret=1
                sys.stdout.write("Error processing concordance file line " + str(lineno) + ": ")
                sys.stdout.write(str(e))
                sys.stdout.write("\n")
                continue
    return ret
# Take the index of entries, sort and reduce the page numbers
# into ranges (e.g. 1:3,1:4,1:5,1:8 into 1:3-5,1:8)
def indexreduce(index):
    """Collapse each index entry's "book:page" list into ranged references.

    For every entry, pages are grouped per book, sorted, reduced to
    hyphenated range strings via numreduce(), and written back in
    "book:range" form.  Mutates and returns `index`.

    Fix: removed the `sortedreduced` local, which was assigned but never
    used in the original.
    """
    for entry in index:
        matchesbybook = {}
        pages = index[entry]
        for bookpage in pages:
            book, page = bookpage.split(":")
            page = int(page)
            try:
                matchesbybook[book].append(page)
            except KeyError:
                matchesbybook[book] = [page]
        # reduce each book's sorted page list into range strings
        for book in matchesbybook:
            matchesbybook[book].sort()
            matchesbybook[book] = numreduce(matchesbybook[book])
        # Return to 1:66, 2:57 format
        index[entry] = []
        for book in matchesbybook:
            for page in matchesbybook[book]:
                index[entry].append(book + ":" + page)
    return index
# Take a list of numbers and reduce them into hyphenated ranges for
# sequential values.
def numreduce(data):
    """Collapse a sorted list of ints into range strings.

    e.g. [3, 4, 5, 8] -> ['3-5', '8'].  Consecutive runs are found by
    grouping on index - value, which is constant within a run.

    Fix: replaced the Python-2-only tuple-unpacking lambda and the
    indexed map() result with forms that behave identically on both
    Python 2 and Python 3.
    """
    str_list = []
    for _, g in groupby(enumerate(data), lambda pair: pair[0] - pair[1]):
        ilist = [value for _, value in g]
        if len(ilist) > 1:
            str_list.append('%d-%d' % (ilist[0], ilist[-1]))
        else:
            str_list.append('%d' % ilist[0])
    return str_list
def indexsort(entry):
    """Sort key for "book:page" (or "book:first-last") index references.

    Orders by book first, then by the starting page of the range.  The
    +1000 bias assumes page numbers below 1000.

    Fix: renamed the parameter from `string`, which shadowed the
    imported `string` module (callers pass it positionally, e.g. as a
    sort key, so the rename is safe).
    """
    book, page = entry.split(":")
    page = re.sub('-.*', "", page)  # keep only the range start
    return int(book)*(int(page)+1000)
def showconcordancehits(index, concordance):
    """Print how many ranged matches each concordance key produced.

    Keys absent from ``index`` never matched any page; they are reported
    as "0 matches".  All single-argument prints are parenthesized so the
    function is valid under both Python 2 and Python 3 with identical
    output.
    """
    print("Concordance matches:")
    nohitcount = 0
    for key in concordance:
        # The concordance key will not be present in the index list unless
        # it was present in the PPTX file; KeyError marks a zero-hit entry.
        try:
            rangedmatches = len(index[key])
            # Left-justify the key name to 52 columns - may need to be
            # adjusted for longer concordance keys.
            print("\t%s%d ranged matches." % (key.ljust(52), rangedmatches))
        except KeyError:
            nohitcount += 1
            print("\t%s0 matches." % (key.ljust(52)))
    if nohitcount == 0:
        print("All entries in the concordance file produced matches.")
    return
def usage(status=0):
    """Print version and usage text, then exit with ``status``.

    Prints are parenthesized (single argument) so output is identical
    under Python 2 and Python 3.
    """
    print("pptxindex v1.0.2")
    print("Usage: pptxindex.py -c <CONCORDANCE> [-o WORDFILE] [-i WORDFILE] [PPTX FILES]")
    print("       [-h] [-t]")
    print("    -c <CONCORDANCE>   Specify the concordance filename")
    print("    -o <WORDFILE>      Specify the MS Word index output filename")
    print("    -i <WORDFILE>      Specify the MS Word template file to base index on")
    print("    -t                 Test and validate concordance file syntax, then exit")
    print("    -v                 Verbose output (including 0-hit concordance entries)")
    print("    -h                 This usage information")
    sys.exit(status)
if __name__ == "__main__":
    # Command-line option state, filled in by getopt below.
    concordancefile = None
    indexoutputfile = None
    testandexit = None
    templatefile = None
    verbose = False
    if len(sys.argv) == 1: usage(0)
    opts = getopt.getopt(sys.argv[1:],"i:c:o:htv")
    for opt,optarg in opts[0]:
        if opt == "-c":
            concordancefile = optarg
        elif opt == "-o":
            indexoutputfile = optarg
        elif opt == "-i":
            templatefile = optarg
        elif opt == "-t":
            testandexit = True
        elif opt == "-v":
            verbose = True
        elif opt == "-h":
            usage()
    if not concordancefile:
        print "Error: concordance file not specified"
        usage()
    if not indexoutputfile:
        # Default output filename: the concordance filename plus ".docx".
        indexoutputfile = concordancefile + ".docx"
    # Check all the expressions in the concordance file
    if (checkconcordance(concordancefile) != 0):
        sys.stderr.write("Please correct the errors in the concordance file and try again.\n")
        sys.exit(-1)
    if testandexit:
        print("No errors in the concordance file.")
        sys.exit(0)
    # Read concordance file and build the dictionary:
    # "key;expression" lines map key -> expression string; bare lines
    # (no single ";") map the whole line -> None (plain substring search).
    # NOTE(review): '"^$"' is compared literally, not as a regex, so blank
    # lines fall through to the except branch and create an empty-string
    # key; checkconcordance() skips blank lines properly — confirm intent.
    concordance = {}
    for line in open(concordancefile):
        if line[0] == "#" or line == "^$": continue
        try:
            key,val = line.strip().split(";")
            concordance[key] = val
        except ValueError:
            concordance[line.strip()] = None
    # Handle globbing for pptx filenames on Windows
    pptxfiles = []
    for filemask in opts[1:][0]:
        pptxfiles += glob.glob(filemask)
    if len(pptxfiles) == 0:
        sys.stderr.write("No matching PPTX files found.\n")
        sys.exit(1)
    pptxfiles.sort()
    if verbose:
        print("Processing PPTX files: %s")%' '.join(os.path.basename(x) for x in pptxfiles)
    print("Extracting content from PPTX files.")
    # wordsbypage maps "book:page" keys to the extracted slide/notes text.
    wordsbypage = {}
    booknum=1
    for pptxfile in pptxfiles:
        if os.path.splitext(pptxfile.lower())[1] != ".pptx":
            sys.stderr.write("Cannot process non-pptx filename \"%s\", exiting.\n"%pptxfile)
            sys.exit(-1)
        try:
            # Retrieve slide and notes text for each slide in pptx file
            wordsbypage = parseslidecontent(pptxfile, wordsbypage, booknum, verbose)
        except zipfile.BadZipfile:
            sys.stderr.write("Invalid pptx file \"%s\", exiting.\n"%pptxfile)
            sys.exit(-1)
        except:
            print "Unexpected error:", sys.exc_info()[0]
            sys.exit(-1)
        booknum+=1
    # Next, iterate through the concordance dictionary, searching for and recording
    # matches for each entry.
    print("Searching for matches with the concordance file.")
    index = {}
    for key in concordance:
        pages = [] # list of page numbers
        for bookpagenum in wordsbypage:
            # To track hits with concordance entries, mark hits for this
            # entry to None by default.
            # These are the variables intended to be accessible by the author in the concordance file
            cspage = wordsbypage[bookpagenum]
            page = wordsbypage[bookpagenum].lower()
            booknum,pagenum = bookpagenum.split(":")
            wordlist = re.split("(?:(?:[^a-zA-Z]+')|(?:'[^a-zA-Z]+))|(?:[^a-zA-Z']+)", page)
            cswordlist = re.split("(?:(?:[^a-zA-Z]+')|(?:'[^a-zA-Z]+))|(?:[^a-zA-Z']+)", cspage)
            # Process the concordance file entry. If it is None, then use
            # the key as the search string
            if concordance[key] == None:
                if (key.lower() in page):
                    pages.append(bookpagenum)
            # Else, evaluate the right-side of the concordance entry as a Python expression
            # SECURITY NOTE: eval() of concordance expressions is by design;
            # the concordance file must come from a trusted author.
            elif eval(concordance[key]):
                pages.append(bookpagenum)
        # If the concordance entry generated some matches, add it to the index list
        if pages != []:
            index[key] = pages
    if verbose:
        showconcordancehits(index, concordance)
    # Reduce index entries "1:1,1:2,1:3" to 1:1-3"
    print("Creating index reference ranges.")
    index = indexreduce(index)
    # Sort the reduced index entries numerically
    for page in index:
        index[page] = sorted(index[page], key=indexsort)
    # With index list created, make the Word document
    print("Creating index document.")
    document = Document(templatefile)
    if templatefile != None:
        # When appending to a template, start the index on a fresh page.
        document.add_page_break()
        document.add_heading('Index', level=1)
    table = document.add_table(rows=0, cols=2, style="Light Shading")
    for entry in sorted(index.keys(), key=str.lower):
        row_cells = table.add_row().cells
        row_cells[0].text = entry
        row_cells[1].text = ", ".join(index[entry])
    document.save(indexoutputfile)
    print("Done.")
| |
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
import warnings
from . import AWSHelperFn, AWSObject, AWSProperty, Tags
from .compat import policytypes
from .validators import boolean, integer, positive_integer, s3_bucket_name
from .validators import s3_transfer_acceleration_status
# Canned ACL names accepted as the plain-string form of the Bucket
# "AccessControl" property (validated against Bucket.access_control_types).
Private = "Private"
PublicRead = "PublicRead"
PublicReadWrite = "PublicReadWrite"
AuthenticatedRead = "AuthenticatedRead"
BucketOwnerRead = "BucketOwnerRead"
BucketOwnerFullControl = "BucketOwnerFullControl"
LogDeliveryWrite = "LogDeliveryWrite"
class PublicAccessBlockConfiguration(AWSProperty):
    """Public-access block settings; referenced by both AccessPoint and
    Bucket below.  Each props entry maps a CloudFormation property name
    to a (validator/type, required-flag) tuple — troposphere convention."""
    props = {
        'BlockPublicAcls': (boolean, False),
        'BlockPublicPolicy': (boolean, False),
        'IgnorePublicAcls': (boolean, False),
        'RestrictPublicBuckets': (boolean, False),
    }
class VpcConfiguration(AWSProperty):
    """VPC settings for an AccessPoint."""
    props = {
        'VpcId': (basestring, False),
    }
class AccessPoint(AWSObject):
    """AWS::S3::AccessPoint resource."""
    resource_type = "AWS::S3::AccessPoint"

    props = {
        'Bucket': (basestring, True),
        'CreationDate': (basestring, False),
        'Name': (basestring, False),
        'NetworkOrigin': (basestring, False),
        'Policy': (dict, False),
        'PolicyStatus': (dict, False),
        'PublicAccessBlockConfiguration':
            (PublicAccessBlockConfiguration, False),
        'VpcConfiguration': (VpcConfiguration, False),
    }
class CorsRules(AWSProperty):
    """A single CORS rule, listed in CorsConfiguration."""
    props = {
        'AllowedHeaders': ([basestring], False),
        'AllowedMethods': ([basestring], True),
        'AllowedOrigins': ([basestring], True),
        'ExposedHeaders': ([basestring], False),
        'Id': (basestring, False),
        'MaxAge': (positive_integer, False),
    }
class CorsConfiguration(AWSProperty):
    """CORS rule list for a Bucket."""
    props = {
        'CorsRules': ([CorsRules], True),
    }
class VersioningConfiguration(AWSProperty):
    """Versioning status setting for a Bucket."""
    props = {
        'Status': (basestring, False),
    }
class AccelerateConfiguration(AWSProperty):
    """Transfer-acceleration setting for a Bucket; the status value is
    checked by the s3_transfer_acceleration_status validator."""
    props = {
        'AccelerationStatus': (s3_transfer_acceleration_status, True),
    }
class RedirectAllRequestsTo(AWSProperty):
    """Blanket redirect target for a WebsiteConfiguration."""
    props = {
        'HostName': (basestring, True),
        'Protocol': (basestring, False),
    }
class RedirectRule(AWSProperty):
    """Redirect action of a RoutingRule."""
    props = {
        'HostName': (basestring, False),
        'HttpRedirectCode': (basestring, False),
        'Protocol': (basestring, False),
        'ReplaceKeyPrefixWith': (basestring, False),
        'ReplaceKeyWith': (basestring, False),
    }
class RoutingRuleCondition(AWSProperty):
    """Condition under which a RoutingRule's redirect applies."""
    props = {
        'HttpErrorCodeReturnedEquals': (basestring, False),
        'KeyPrefixEquals': (basestring, False),
    }
class RoutingRule(AWSProperty):
    """Condition/redirect pair listed in WebsiteConfiguration.RoutingRules."""
    props = {
        'RedirectRule': (RedirectRule, True),
        'RoutingRuleCondition': (RoutingRuleCondition, False),
    }
class WebsiteConfiguration(AWSProperty):
    """Static-website hosting settings for a Bucket."""
    props = {
        'IndexDocument': (basestring, False),
        'ErrorDocument': (basestring, False),
        'RedirectAllRequestsTo': (RedirectAllRequestsTo, False),
        'RoutingRules': ([RoutingRule], False),
    }
class LifecycleRuleTransition(AWSProperty):
    """Storage-class transition used by LifecycleRule."""
    props = {
        'StorageClass': (basestring, True),
        'TransitionDate': (basestring, False),
        'TransitionInDays': (positive_integer, False),
    }
class AbortIncompleteMultipartUpload(AWSProperty):
    """Incomplete multipart-upload abort window used by LifecycleRule."""
    props = {
        'DaysAfterInitiation': (positive_integer, True),
    }
class NoncurrentVersionTransition(AWSProperty):
    """Noncurrent-version storage-class transition used by LifecycleRule."""
    props = {
        'StorageClass': (basestring, True),
        'TransitionInDays': (positive_integer, True),
    }
class TagFilter(AWSProperty):
    """Key/value tag filter shared by lifecycle, metrics and analytics
    configurations."""
    props = {
        'Key': (basestring, True),
        'Value': (basestring, True),
    }
class LifecycleRule(AWSProperty):
    """A single rule of a Bucket LifecycleConfiguration.

    validate() migrates the deprecated singular 'Transition' and
    'NoncurrentVersionTransition' properties to their list-valued
    replacements, and rejects conflicting combinations.
    """
    props = {
        'AbortIncompleteMultipartUpload':
            (AbortIncompleteMultipartUpload, False),
        'ExpirationDate': (basestring, False),
        'ExpirationInDays': (positive_integer, False),
        'Id': (basestring, False),
        'NoncurrentVersionExpirationInDays': (positive_integer, False),
        'NoncurrentVersionTransition': (NoncurrentVersionTransition, False),
        'NoncurrentVersionTransitions': ([NoncurrentVersionTransition], False),
        'Prefix': (basestring, False),
        'Status': (basestring, True),
        'TagFilters': ([TagFilter], False),
        'Transition': (LifecycleRuleTransition, False),
        'Transitions': ([LifecycleRuleTransition], False)
    }

    def validate(self):
        # Migrate the deprecated singular 'Transition' silently.
        if 'Transition' in self.properties:
            if 'Transitions' not in self.properties:
                # aws moved from a single transition to a list of them
                # and deprecated 'Transition', so let's just move it to
                # the new property and not annoy the user.
                self.properties['Transitions'] = [
                    self.properties.pop('Transition')]
            else:
                raise ValueError(
                    'Cannot specify both "Transition" and "Transitions" '
                    'properties on S3 Bucket Lifecycle Rule. Please use '
                    '"Transitions" since the former has been deprecated.')
        # Same migration for the noncurrent-version variant, but this one
        # emits a warning before translating.
        if 'NoncurrentVersionTransition' in self.properties:
            if 'NoncurrentVersionTransitions' not in self.properties:
                warnings.warn(
                    'NoncurrentVersionTransition has been deprecated in '
                    'favour of NoncurrentVersionTransitions.'
                )
                # Translate the old transition format to the new format
                self.properties['NoncurrentVersionTransitions'] = [
                    self.properties.pop('NoncurrentVersionTransition')]
            else:
                raise ValueError(
                    'Cannot specify both "NoncurrentVersionTransition" and '
                    '"NoncurrentVersionTransitions" properties on S3 Bucket '
                    'Lifecycle Rule. Please use '
                    '"NoncurrentVersionTransitions" since the former has been '
                    'deprecated.')
        # The two expiration forms are mutually exclusive.
        if 'ExpirationInDays' in self.properties and 'ExpirationDate' in \
                self.properties:
            raise ValueError(
                'Cannot specify both "ExpirationDate" and "ExpirationInDays"'
            )
class LifecycleConfiguration(AWSProperty):
    """List of LifecycleRules for a Bucket."""
    props = {
        'Rules': ([LifecycleRule], True),
    }
class LoggingConfiguration(AWSProperty):
    """Access-log delivery settings for a Bucket; the destination bucket
    name goes through the s3_bucket_name validator."""
    props = {
        'DestinationBucketName': (s3_bucket_name, False),
        'LogFilePrefix': (basestring, False),
    }
class Rules(AWSProperty):
    """Name/value filter rule listed inside S3Key."""
    props = {
        'Name': (basestring, True),
        'Value': (basestring, True)
    }
class S3Key(AWSProperty):
    """Key filter rules for a notification Filter."""
    props = {
        'Rules': ([Rules], True)
    }
class Filter(AWSProperty):
    """Object-key filter used by the notification configurations below."""
    props = {
        'S3Key': (S3Key, True)
    }
class LambdaConfigurations(AWSProperty):
    """Lambda-function event target for NotificationConfiguration."""
    props = {
        'Event': (basestring, True),
        'Filter': (Filter, False),
        'Function': (basestring, True),
    }
class QueueConfigurations(AWSProperty):
    """Queue event target for NotificationConfiguration."""
    props = {
        'Event': (basestring, True),
        'Filter': (Filter, False),
        'Queue': (basestring, True),
    }
class TopicConfigurations(AWSProperty):
    """Topic event target for NotificationConfiguration."""
    props = {
        'Event': (basestring, True),
        'Filter': (Filter, False),
        'Topic': (basestring, True),
    }
class MetricsConfiguration(AWSProperty):
    """Metrics configuration entry for a Bucket."""
    props = {
        'Id': (basestring, True),
        'Prefix': (basestring, False),
        'TagFilters': ([TagFilter], False),
    }
class NotificationConfiguration(AWSProperty):
    """Event-notification targets for a Bucket."""
    props = {
        'LambdaConfigurations': ([LambdaConfigurations], False),
        'QueueConfigurations': ([QueueConfigurations], False),
        'TopicConfigurations': ([TopicConfigurations], False),
    }
class AccessControlTranslation(AWSProperty):
    """Replica ownership translation for a replication destination."""
    props = {
        'Owner': (basestring, True),
    }
class EncryptionConfiguration(AWSProperty):
    """Replica KMS key for a replication destination."""
    props = {
        'ReplicaKmsKeyID': (basestring, True),
    }
class ReplicationConfigurationRulesDestination(AWSProperty):
    """Destination bucket settings for a replication rule."""
    props = {
        'AccessControlTranslation': (AccessControlTranslation, False),
        'Account': (basestring, False),
        'Bucket': (basestring, True),
        'EncryptionConfiguration': (EncryptionConfiguration, False),
        'StorageClass': (basestring, False),
    }
class SseKmsEncryptedObjects(AWSProperty):
    """Status flag for replicating SSE-KMS-encrypted objects."""
    props = {
        'Status': (basestring, True),
    }
class SourceSelectionCriteria(AWSProperty):
    """Source-object selection criteria for a replication rule."""
    props = {
        'SseKmsEncryptedObjects': (SseKmsEncryptedObjects, True),
    }
class ReplicationConfigurationRules(AWSProperty):
    """A single rule of a ReplicationConfiguration."""
    props = {
        'Destination': (ReplicationConfigurationRulesDestination, True),
        'Id': (basestring, False),
        'Prefix': (basestring, True),
        'SourceSelectionCriteria': (SourceSelectionCriteria, False),
        'Status': (basestring, True)
    }
class ReplicationConfiguration(AWSProperty):
    """Replication role and rule list for a Bucket."""
    props = {
        'Role': (basestring, True),
        'Rules': ([ReplicationConfigurationRules], True)
    }
class Destination(AWSProperty):
    """Output location shared by DataExport and InventoryConfiguration."""
    props = {
        'BucketAccountId': (basestring, False),
        'BucketArn': (basestring, True),
        'Format': (basestring, True),
        'Prefix': (basestring, False),
    }
class DataExport(AWSProperty):
    """Where and in which schema StorageClassAnalysis data is exported."""
    props = {
        'Destination': (Destination, True),
        'OutputSchemaVersion': (basestring, True),
    }
class StorageClassAnalysis(AWSProperty):
    """Storage-class analysis settings for AnalyticsConfiguration."""
    props = {
        'DataExport': (DataExport, False),
    }
class AnalyticsConfiguration(AWSProperty):
    """Analytics configuration entry for a Bucket."""
    props = {
        'Id': (basestring, True),
        'Prefix': (basestring, False),
        'StorageClassAnalysis': (StorageClassAnalysis, True),
        'TagFilters': ([TagFilter], False),
    }
class ServerSideEncryptionByDefault(AWSProperty):
    """Default server-side encryption algorithm and optional KMS key."""
    props = {
        'KMSMasterKeyID': (basestring, False),
        'SSEAlgorithm': (basestring, True),
    }
class ServerSideEncryptionRule(AWSProperty):
    """One rule of a BucketEncryption configuration."""
    props = {
        'ServerSideEncryptionByDefault':
            (ServerSideEncryptionByDefault, False),
    }
class BucketEncryption(AWSProperty):
    """Default encryption configuration for a Bucket."""
    props = {
        'ServerSideEncryptionConfiguration':
            ([ServerSideEncryptionRule], True),
    }
class InventoryConfiguration(AWSProperty):
    """Inventory report configuration entry for a Bucket."""
    props = {
        'Destination': (Destination, True),
        'Enabled': (boolean, True),
        'Id': (basestring, True),
        'IncludedObjectVersions': (basestring, True),
        'OptionalFields': ([basestring], True),
        'Prefix': (basestring, False),
        'ScheduleFrequency': (basestring, True),
    }
class DefaultRetention(AWSProperty):
    """Default object-lock retention used by ObjectLockRule."""
    props = {
        'Days': (integer, False),
        'Mode': (basestring, False),
        'Years': (integer, False),
    }
class ObjectLockRule(AWSProperty):
    """Object-lock rule wrapper for ObjectLockConfiguration."""
    props = {
        'DefaultRetention': (DefaultRetention, False),
    }
class ObjectLockConfiguration(AWSProperty):
    """Object-lock settings for a Bucket."""
    props = {
        'ObjectLockEnabled': (basestring, False),
        'Rule': (ObjectLockRule, False),
    }
class Bucket(AWSObject):
    """AWS::S3::Bucket resource.

    validate() restricts a plain-string 'AccessControl' value to the
    canned ACL names in access_control_types.
    """
    resource_type = "AWS::S3::Bucket"

    props = {
        'AccessControl': (basestring, False),
        'AccelerateConfiguration': (AccelerateConfiguration, False),
        'AnalyticsConfigurations': ([AnalyticsConfiguration], False),
        'BucketEncryption': (BucketEncryption, False),
        'BucketName': (s3_bucket_name, False),
        'CorsConfiguration': (CorsConfiguration, False),
        'InventoryConfigurations': ([InventoryConfiguration], False),
        'LifecycleConfiguration': (LifecycleConfiguration, False),
        'LoggingConfiguration': (LoggingConfiguration, False),
        'MetricsConfigurations': ([MetricsConfiguration], False),
        'NotificationConfiguration': (NotificationConfiguration, False),
        'ObjectLockConfiguration': (ObjectLockConfiguration, False),
        'ObjectLockEnabled': (boolean, False),
        'PublicAccessBlockConfiguration': (PublicAccessBlockConfiguration,
                                           False),
        'ReplicationConfiguration': (ReplicationConfiguration, False),
        'Tags': (Tags, False),
        'WebsiteConfiguration': (WebsiteConfiguration, False),
        'VersioningConfiguration': (VersioningConfiguration, False)
    }

    # Canned ACL names accepted for a literal 'AccessControl' string.
    access_control_types = [
        Private,
        PublicRead,
        PublicReadWrite,
        AuthenticatedRead,
        BucketOwnerRead,
        BucketOwnerFullControl,
        LogDeliveryWrite,
    ]

    def validate(self):
        # Only validate plain strings; AWSHelperFn values (CloudFormation
        # intrinsics such as Ref/Sub) are resolved at deploy time.
        access_control = self.properties.get('AccessControl')
        if access_control is not None and \
                not isinstance(access_control, AWSHelperFn):
            if access_control not in self.access_control_types:
                raise ValueError('AccessControl must be one of "%s"' % (
                    ', '.join(self.access_control_types)))
class BucketPolicy(AWSObject):
    """AWS::S3::BucketPolicy resource; the policy document is checked by
    the policytypes validator from .compat."""
    resource_type = "AWS::S3::BucketPolicy"

    props = {
        'Bucket': (basestring, True),
        'PolicyDocument': (policytypes, True),
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.