from kivy.app import App
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.properties import DictProperty
from kivy.logger import Logger
import netcheck
import toast
from jnius import autoclass, PythonJavaClass, java_method, cast
from android import activity
from functools import partial
context = autoclass('org.renpy.android.PythonActivity').mActivity
IabHelper = autoclass('org.kivy.billing.IabHelper')
IabResult = autoclass('org.kivy.billing.IabResult')
Inventory = autoclass('org.kivy.billing.Inventory')
Purchase = autoclass('org.kivy.billing.Purchase')
''' There is a big difference between this module and the twitter module.
All twitter callbacks return through one listener that implements a single
interface, so there are far fewer places where Java can call back into a
Python object that was already GC'd. Either sync the Python GC and the Java
GC, or follow the Twitter4J-style all-in-one listener architecture when
implementing Java objects. This is my advice for writing PyJNIus
integrations for now. Since every callback here is its own object and there
is no error callback, every callback has to be stored in _refs to keep it
alive after it goes out of the scope where it was created. '''
# constants
TIMEOUT = 120.0 # seconds to either succeed or fail
# Implement save if you purchase (and consume) things without using them.
# Alternatively, implement the inventory without consuming items until the
# user uses them.
#SAVE_PATH = './billing.json'
DEBUG = True
# Since our callbacks from Java don't keep their Python objects
# from getting GC'd, we have to keep refs ourselves.
_refs = []
# we remove refs when they are called to allow gc
def _allow_gc(fn):
def checked(self, *args, **kwargs):
fn(self, *args, **kwargs)
_refs.remove(self)
return checked
def _protect_callback(new_callback):
    '''Keep a reference to the new callback object so it is not GC'd.'''
    _refs.append(new_callback)
# Java callbacks that call back into the provided Python callbacks
class _OnIabSetupFinishedListener(PythonJavaClass):
__javainterfaces__ = ['org.kivy.billing.IabHelper$OnIabSetupFinishedListener']
__javacontext__ = 'app'
def __init__(self, callback):
self.callback = callback
super(_OnIabSetupFinishedListener, self).__init__()
@java_method('(Lorg/kivy/billing/IabResult;)V')
@_allow_gc
def onIabSetupFinished(self, result):
self.callback(result)
class _QueryInventoryFinishedListener(PythonJavaClass):
__javainterfaces__ = ['org.kivy.billing.IabHelper$QueryInventoryFinishedListener']
__javacontext__ = 'app'
def __init__(self, callback):
self.callback = callback
super(_QueryInventoryFinishedListener, self).__init__()
@java_method('(Lorg/kivy/billing/IabResult;Lorg/kivy/billing/Inventory;)V')
@_allow_gc
def onQueryInventoryFinished(self, result, inventory):
self.callback(result, inventory)
class _OnPurchaseFinishedListener(PythonJavaClass):
''' This one seems to blow up inside the IabHelper OnActivityResult'''
__javainterfaces__ = ['org.kivy.billing.IabHelper$OnIabPurchaseFinishedListener']
__javacontext__ = 'app'
def __init__(self, callback):
self.callback = callback
super(_OnPurchaseFinishedListener, self).__init__()
@java_method('(Lorg/kivy/billing/IabResult;Lorg/kivy/billing/Purchase;)V')
@_allow_gc
def onIabPurchaseFinished(self, result, purchase):
self.callback(result, purchase)
class _OnConsumeFinishedListener(PythonJavaClass):
__javainterfaces__ = ['org.kivy.billing.IabHelper$OnConsumeFinishedListener']
__javacontext__ = 'app'
def __init__(self, callback):
self.callback = callback
super(_OnConsumeFinishedListener, self).__init__()
@java_method('(Lorg/kivy/billing/Purchase;Lorg/kivy/billing/IabResult;)V')
@_allow_gc
def onConsumeFinished(self, purchase, result):
self.callback(purchase, result)
class AndroidBilling(EventDispatcher):
consumed = DictProperty()
def __init__(self,
app_public_key,
skus,
auto_check_inventory=10,
toasty=True,
**kwargs):
        super(AndroidBilling, self).__init__(**kwargs)
        self.app_public_key = app_public_key
self.skus = skus
self.toasty = toasty
# This shouldn't collide, but I will pay you $2 if it does
# for the first occurrence ever. After that, you should fix
# the code to something more unique =)
self.r_code = abs(hash('org.kivy.billing'))
        # internal state initialization
self.purchase_requested = None
self.syncing = False
self.setup_complete = False
self.error_msg = 'there was an error'
if auto_check_inventory >= 0:
Clock.schedule_once(self._setup, auto_check_inventory)
def purchase(self, sku):
# Really need to move these debug settings to a global
# settings file. Oh and they say that global settings files are bad.
# Let's get to the bottom of it.
if DEBUG:
self.debug_sku = sku
sku = 'android.test.purchased'
if sku not in self.skus:
self.skus.append(sku)
Logger.warning('IAB is running in DEBUG mode and won\'t buy anything!')
if self.purchase_requested is not None:
self._toast('purchase already in progress')
return False
elif self.syncing:
self.purchase_requested = sku
Clock.schedule_once(self._fail, TIMEOUT)
self._toast('will start purchase shortly')
return True
else:
Logger.info('Purchasing ' + sku)
if not self.setup_complete:
self._toast('will start purchase shortly')
else:
self._toast('purchase started')
self.purchase_requested = sku
Clock.schedule_once(self._fail, TIMEOUT)
self._process_purchase()
return True
def retry_prompt(self, callback):
''' Monkey patch here to implement a real prompt'''
callback(False)
def set_retry_prompt(self, fn):
''' Or use this handy public setter if you really like Java.'''
self.retry_prompt = fn
#################
# Private Methods
#################
# Bound in _setup_callback to activity.on_activity_result
def _on_activity_result(self, requestCode, responseCode, Intent):
if DEBUG:
Logger.info('Request Code: ' + str(requestCode))
Logger.info('Expected Code: ' + str(self.r_code))
if requestCode == self.r_code:
Logger.info('Passing result to IAB helper')
if self.helper.handleActivityResult(requestCode, responseCode, Intent):
Logger.info('Helper completed the request.')
self._get_inventory()
return True
def _setup(self, *args):
Clock.unschedule(self._setup)
if not self.syncing and not \
(hasattr(self, 'helper') and self.helper.mSetupDone) and \
netcheck.connection_available():
self.syncing = True
Logger.info('Attempting startup')
k = self.app_public_key
c = cast('android.app.Activity', context)
self.helper = helper = IabHelper(c, k)
# prints a lot of useful messages that might
# not make it back to python space
helper.enableDebugLogging(DEBUG)
s = _OnIabSetupFinishedListener(self._setup_callback)
_protect_callback(s)
self.helper.startSetup(s)
def _setup_callback(self, result):
if result.isSuccess() and self.helper.mSetupDone:
Logger.info('Setup complete. Scheduling inventory check')
self.setup_complete = True
a = App.get_running_app()
a.bind(on_stop=self._dispose)
activity.bind(on_activity_result=self._on_activity_result)
self._get_inventory()
else:
Logger.info('There was a problem with setup')
self.error_msg = 'could not connect to play store'
self._fail()
def _get_inventory(self, *args):
Logger.info('Getting Inventory')
q = _QueryInventoryFinishedListener(self._got_inventory_callback)
_protect_callback(q)
self.helper.queryInventoryAsync(q)
def _got_inventory_callback(self, result, inventory):
if result.isSuccess():
Logger.info('Got Inventory')
self.inventory = inventory
# Inventory has some map methods that might be slightly more
# straightforward but this is fast already
purchases = list()
for s in self.skus:
Logger.info('Checking for ' + s + ' in the inventory')
if inventory.hasPurchase(s):
purchases.append(inventory.getPurchase(s))
Logger.info(s + ' is ready for consumption')
self.purchases = purchases
if len(self.purchases):
self.syncing = True
else:
self.syncing = False
self.inventory_checked = True
self._process_inventory()
else:
self.error_msg = 'Could not check inventory'
self._fail()
def _process_purchase(self):
Logger.info('in purchase')
if not netcheck.connection_available():
            Logger.info('no net available')
netcheck.ask_connect(self._connection_callback)
elif not self.setup_complete:
Logger.info('setup not complete')
self._setup()
else:
Logger.info('doing the purchase')
Logger.info(str(self.purchase_requested))
if self.purchase_requested is not None:
sku = self.purchasing = self.purchase_requested
else:
self.purchasing = self.purchase_requested = None
Logger.info('returning for no good reason')
return
if sku not in self.skus:
raise AttributeError('The sku is not in the skus you initialized with')
Logger.info('Starting purchase workflow for ' + sku)
c = cast('android.app.Activity', context)
r = self.r_code
p = _OnPurchaseFinishedListener(self._purchase_finished)
_protect_callback(p)
self.helper.launchPurchaseFlow(c, sku, r, p)
    def _purchase_finished(self, result, purchase):
        # purchase may be None when the flow fails, so only touch it on success
        Logger.info('Purchase finished, success: ' + str(result.isSuccess()))
        if result.isSuccess():
            Logger.info('Purchased ' + purchase.getSku())
            self._consume(purchase)
def _process_inventory(self):
if len(self.purchases):
self._consume(self.purchases[0])
else:
# if we're done with inventory, we go back to purchasing
self._process_purchase()
def _consume(self, purchase):
Logger.info('Consuming ' + purchase.getSku())
c = _OnConsumeFinishedListener(self._consume_finished)
_protect_callback(c)
self.helper.consumeAsync(purchase, c)
    def _consume_finished(self, purchase, result):
        try:
            s = str(purchase.getSku())
        except Exception:
            s = 'unknown sku'
        if result.isSuccess():
            if DEBUG:
                s = self.debug_sku
                # Since we are faking the sku passed in for debug mode,
                # there's no way to know if the consumption happened -really-
                # for purchase.getSku() or for debug_sku. The information
                # is gone. It's in the air. You can never capture it again.
                self.purchase_requested = None
                Clock.unschedule(self._fail)
            self.consumed[s] = self.consumed.get(s, 0) + 1
            Logger.info(s + ' was successfully purchased. Time to get rich!')
            # The purchase may have come straight from the purchase flow
            # rather than from the cached inventory list.
            try:
                self.purchases.remove(purchase)
            except (AttributeError, ValueError):
                pass
            if s == self.purchase_requested:
                self.purchase_requested = None
                Clock.unschedule(self._fail)
            self._process_inventory()
        else:
            Logger.info('There was a problem consuming ' + s)
            self._fail()
######################################
# Managing timeouts and retry workflow
######################################
def _fail(self, *args):
Clock.unschedule(self._fail)
# since the setup and everything in between can fail,
# we don't want to prompt the user for background stuff
if self.purchase_requested is not None:
self._toast(self.error_msg)
self._ask_retry()
def _retry_callback(self, retry):
if retry:
self._process_purchase()
        else:
            self._processing = False
            self.purchase_requested = None
            self._tries = 0
def _ask_retry(self):
self.retry_prompt(self._retry_callback)
def _connection_callback(self, connected):
Logger.info('in billing connection callback: ' + str(connected))
if connected:
self._process_purchase()
else:
self._fail()
def _dispose(self, *args):
''' Let all callbacks be GC'd and destroy helper'''
self.helper.dispose()
global _refs
_refs = []
def _toast(self, text, length_long=False):
if self.toasty:
toast.toast(text, length_long)
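# A minimal usage sketch, not part of the original module: it assumes an
# Android build of this package, a hypothetical Play Store public key and
# sku name ('MY_PLAY_STORE_PUBLIC_KEY', 'coins_100' are placeholders), and
# simply binds to the `consumed` DictProperty to react to finished purchases.
def _example_wire_billing():
    billing = AndroidBilling('MY_PLAY_STORE_PUBLIC_KEY', skus=['coins_100'])
    # DictProperty callbacks receive (instance, value).
    billing.bind(consumed=lambda inst, value:
                 Logger.info('Consumed so far: ' + str(value)))
    # Kicks off the purchase workflow; returns False if a purchase is
    # already in progress.
    billing.purchase('coins_100')
    return billing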
# kcell.py
# python3
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import model_selection, svm, metrics, cluster, tree
import kkeras
import numpy as np
import seaborn as sns
def GET_clsf2_by_clst( nb_classes):
def clsf2_by_clst( Xpart_cf, Xpart_ct):
"""
        Clustering is performed first; classification is then performed using the cluster indices as labels.
"""
cl_model = cluster.KMeans(n_clusters=nb_classes)
cl_model.fit(Xpart_ct)
yint = cl_model.predict( Xpart_ct)
X_train, X_test, y_train, y_test = \
model_selection.train_test_split( Xpart_cf, yint, test_size = 0.2)
model = tree.DecisionTreeClassifier()
model.fit( X_train, y_train)
dt_score = model.score( X_test, y_test)
print( "DT-C:", dt_score)
model = svm.SVC( kernel = 'linear')
model.fit( X_train, y_train)
sv_score = model.score( X_test, y_test)
print( "SVC:", sv_score)
model = kkeras.MLPC( [Xpart_cf.shape[1], 30, 10, nb_classes])
model.fit( X_train, y_train, X_test, y_test, nb_classes)
mlp_score = model.score( X_test, y_test)
print( "MLP:", mlp_score)
return dt_score, sv_score, mlp_score
return clsf2_by_clst
def GET_clsf2_by_yint( nb_classes):
def clsf2_by_yint( X1part, yint):
"""
        Classification is performed using the provided labels, yint.
"""
X_train, X_test, y_train, y_test = \
model_selection.train_test_split( X1part, yint, test_size = 0.2)
model = tree.DecisionTreeClassifier()
model.fit( X_train, y_train)
dt_score = model.score( X_test, y_test)
print( "DT:", dt_score)
model = svm.SVC( kernel = 'linear')
model.fit( X_train, y_train)
sv_score = model.score( X_test, y_test)
print( "SVC:", sv_score)
model = kkeras.MLPC( [X1part.shape[1], 30, 10, nb_classes])
model.fit( X_train, y_train, X_test, y_test, nb_classes)
mlp_score = model.score( X_test, y_test)
print( "MLP:", mlp_score)
return dt_score, sv_score, mlp_score
return clsf2_by_yint
def pd_clsf2_by_clst( ix, Xpart_ct, Xpart_cf, nb_classes):
VI = {1:"Velocity", 2:"Intensity", 12:"Combined"}
    print( "Type", ix, "- Clustering:", ix[0], "Classification:", ix[1])
s_l = GET_clsf2_by_clst(nb_classes)(Xpart_cf, Xpart_ct)
df_i = pd.DataFrame()
    df_i["Type"] = ["KMeans: " + str( ix)] * 3
df_i["Clustering"] = [ VI[ix[0]]] * 3
df_i["Classification"] = [ VI[ix[1]]] * 3
df_i["Clustering method"] = [ "KMeans"] * 3
df_i["Classification method"] = [ "DT", "SVC", "DNN"]
df_i["Pc"] = s_l
return df_i
def pd_clsf2_by_yint( ix, yint, Xpart_cf, nb_classes):
VI = {1:"Velocity", 2:"Intensity", 12:"Combined"}
    print( "Type", ix, "- Clustering:", ix[0], "Classification:", ix[1])
s_l = GET_clsf2_by_yint(nb_classes)(Xpart_cf, yint)
df_i = pd.DataFrame()
df_i["Type"] = ["Science: "+str( ix)] * 3
df_i["Clustering"] = [ VI[ix[0]]] * 3
df_i["Classification"] = [ VI[ix[1]]] * 3
    df_i["Clustering method"] = [ "Science method"] * 3
df_i["Classification method"] = [ "DT", "SVC", "DNN"]
df_i["Pc"] = s_l
return df_i
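# A usage sketch for the two helpers above (assumptions: X_vel and X_int are
# aligned 2-D feature arrays, the first ix entry names the clustered signal
# and the second the classified one, per the VI mapping). It stacks the
# per-type score frames into one table, e.g. for later plotting.
def _example_collect_scores(X_vel, X_int, nb_classes=2):
    df_l = []
    # (1, 2): cluster on velocity features, classify on intensity features.
    df_l.append(pd_clsf2_by_clst((1, 2), X_vel, X_int, nb_classes))
    # (2, 1): cluster on intensity features, classify on velocity features.
    df_l.append(pd_clsf2_by_clst((2, 1), X_int, X_vel, nb_classes))
    return pd.concat(df_l, ignore_index=True)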
class _Subclustering_r0():
def __init__(self, X1part, X2part, y, cell,
X1_ylim = [-1.5, 1.5], X2_ylim = [-2, 4],
                 cmethod = "KMeans",
cparam_d = {"n_clusters": 2}):
self.X1part = X1part
self.X2part = X2part
self.y = y
self.cell = cell
self.X1_ylim = X1_ylim
self.X2_ylim = X2_ylim
self.cmethod = cmethod
self.cparam_d = cparam_d
def show_both( self, c):
X1part = self.X1part
X2part = self.X2part
y = self.y
cell = self.cell
X1_ylim = self.X1_ylim
X2_ylim = self.X2_ylim
cmethod = self.cmethod
cparam_d = self.cparam_d
#print("Cluster:", c)
X3_int = X2part[ np.where(y==c)[0],:]
X3_vel = X1part[ np.where(y==c)[0],:]
#km = cluster.KMeans(2)
#km = getattr(cluster, cmethod)(2)
km = getattr(cluster, cmethod)(**cparam_d)
y3 = km.fit_predict( X3_int)
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
#print("Intensity")
n_0 = X3_int[ np.where( y3==0)[0]].shape[0]
n_1 = X3_int[ np.where( y3==1)[0]].shape[0]
sns.tsplot( X3_int[ np.where( y3==0)[0],:], color="blue")
sns.tsplot( X3_int[ np.where( y3==1)[0],:], color="green")
plt.ylim(X2_ylim)
plt.title("Cluster{0}:X2 {1}:{2}".format(c, n_0, n_1))
#plt.show()
plt.subplot(1,2,2)
#print("Velocity")
sns.tsplot( X3_vel[ np.where( y3==0)[0],:], color="blue")
sns.tsplot( X3_vel[ np.where( y3==1)[0],:], color="green")
plt.ylim(X1_ylim)
plt.title("Cluster{0}:X1 {1}:{2}".format(c, n_0, n_1))
plt.show()
cell3 = cell[ np.where(y==c)[0]]
plt.subplot(1,2,1)
plt.stem( cell3[np.where( y3==0)[0]], linefmt='b-', markerfmt='bo')
plt.title("Cell Index - Subcluster 1")
plt.subplot(1,2,2)
plt.stem( cell3[np.where( y3==1)[0]], linefmt='g-', markerfmt='go')
plt.title("Cell Index - Subcluster 2")
plt.show()
return y3
def show_both_cell( self, c, cell_id):
X1part = self.X1part
X2part = self.X2part
y = self.y
cell = self.cell
X1_ylim = self.X1_ylim
X2_ylim = self.X2_ylim
        cmethod = self.cmethod
        cparam_d = self.cparam_d
X3_int = X2part[ np.where(y==c)[0],:]
X3_vel = X1part[ np.where(y==c)[0],:]
cell3 = cell[ np.where(y==c)[0]]
#km = cluster.KMeans(2)
#km = getattr(cluster, cmethod)(2)
km = getattr(cluster, cmethod)(**cparam_d)
y3 = km.fit_predict( X3_int)
# redefine based on cell_id
X3_int = X3_int[ np.where(cell3==cell_id)[0],:]
X3_vel = X3_vel[ np.where(cell3==cell_id)[0],:]
y3 = y3[np.where(cell3==cell_id)[0]]
n_0 = X3_int[ np.where( y3==0)[0]].shape[0]
n_1 = X3_int[ np.where( y3==1)[0]].shape[0]
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
if n_0 > 0: sns.tsplot( X3_int[ np.where( y3==0)[0],:], color="blue")
if n_1 > 0: sns.tsplot( X3_int[ np.where( y3==1)[0],:], color="green")
plt.ylim(X2_ylim)
plt.title("Cluster{0}:Intensity {1}:{2}".format(c, n_0, n_1))
#plt.show()
plt.subplot(1,2,2)
#print("Velocity")
if n_0 > 0: sns.tsplot( X3_vel[ np.where( y3==0)[0],:], color="blue")
if n_1 > 0: sns.tsplot( X3_vel[ np.where( y3==1)[0],:], color="green")
plt.ylim(X1_ylim)
plt.title("Cluster{0}:Velocity {1}:{2}".format(c, n_0, n_1))
plt.show()
class Subclustering():
def __init__(self, X1part, X2part, y, cell,
X1_ylim = [-1.5, 1.5], X2_ylim = [-2, 4],
                 cmethod = "KMeans",
cparam_d = {"n_clusters": 2}):
self.X1part = X1part
self.X2part = X2part
self.y = y
self.cell = cell
self.X1_ylim = X1_ylim
self.X2_ylim = X2_ylim
self.cmethod = cmethod
self.cparam_d = cparam_d
def show_both( self, c):
X1part = self.X1part
X2part = self.X2part
y = self.y
cell = self.cell
X1_ylim = self.X1_ylim
X2_ylim = self.X2_ylim
cmethod = self.cmethod
cparam_d = self.cparam_d
#print("Cluster:", c)
X3_int = X2part[ np.where(y==c)[0],:]
X3_vel = X1part[ np.where(y==c)[0],:]
#km = cluster.KMeans(2)
#km = getattr(cluster, cmethod)(2)
km = getattr(cluster, cmethod)(**cparam_d)
y3 = km.fit_predict( X3_int)
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
#print("Intensity")
n_0 = X3_int[ np.where( y3==0)[0]].shape[0]
n_1 = X3_int[ np.where( y3==1)[0]].shape[0]
sns.tsplot( X3_int[ np.where( y3==0)[0],:], color="blue")
sns.tsplot( X3_int[ np.where( y3==1)[0],:], color="green")
plt.ylim(X2_ylim)
plt.title("Cluster{0}:X2 {1}:{2}".format(c, n_0, n_1))
#plt.show()
plt.subplot(1,2,2)
#print("Velocity")
sns.tsplot( X3_vel[ np.where( y3==0)[0],:], color="blue")
sns.tsplot( X3_vel[ np.where( y3==1)[0],:], color="green")
plt.ylim(X1_ylim)
plt.title("Cluster{0}:X1 {1}:{2}".format(c, n_0, n_1))
plt.show()
cell3 = cell[ np.where(y==c)[0]]
plt.subplot(1,2,1)
plt.stem( cell3[np.where( y3==0)[0]], linefmt='b-', markerfmt='bo')
plt.title("Cell Index - Subcluster 1")
plt.subplot(1,2,2)
plt.stem( cell3[np.where( y3==1)[0]], linefmt='g-', markerfmt='go')
plt.title("Cell Index - Subcluster 2")
plt.show()
return y3
def show_both_cell( self, c, cell_id):
X1part = self.X1part
X2part = self.X2part
y = self.y
cell = self.cell
X1_ylim = self.X1_ylim
X2_ylim = self.X2_ylim
        cmethod = self.cmethod
        cparam_d = self.cparam_d
X3_int = X2part[ np.where(y==c)[0],:]
X3_vel = X1part[ np.where(y==c)[0],:]
cell3 = cell[ np.where(y==c)[0]]
km = getattr(cluster, cmethod)(**cparam_d)
y3 = km.fit_predict( X3_int)
# redefine based on cell_id
X3_int = X3_int[ np.where(cell3==cell_id)[0],:]
X3_vel = X3_vel[ np.where(cell3==cell_id)[0],:]
y3 = y3[np.where(cell3==cell_id)[0]]
n_0 = X3_int[ np.where( y3==0)[0]].shape[0]
n_1 = X3_int[ np.where( y3==1)[0]].shape[0]
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
if n_0 > 0: sns.tsplot( X3_int[ np.where( y3==0)[0],:], color="blue")
if n_1 > 0: sns.tsplot( X3_int[ np.where( y3==1)[0],:], color="green")
plt.ylim(X2_ylim)
plt.title("Cluster{0}:Intensity {1}:{2}".format(c, n_0, n_1))
#plt.show()
plt.subplot(1,2,2)
#print("Velocity")
if n_0 > 0: sns.tsplot( X3_vel[ np.where( y3==0)[0],:], color="blue")
if n_1 > 0: sns.tsplot( X3_vel[ np.where( y3==1)[0],:], color="green")
plt.ylim(X1_ylim)
plt.title("Cluster{0}:Velocity {1}:{2}".format(c, n_0, n_1))
plt.show()
def show_both_kmeans( self, c):
X1part = self.X1part
X2part = self.X2part
y = self.y
cell = self.cell
X1_ylim = self.X1_ylim
X2_ylim = self.X2_ylim
cmethod = self.cmethod
cparam_d = self.cparam_d
nc = cparam_d["n_clusters"]
#print("Cluster:", c)
X3_int = X2part[ y==c,:]
X3_vel = X1part[ y==c,:]
#km = cluster.KMeans(2)
#km = getattr(cluster, cmethod)(2)
assert cmethod == "KMeans"
km = cluster.KMeans( nc)
y3 = km.fit_predict( X3_int)
plt.figure(figsize=(9,4))
plt.subplot(1,2,1)
#print("Intensity")
n_l = [ X3_int[ y3==i].shape[0] for i in range(nc)]
for i in range(nc):
sns.tsplot( X3_int[ y3==i,:], color=plt.cm.rainbow(i/nc))
plt.ylim(X2_ylim)
plt.title("Cluster{0}:X2 {1}".format(c, n_l))
#plt.show()
plt.subplot(1,2,2)
#print("Velocity")
for i in range(nc):
sns.tsplot( X3_vel[ y3==i,:], color=plt.cm.rainbow(i/nc))
plt.ylim(X1_ylim)
plt.title("Cluster{0}:X1 {1}".format(c, n_l))
plt.show()
return y3
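# A usage sketch for the Subclustering class above (assumptions: X_vel and
# X_int are aligned 2-D arrays, y holds the top-level cluster labels and
# cell the per-sample cell indices; cluster 0 and cell id 3 are arbitrary
# example values).
def _example_subclustering(X_vel, X_int, y, cell):
    sc = Subclustering(X_vel, X_int, y, cell,
                       cmethod="KMeans", cparam_d={"n_clusters": 2})
    # Show the two sub-clusters of top-level cluster 0, then restrict the
    # same view to a single cell.
    y_sub = sc.show_both(0)
    sc.show_both_cell(0, 3)
    return y_sub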
"""
tests.test_component_group
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the group components.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
import logging
import homeassistant as ha
from homeassistant.const import STATE_ON, STATE_OFF, STATE_HOME, STATE_UNKNOWN
import homeassistant.components.group as group
def setUpModule(): # pylint: disable=invalid-name
""" Setup to ignore group errors. """
logging.disable(logging.CRITICAL)
class TestComponentsGroup(unittest.TestCase):
""" Tests homeassistant.components.group module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = ha.HomeAssistant()
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.group_entity_id = test_group.entity_id
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_group_with_mixed_groupable_states(self):
""" Try to setup a group with mixed groupable states """
self.hass.states.set('device_tracker.Paulus', STATE_HOME)
group.setup_group(
self.hass, 'person_and_light',
['light.Bowl', 'device_tracker.Paulus'])
self.assertEqual(
STATE_ON,
self.hass.states.get(
group.ENTITY_ID_FORMAT.format('person_and_light')).state)
def test_setup_group_with_a_non_existing_state(self):
""" Try to setup a group with a non existing state """
grp = group.setup_group(
self.hass, 'light_and_nothing',
['light.Bowl', 'non.existing'])
self.assertEqual(STATE_ON, grp.state.state)
    def test_setup_group_with_non_groupable_states(self):
        """ Try to setup a group with non groupable states. """
self.hass.states.set('cast.living_room', "Plex")
self.hass.states.set('cast.bedroom', "Netflix")
grp = group.setup_group(
self.hass, 'chromecasts',
['cast.living_room', 'cast.bedroom'])
self.assertEqual(STATE_UNKNOWN, grp.state.state)
def test_setup_empty_group(self):
""" Try to setup an empty group. """
grp = group.setup_group(self.hass, 'nothing', [])
self.assertEqual(STATE_UNKNOWN, grp.state.state)
def test_monitor_group(self):
""" Test if the group keeps track of states. """
# Test if group setup in our init mode is ok
self.assertIn(self.group_entity_id, self.hass.states.entity_ids())
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_ON, group_state.state)
self.assertTrue(group_state.attributes[group.ATTR_AUTO])
def test_group_turns_off_if_all_off(self):
"""
Test if the group turns off if the last device that was on turns off.
"""
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_group_turns_on_if_all_are_off_and_one_turns_on(self):
"""
Test if group turns on if all devices were turned off and one turns on.
"""
# Make sure all are off.
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
# Turn one on
self.hass.states.set('light.Ceiling', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(self.group_entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_is_on(self):
""" Test is_on method. """
self.assertTrue(group.is_on(self.hass, self.group_entity_id))
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
self.assertFalse(group.is_on(self.hass, self.group_entity_id))
# Try on non existing state
self.assertFalse(group.is_on(self.hass, 'non.existing'))
def test_expand_entity_ids(self):
""" Test expand_entity_ids method. """
self.assertEqual(sorted(['light.ceiling', 'light.bowl']),
sorted(group.expand_entity_ids(
self.hass, [self.group_entity_id])))
def test_expand_entity_ids_does_not_return_duplicates(self):
""" Test that expand_entity_ids does not return duplicates. """
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, [self.group_entity_id, 'light.Ceiling'])))
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, ['light.bowl', self.group_entity_id])))
def test_expand_entity_ids_ignores_non_strings(self):
""" Test that non string elements in lists are ignored. """
self.assertEqual([], group.expand_entity_ids(self.hass, [5, True]))
def test_get_entity_ids(self):
""" Test get_entity_ids method. """
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.get_entity_ids(self.hass, self.group_entity_id)))
def test_get_entity_ids_with_domain_filter(self):
""" Test if get_entity_ids works with a domain_filter. """
self.hass.states.set('switch.AC', STATE_OFF)
mixed_group = group.Group(
self.hass, 'mixed_group', ['light.Bowl', 'switch.AC'], False)
self.assertEqual(
['switch.ac'],
group.get_entity_ids(
self.hass, mixed_group.entity_id, domain_filter="switch"))
def test_get_entity_ids_with_non_existing_group_name(self):
""" Tests get_entity_ids with a non existing group. """
self.assertEqual([], group.get_entity_ids(self.hass, 'non_existing'))
def test_get_entity_ids_with_non_group_state(self):
""" Tests get_entity_ids with a non group state. """
self.assertEqual([], group.get_entity_ids(self.hass, 'switch.AC'))
def test_group_being_init_before_first_tracked_state_is_set_to_on(self):
""" Test if the group turns on if no states existed and now a state it is
tracking is being added as ON. """
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_group_being_init_before_first_tracked_state_is_set_to_off(self):
""" Test if the group turns off if no states existed and now a state it is
tracking is being added as OFF. """
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_OFF)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_setup(self):
""" Test setup method. """
self.assertTrue(
group.setup(
self.hass,
{
group.DOMAIN: {
'second_group': ','.join((self.group_entity_id,
'light.Bowl'))
}
}))
group_state = self.hass.states.get(
group.ENTITY_ID_FORMAT.format('second_group'))
self.assertEqual(STATE_ON, group_state.state)
self.assertFalse(group_state.attributes[group.ATTR_AUTO])
def test_groups_get_unique_names(self):
""" Two groups with same name should both have a unique entity id. """
grp1 = group.Group(self.hass, 'Je suis Charlie')
grp2 = group.Group(self.hass, 'Je suis Charlie')
self.assertNotEqual(grp1.entity_id, grp2.entity_id)
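# A standalone sketch (not a test) of the same group API exercised above.
# Assumptions: a running HomeAssistant instance is passed in, and
# 'light.desk' / 'light.shelf' are example entity ids.
def _example_group_usage(hass):
    hass.states.set('light.desk', STATE_ON)
    hass.states.set('light.shelf', STATE_OFF)
    desk_group = group.Group(hass, 'desk lights',
                             ['light.desk', 'light.shelf'])
    hass.pool.block_till_done()
    # Returns whether any member is on, plus the tracked entity ids.
    return (group.is_on(hass, desk_group.entity_id),
            group.get_entity_ids(hass, desk_group.entity_id))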
from __future__ import unicode_literals
from calendar import timegm
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404, HttpResponse
from django.template import TemplateDoesNotExist, loader
from django.utils import feedgenerator, six
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import escape
from django.utils.http import http_date
from django.utils.timezone import get_default_timezone, is_naive, make_aware
def add_domain(domain, url, secure=False):
protocol = 'https' if secure else 'http'
if url.startswith('//'):
# Support network-path reference (see #16753) - RSS requires a protocol
url = '%s:%s' % (protocol, url)
elif not url.startswith(('http://', 'https://', 'mailto:')):
url = iri_to_uri('%s://%s%s' % (protocol, domain, url))
return url
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(content_type=feedgen.content_type)
if hasattr(self, 'item_pubdate') or hasattr(self, 'item_updateddate'):
            # if item_pubdate or item_updateddate is defined for the feed, set
            # the header so that ConditionalGetMiddleware is able to send a
            # 304 NOT MODIFIED response
response['Last-Modified'] = http_date(
timegm(feedgen.latest_post_date().utctimetuple()))
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_text(item))
def item_description(self, item):
return force_text(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
'Give your %s class a get_absolute_url() method, or define an '
'item_link() method in your Feed class.' % item.__class__.__name__
)
def item_enclosures(self, item):
enc_url = self._get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url=force_text(enc_url),
length=force_text(self._get_dynamic_attr('item_enclosure_length', item)),
mime_type=force_text(self._get_dynamic_attr('item_enclosure_mime_type', item)),
)
return [enc]
return []
def _get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_context_data(self, **kwargs):
"""
        Returns a dictionary to use as extra context if either
        ``self.description_template`` or ``self.title_template`` is used.
Default implementation preserves the old behavior
of using {'obj': item, 'site': current_site} as the context.
"""
return {'obj': kwargs.get('item'), 'site': kwargs.get('site')}
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
current_site = get_current_site(request)
link = self._get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link, request.is_secure())
feed = self.feed_type(
title=self._get_dynamic_attr('title', obj),
subtitle=self._get_dynamic_attr('subtitle', obj),
link=link,
description=self._get_dynamic_attr('description', obj),
language=settings.LANGUAGE_CODE,
feed_url=add_domain(
current_site.domain,
self._get_dynamic_attr('feed_url', obj) or request.path,
request.is_secure(),
),
author_name=self._get_dynamic_attr('author_name', obj),
author_link=self._get_dynamic_attr('author_link', obj),
author_email=self._get_dynamic_attr('author_email', obj),
categories=self._get_dynamic_attr('categories', obj),
feed_copyright=self._get_dynamic_attr('feed_copyright', obj),
feed_guid=self._get_dynamic_attr('feed_guid', obj),
ttl=self._get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self._get_dynamic_attr('items', obj):
context = self.get_context_data(item=item, site=current_site,
obj=obj, request=request)
if title_tmp is not None:
title = title_tmp.render(context, request)
else:
title = self._get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(context, request)
else:
description = self._get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self._get_dynamic_attr('item_link', item),
request.is_secure(),
)
enclosures = self._get_dynamic_attr('item_enclosures', item)
author_name = self._get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self._get_dynamic_attr('item_author_email', item)
author_link = self._get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
tz = get_default_timezone()
pubdate = self._get_dynamic_attr('item_pubdate', item)
if pubdate and is_naive(pubdate):
pubdate = make_aware(pubdate, tz)
updateddate = self._get_dynamic_attr('item_updateddate', item)
if updateddate and is_naive(updateddate):
updateddate = make_aware(updateddate, tz)
feed.add_item(
title=title,
link=link,
description=description,
unique_id=self._get_dynamic_attr('item_guid', item, link),
unique_id_is_permalink=self._get_dynamic_attr(
'item_guid_is_permalink', item),
enclosures=enclosures,
pubdate=pubdate,
updateddate=updateddate,
author_name=author_name,
author_email=author_email,
author_link=author_link,
categories=self._get_dynamic_attr('item_categories', item),
item_copyright=self._get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
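# A minimal sketch of a concrete Feed subclass using the hooks defined
# above (items, item_title, item_description). The `myapp.models.Entry`
# model and its `title`, `body` and `pub_date` fields are hypothetical
# placeholders, not part of this module.
class _ExampleLatestEntriesFeed(Feed):
    title = "Latest entries"
    link = "/entries/"
    description = "The most recent entries."
    def items(self):
        # Placeholder model import; any queryset or iterable works here.
        from myapp.models import Entry
        return Entry.objects.order_by('-pub_date')[:5]
    def item_title(self, item):
        return item.title
    def item_description(self, item):
        return item.body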
import json
import pickle
from binascii import b2a_hex
from django.contrib.gis.gdal import (
CoordTransform, GDALException, OGRGeometry, OGRGeomType, SpatialReference,
)
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from ..test_data import TestDataMixin
class OGRGeomTest(SimpleTestCase, TestDataMixin):
"This tests the OGR Geometry."
def test_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
        # Should raise GDALException on these inputs.
with self.assertRaises(GDALException):
OGRGeomType(23)
with self.assertRaises(GDALException):
OGRGeomType('fooD')
with self.assertRaises(GDALException):
OGRGeomType(9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
self.assertEqual(exp_gml, geom.gml)
def test_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
msg = 'Index out of range when accessing points of a line string: %s.'
with self.assertRaisesMessage(IndexError, msg % len(linestr)):
linestr.__getitem__(len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
msg = 'Index out of range when accessing geometry in a collection: %s.'
with self.assertRaisesMessage(IndexError, msg % len(mlinestr)):
mlinestr.__getitem__(len(mlinestr))
def test_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
# self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
msg = 'Index out of range when accessing rings of a polygon: %s.'
with self.assertRaisesMessage(IndexError, msg % len(poly)):
poly.__getitem__(len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test_polygons_templates(self):
# Accessing Polygon attributes in templates should work.
engine = Engine()
template = engine.from_string('{{ polygons.0.wkt }}')
polygons = [OGRGeometry(p.wkt) for p in self.geometries.multipolygons[:2]]
content = template.render(Context({'polygons': polygons}))
self.assertIn('MULTIPOLYGON (((100', content)
def test_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(GDALException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
msg = 'Index out of range when accessing geometry in a collection: %s.'
with self.assertRaisesMessage(IndexError, msg % len(mpoly)):
mpoly.__getitem__(len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
# srs/srid may be assigned their own values, even when srs is None.
mpoly = OGRGeometry(mp.wkt, srs=None)
mpoly.srs = mpoly.srs
mpoly.srid = mpoly.srid
def test_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
with self.assertRaises(GDALException):
mp.add(pnt)
# GeometryCollection.add may take an OGRGeometry (if another collection
# of the same type all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
# Testing binary predicates, `assertIs` is used to check that bool is returned.
def test_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
def test_contains(self):
self.assertIs(OGRGeometry('POINT(0 0)').contains(OGRGeometry('POINT(0 0)')), True)
self.assertIs(OGRGeometry('POINT(0 0)').contains(OGRGeometry('POINT(0 1)')), False)
def test_crosses(self):
self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').crosses(OGRGeometry('LINESTRING(0 1, 1 0)')), True)
self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').crosses(OGRGeometry('LINESTRING(1 0, 1 1)')), False)
def test_disjoint(self):
self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').disjoint(OGRGeometry('LINESTRING(0 1, 1 0)')), False)
self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').disjoint(OGRGeometry('LINESTRING(1 0, 1 1)')), True)
    def test_equals(self):
        self.assertIs(OGRGeometry('POINT(0 0)').equals(OGRGeometry('POINT(0 0)')), True)
        self.assertIs(OGRGeometry('POINT(0 0)').equals(OGRGeometry('POINT(0 1)')), False)
def test_intersects(self):
self.assertIs(OGRGeometry('LINESTRING(0 0, 1 1)').intersects(OGRGeometry('LINESTRING(0 1, 1 0)')), True)
self.assertIs(OGRGeometry('LINESTRING(0 0, 0 1)').intersects(OGRGeometry('LINESTRING(1 0, 1 1)')), False)
def test_overlaps(self):
self.assertIs(
OGRGeometry('POLYGON ((0 0, 0 2, 2 2, 2 0, 0 0))').overlaps(
OGRGeometry('POLYGON ((1 1, 1 5, 5 5, 5 1, 1 1))')
), True
)
self.assertIs(OGRGeometry('POINT(0 0)').overlaps(OGRGeometry('POINT(0 1)')), False)
def test_touches(self):
self.assertIs(
OGRGeometry('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))').touches(OGRGeometry('LINESTRING(0 2, 2 0)')), True
)
self.assertIs(OGRGeometry('POINT(0 0)').touches(OGRGeometry('POINT(0 1)')), False)
def test_within(self):
self.assertIs(
OGRGeometry('POINT(0.5 0.5)').within(OGRGeometry('POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))')), True
)
self.assertIs(OGRGeometry('POINT(0 0)').within(OGRGeometry('POINT(0 1)')), False)
def test_from_gml(self):
self.assertEqual(
OGRGeometry('POINT(0 0)'),
OGRGeometry.from_gml(
'<gml:Point gml:id="p21" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">'
' <gml:pos srsDimension="2">0 0</gml:pos>'
'</gml:Point>'
),
)
def test_empty(self):
self.assertIs(OGRGeometry('POINT (0 0)').empty, False)
self.assertIs(OGRGeometry('POINT EMPTY').empty, True)
def test_empty_point_to_geos(self):
p = OGRGeometry('POINT EMPTY', srs=4326)
self.assertEqual(p.geos.ewkt, p.ewkt)
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import re
from django.db import connection
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from .models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["aggregation.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
|
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import inspect
from collections import OrderedDict
from os.path import join as pjoin
this_dir = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(this_dir, '../'))
from libcloud.compute.base import NodeDriver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.providers import DRIVERS as COMPUTE_DRIVERS
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.loadbalancer.base import Driver as LBDriver
from libcloud.loadbalancer.providers import get_driver as get_lb_driver
from libcloud.loadbalancer.providers import DRIVERS as LB_DRIVERS
from libcloud.loadbalancer.types import Provider as LBProvider
from libcloud.storage.base import StorageDriver
from libcloud.storage.providers import get_driver as get_storage_driver
from libcloud.storage.providers import DRIVERS as STORAGE_DRIVERS
from libcloud.storage.types import Provider as StorageProvider
from libcloud.dns.base import DNSDriver
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.providers import DRIVERS as DNS_DRIVERS
from libcloud.dns.types import Provider as DNSProvider
REQUIRED_DEPENDENCIES = [
'pysphere'
]
for dependency in REQUIRED_DEPENDENCIES:
try:
__import__(dependency)
except ImportError:
msg = 'Missing required dependency: %s' % (dependency)
raise ImportError(msg)
HEADER = ('.. NOTE: This file has been generated automatically using '
'generate_provider_feature_matrix_table.py script, don\'t manually '
'edit it')
BASE_API_METHODS = {
'compute_main': ['list_nodes', 'create_node', 'reboot_node',
'destroy_node', 'list_images', 'list_sizes',
'deploy_node'],
'compute_image_management': ['list_images', 'get_image',
'create_image', 'delete_image', 'copy_image'],
'compute_block_storage': ['list_volumes', 'create_volume',
'destroy_volume',
'attach_volume', 'detach_volume',
'list_volume_snapshots',
'create_volume_snapshot'],
'compute_key_pair_management': ['list_key_pairs', 'get_key_pair',
'create_key_pair',
'import_key_pair_from_string',
'import_key_pair_from_file',
'delete_key_pair'],
'loadbalancer': ['create_balancer', 'list_balancers',
'balancer_list_members', 'balancer_attach_member',
'balancer_detach_member', 'balancer_attach_compute_node'],
'storage_main': ['list_containers', 'list_container_objects',
'iterate_containers', 'iterate_container_objects',
'create_container', 'delete_container', 'upload_object',
'upload_object_via_stream', 'download_object',
'download_object_as_stream', 'delete_object'],
'storage_cdn': ['enable_container_cdn', 'enable_object_cdn',
'get_container_cdn_url', 'get_object_cdn_url'],
'dns': ['list_zones', 'list_records', 'iterate_zones', 'iterate_records',
'create_zone', 'update_zone', 'create_record', 'update_record',
'delete_zone', 'delete_record']
}
FRIENDLY_METHODS_NAMES = {
'compute_main': {
'list_nodes': 'list nodes',
'create_node': 'create node',
'reboot_node': 'reboot node',
'destroy_node': 'destroy node',
'list_images': 'list images',
'list_sizes': 'list sizes',
'deploy_node': 'deploy node'
},
'compute_image_management': {
'list_images': 'list images',
'get_image': 'get image',
'create_image': 'create image',
'copy_image': 'copy image',
'delete_image': 'delete image'
},
'compute_block_storage': {
'list_volumes': 'list volumes',
'create_volume': 'create volume',
'destroy_volume': 'destroy volume',
'attach_volume': 'attach volume',
'detach_volume': 'detach volume',
'list_volume_snapshots': 'list snapshots',
'create_volume_snapshot': 'create snapshot'
},
'compute_key_pair_management': {
'list_key_pairs': 'list key pairs',
'get_key_pair': 'get key pair',
'create_key_pair': 'create key pair',
'import_key_pair_from_string': 'import public key from string',
'import_key_pair_from_file': 'import public key from file',
'delete_key_pair': 'delete key pair'
},
'loadbalancer': {
'create_balancer': 'create balancer',
'list_balancers': 'list balancers',
'balancer_list_members': 'list members',
'balancer_attach_member': 'attach member',
'balancer_detach_member': 'detach member',
'balancer_attach_compute_node': 'attach compute node'
},
'storage_main': {
'list_containers': 'list containers',
'list_container_objects': 'list objects',
'create_container': 'create container',
'delete_container': 'delete container',
'upload_object': 'upload object',
'upload_object_via_stream': 'streaming object upload',
'download_object': 'download object',
'download_object_as_stream': 'streaming object download',
'delete_object': 'delete object'
},
'storage_cdn': {
'enable_container_cdn': 'enable container cdn',
'enable_object_cdn': 'enable object cdn',
'get_container_cdn_url': 'get container cdn URL',
'get_object_cdn_url': 'get object cdn URL',
},
'dns': {
'list_zones': 'list zones',
'list_records': 'list records',
'create_zone': 'create zone',
'update_zone': 'update zone',
'create_record': 'create record',
'update_record': 'update record',
'delete_zone': 'delete zone',
'delete_record': 'delete record'
},
}
IGNORED_PROVIDERS = [
'dummy',
'local',
# Deprecated constants
'cloudsigma_us',
'cloudfiles_swift'
]
def get_provider_api_names(Provider):
names = [key for key, value in Provider.__dict__.items() if
not key.startswith('__')]
return names
def generate_providers_table(api):
result = {}
if api in ['compute_main', 'compute_image_management',
'compute_block_storage', 'compute_key_pair_management']:
driver = NodeDriver
drivers = COMPUTE_DRIVERS
provider = ComputeProvider
get_driver_method = get_compute_driver
elif api == 'loadbalancer':
driver = LBDriver
drivers = LB_DRIVERS
provider = LBProvider
get_driver_method = get_lb_driver
elif api in ['storage_main', 'storage_cdn']:
driver = StorageDriver
drivers = STORAGE_DRIVERS
provider = StorageProvider
get_driver_method = get_storage_driver
elif api == 'dns':
driver = DNSDriver
drivers = DNS_DRIVERS
provider = DNSProvider
get_driver_method = get_dns_driver
else:
raise Exception('Invalid api: %s' % (api))
names = get_provider_api_names(provider)
result = OrderedDict()
for name in names:
enum = getattr(provider, name)
try:
cls = get_driver_method(enum)
except:
# Deprecated providers throw an exception
continue
# Hack for providers which expose multiple classes and support multiple
# API versions
# TODO: Make entry per version
if name.lower() == 'cloudsigma':
from libcloud.compute.drivers.cloudsigma import \
CloudSigma_2_0_NodeDriver
cls = CloudSigma_2_0_NodeDriver
elif name.lower() == 'opennebula':
from libcloud.compute.drivers.opennebula import \
OpenNebula_3_8_NodeDriver
cls = OpenNebula_3_8_NodeDriver
elif name.lower() == 'digital_ocean' and api.startswith('compute'):
from libcloud.compute.drivers.digitalocean import \
DigitalOcean_v2_NodeDriver
cls = DigitalOcean_v2_NodeDriver
if name.lower() in IGNORED_PROVIDERS:
continue
driver_methods = dict(inspect.getmembers(cls,
predicate=inspect.ismethod))
base_methods = dict(inspect.getmembers(driver,
predicate=inspect.ismethod))
base_api_methods = BASE_API_METHODS[api]
result[name] = {'name': cls.name, 'website': cls.website,
'constant': name, 'module': drivers[enum][0],
'class': drivers[enum][1],
'methods': {}}
for method_name in base_api_methods:
base_method = base_methods[method_name]
driver_method = driver_methods[method_name]
if method_name == 'deploy_node':
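                # deploy_node is built on top of create_node, so treat it as
                # supported whenever the driver advertises any create_node
                # features (e.g. key- or password-based authentication).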
features = getattr(cls, 'features', {}).get('create_node', [])
is_implemented = len(features) >= 1
else:
is_implemented = (id(driver_method.im_func) !=
id(base_method.im_func))
result[name]['methods'][method_name] = is_implemented
return result
def generate_rst_table(data):
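    # Renders `data` (a list of equal-length string rows, the first row being the
    # header) as an RST "simple table": a line of '=' column markers, the header
    # row, another marker line, the body rows, and a closing marker line.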
cols = len(data[0])
col_len = [max(len(r[i]) for r in data) for i in range(cols)]
formatter = ' '.join('{:<%d}' % c for c in col_len)
header = formatter.format(*['=' * c for c in col_len])
rows = [formatter.format(*row) for row in data]
result = header + '\n' + rows[0] + '\n' + header + '\n' +\
'\n'.join(rows[1:]) + '\n' + header
return result
def generate_supported_methods_table(api, provider_matrix):
base_api_methods = BASE_API_METHODS[api]
data = []
header = [FRIENDLY_METHODS_NAMES[api][method_name] for method_name in
base_api_methods if not method_name.startswith('iterate_')]
data.append(['Provider'] + header)
for provider, values in sorted(provider_matrix.items()):
provider_name = '`%s`_' % (values['name'])
row = [provider_name]
# TODO: Make it nicer
# list_* methods don't need to be implemented if iterate_* methods are
# implemented
if api == 'storage_main':
if values['methods']['iterate_containers']:
values['methods']['list_containers'] = True
if values['methods']['iterate_container_objects']:
values['methods']['list_container_objects'] = True
elif api == 'dns':
            # list_zones and list_records don't need to be implemented if
            # iterate_zones and iterate_records are implemented
if values['methods']['iterate_zones']:
values['methods']['list_zones'] = True
if values['methods']['iterate_records']:
values['methods']['list_records'] = True
for method in base_api_methods:
# TODO: ghetto
if method.startswith('iterate_'):
continue
supported = values['methods'][method]
if supported:
row.append('yes')
else:
row.append('no')
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_supported_providers_table(api, provider_matrix):
data = []
header = ['Provider', 'Documentation', 'Provider constant', 'Module',
'Class Name']
data.append(header)
for provider, values in sorted(provider_matrix.items()):
name_str = '`%s`_' % (values['name'])
module_str = ':mod:`%s`' % (values['module'])
class_str = ':class:`%s`' % (values['class'])
params = {'api': api, 'provider': provider.lower()}
driver_docs_path = pjoin(this_dir,
'../docs/%(api)s/drivers/%(provider)s.rst'
% params)
if os.path.exists(driver_docs_path):
docs_link = ':doc:`Click </%(api)s/drivers/%(provider)s>`' % params
else:
docs_link = ''
row = [name_str, docs_link, values['constant'], module_str, class_str]
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_tables():
apis = BASE_API_METHODS.keys()
for api in apis:
result = generate_providers_table(api)
docs_dir = api
if api.startswith('compute'):
docs_dir = 'compute'
elif api.startswith('storage'):
docs_dir = 'storage'
supported_providers = generate_supported_providers_table(docs_dir,
result)
supported_methods = generate_supported_methods_table(api, result)
current_path = os.path.dirname(__file__)
target_dir = os.path.abspath(pjoin(current_path,
'../docs/%s/' % (docs_dir)))
file_name_1 = '_supported_providers.rst'
file_name_2 = '_supported_methods.rst'
if api == 'compute_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'compute_image_management':
file_name_2 = '_supported_methods_image_management.rst'
elif api == 'compute_block_storage':
file_name_2 = '_supported_methods_block_storage.rst'
elif api == 'compute_key_pair_management':
file_name_2 = '_supported_methods_key_pair_management.rst'
elif api == 'storage_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'storage_cdn':
file_name_2 = '_supported_methods_cdn.rst'
supported_providers_path = pjoin(target_dir, file_name_1)
supported_methods_path = pjoin(target_dir, file_name_2)
with open(supported_providers_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_providers)
with open(supported_methods_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_methods)
generate_tables()
|
|
from flask import render_template, Blueprint, jsonify, request, abort, Response, current_app
from paramiko.client import SSHClient, AutoAddPolicy
import sys
import random
views = Blueprint("views", __name__, template_folder="templates")
def _exec_command(command, switch_name=''):
if not switch_name:
switch_name = [s for s in current_app.config['switches']][0]
current_app.logger.info('running on %s: %s' % (switch_name, command))
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
client.connect(current_app.config['switches'][switch_name]['address'],
username=current_app.config['switches'][switch_name]['user'],
key_filename=current_app.config['switches'][switch_name]['key'])
sin, sout, serr = client.exec_command(command)
output = sout.read().decode('ascii')
errout = serr.read().decode('ascii')
client.close()
if errout or 'Cmd exec error' in output:
abort(500, "Error executing '%s' on %s" % (command, switch_name))
current_app.logger.info('output from %s: %s' % (switch_name, output))
return output, errout
def _encode_mac(mac):
# turns 0A:1b:2c:3D:4e:5F or 0A1b2C3d4E5f into 0a1b.2c3d.4e5f (switch format)
if ':' in mac:
mac = mac.lower().replace(':','')
return '%s.%s.%s' % (mac[0:4], mac[4:8], mac[8:12])
return mac
def _decode_mac(mac):
# turns 0a1b.2c3d.4e5f (switch format) into 0a:1b:2c:3d:4e:5f
mac = mac.lower().replace('.','')
return '%s:%s:%s:%s:%s:%s' % (mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12])
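# Illustrative round trip for the two helpers above (values are hypothetical):
#   _encode_mac('0A:1b:2c:3D:4e:5F') -> '0a1b.2c3d.4e5f'
#   _decode_mac('0a1b.2c3d.4e5f')    -> '0a:1b:2c:3d:4e:5f'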
@views.route("/")
def index():
    verb = random.choice(current_app.config['verbs'])
    noun = random.choice(current_app.config['nouns'])
return Response('a LANister always %s their %s' % (verb, noun), mimetype='text/plain')
@views.route("/api/<switch_name>/interfaces/", methods=['GET'])
def interface_list(switch_name):
if 'macs' in request.args:
macs = [_encode_mac(mac) for mac in request.args['macs'].split(',') if mac]
output, errout = _exec_command('show mac address-table | i "%s"' % ('|'.join(macs)), switch_name)
ports = {}
for port in [i for i in output.strip().split('\n') if i]:
port = port.split()
if 'Eth' in port[-1]:
ports[_decode_mac(port[2].strip())] = port[-1].strip()
return jsonify(dict(ports=ports))
if 'descriptions' in request.args:
descriptions = [i for i in request.args['descriptions'].split(',') if i]
output, errout = _exec_command('show interface description | i %s' % ('|'.join(descriptions)), switch_name)
ports = {}
for port in [i for i in output.strip().split('\n') if i]:
port = port.split()
ports[port[-1].strip()] = port[0].strip()
return jsonify(dict(ports=ports))
output, errout = _exec_command('show interface brief', switch_name)
#TODO: parse this to json
return Response(output, mimetype='text/plain')
@views.route("/api/<switch_name>/interfaces/<interface_name>/", methods=['PUT','GET'])
def interface(switch_name, interface_name):
interface_name = interface_name.replace('_','/')
if request.method == 'PUT':
data = request.get_json(force=True)
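        # Expected JSON body (illustrative): {"bind": "10"} joins port-channel 10,
        # {"bind": null} removes the channel-group, and {"state": "up"|"down"}
        # toggles the interface.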
if 'bind' in data:
# bind can be a channel or None
command = ''
if data['bind']:
command = 'channel-group %s mode active' % (data['bind'])
else:
command = 'no channel-group'
output, errout = _exec_command('config t ; interface %s ; %s ; exit' % (interface_name, command),
switch_name)
if 'state' in data:
command = ''
if data['state'] == 'down':
command = 'shutdown'
elif data['state'] == 'up':
command = 'no shutdown'
else:
abort(400, '"state" must be "up" or "down" in interface configuration')
ifname = interface_name.replace('Eth','')
output, errout = _exec_command('config ; interface ethernet %s ; %s ; exit ; exit' %
(ifname, command), switch_name)
output, errout = _exec_command('show running-config interface %s' % (interface_name), switch_name)
config = [i.strip() for i in output.split(interface_name.replace('Eth',''))[-1].strip().split('\n')]
return jsonify(dict(name=interface_name, config=config))
@views.route("/api/<switch_name>/channels/<channel_name>/", methods=['POST','GET','DELETE'])
def channel(switch_name, channel_name):
if request.method == 'POST':
output, errout = _exec_command('show interface brief | i Po%s' % (channel_name), switch_name)
if output:
abort(400, 'Channel group Po%s already exists' % (channel_name))
data = request.get_json(force=True, silent=True)
command = 'config t ; interface port-channel %s' % (channel_name)
if data and 'config' in data:
command += ' ; %s' % (' ; '.join(data['config']))
if data and 'description' in data:
command += ' ; %s' % (data['description'])
output, errout = _exec_command(command, switch_name)
else:
output, errout = _exec_command('show interface brief | i Po%s' % (channel_name), switch_name)
if not output:
abort(404, 'No such channel group %s' % (channel_name))
if request.method == 'DELETE':
output, errout = _exec_command('config t ; no interface port-channel %s ; exit' % (channel_name),
switch_name)
return Response(output, mimetype='text/plain')
output, errout = _exec_command('show running-config interface port-channel%s' % (channel_name), switch_name)
config = [i.strip() for i in output.split('port-channel%s' % (channel_name))[-1].strip().split('\n')]
return jsonify(dict(name='port-channel%s' % (channel_name), config=config))
@views.route("/api/<switch_name>/macaddresses/", methods=['GET'])
def mac_addresses(switch_name):
output, errout = _exec_command('show mac address-table | i Eth', switch_name)
output_desc, errout = _exec_command('show interface description | i Eth', switch_name)
interfaces = {}
for desc_line in [i for i in output_desc.strip().split('\n') if i]:
interfaces[desc_line.split()[0].strip()] = desc_line.split()[-1].strip()
macs = {}
for line in [i for i in output.strip().split('\n') if i]:
line = line.split()
interface_description = line[-1].strip()
        macs[_decode_mac(line[2].strip())] = dict(interface=interface_description,
                                                  description=interfaces[interface_description])
return jsonify(dict(macs=macs))
@views.route("/api/<switch_name>/macaddresses/<mac_address>/", methods=['GET'])
def mac_address(switch_name, mac_address):
mac_addresses = [_encode_mac(mac_address)]
output, errout = _exec_command('show mac address-table | i "%s"' % ('|'.join(mac_addresses)), switch_name)
macs = {}
for line in [i for i in output.strip().split('\n') if i]:
line = line.split()
if 'Eth' in line[-1]:
interface_description = line[-1].strip()
output_desc, errout = _exec_command('show interface description | i %s' % (interface_description), switch_name)
for desc_line in [i for i in output_desc.strip().split('\n') if i]:
desc_line = desc_line.split()
macs[_decode_mac(line[2].strip())] = dict(interface=line[-1].strip(),
slot=desc_line[-1].strip())
return jsonify(dict(macs=macs))
@views.route("/api/<switch_name>/slots/", methods=['GET'])
def slots(switch_name):
output_macs, errout = _exec_command('show mac address-table | i Eth', switch_name)
output_desc, errout = _exec_command('show interface description | i Eth', switch_name)
interfaces = {}
for mac_line in [i for i in output_macs.strip().split('\n') if i]:
interfaces[mac_line.split()[-1].strip()] = _decode_mac(mac_line.split()[2].strip())
slots = {}
for line in [i for i in output_desc.strip().split('\n') if i]:
line = line.split()
if 'SLOT' in line[-1] and len(line[-1].split('.')) in [2,3,4] and 'SLOT' in line[-1].split('.')[0]:
slot_name, component = _parse_description(line[-1])
interface = line[0].strip()
if not slot_name in slots:
slots[slot_name] = dict(interfaces=[])
slots[slot_name]['interfaces'].append(dict(interface=interface,
component=component,
mac_address=interfaces[interface] if interface in interfaces else ''))
return jsonify(dict(slots=slots))
@views.route("/api/<switch_name>/slots/<slot_name>/", methods=['GET'])
def slot(switch_name, slot_name):
output, errout = _exec_command('show interface description | i %s' % (slot_name), switch_name)
slots = {}
for line in [i for i in output.strip().split('\n') if i]:
line = line.split()
if 'Eth' in line[0]:
slot_name, component = _parse_description(line[-1])
interface = line[0].strip()
output_mac, errout = _exec_command('show mac address-table | i %s' % (interface), switch_name)
for mac_line in [i for i in output_mac.strip().split('\n') if i]:
mac_line = mac_line.split()
if not slot_name in slots:
slots[slot_name] = dict(interfaces=[])
slots[slot_name]['interfaces'].append(dict(interface=interface,
component=component,
mac_address=_decode_mac(mac_line[2].strip())))
return jsonify(dict(slots=slots))
def _parse_description(interface_description):
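    # Interface descriptions appear to follow a "SLOTx.<component>" scheme with an
    # optional ordinal suffix (e.g. "SLOT1.NIC1" or "SLOT1.NIC1.2ND"); split them
    # into a slot name and a component. The exact naming convention is inferred
    # from the parsing below.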
slot_name = interface_description.split('.')[0].strip()
if len(interface_description.split('.')) == 2:
component = interface_description.split('.')[-1].strip()
elif len(interface_description.split('.')) == 3:
if interface_description.split('.')[-1].strip() in ['1ST', '2ND', '3RD', '4TH', '5TH', '6TH']:
slot_name = ".".join([slot_name, interface_description.split('.')[-1].strip()])
component = interface_description.split('.')[-2].strip()
else:
component = ".".join(interface_description.split('.')[-2:])
else:
if interface_description.split('.')[-1].strip() in ['1ST', '2ND', '3RD', '4TH', '5TH', '6TH']:
slot_name = ".".join([slot_name, interface_description.split('.')[-1].strip()])
component = ".".join(interface_description.split('.')[-3:-1])
else:
component = ".".join(interface_description.split('.')[-3:])
return slot_name, component
|
|
import pytest
from itertools import chain, combinations, product
import networkx as nx
tree_all_pairs_lca = nx.tree_all_pairs_lowest_common_ancestor
all_pairs_lca = nx.all_pairs_lowest_common_ancestor
def get_pair(dictionary, n1, n2):
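    # The LCA routines key their results by ordered node pairs; this helper looks
    # a pair up in either order.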
if (n1, n2) in dictionary:
return dictionary[n1, n2]
else:
return dictionary[n2, n1]
class TestTreeLCA(object):
@classmethod
def setup_class(cls):
cls.DG = nx.DiGraph()
edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
cls.DG.add_edges_from(edges)
cls.ans = dict(tree_all_pairs_lca(cls.DG, 0))
gold = dict([((n, n), n) for n in cls.DG])
gold.update(dict(((0, i), 0) for i in range(1, 7)))
gold.update({(1, 2): 0,
(1, 3): 1,
(1, 4): 1,
(1, 5): 0,
(1, 6): 0,
(2, 3): 0,
(2, 4): 0,
(2, 5): 2,
(2, 6): 2,
(3, 4): 1,
(3, 5): 0,
(3, 6): 0,
(4, 5): 0,
(4, 6): 0,
(5, 6): 2})
cls.gold = gold
@staticmethod
def assert_has_same_pairs(d1, d2):
for (a, b) in ((min(pair), max(pair)) for pair in chain(d1, d2)):
assert get_pair(d1, a, b) == get_pair(d2, a, b)
def test_tree_all_pairs_lowest_common_ancestor1(self):
"""Specifying the root is optional."""
assert dict(tree_all_pairs_lca(self.DG)) == self.ans
def test_tree_all_pairs_lowest_common_ancestor2(self):
"""Specifying only some pairs gives only those pairs."""
test_pairs = [(0, 1), (0, 1), (1, 0)]
ans = dict(tree_all_pairs_lca(self.DG, 0, test_pairs))
assert (0, 1) in ans and (1, 0) in ans
assert len(ans) == 2
def test_tree_all_pairs_lowest_common_ancestor3(self):
"""Specifying no pairs same as specifying all."""
all_pairs = chain(combinations(self.DG, 2),
((node, node) for node in self.DG))
ans = dict(tree_all_pairs_lca(self.DG, 0, all_pairs))
self.assert_has_same_pairs(ans, self.ans)
def test_tree_all_pairs_lowest_common_ancestor4(self):
"""Gives the right answer."""
ans = dict(tree_all_pairs_lca(self.DG))
self.assert_has_same_pairs(self.gold, ans)
def test_tree_all_pairs_lowest_common_ancestor5(self):
"""Handles invalid input correctly."""
empty_digraph = tree_all_pairs_lca(nx.DiGraph())
pytest.raises(nx.NetworkXPointlessConcept, list, empty_digraph)
bad_pairs_digraph = tree_all_pairs_lca(self.DG, pairs=[(-1, -2)])
pytest.raises(nx.NodeNotFound, list, bad_pairs_digraph)
def test_tree_all_pairs_lowest_common_ancestor6(self):
"""Works on subtrees."""
ans = dict(tree_all_pairs_lca(self.DG, 1))
gold = dict((pair, lca) for (pair, lca) in self.gold.items()
if all(n in (1, 3, 4) for n in pair))
self.assert_has_same_pairs(gold, ans)
def test_tree_all_pairs_lowest_common_ancestor7(self):
"""Works on disconnected nodes."""
G = nx.DiGraph()
G.add_node(1)
assert {(1, 1): 1} == dict(tree_all_pairs_lca(G))
G.add_node(0)
assert {(1, 1): 1} == dict(tree_all_pairs_lca(G, 1))
assert {(0, 0): 0} == dict(tree_all_pairs_lca(G, 0))
pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G))
def test_tree_all_pairs_lowest_common_ancestor8(self):
"""Raises right errors if not a tree."""
# Cycle
G = nx.DiGraph([(1, 2), (2, 1)])
pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G))
# DAG
G = nx.DiGraph([(0, 2), (1, 2)])
pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G))
def test_tree_all_pairs_lowest_common_ancestor9(self):
"""Test that pairs works correctly as a generator."""
pairs = iter([(0, 1), (0, 1), (1, 0)])
some_pairs = dict(tree_all_pairs_lca(self.DG, 0, pairs))
assert (0, 1) in some_pairs and (1, 0) in some_pairs
assert len(some_pairs) == 2
def test_tree_all_pairs_lowest_common_ancestor10(self):
"""Test that pairs not in the graph raises error."""
lca = tree_all_pairs_lca(self.DG, 0, [(-1, -1)])
pytest.raises(nx.NodeNotFound, list, lca)
def test_tree_all_pairs_lowest_common_ancestor11(self):
"""Test that None as a node in the graph raises an error."""
G = nx.DiGraph([(None, 3)])
pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G))
pytest.raises(nx.NodeNotFound, list,
tree_all_pairs_lca(self.DG, pairs=G.edges()))
def test_tree_all_pairs_lowest_common_ancestor12(self):
"""Test that tree routine bails on DAGs."""
G = nx.DiGraph([(3, 4), (5, 4)])
pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G))
def test_not_implemented_for(self):
NNI = nx.NetworkXNotImplemented
G = nx.Graph([(0, 1)])
pytest.raises(NNI, tree_all_pairs_lca, G)
pytest.raises(NNI, all_pairs_lca, G)
pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1)
G = nx.MultiGraph([(0, 1)])
pytest.raises(NNI, tree_all_pairs_lca, G)
pytest.raises(NNI, all_pairs_lca, G)
pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1)
G = nx.MultiDiGraph([(0, 1)])
pytest.raises(NNI, tree_all_pairs_lca, G)
pytest.raises(NNI, all_pairs_lca, G)
pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1)
def test_tree_all_pairs_lowest_common_ancestor13(self):
"""Test that it works on non-empty trees with no LCAs."""
G = nx.DiGraph()
G.add_node(3)
ans = list(tree_all_pairs_lca(G))
assert ans == [((3, 3), 3)]
class TestDAGLCA:
@classmethod
def setup_class(cls):
cls.DG = nx.DiGraph()
nx.add_path(cls.DG, (0, 1, 2, 3))
nx.add_path(cls.DG, (0, 4, 3))
nx.add_path(cls.DG, (0, 5, 6, 8, 3))
nx.add_path(cls.DG, (5, 7, 8))
cls.DG.add_edge(6, 2)
cls.DG.add_edge(7, 2)
cls.root_distance = nx.shortest_path_length(cls.DG, source=0)
cls.gold = {(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 0,
(1, 5): 0,
(1, 6): 0,
(1, 7): 0,
(1, 8): 0,
(2, 2): 2,
(2, 3): 2,
(2, 4): 0,
(2, 5): 5,
(2, 6): 6,
(2, 7): 7,
(2, 8): 7,
(3, 3): 8,
(3, 4): 4,
(3, 5): 5,
(3, 6): 6,
(3, 7): 7,
(3, 8): 8,
(4, 4): 4,
(4, 5): 0,
(4, 6): 0,
(4, 7): 0,
(4, 8): 0,
(5, 5): 5,
(5, 6): 5,
(5, 7): 5,
(5, 8): 5,
(6, 6): 6,
(6, 7): 5,
(6, 8): 6,
(7, 7): 7,
(7, 8): 7,
(8, 8): 8}
cls.gold.update(((0, n), 0) for n in cls.DG)
def assert_lca_dicts_same(self, d1, d2, G=None):
"""Checks if d1 and d2 contain the same pairs and
have a node at the same distance from root for each.
If G is None use self.DG."""
if G is None:
G = self.DG
root_distance = self.root_distance
else:
roots = [n for n, deg in G.in_degree if deg == 0]
            assert len(roots) == 1
root_distance = nx.shortest_path_length(G, source=roots[0])
for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)):
assert (root_distance[get_pair(d1, a, b)] ==
root_distance[get_pair(d2, a, b)])
def test_all_pairs_lowest_common_ancestor1(self):
"""Produces the correct results."""
self.assert_lca_dicts_same(dict(all_pairs_lca(self.DG)), self.gold)
def test_all_pairs_lowest_common_ancestor2(self):
"""Produces the correct results when all pairs given."""
all_pairs = list(product(self.DG.nodes(), self.DG.nodes()))
ans = all_pairs_lca(self.DG, pairs=all_pairs)
self.assert_lca_dicts_same(dict(ans), self.gold)
def test_all_pairs_lowest_common_ancestor3(self):
"""Produces the correct results when all pairs given as a generator."""
all_pairs = product(self.DG.nodes(), self.DG.nodes())
ans = all_pairs_lca(self.DG, pairs=all_pairs)
self.assert_lca_dicts_same(dict(ans), self.gold)
def test_all_pairs_lowest_common_ancestor4(self):
"""Graph with two roots."""
G = self.DG.copy()
G.add_edge(9, 10)
G.add_edge(9, 4)
gold = self.gold.copy()
gold[9, 9] = 9
gold[9, 10] = 9
gold[9, 4] = 9
gold[9, 3] = 9
gold[10, 4] = 9
gold[10, 3] = 9
gold[10, 10] = 10
testing = dict(all_pairs_lca(G))
G.add_edge(-1, 9)
G.add_edge(-1, 0)
self.assert_lca_dicts_same(testing, gold, G)
def test_all_pairs_lowest_common_ancestor5(self):
"""Test that pairs not in the graph raises error."""
pytest.raises(nx.NodeNotFound, all_pairs_lca, self.DG, [(-1, -1)])
def test_all_pairs_lowest_common_ancestor6(self):
"""Test that pairs with no LCA specified emits nothing."""
G = self.DG.copy()
G.add_node(-1)
gen = all_pairs_lca(G, [(-1, -1), (-1, 0)])
assert dict(gen) == {(-1, -1): -1}
def test_all_pairs_lowest_common_ancestor7(self):
"""Test that LCA on null graph bails."""
pytest.raises(nx.NetworkXPointlessConcept,
all_pairs_lca,
nx.DiGraph())
def test_all_pairs_lowest_common_ancestor8(self):
"""Test that LCA on non-dags bails."""
pytest.raises(nx.NetworkXError, all_pairs_lca,
nx.DiGraph([(3, 4), (4, 3)]))
def test_all_pairs_lowest_common_ancestor9(self):
"""Test that it works on non-empty graphs with no LCAs."""
G = nx.DiGraph()
G.add_node(3)
ans = list(all_pairs_lca(G))
assert ans == [((3, 3), 3)]
def test_all_pairs_lowest_common_ancestor10(self):
"""Test that it bails on None as a node."""
G = nx.DiGraph([(None, 3)])
pytest.raises(nx.NetworkXError, all_pairs_lca, G)
pytest.raises(nx.NodeNotFound, all_pairs_lca,
self.DG, pairs=G.edges())
def test_lowest_common_ancestor1(self):
"""Test that the one-pair function works on default."""
G = nx.DiGraph([(0, 1), (2, 1)])
sentinel = object()
assert (nx.lowest_common_ancestor(G, 0, 2, default=sentinel) is
sentinel)
def test_lowest_common_ancestor2(self):
"""Test that the one-pair function works on identity."""
G = nx.DiGraph()
G.add_node(3)
assert nx.lowest_common_ancestor(G, 3, 3) == 3
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import logging
import operator
import os
import subprocess
import tempfile
from luigi import six
import luigi
import luigi.hadoop
from luigi.target import FileAlreadyExists, FileSystemTarget
from luigi.task import flatten
if six.PY3:
unicode = str
logger = logging.getLogger('luigi-interface')
class HiveCommandError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HiveCommandError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def load_hive_cmd():
return luigi.configuration.get_config().get('hive', 'command', 'hive')
def get_hive_syntax():
return luigi.configuration.get_config().get('hive', 'release', 'cdh4')
def run_hive(args, check_return_code=True):
"""
    Runs `hive` from the command line, passing in the given args, and
    returns stdout.
    With the Apache release of Hive, some of the table existence checks
    (which are done using DESCRIBE) do not exit with a return code of 0,
    so we need an option to ignore the return code and just return stdout for parsing.
"""
cmd = [load_hive_cmd()] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
return stdout
def run_hive_cmd(hivecmd, check_return_code=True):
"""
Runs the given hive query and returns stdout.
"""
return run_hive(['-e', hivecmd], check_return_code)
def run_hive_script(script):
"""
Runs the contents of the given script in hive and returns stdout.
"""
if not os.path.isfile(script):
raise RuntimeError("Hive script: {0} does not exist.".format(script))
return run_hive(['-f', script])
@six.add_metaclass(abc.ABCMeta)
class HiveClient(object): # interface
@abc.abstractmethod
def table_location(self, table, database='default', partition=None):
"""
Returns location of db.table (or db.table.partition). partition is a dict of partition key to
value.
"""
pass
@abc.abstractmethod
def table_schema(self, table, database='default'):
"""
Returns list of [(name, type)] for each column in database.table.
"""
pass
@abc.abstractmethod
def table_exists(self, table, database='default', partition=None):
"""
Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to
value.
"""
pass
@abc.abstractmethod
def partition_spec(self, partition):
""" Turn a dict into a string partition specification """
pass
class HiveCommandClient(HiveClient):
"""
Uses `hive` invocations to find information.
"""
def table_location(self, table, database='default', partition=None):
cmd = "use {0}; describe formatted {1}".format(database, table)
if partition is not None:
cmd += " PARTITION ({0})".format(self.partition_spec(partition))
stdout = run_hive_cmd(cmd)
for line in stdout.split("\n"):
if "Location:" in line:
return line.split("\t")[1]
def table_exists(self, table, database='default', partition=None):
if partition is None:
stdout = run_hive_cmd('use {0}; show tables like "{1}";'.format(database, table))
return stdout and table.lower() in stdout
else:
stdout = run_hive_cmd("""use %s; show partitions %s partition
(%s)""" % (database, table, self.partition_spec(partition)))
if stdout:
return True
else:
return False
def table_schema(self, table, database='default'):
describe = run_hive_cmd("use {0}; describe {1}".format(database, table))
if not describe or "does not exist" in describe:
return None
return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
def partition_spec(self, partition):
"""
        Turns a dict into a Hive partition specification string.
"""
return ','.join(["{0}='{1}'".format(k, v) for (k, v) in
sorted(six.iteritems(partition), key=operator.itemgetter(0))])
class ApacheHiveCommandClient(HiveCommandClient):
"""
A subclass for the HiveCommandClient to (in some cases) ignore the return code from
the hive command so that we can just parse the output.
"""
def table_schema(self, table, database='default'):
describe = run_hive_cmd("use {0}; describe {1}".format(database, table), False)
if not describe or "Table not found" in describe:
return None
return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
class MetastoreClient(HiveClient):
def table_location(self, table, database='default', partition=None):
with HiveThriftContext() as client:
if partition is not None:
partition_str = self.partition_spec(partition)
thrift_table = client.get_partition_by_name(database, table, partition_str)
else:
thrift_table = client.get_table(database, table)
return thrift_table.sd.location
def table_exists(self, table, database='default', partition=None):
with HiveThriftContext() as client:
if partition is None:
return table in client.get_all_tables(database)
else:
return partition in self._existing_partitions(table, database, client)
def _existing_partitions(self, table, database, client):
def _parse_partition_string(partition_string):
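            # Thrift returns partition names as "key1=value1/key2=value2/...";
            # turn that into a dict of key -> value.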
partition_def = {}
for part in partition_string.split("/"):
name, value = part.split("=")
partition_def[name] = value
return partition_def
# -1 is max_parts, the # of partition names to return (-1 = unlimited)
partition_strings = client.get_partition_names(database, table, -1)
return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]
def table_schema(self, table, database='default'):
with HiveThriftContext() as client:
return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]
def partition_spec(self, partition):
return "/".join("%s=%s" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))
class HiveThriftContext(object):
"""
Context manager for hive metastore client.
"""
def __enter__(self):
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Note that this will only work with a CDH release.
# This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.
# If using the Apache release of Hive this import will fail.
from hive_metastore import ThriftHiveMetastore
config = luigi.configuration.get_config()
host = config.get('hive', 'metastore_host')
port = config.getint('hive', 'metastore_port')
transport = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
transport.open()
self.transport = transport
return ThriftHiveMetastore.Client(protocol)
except ImportError as e:
raise Exception('Could not import Hive thrift library:' + str(e))
def __exit__(self, exc_type, exc_val, exc_tb):
self.transport.close()
def get_default_client():
if get_hive_syntax() == "apache":
return ApacheHiveCommandClient()
else:
return HiveCommandClient()
client = get_default_client()
class HiveQueryTask(luigi.hadoop.BaseHadoopJobTask):
"""
Task to run a hive query.
"""
# by default, we let hive figure these out.
n_reduce_tasks = None
bytes_per_reducer = None
reducers_max = None
@abc.abstractmethod
def query(self):
""" Text of query to run in hive """
raise RuntimeError("Must implement query!")
def hiverc(self):
"""
        Location of an rc file to run before the query.
        If the hiverc-location key is specified in client.cfg, its value is used;
        otherwise returns None.
Returning a list of rc files will load all of them in order.
"""
return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)
def hiveconfs(self):
"""
        Returns a dict of key=value settings to be passed along
to the hive command line via --hiveconf. By default, sets
mapred.job.name to task_id and if not None, sets:
* mapred.reduce.tasks (n_reduce_tasks)
* mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
* hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
* hive.exec.reducers.max (reducers_max)
"""
jcs = {}
jcs['mapred.job.name'] = self.task_id
if self.n_reduce_tasks is not None:
jcs['mapred.reduce.tasks'] = self.n_reduce_tasks
if self.pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs['mapred.fairscheduler.pool'] = self.pool
elif scheduler_type == 'capacity':
jcs['mapred.job.queue.name'] = self.pool
if self.bytes_per_reducer is not None:
jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer
if self.reducers_max is not None:
jcs['hive.exec.reducers.max'] = self.reducers_max
return jcs
def job_runner(self):
return HiveQueryRunner()
class HiveQueryRunner(luigi.hadoop.JobRunner):
"""
Runs a HiveQueryTask by shelling out to hive.
"""
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass
def run_job(self, job):
self.prepare_outputs(job)
with tempfile.NamedTemporaryFile() as f:
query = job.query()
if isinstance(query, unicode):
query = query.encode('utf8')
f.write(query)
f.flush()
arglist = [load_hive_cmd(), '-f', f.name]
hiverc = job.hiverc()
if hiverc:
if isinstance(hiverc, str):
hiverc = [hiverc]
for rcfile in hiverc:
arglist += ['-i', rcfile]
if job.hiveconfs():
for k, v in six.iteritems(job.hiveconfs()):
arglist += ['--hiveconf', '{0}={1}'.format(k, v)]
logger.info(arglist)
return luigi.hadoop.run_and_track_hadoop_job(arglist)
class HiveTableTarget(luigi.Target):
"""
exists returns true if the table exists.
"""
def __init__(self, table, database='default', client=None):
self.database = database
self.table = table
self.hive_cmd = load_hive_cmd()
if client is None:
client = get_default_client()
self.client = client
def exists(self):
logger.debug("Checking Hive table '%s.%s' exists", self.database, self.table)
return self.client.table_exists(self.table, self.database)
@property
def path(self):
"""
Returns the path to this table in HDFS.
"""
location = self.client.table_location(self.table, self.database)
if not location:
raise Exception("Couldn't find location for table: {0}".format(str(self)))
return location
def open(self, mode):
return NotImplementedError("open() is not supported for HiveTableTarget")
class HivePartitionTarget(luigi.Target):
"""
exists returns true if the table's partition exists.
"""
def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):
self.database = database
self.table = table
self.partition = partition
if client is None:
client = get_default_client()
self.client = client
self.fail_missing_table = fail_missing_table
def exists(self):
try:
logger.debug("Checking Hive table '{d}.{t}' for partition {p}".format(d=self.database, t=self.table, p=str(self.partition)))
return self.client.table_exists(self.table, self.database, self.partition)
except HiveCommandError:
if self.fail_missing_table:
raise
else:
if self.client.table_exists(self.table, self.database):
# a real error occurred
raise
else:
# oh the table just doesn't exist
return False
@property
def path(self):
"""
Returns the path for this HiveTablePartitionTarget's data.
"""
location = self.client.table_location(self.table, self.database, self.partition)
if not location:
raise Exception("Couldn't find location for table: {0}".format(str(self)))
return location
def open(self, mode):
return NotImplementedError("open() is not supported for HivePartitionTarget")
class ExternalHiveTask(luigi.ExternalTask):
"""
External task that depends on a Hive table/partition.
"""
database = luigi.Parameter(default='default')
table = luigi.Parameter()
# since this is an external task and will never be initialized from the CLI, partition can be any python object, in this case a dictionary
partition = luigi.Parameter(default=None, description='Python dictionary specifying the target partition e.g. {"date": "2013-01-25"}')
def output(self):
if self.partition is not None:
assert self.partition, "partition required"
return HivePartitionTarget(table=self.table,
partition=self.partition,
database=self.database)
else:
return HiveTableTarget(self.table, self.database)
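# A minimal usage sketch (hypothetical task, table, and partition values) of how
# a downstream task can wait for an external Hive partition via ExternalHiveTask.
class _ExamplePageViewReport(luigi.Task):
    date = luigi.DateParameter()

    def requires(self):
        return ExternalHiveTask(database='default',
                                table='page_views',
                                partition={'date': self.date.isoformat()})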
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
########################################################################
#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import strutils
import six
from six.moves.urllib import parse
from openstack.common._i18n import _
from openstack.common.apiclient import exceptions
def getid(obj):
"""Return id if argument is a Resource.
    Abstracts the common pattern of allowing either an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
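# Illustrative behaviour (hypothetical object): getid() lets callers pass either
# a Resource-like object or a bare id.
#
#   >>> class _FakeServer(object):
#   ...     id = 'abc123'
#   >>> getid(_FakeServer())
#   'abc123'
#   >>> getid('abc123')
#   'abc123'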
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
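# A minimal sketch (hypothetical hook function and class names) of how
# HookableMixin is used: register hook callables per hook type, then fire them
# all with run_hooks().
def _hookable_mixin_example():
    class ExampleShell(HookableMixin):
        """Illustrative subclass; not part of this module's public API."""

    def log_args(argv):
        print('pre-parse hook saw: %s' % (argv,))

    ExampleShell.add_hook('__pre_parse_args__', log_args)
    ExampleShell.run_hooks('__pre_parse_args__', ['--debug'])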
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None, the whole response body
            is used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response body
            is used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            POST request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response body
            is used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            PUT request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response body
            is used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            PATCH request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None, the whole response body
            is used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
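# Usage sketch (hypothetical manager and attribute values): both helpers fetch
# the full listing and filter client-side, so they suit small collections.
#
#   flavors = flavor_manager.findall(ram=2048)      # every matching item
#   flavor = flavor_manager.find(name='m1.small')   # exactly one, else it raises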
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer to collections in both URLs (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URLs could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in six.iteritems(kwargs.copy()):
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
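# A minimal sketch (hypothetical resource name) of how `collection_key` and
# `key` drive CrudManager URL construction; build_url() needs no HTTP client.
def _crud_url_example():
    class EntityManager(CrudManager):
        collection_key = 'entities'
        key = 'entity'

    manager = EntityManager(client=None)
    assert manager.build_url() == '/entities'
    assert manager.build_url(entity_id='42') == '/entities/42'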
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
        Some clients, such as novaclient, have the option to lazy-load the
        details, which can then be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
self._add_details(
{'x_request_id': self.manager.client.last_request_id})
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
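# Illustrative sketch (hypothetical attribute values): Resource copies the info
# dict onto the instance, so response fields become plain attributes.
def _resource_example():
    class Server(Resource):
        HUMAN_ID = True

    server = Server(manager=None, info={'id': '1', 'name': 'My Web Server'},
                    loaded=True)
    assert server.name == 'My Web Server'
    # human_id slugifies NAME_ATTR ('name' by default), e.g. 'my-web-server'
    print(server.human_id)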
|
|
'''
/******************************************************************
*
* Copyright 2018 Samsung Electronics All Rights Reserved.
*
*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************/
'''
import re
import os
import operator
from itertools import takewhile
from ite.config import *
from ite.util import *
class RobotTCAnalyzer:
FILE_PATTERN = re.compile(r'(?P<testsuite>\w+)\.\w+')
TESTSUITE_PATTERN = re.compile(r'(?P<service>\w+)_(?P<testarea>\w+)_(?P<module>\w+)_(?P<objective>\w+)')
TC_HEADER_PATTERN = re.compile(r'\*\*\* Test Cases \*\*\*', re.DOTALL)
SECTION_PATTERN = re.compile(r'\*\*\*.+\*\*\*', re.DOTALL)
TC_NAME_PATTERN = re.compile(r'^(?P<testcase>\w+.*)')
DOCUMENTATION_PATTERN = re.compile(r'\[Documentation\]')
TAG_PATTERN = re.compile(r'\[Tags\]')
def count_file_bytes(self, iterable):
file_bytes = 0
for item in iterable:
file_bytes += len(item)
yield file_bytes
def get_line_number(self, path, pos):
        file = open_read_file(path)
        if file is None:
            return 0
        line = 1 + len(list(takewhile(lambda x: x <= pos, self.count_file_bytes(file))))
file.close()
return line
def get_testsuite_info(self, path):
match = self.FILE_PATTERN.search(path)
        if match is None:
return None, None, None
testsuite = match.group('testsuite')
match = self.TESTSUITE_PATTERN.search(testsuite)
        if match is None:
return None, None, None
module = match.group('module')
objective = match.group('objective')
platform = ''
for candidate in TEST_PLATFORM:
if candidate.lower() in objective.lower():
platform = candidate.upper()
                break
return testsuite, platform, module
def get_testcase_section(self, source):
header_match = self.TC_HEADER_PATTERN.search(source)
        if header_match is None:
            return '', 0
header_pos = header_match.end()
testcase_section = source[header_pos:]
end_match = self.SECTION_PATTERN.search(testcase_section)
        if end_match is not None:
testcase_section = testcase_section[:end_match.start()]
return testcase_section, header_pos
def get_comments_area(self, tc_area):
doc_match = self.DOCUMENTATION_PATTERN.search(tc_area)
tag_match = self.TAG_PATTERN.search(tc_area)
        if doc_match is None:
return '', ''
comments = tc_area[doc_match.start():]
tag = ''
        if tag_match is not None:
comments = tc_area[doc_match.start():tag_match.start()]
tag = tc_area[tag_match.end():].splitlines()[0].strip()
return comments.strip(), tag.strip()
def get_tc_info(self, path):
source = read_file(path)
if source == '':
return
testsuite, platform, module = self.get_testsuite_info(path)
testcase_section, pos = self.get_testcase_section(source)
comments = ''
comment = ''
testcase = ''
line = 0
line_count = self.get_line_number(path, pos)
tag = ''
for strline in testcase_section.splitlines():
line_count += 1
match = self.TC_NAME_PATTERN.search(strline)
            if match is None:
comments += strline + '\n'
continue
if not testcase == '':
comment, tag = self.get_comments_area(comments)
yield line, platform, module, testsuite, testcase, comment, tag
testcase = strline
line = line_count - 1
comments = ''
comment, tag = self.get_comments_area(comments)
yield line, platform, module, testsuite, testcase, comment, tag
def analyze_tc_file(self, path):
        if not os.path.isfile(path):
return
print("### Start to analyze test case file: " + path)
test_data = list()
invalid_tc = list()
for line, platform, module, suite, name, comments, tag in self.get_tc_info(path):
            #print("line: %d, platform: %s, module: %s, testsuite: %s, testcase: %s, tag: %s, comments: %s" %
            #      (line, platform, module, suite, name, tag, comments))
spec = TestSpec(line, suite, name)
success, message = spec.check_tc_naming_rule()
            if not success:
invalid_tc.append((line, suite, name, message))
continue
if len(comments) == 0:
invalid_tc.append((line, suite, name, 'Cannot Find TC Comments'))
continue
if len(tag) == 0:
invalid_tc.append((line, suite, name, 'Cannot Find TC Tag'))
continue
            if 'positive' not in tag.lower() and 'negative' not in tag.lower():
                invalid_tc.append((line, suite, name, 'TC Tag does not include (Positive|Negative) Category: ' + tag))
continue
success, message = spec.parse_comment(comments)
            if not success:
invalid_tc.append((line, suite, name, message))
continue
            if platform not in TEST_PLATFORM:
invalid_tc.append((line, suite, name, 'Invalid Platform Definition: ' + platform))
continue
test_data.append((platform, module, spec))
#print("test spec: " + spec.to_string())
return test_data, invalid_tc
class TestSpec:
    COMMENT_PATTERN = re.compile(r'\|(?P<tag>.*)\|(?P<description>.*)\|')
def __init__(self, line, suite, name):
self.line = line
self.suite = suite
self.name = name
for tag in list(TAG_DIC):
self.__dict__[tag] = ''
def to_string(self):
result = "%s.%s(line: %d)\n" %(self.suite, self.name, self.line)
for key, title in sorted(TAG_DIC.items(), key=operator.itemgetter(1)):
            if self.__dict__[key] is None or title[1] == '':
continue
result += "\t%-15s : %s\n" %(title[1], self.__dict__[key].replace("\n", "\t\n "))
return result
def check_tc_naming_rule(self):
        if self.suite == '' or self.name == '':
            return False, 'Empty Test Suite Name or Empty Test Case Name'
        return True, ''
def parse_comment(self, comments):
tag = ''
description = ''
prev_tag = ''
for match in self.COMMENT_PATTERN.finditer(comments):
tag = match.group('tag').strip()
description = match.group('description').strip()
if tag == '':
tag = prev_tag
if (tag in self.__dict__):
self.__dict__[tag] = '\n'.join([self.__dict__[tag], description]).strip()
prev_tag = tag
return True, ''
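# Usage sketch: `path` is assumed to point at an existing Robot Framework suite
# file whose name follows the <service>_<testarea>_<module>_<objective> convention.
def _robot_analyzer_example(path):
    analyzer = RobotTCAnalyzer()
    test_data, invalid_tc = analyzer.analyze_tc_file(path)
    for platform, module, spec in test_data:
        print(spec.to_string())
    for line, suite, name, message in invalid_tc:
        print('line %d: %s.%s - %s' % (line, suite, name, message))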
|
|
#!/usr/bin/env python
"""
map.py
State Estimation and Analysis for PYthon
Utilities for dealing with basemap plotting. These routines are simply
abstractions over the existing basemap to make it quicker to generate
basemap plots and figures.
Examples
-------
Assume you have longitude, latitude, and sst values:
>>> m=seapy.mapping.map(llcrnrlon=lon[0,0],llcrnrlat=lat[0,0],
>>> urcrnrlon=lon[-1,-1],urcrnrlat=lat[-1,-1],dlat=2,dlon=2)
>>> m.pcolormesh(lon,lat,sst,vmin=22,vmax=26,cmap=plt.cm.bwr)
>>> m.land()
>>> m.colorbar(label="Sea Surface Temp [$^\circ$C]",cticks=[22,23,24,25,26])
>>> m.ax.patch.set_facecolor("aqua")
>>> m.ax.patch.set_alpha(1)
>>> m.fig.patch.set_alpha(0.0)
>>> m.fig.savefig("sst.png",dpi=100)
Written by Brian Powell on 9/4/14
Copyright (c)2017 University of Hawaii under the MIT-License.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from seapy.model import asgrid
def gen_coastline(lon, lat, bathy, depth=0):
"""
Given lon, lat, and bathymetry, generate vectors of line segments
of the coastline. This can be exported to matlab (via savemat) to be
used with the 'editmask' routine for creating grid masks.
Input
-----
lon : array,
longitudes of bathymetry locations
lat : array,
latitudes of bathymetry locations
bathy : array,
bathymetry (negative for ocean, positive for land) values
depth : float,
depth to use as the definition of the coast
Returns
-------
lon : ndarray,
vector of coastlines, separated by nan (matlab-style)
lat : ndarray,
vector of coastlines, separated by nan (matlab-style)
"""
CS = plt.contour(lon, lat, bathy, [depth - 0.25, depth + 0.25])
lon = list()
lat = list()
for col in CS.collections:
for path in col.get_paths():
lon.append(path.vertices[:, 0])
lon.append(np.nan)
lat.append(path.vertices[:, 1])
lat.append(np.nan)
return (np.hstack(lon), np.hstack(lat))
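# A short sketch (hypothetical file name, assumes scipy is available) of the
# matlab export mentioned in the gen_coastline docstring: dump the coastline
# segments to a .mat file for use with the 'editmask' routine.
def _export_coastline_example(lon, lat, bathy, matfile='coast.mat'):
    from scipy.io import savemat
    clon, clat = gen_coastline(lon, lat, bathy, depth=0)
    savemat(matfile, {'lon': clon, 'lat': clat})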
class map(object):
def __init__(self, grid=None, llcrnrlon=-180, llcrnrlat=-40, urcrnrlon=180,
urcrnrlat=40, proj='lcc', resolution='c', figsize=(8., 6.),
dlat=1, dlon=2, fig=None, ax=None, fill_color="aqua"):
"""
map class for abstracting the basemap methods for quick and easy creation
of geographically referenced data figures
Parameters
----------
grid: seapy.model.grid or string, optional:
grid to use to define boundaries
llcrnrlon: float, optional
longitude of lower, left corner
llcrnrlat: float, optional
latitude of lower, left corner
urcrnrlon: float, optional
longitude of upper, right corner
urcrnrlat: float, optional
latitude of upper, right corner
proj: string, optional
projection to use for map
resolution: character
resolution to use for coastline, etc. From Basemap:
'c' (crude), 'l' (low), 'i' (intermediate),
'h' (high), 'f' (full), or None
figsize: list, optional
dimensions to use for creation of figure
dlat: float, optional
interval to mark latitude lines (e.g., if dlat=0.5 every 0.5deg mark)
dlon: float, optional
interval to mark longitude lines (e.g., if dlon=0.5 every 0.5deg mark)
fig: matplotlib.pyplot.figure object, optional
If you want to plot on a pre-configured figure, pass the figure object
along with the axis object.
ax: matplotlib.pyplot.axis object, optional
If you want to plot on a pre-configured figure, pass the axis object
along with the figure object.
fill_color: string, optional
The color to use for the axis background
Returns
-------
None
"""
if grid is not None:
grid = asgrid(grid)
llcrnrlat = np.min(grid.lat_rho)
urcrnrlat = np.max(grid.lat_rho)
llcrnrlon = np.min(grid.lon_rho)
urcrnrlon = np.max(grid.lon_rho)
self.basemap = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
projection=proj,
lat_0=urcrnrlat - (urcrnrlat - llcrnrlat) / 2.,
lon_0=urcrnrlon - (urcrnrlon - llcrnrlon) / 2.,
resolution=resolution, area_thresh=0.0, ax=ax)
self.figsize = figsize
self.dlon = dlon
self.dlat = dlat
self.fig = fig
self.ax = ax
self.fill_color = fill_color
        reset = fig is None
self.new_figure(reset=reset)
def new_figure(self, fill_color=None, reset=False, dpi=150):
"""
Create or update a figure for plotting
Parameters
----------
fill_color: string, optional
Color to fill the background of the axes with
reset: bool, optional
Reset the figure
"""
if reset:
if self.ax:
self.ax.set_axis_off()
self.ax = None
if self.fig:
self.fig.clf()
self.fig = None
if self.fig is None or self.ax is None:
self.fig = plt.figure(figsize=self.figsize, dpi=dpi)
self.ax = self.fig.add_axes([-0.01, 0.25, 1.01, 0.7])
if fill_color is None:
fill_color = self.fill_color
self.basemap.drawmapboundary(fill_color=fill_color)
# Create the longitude lines
nticks = int((self.basemap.urcrnrlon - self.basemap.llcrnrlon) /
self.dlon)
md = np.mod(self.basemap.llcrnrlon, self.dlon)
if md:
slon = self.basemap.llcrnrlon + self.dlon - md
else:
slon = self.basemap.llcrnrlon
nticks += 1
lon_lines = np.arange(nticks) * self.dlon + slon
self.basemap.drawmeridians(lon_lines, color="0.5",
linewidth=0.25, dashes=[1, 1, 0.1, 1],
labels=[0, 0, 0, 1], fontsize=12)
# Create the latitude lines
nticks = int((self.basemap.urcrnrlat - self.basemap.llcrnrlat) /
self.dlat)
md = np.mod(self.basemap.llcrnrlat, self.dlat)
if md:
slat = self.basemap.llcrnrlat + self.dlat - md
else:
slat = self.basemap.llcrnrlat
nticks += 1
lat_lines = np.arange(nticks) * self.dlat + slat
self.basemap.drawparallels(lat_lines, color="0.5",
linewidth=0.25, dashes=[1, 1, 0.1, 1],
labels=[1, 0, 0, 0], fontsize=12)
def land(self, color="black"):
"""
Draw the land mask
Parameters
----------
color: string, optional
color to draw the mask with
"""
self.basemap.drawcoastlines()
self.basemap.drawcountries()
self.basemap.fillcontinents(color=color)
def zoom(self, xrange, yrange):
"""
zoom the figure to a specified lat, lon range
Parameters
----------
xrange: array
minimum and maximum longitudes to display
yrange: array
minimum and maximum latitudes to display
"""
x, y = self.basemap(xrange, yrange)
self.ax.set_xlim(x)
self.ax.set_ylim(y)
self.fig.canvas.draw()
def pcolormesh(self, lon, lat, data, **kwargs):
"""
pcolormesh field data onto our geographic plot
Parameters
----------
lon: array
Longitude field for data
lat: array
Latitude field for data
data: array
data to pcolor
**kwargs: arguments, optional
additional arguments to pass to pcolor
"""
# Pcolor requires a modification to the locations to line up with
# the geography
dlon = lon * 0
dlat = lat * 0
dlon[:, 0:-1] = lon[:, 1:] - lon[:, 0:-1]
dlat[0:-1, :] = lat[1:, :] - lat[0:-1, :]
x, y = self.basemap(lon - dlon * 0.5, lat - dlat * 0.5)
self.pc = self.ax.pcolormesh(x, y, data, **kwargs)
def scatter(self, lon, lat, data, **kwargs):
"""
scatter plot data onto our geographic plot
Parameters
----------
lon: array
Longitude field for data
lat: array
Latitude field for data
data: array
data to pcolor
**kwargs: arguments, optional
additional arguments to pass to pcolor
"""
x, y = self.basemap(lon, lat)
self.pc = self.ax.scatter(x, y, c=data, **kwargs)
def colorbar(self, label=None, cticks=None, **kwargs):
"""
Display a colorbar on the figure
Parameters
----------
label: string, optional
Colorbar label title
cticks: array, optional
Where to place the tick marks and values for the colorbar
**kwargs: arguments, optional
additional arguments to pass to colorbar
"""
self.cax = self.fig.add_axes([0.25, 0.16, 0.5, 0.03])
self.cb = plt.colorbar(self.pc, cax=self.cax, orientation="horizontal",
ticks=cticks, **kwargs)
self.basemap.set_axes_limits(ax=self.ax)
if label is not None:
self.cb.set_label(label)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from hashlib import sha1
from six import string_types
from pants.backend.core.wrapped_globs import FilesetWithSpec
from pants.base.address import Addresses, SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.base.hash_utils import hash_all
from pants.base.payload import Payload
from pants.base.payload_field import DeferredSourcesField, SourcesField
from pants.base.source_root import SourceRoot
from pants.base.target_addressable import TargetAddressable
from pants.base.validation import assert_list
class AbstractTarget(object):
@classmethod
def subsystems(cls):
"""The subsystems this target uses.
Targets always use the global subsystem instance. They have no notion of any other scope.
:return: A tuple of subsystem types.
"""
return tuple()
@property
def has_resources(self):
"""Returns True if the target has an associated set of Resources."""
return hasattr(self, 'resources') and self.resources
@property
def is_exported(self):
"""Returns True if the target provides an artifact exportable from the repo."""
# TODO(John Sirois): fixup predicate dipping down into details here.
return self.has_label('exportable') and self.provides
# DEPRECATED to be removed after 0.0.29
# do not use this method, use isinstance(..., JavaThriftLibrary) or a yet-to-be-defined mixin
@property
def is_thrift(self):
"""Returns True if the target has thrift IDL sources."""
return False
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_jvm(self):
"""Returns True if the target produces jvm bytecode."""
return self.has_label('jvm')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_codegen(self):
"""Returns True if the target is a codegen target."""
return self.has_label('codegen')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_java(self):
"""Returns True if the target has or generates java sources."""
return self.has_label('java')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_python(self):
"""Returns True if the target has python sources."""
return self.has_label('python')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_scala(self):
"""Returns True if the target has scala sources."""
return self.has_label('scala')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_scalac_plugin(self):
"""Returns True if the target builds a scalac plugin."""
return self.has_label('scalac_plugin')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_test(self):
"""Returns True if the target is comprised of tests."""
return self.has_label('tests')
# DEPRECATED to be removed after 0.0.29
# do not use this method, use an isinstance check on a yet-to-be-defined mixin
@property
def is_android(self):
"""Returns True if the target is an android target."""
return self.has_label('android')
class Target(AbstractTarget):
"""The baseclass for all pants targets.
Handles registration of a target amongst all parsed targets as well as location of the target
parse context.
"""
class WrongNumberOfAddresses(Exception):
"""Internal error, too many elements in Addresses"""
class UnknownArguments(TargetDefinitionException):
"""Unknown keyword arguments supplied to Target."""
class IllegalArgument(TargetDefinitionException):
"""Argument that isn't allowed supplied to Target."""
LANG_DISCRIMINATORS = {
'java': lambda t: t.is_jvm,
'python': lambda t: t.is_python,
}
@classmethod
def lang_discriminator(cls, lang):
"""Returns a tuple of target predicates that select the given lang vs all other supported langs.
The left hand side accepts targets for the given language; the right hand side accepts
targets for all other supported languages.
"""
def is_other_lang(target):
for name, discriminator in cls.LANG_DISCRIMINATORS.items():
if name != lang and discriminator(target):
return True
return False
return (cls.LANG_DISCRIMINATORS[lang], is_other_lang)
@classmethod
def get_addressable_type(target_cls):
class ConcreteTargetAddressable(TargetAddressable):
@classmethod
def get_target_type(cls):
return target_cls
return ConcreteTargetAddressable
@property
def target_base(self):
""":returns: the source root path for this target."""
return SourceRoot.find(self)
@classmethod
def identify(cls, targets):
"""Generates an id for a set of targets."""
return cls.combine_ids(target.id for target in targets)
@classmethod
def maybe_readable_identify(cls, targets):
"""Generates an id for a set of targets.
If the set is a single target, just use that target's id."""
return cls.maybe_readable_combine_ids([target.id for target in targets])
@staticmethod
def combine_ids(ids):
"""Generates a combined id for a set of ids."""
return hash_all(sorted(ids)) # We sort so that the id isn't sensitive to order.
@classmethod
def maybe_readable_combine_ids(cls, ids):
"""Generates combined id for a set of ids, but if the set is a single id, just use that."""
ids = list(ids) # We can't len a generator.
return ids[0] if len(ids) == 1 else cls.combine_ids(ids)
def __init__(self, name, address, build_graph, payload=None, tags=None, description=None,
**kwargs):
"""
:param string name: The name of this target, which combined with this
build file defines the target address.
:param dependencies: Other targets that this target depends on.
:type dependencies: list of target specs
:param Address address: The Address that maps to this Target in the BuildGraph
:param BuildGraph build_graph: The BuildGraph that this Target lives within
:param Payload payload: The configuration encapsulated by this target. Also in charge of
most fingerprinting details.
:param iterable<string> tags: Arbitrary string tags that describe this target. Usable
by downstream/custom tasks for reasoning about build graph. NOT included in payloads
and thus not used in fingerprinting, thus not suitable for anything that affects how
a particular target is built.
:param string description: Human-readable description of this target.
"""
# dependencies is listed above; implementation hides in TargetAddressable
self.payload = payload or Payload()
self.payload.freeze()
self.name = name
self.address = address
self._tags = set(tags or [])
self._build_graph = build_graph
self.description = description
self.labels = set()
self._cached_fingerprint_map = {}
self._cached_transitive_fingerprint_map = {}
if kwargs:
error_message = '{target_type} received unknown arguments: {args}'
raise self.UnknownArguments(address.spec, error_message.format(
target_type=type(self).__name__,
args=''.join('\n {} = {}'.format(key, value) for key, value in kwargs.items())
))
@property
def tags(self):
return self._tags
@property
def num_chunking_units(self):
return max(1, len(self.sources_relative_to_buildroot()))
def assert_list(self, maybe_list, expected_type=string_types, key_arg=None):
return assert_list(maybe_list, expected_type, key_arg=key_arg,
raise_type=lambda msg: TargetDefinitionException(self, msg))
def compute_invalidation_hash(self, fingerprint_strategy=None):
"""
:param FingerprintStrategy fingerprint_strategy: optional fingerprint strategy to use to compute
the fingerprint of a target
:return: a fingerprint representing this target (no dependencies)
:rtype: string
"""
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
return fingerprint_strategy.fingerprint_target(self)
def invalidation_hash(self, fingerprint_strategy=None):
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
if fingerprint_strategy not in self._cached_fingerprint_map:
self._cached_fingerprint_map[fingerprint_strategy] = self.compute_invalidation_hash(fingerprint_strategy)
return self._cached_fingerprint_map[fingerprint_strategy]
def mark_extra_invalidation_hash_dirty(self):
pass
def mark_invalidation_hash_dirty(self):
self._cached_fingerprint_map = {}
self._cached_transitive_fingerprint_map = {}
self.mark_extra_invalidation_hash_dirty()
def transitive_invalidation_hash(self, fingerprint_strategy=None):
"""
:param FingerprintStrategy fingerprint_strategy: optional fingerprint strategy to use to compute
the fingerprint of a target
:return: A fingerprint representing this target and all of its dependencies.
The return value can be `None`, indicating that this target and all of its transitive dependencies
did not contribute to the fingerprint, according to the provided FingerprintStrategy.
:rtype: string
"""
fingerprint_strategy = fingerprint_strategy or DefaultFingerprintStrategy()
if fingerprint_strategy not in self._cached_transitive_fingerprint_map:
hasher = sha1()
def dep_hash_iter():
for dep in self.dependencies:
dep_hash = dep.transitive_invalidation_hash(fingerprint_strategy)
if dep_hash is not None:
yield dep_hash
dep_hashes = sorted(list(dep_hash_iter()))
for dep_hash in dep_hashes:
hasher.update(dep_hash)
target_hash = self.invalidation_hash(fingerprint_strategy)
if target_hash is None and not dep_hashes:
return None
dependencies_hash = hasher.hexdigest()[:12]
combined_hash = '{target_hash}.{deps_hash}'.format(target_hash=target_hash,
deps_hash=dependencies_hash)
self._cached_transitive_fingerprint_map[fingerprint_strategy] = combined_hash
return self._cached_transitive_fingerprint_map[fingerprint_strategy]
def mark_transitive_invalidation_hash_dirty(self):
self._cached_transitive_fingerprint_map = {}
self.mark_extra_transitive_invalidation_hash_dirty()
def mark_extra_transitive_invalidation_hash_dirty(self):
pass
def inject_dependency(self, dependency_address):
self._build_graph.inject_dependency(dependent=self.address, dependency=dependency_address)
def invalidate_dependee(dependee):
dependee.mark_transitive_invalidation_hash_dirty()
self._build_graph.walk_transitive_dependee_graph([self.address], work=invalidate_dependee)
def has_sources(self, extension=''):
"""
:param string extension: suffix of filenames to test for
:return: True if the target contains sources that match the optional extension suffix
:rtype: bool
"""
sources_field = self.payload.get_field('sources')
if sources_field:
return sources_field.has_sources(extension)
else:
return False
def sources_relative_to_buildroot(self):
if self.has_sources():
return self.payload.sources.relative_to_buildroot()
else:
return []
def sources_relative_to_source_root(self):
if self.has_sources():
abs_source_root = os.path.join(get_buildroot(), self.target_base)
for source in self.sources_relative_to_buildroot():
abs_source = os.path.join(get_buildroot(), source)
yield os.path.relpath(abs_source, abs_source_root)
def globs_relative_to_buildroot(self):
sources_field = self.payload.get_field('sources')
if sources_field:
return sources_field.filespec
@property
def derived_from(self):
"""Returns the target this target was derived from.
If this target was not derived from another, returns itself.
"""
return self._build_graph.get_derived_from(self.address)
@property
def derived_from_chain(self):
"""Returns all targets that this target was derived from.
If this target was not derived from another, returns an empty sequence.
"""
cur = self
while cur.derived_from is not cur:
cur = cur.derived_from
yield cur
@property
def concrete_derived_from(self):
"""Returns the concrete target this target was (directly or indirectly) derived from.
The returned target is guaranteed to not have been derived from any other target, and is thus
guaranteed to be a 'real' target from a BUILD file, not a programmatically injected target.
"""
return self._build_graph.get_concrete_derived_from(self.address)
@property
def traversable_specs(self):
"""
:return: specs referenced by this target to be injected into the build graph
:rtype: list of strings
"""
return []
@property
def traversable_dependency_specs(self):
"""
:return: specs representing dependencies of this target that will be injected to the build
graph and linked in the graph as dependencies of this target
:rtype: list of strings
"""
# To support DeferredSourcesField
for name, payload_field in self.payload.fields:
if isinstance(payload_field, DeferredSourcesField) and payload_field.address:
yield payload_field.address.spec
@property
def dependencies(self):
"""
:return: targets that this target depends on
:rtype: list of Target
"""
return [self._build_graph.get_target(dep_address)
for dep_address in self._build_graph.dependencies_of(self.address)]
@property
def dependents(self):
"""
:return: targets that depend on this target
:rtype: list of Target
"""
return [self._build_graph.get_target(dep_address)
for dep_address in self._build_graph.dependents_of(self.address)]
@property
def is_synthetic(self):
"""
:return: True if this target did not originate from a BUILD file.
"""
return self.concrete_derived_from.address != self.address
@property
def is_original(self):
"""Returns ``True`` if this target is derived from no other."""
return self.derived_from == self
@property
def id(self):
"""A unique identifier for the Target.
The generated id is safe for use as a path name on unix systems.
"""
return self.address.path_safe_spec
@property
def identifier(self):
"""A unique identifier for the Target.
The generated id is safe for use as a path name on unix systems.
"""
return self.id
def walk(self, work, predicate=None):
"""Walk of this target's dependency graph, DFS preorder traversal, visiting each node exactly
once.
If a predicate is supplied it will be used to test each target before handing the target to
work and descending. Work can return targets in which case these will be added to the walk
candidate set if not already walked.
:param work: Callable that takes a :py:class:`pants.base.target.Target`
as its single argument.
:param predicate: Callable that takes a :py:class:`pants.base.target.Target`
      as its single argument and returns True if the target should be passed to ``work``.
"""
if not callable(work):
raise ValueError('work must be callable but was {}'.format(work))
if predicate and not callable(predicate):
raise ValueError('predicate must be callable but was {}'.format(predicate))
self._build_graph.walk_transitive_dependency_graph([self.address], work, predicate)
def closure(self, bfs=False):
"""Returns this target's transitive dependencies.
The walk will be depth-first in preorder, or breadth first if bfs=True is specified.
"""
if bfs:
return self._build_graph.transitive_subgraph_of_addresses_bfs([self.address])
else:
return self._build_graph.transitive_subgraph_of_addresses([self.address])
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def add_labels(self, *label):
self.labels.update(label)
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def remove_label(self, label):
self.labels.remove(label)
# TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
def has_label(self, label):
return label in self.labels
def __lt__(self, other):
return self.address < other.address
def __eq__(self, other):
return isinstance(other, Target) and self.address == other.address
def __hash__(self):
return hash(self.address)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
addr = self.address if hasattr(self, 'address') else 'address not yet set'
return "{}({})".format(type(self).__name__, addr)
def create_sources_field(self, sources, sources_rel_path, address=None, key_arg=None):
"""Factory method to create a SourcesField appropriate for the type of the sources object.
Note that this method is called before the call to Target.__init__ so don't expect fields to
be populated!
:return: a payload field object representing the sources parameter
:rtype: SourcesField
"""
if isinstance(sources, Addresses):
# Currently, this is only created by the result of from_target() which takes a single argument
if len(sources.addresses) != 1:
raise self.WrongNumberOfAddresses(
"Expected a single address to from_target() as argument to {spec}"
.format(spec=address.spec))
referenced_address = SyntheticAddress.parse(sources.addresses[0],
relative_to=sources.rel_path)
return DeferredSourcesField(ref_address=referenced_address)
elif isinstance(sources, FilesetWithSpec):
filespec = sources.filespec
else:
sources = sources or []
assert_list(sources, key_arg=key_arg)
filespec = {'globs' : [os.path.join(sources_rel_path, src) for src in (sources or [])]}
return SourcesField(sources=sources, sources_rel_path=sources_rel_path, filespec=filespec)
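# Usage sketch (hypothetical root target): walk the transitive dependency graph
# depth-first in preorder, visiting only JVM targets.
def _print_jvm_closure(root_target):
  def work(target):
    print(target.address.spec)
  root_target.walk(work, predicate=lambda t: t.is_jvm)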
|
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a DigitalOcean Virtual Machine object (Droplet).
"""
import json
import logging
import time
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.digitalocean import digitalocean_disk
from perfkitbenchmarker.providers.digitalocean import util
from perfkitbenchmarker import providers
FLAGS = flags.FLAGS
UBUNTU_IMAGE = 'ubuntu-14-04-x64'
# DigitalOcean sets up the root account with a temporary
# password that's set as expired, requiring it to be changed
# immediately. This breaks dpkg postinst scripts, for example
# running adduser will produce errors:
#
# # chfn -f 'RabbitMQ messaging server' rabbitmq
# You are required to change your password immediately (root enforced)
# chfn: PAM: Authentication token is no longer valid; new one required
#
# To avoid this, just disable the root password (we don't need it),
# and remove the forced expiration.
CLOUD_CONFIG_TEMPLATE = '''#cloud-config
users:
- name: {0}
ssh-authorized-keys:
- {1}
sudo: ['ALL=(ALL) NOPASSWD:ALL']
groups: sudo
shell: /bin/bash
runcmd:
- [ passwd, -l, root ]
- [ chage, -d, -1, -I, -1, -E, -1, -M, 999999, root ]
'''
# HTTP status codes for creation that should not be retried.
FATAL_CREATION_ERRORS = set([
422, # 'unprocessable_entity' such as invalid size or region.
])
# Default configuration for action status polling.
DEFAULT_ACTION_WAIT_SECONDS = 10
DEFAULT_ACTION_MAX_TRIES = 90
def GetErrorMessage(stdout):
"""Extract a message field from JSON output if present."""
try:
return json.loads(stdout)['message']
except (ValueError, KeyError):
return stdout
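# Illustrative behaviour (hypothetical payloads): JSON bodies carrying a
# 'message' field are unwrapped, anything else is passed through untouched.
#
#   >>> GetErrorMessage('{"id": "unprocessable_entity", "message": "invalid size"}')
#   'invalid size'
#   >>> GetErrorMessage('plain text error')
#   'plain text error'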
class DigitalOceanVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a DigitalOcean Virtual Machine (Droplet)."""
CLOUD = providers.DIGITALOCEAN
# Subclasses should override the default image.
DEFAULT_IMAGE = None
def __init__(self, vm_spec):
"""Initialize a DigitalOcean virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(DigitalOceanVirtualMachine, self).__init__(vm_spec)
self.droplet_id = None
self.max_local_disks = 1
self.local_disk_counter = 0
self.image = self.image or self.DEFAULT_IMAGE
def _Create(self):
"""Create a DigitalOcean VM instance (droplet)."""
with open(self.ssh_public_key) as f:
public_key = f.read().rstrip('\n')
stdout, ret = util.RunCurlCommand(
'POST', 'droplets', {
'name': self.name,
'region': self.zone,
'size': self.machine_type,
'image': self.image,
'backups': False,
'ipv6': False,
'private_networking': True,
'ssh_keys': [],
'user_data': CLOUD_CONFIG_TEMPLATE.format(
self.user_name, public_key)
})
if ret != 0:
msg = GetErrorMessage(stdout)
if ret in FATAL_CREATION_ERRORS:
raise errors.Error('Creation request invalid, not retrying: %s' % msg)
raise errors.Resource.RetryableCreationError('Creation failed: %s' % msg)
response = json.loads(stdout)
self.droplet_id = response['droplet']['id']
# The freshly created droplet will be in a locked and unusable
# state for a while, and it cannot be deleted or modified in
# this state. Wait for the action to finish and check the
# reported result.
if not self._GetActionResult(response['links']['actions'][0]['id']):
raise errors.Resource.RetryableCreationError('Creation failed, see log.')
def _GetActionResult(self, action_id,
wait_seconds=DEFAULT_ACTION_WAIT_SECONDS,
max_tries=DEFAULT_ACTION_MAX_TRIES):
"""Wait until a VM action completes."""
for _ in xrange(max_tries):
time.sleep(wait_seconds)
stdout, ret = util.RunCurlCommand('GET', 'actions/%s' % action_id)
if ret != 0:
logging.warn('Unexpected action lookup failure.')
return False
response = json.loads(stdout)
status = response['action']['status']
logging.debug('action %d: status is "%s".', action_id, status)
if status == 'completed':
return True
elif status == 'errored':
return False
# If we get here, waiting timed out. Treat as failure.
logging.debug('action %d: timed out waiting.', action_id)
return False
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data."""
stdout, _ = util.RunCurlCommand(
'GET', 'droplets/%s' % self.droplet_id)
response = json.loads(stdout)['droplet']
for interface in response['networks']['v4']:
if interface['type'] == 'public':
self.ip_address = interface['ip_address']
else:
self.internal_ip = interface['ip_address']
def _Delete(self):
"""Delete a DigitalOcean VM instance."""
stdout, ret = util.RunCurlCommand(
'DELETE', 'droplets/%s' % self.droplet_id)
if ret != 0:
if ret == 404:
return # Assume already deleted.
raise errors.Resource.RetryableDeletionError('Deletion failed: %s' %
GetErrorMessage(stdout))
# Get the droplet's actions so that we can look up the
# ID for the deletion just issued.
stdout, ret = util.RunCurlCommand(
'GET', 'droplets/%s/actions' % self.droplet_id)
if ret != 0:
# There's a race condition here - if the lookup fails, assume it's
# due to deletion already being complete. Don't raise an error in
# that case, the _Exists check should trigger retry if needed.
return
response = json.loads(stdout)['actions']
# Get the action ID for the 'destroy' action. This assumes there's only
# one of them, but AFAIK there can't be more since 'destroy' locks the VM.
destroy = [v for v in response if v['type'] == 'destroy'][0]
# Wait for completion. Actions are global objects, so the action
# status should still be retrievable after the VM got destroyed.
# We don't care about the result, let the _Exists check decide if we
# need to try again.
self._GetActionResult(destroy['id'])
def _Exists(self):
"""Returns true if the VM exists."""
_, ret = util.RunCurlCommand(
'GET', 'droplets/%s' % self.droplet_id,
suppress_warning=True)
if ret == 0:
return True
if ret == 404:
# Definitely doesn't exist.
return False
# Unknown status - assume it doesn't exist. TODO(klausw): retry?
return False
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
if disk_spec.disk_type != disk.STANDARD:
# TODO(klausw): support type BOOT_DISK once that's implemented.
raise errors.Error('DigitalOcean does not support disk type %s.' %
disk_spec.disk_type)
if self.scratch_disks:
# We have a "disk" already, don't add more. TODO(klausw): permit
# multiple creation for type BOOT_DISK once that's implemented.
raise errors.Error('DigitalOcean does not support multiple disks.')
# Just create a local directory at the specified path, don't mount
# anything.
self.RemoteCommand('sudo mkdir -p {0} && sudo chown -R $USER:$USER {0}'
.format(disk_spec.mount_point))
self.scratch_disks.append(digitalocean_disk.DigitalOceanDisk(disk_spec))
class ContainerizedDigitalOceanVirtualMachine(
DigitalOceanVirtualMachine,
linux_virtual_machine.ContainerizedDebianMixin):
DEFAULT_IMAGE = UBUNTU_IMAGE
class DebianBasedDigitalOceanVirtualMachine(DigitalOceanVirtualMachine,
linux_virtual_machine.DebianMixin):
DEFAULT_IMAGE = UBUNTU_IMAGE
class RhelBasedDigitalOceanVirtualMachine(DigitalOceanVirtualMachine,
linux_virtual_machine.RhelMixin):
pass
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
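# Worked example: with 18 cells and 2 reduction layers, reductions land one
# third and two thirds of the way up the stack.
#
#   >>> calc_reduction_layers(18, 2)
#   [6, 12]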
@tf.contrib.framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@tf.contrib.framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@tf.contrib.framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2])
else:
return tf.reduce_mean(x, [2, 3])
@tf.contrib.framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
@tf.contrib.framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(net)[0]
noise_shape = [batch_size, 1, 1, 1]
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
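# Behaviour sketch (illustrative): with keep_prob = 0.7 each example in the
# batch survives with probability 0.7; surviving hiddenstates are scaled by
# 1 / 0.7 so the expected activation is unchanged, e.g.
#   net = drop_path(net, keep_prob=0.7, is_training=True)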
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
An example would be 'separable_3x3_4' -> (3, 4).
Args:
operation: String that corresponds to convolution operation.
Returns:
Tuple of (filter shape, num layers).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
for layer_num in range(num_layers - 1):
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = tf.nn.relu(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
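# Parsing sketch (illustrative): pooling operation strings follow the pattern
# '<type>_pool_<k>x<k>', so
#   _operation_to_pooling_info('avg_pool_3x3') -> ('avg', 3)
#   _operation_to_pooling_info('max_pool_3x3') -> ('max', 3)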
def _pooling(net, stride, operation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
if curr_filter_shape != prev_filter_shape:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = tf.nn.relu(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
split_axis = get_channel_index()
net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
for split in net:
assert int(split.shape[split_axis]) == int(self._num_conv_filters *
self._filter_scaling)
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
# Don't stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size)
elif operation in ['none']:
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
elif 'pool' in operation:
net = _pooling(net, stride, operation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@tf.contrib.framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
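# Worked example for the 'v3' schedule above (illustrative numbers, not values
# prescribed by this module): with drop_path_keep_prob = 0.6, cell_num = 5,
# total_num_cells = 18 and training halfway done,
#   layer_ratio   = (5 + 1) / 18          = 0.333
#   keep_prob     = 1 - 0.333 * (1 - 0.6) = 0.867   (layer scaling)
#   current_ratio = 0.5
#   keep_prob     = 1 - 0.5 * (1 - 0.867) = 0.933   (time scaling)
# so earlier cells and earlier training steps are dropped less aggressively.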
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps)
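# Minimal usage sketch (illustrative only; the hyperparameters below are
# assumptions, not values prescribed by this module). It shows how a normal
# cell is typically built and applied under an arg_scope that supplies the
# data_format expected by the helper functions above.
def _example_normal_cell(net, prev_layer=None):
  """Applies one NASNet-A normal cell to `net` (illustrative sketch)."""
  cell = NasNetANormalCell(num_conv_filters=44,
                           drop_path_keep_prob=1.0,
                           total_num_cells=18,
                           total_training_steps=250000)
  with arg_scope([get_channel_index, get_channel_dim, global_avg_pool,
                  factorized_reduction], data_format=DATA_FORMAT_NHWC):
    return cell(net, scope='example_cell_0', filter_scaling=1, stride=1,
                prev_layer=prev_layer, cell_num=0)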
|
|
import numpy as np
import os
import sys
import copy
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
sys.path.append(os.path.join(os.path.dirname(__file__),"../"))
from crowdsourcing.annotation_types.classification import *
def combine_dicts(ds):
v = {}
for d in ds:
for k in d:
v[k] = d[k]
return v
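# Illustrative behaviour: later dicts win on key collisions, e.g.
#   combine_dicts([{'a': 1, 'b': 2}, {'b': 3}]) -> {'a': 1, 'b': 3}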
# Defining a useful suite of lesion study experiments and standardized plot styles across different annotation types
#-------------------------------------------------------
DEFAULT_PLOT_PARAMS = {'line-width':3, 'bar-color':'g', 'bar-width':0.8, 'axis-font-size':20, 'title-font-size':30, 'tick-font-size':16, 'legend-font-size':14}
PROB_WORKER_IMAGE_CV_ONLINE = {'name':'prob-worker-cv-online', 'line_style':'-', 'color':'r', 'use_computer_vision':True, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_CV_NAIVE_ONLINE = {'name':'prob-worker-cv-naive-online', 'line_style':'-', 'color':'c', 'use_computer_vision':True, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_ONLINE = {'name':'prob-worker-online', 'line_style':'-', 'color':'g', 'use_computer_vision':False, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_ONLINE = {'name':'prob-online', 'line_style':'-', 'color':'b', 'use_computer_vision':False, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_CV_ONLINE_0005 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.005', 'min_risk':0.005, 'color':'#FF0000'}])
PROB_WORKER_IMAGE_CV_ONLINE_001 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.01', 'min_risk':0.01, 'color':'#BB0000'}])
PROB_WORKER_IMAGE_CV_ONLINE_002 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.02', 'min_risk':0.02, 'color':'#770000'}])
PROB_WORKER_IMAGE_CV = {'name':'prob-worker-cv', 'line_style':'-.s', 'color':'r', 'use_computer_vision':True, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE = {'name':'prob-worker', 'line_style':'-.o', 'color':'g', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER = {'name':'prob-worker-noim', 'line_style':'-.o', 'color':'k', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB = {'name':'prob', 'line_style':'-.*', 'color':'b', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
SIMPLE_CROWDSOURCING = {'name':'majority-vote', 'line_style':'-.v', 'color':'m', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':True, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
ALL_METHODS_NO_CV = [PROB_WORKER_IMAGE_ONLINE, PROB_ONLINE, PROB_WORKER_IMAGE, PROB_WORKER, PROB, SIMPLE_CROWDSOURCING]
ALL_PLOTS_NO_CV = [combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image #', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Worker Skill', 'name':'worker_skill', 'type':'skill', 'methods':[{'name':PROB_WORKER_IMAGE['name']}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Num Annotations', 'name':'num_annotations', 'type':'hist', 'xlabel':'Annotations Per Image', 'ylabel':'Image Count', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'], 'x':'num_annos', 'y':'num_annos_bins'}]}, DEFAULT_PLOT_PARAMS])
]
ALL_METHODS = [PROB_WORKER_IMAGE_CV_ONLINE_002, PROB_WORKER_IMAGE_CV_ONLINE_001, PROB_WORKER_IMAGE_CV_ONLINE_0005, PROB_WORKER_IMAGE_CV_NAIVE_ONLINE, PROB_WORKER_IMAGE_ONLINE, PROB_ONLINE, PROB_WORKER_IMAGE_CV, PROB_WORKER_IMAGE, PROB, SIMPLE_CROWDSOURCING]
ALL_PLOTS = [combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Worker Skill', 'name':'worker_skill', 'type':'skill', 'methods':[{'name':PROB_WORKER_IMAGE['name']}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Num Annotations', 'name':'num_annotations', 'type':'hist', 'xlabel':'Annotations Per Image', 'ylabel':'Image Count', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE['name'], 'x':'num_annos', 'y':'num_annos_bins'}]}, DEFAULT_PLOT_PARAMS])
]
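# Usage sketch (illustrative; building full_dataset/expert_dataset from the
# crowdsourcing package imported above is assumed and not shown here):
#   RunSimulatedExperiments(full_dataset, ALL_METHODS_NO_CV, 'my_experiment',
#                           ALL_PLOTS_NO_CV, expert_dataset=expert_dataset,
#                           title='Lesion study', num_rand_perms=5)
# Each entry of ALL_METHODS_* becomes the keyword arguments of one
# SimulatedCrowdsourcer run (defined below), and each entry of ALL_PLOTS_*
# describes one figure written under output/<output_dir>/.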
#-------------------------------------------------------
class SimulatedCrowdsourcer(object):
def __init__(self, full_dataset, expert_dataset=None, save_prefix=None, output_dir='output', online=True, simple_crowdsourcing=False, learn_worker_params=True, learn_image_params=True, use_computer_vision=False, naive_computer_vision=False, batch_size=1000, num_rand_perms=1, sort_method='num_annos', name=None, line_style='-', color='r', save_all_perms=False, min_risk=0.005):
self.full_dataset, self.expert_dataset = full_dataset, expert_dataset
self.online, self.simple_crowdsourcing = online, simple_crowdsourcing
self.learn_worker_params, self.learn_image_params = learn_worker_params, learn_image_params
self.use_computer_vision, self.batch_size = use_computer_vision, (batch_size if online else len(full_dataset.images))
self.naive_computer_vision = naive_computer_vision
self.num_rand_perms, self.sort_method = num_rand_perms, sort_method
self.save_prefix, self.output_dir = save_prefix, output_dir
self.max_workers_per_image, self.save_all_perms = 1, save_all_perms
self.min_risk = min_risk
def run(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
sum_plot_data, num_plot_data, fnames = [], [], []
all_plot_data = {}
for rand_perm in range(self.num_rand_perms):
plot_data = {'num':[], 'err':[], 'risk':[]}
iter = 0
self.rand_perms = {}
#if self.full_dataset.computer_vision_predictor and hasattr(self.full_dataset.computer_vision_predictor, 'iteration'):
# self.full_dataset.computer_vision_predictor.iteration = 0
self.dataset = self.full_dataset.__class__(debug=0, learn_worker_params=self.learn_worker_params, learn_image_params=self.learn_image_params, computer_vision_predictor=(self.full_dataset.computer_vision_predictor if self.use_computer_vision else None), naive_computer_vision=self.naive_computer_vision, min_risk=self.min_risk)
self.dataset.copy_parameters_from(self.full_dataset, full=False)
for i in self.full_dataset.images:
self.dataset.images[i] = self.full_dataset._CrowdImageClass_(i, self.dataset)
self.dataset.images[i].copy_parameters_from(self.full_dataset.images[i], full=False)
for w in self.full_dataset.workers:
self.dataset.workers[w] = self.full_dataset._CrowdWorkerClass_(w, self.dataset)
self.dataset.workers[w].copy_parameters_from(self.full_dataset.workers[w], full=False)
while self.dataset.num_unfinished(full_dataset=self.full_dataset) > 0:
if self.simple_crowdsourcing:
self.dataset.crowdsource_simple()
else:
self.dataset.estimate_parameters(avoid_if_finished=True)
self.dataset.check_finished_annotations(set_finished=self.online)
if self.expert_dataset:
err,num = self.dataset.compute_error(self.expert_dataset), self.dataset.num_annotations()
if not self.simple_crowdsourcing:
plot_data["risk"].append(self.dataset.risk())
plot_data["num"].append(float(num)/len(self.dataset.images))
plot_data["err"].append(err)
if self.save_prefix and rand_perm==0 or self.save_all_perms:
fname = self.output_dir+'/'+self.save_prefix+str(rand_perm)+'_'+str(iter)+'.json'
self.dataset.save(fname)
fnames.append({str(rand_perm)+'_'+str(iter):fname})
self.augment_annotations_if_necessary()
iter += 1
if hasattr(self.dataset, 'parts'):
plot_data["num_annos"] = []
for p in range(len(self.dataset.parts)):
plot_data["num_annos"] += [self.dataset.parts[p].images[i].num_annotations() for i in self.dataset.parts[p].images]
else:
plot_data["num_annos"] = [self.dataset.images[i].num_annotations() for i in self.dataset.images]
plot_data["num_annos_bins"] = np.arange(-.5, np.asarray(plot_data["num_annos"]).max()+.5, 1).tolist()
if hasattr(self.dataset.workers[w],'skill') and self.dataset.workers[w].skill:
for s in range(len(self.dataset.workers[self.dataset.workers.keys()[0]].skill)):
plot_data["skill"+str(s)] = [self.dataset.workers[w].skill[s] for w in self.dataset.workers]
if self.dataset.cv_worker: plot_data["skill_cv"+str(s)] = [self.dataset.cv_worker.skill[s]]
plot_data["worker_num_annos"] = [len(self.dataset.workers[w].images) for w in self.dataset.workers]
for k in plot_data:
if not k in all_plot_data:
all_plot_data[k] = []
all_plot_data[k].append(plot_data[k])
plot_data = {}
for k in all_plot_data:
ml = int(np.asarray([len(c) for c in all_plot_data[k]]).max())
a = np.zeros((self.num_rand_perms, ml))
valid = np.zeros((self.num_rand_perms, ml))
for i in range(self.num_rand_perms):
a[i,:len(all_plot_data[k][i])] = all_plot_data[k][i]
valid[i,:len(all_plot_data[k][i])] = 1
if k == 'num_annos':
plot_data[k] = a.flatten().tolist()
else:
plot_data[k] = (a.sum(axis=0) / valid.sum(axis=0)).tolist()
plot_data[k+'_var'] = ((((a-plot_data[k])**2)*valid).sum(axis=0) / valid.sum(axis=0)).tolist()
return plot_data, fnames, all_plot_data
def augment_annotations_if_necessary(self):
processed = []
num = 0
image_ids = self.dataset.choose_images_to_annotate_next(max_workers_per_image=self.max_workers_per_image, sort_method=self.sort_method, full_dataset=self.full_dataset)
for i in image_ids:
processed.append(i)
workers = self.full_dataset.images[i].workers
if not i in self.rand_perms:
self.rand_perms[i] = np.random.permutation(len(self.full_dataset.images[i].z))
#print str(i) + " " + str(len(workers)) + " " + str(workers)
has_cv = (1 if (self.dataset.cv_worker and self.dataset.cv_worker.id in self.dataset.images[i].z) else 0)
fd_has_cv = (1 if (self.full_dataset.cv_worker and self.full_dataset.cv_worker.id in self.full_dataset.images[i].z) else 0)
for j in range(len(self.dataset.images[i].z)-has_cv, min(len(self.dataset.images[i].z)-has_cv+self.max_workers_per_image, len(self.full_dataset.images[i].z)-fd_has_cv)):
w = workers[self.rand_perms[i][j]]
if not self.dataset.images[i].finished:
assert not w in self.dataset.images[i].z, "Duplicate worker " + str(w) + " for image " + str(i) + " calling augment_annotations()"
z = self.dataset._CrowdLabelClass_(self.dataset.images[i], self.dataset.workers[w])
z.parse(self.full_dataset.images[i].z[w].raw_data)
self.dataset.images[i].z[w] = z
self.dataset.images[i].workers.append(w)
self.dataset.workers[w].images[i] = self.dataset.images[i]
num += 1
if num >= self.batch_size:
break
'''
if self.use_computer_vision:
for i in processed:
self.dataset.images[i].predict_true_labels(avoid_if_finished=True) # Initialize training label for computer vision
self.dataset.synch_computer_vision_labels()
'''
def RunSimulatedExperiments(full_dataset, methods, output_dir, plots, expert_dataset=None, title=None, num_rand_perms=5, force_compute=False, show_intermediate_results=True):
results, fnames, methods_d, all_plot_data = {}, {}, {}, {}
fnames_i = []
for a in methods:
m = a['name']
if not os.path.exists(os.path.join('output', output_dir, 'plot_data_'+m+'.json')) or force_compute:
sc = SimulatedCrowdsourcer(full_dataset, expert_dataset=(expert_dataset if expert_dataset else full_dataset), num_rand_perms=num_rand_perms, output_dir='output/'+output_dir, save_prefix=m, **a)
results[m], fnames[m], all_plot_data[m] = sc.run()
with open(os.path.join('output', output_dir, 'plot_data_'+m+'.json'), 'w') as f:
json.dump({'results':results[m], 'fnames':fnames[m], 'all_plot_data':all_plot_data[m]}, f)
else:
with open(os.path.join('output', output_dir, 'plot_data_'+m+'.json')) as f:
data = json.load(f)
results[m], fnames[m], all_plot_data[m] = data['results'], data['fnames'], data['all_plot_data']
methods_d[m] = a
fnames_c = []
for f in fnames[a['name']]:
fnames_c.append({f.keys()[0] : f[f.keys()[0]][len('output/'):]})
fnames_i.append({'name':a['name'], 'files':fnames_c})
if show_intermediate_results:
GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title)
GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title)
def GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title):
plot_files = []
for i in range(len(plots)):
handles, labels = [], []
fig = plt.figure(i+1)
plt.clf()
plot = plots[i]
if 'xlim' in plot: plt.xlim(plot['xlim'][0], plot['xlim'][1])
if 'ylim' in plot: plt.ylim(plot['ylim'][0], plot['ylim'][1])
for a in plots[i]['methods']:
m = a['name']
if not m in methods_d:
continue
print str(i) + ' ' + str(m)
line_style = a['line_style'] if 'line_style' in a else methods_d[m]['line_style']
color = a['color'] if 'color' in a else methods_d[m]['color']
if plot['type'] == 'skill':
plot = copy.deepcopy(plots[i])
plot['type'] = 'scatter' if len(full_dataset.skill_names) <= 2 else 'scatter3d'
if not 'xlabel' in plot: plot['xlabel'] = full_dataset.skill_names[0]
if not 'ylabel' in plot: plot['ylabel'] = full_dataset.skill_names[1]
if not 'zlabel' in plot and len(full_dataset.skill_names) > 2: plot['zlabel'] = full_dataset.skill_names[2]
if not 'x' in a: a['x'] = 'skill0'
if not 'y' in a: a['y'] = 'skill1'
if not 'z' in a and len(full_dataset.skill_names) > 2: a['z'] = 'skill2'
if plot['type'] == 'semilog':
print str(len(results[m][a['x']])) + ' ' + str(len(results[m][a['y']]))
h = plt.semilogy(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"], color=color)
handles.append(h[0])
elif plot['type'] == 'loglog':
h = plt.loglog(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"], color=color)
handles.append(h[0])
elif plot['type'] == 'plot':
h = plt.plot(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"])
handles.append(h[0])
elif plot['type'] == 'hist':
if 'y' in a:
h = plt.hist(results[m][a['x']], results[m][a['y']], histtype='bar', rwidth=plot["bar-width"], color=plot["bar-color"])
else:
h = plt.hist(results[m][a['x']], histtype='bar', rwidth=plot["bar-width"], color=plot["bar-color"])
handles.append(h[0])
elif plot['type'] == 'scatter':
h = plt.scatter(results[m][a['x']], results[m][a['y']], c='r', marker='o')
handles.append(h)
elif plot['type'] == 'scatter3d':
ax = fig.add_subplot(111, projection='3d')
h = ax.scatter(results[m][a['x']], results[m][a['y']], results[m][a['z']], c='r', marker='o')
handles.append(h)
labels.append(a['title'] if 'title' in a else m)
if plot['type'] == 'scatter3d':
if 'xlabel' in plot: ax.set_xlabel(plot['xlabel'], fontsize=plot['axis-font-size'])
if 'ylabel' in plot: ax.set_ylabel(plot['ylabel'], fontsize=plot['axis-font-size'])
if 'zlabel' in plot: ax.set_zlabel(plot['zlabel'], fontsize=plot['axis-font-size'])
else:
if 'xlabel' in plot: plt.xlabel(plot['xlabel'], fontsize=plot['axis-font-size'])
if 'ylabel' in plot: plt.ylabel(plot['ylabel'], fontsize=plot['axis-font-size'])
# (a z-axis label only applies to the 3D scatter case handled above)
if 'title' in plot: plt.title(plot['title'], fontsize=plot['title-font-size'])
plt.tick_params(axis='both', which='major', labelsize=plot['tick-font-size'])
plt.tick_params(axis='both', which='minor', labelsize=plot['tick-font-size'])
if 'legend' in plot: plt.legend(handles, labels, prop={'size':plot['legend-font-size']})
plt.savefig(os.path.join('output', output_dir, plot['name']+'.pdf'))
plt.savefig(os.path.join('output', output_dir, plot['name']+'.png'))
plot_files.append(output_dir + '/' + plot['name']+'.png')
with open(os.path.join('output', output_dir, 'galleries.json'), 'w') as f:
json.dump({'plots':plot_files, 'methods':fnames_i, 'title':title}, f)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
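# Example invocation (paths are illustrative, not prescribed by this script):
#   READELF=/usr/bin/readelf OBJDUMP=/usr/bin/objdump \
#       ./security-check.py src/bitcoind src/bitcoin-cli && echo "all passed"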
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
line = line.split()
if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.splitlines():
if line.startswith('Program Headers:'):
in_headers = True
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find('Type')
ofs_offset = line.find('Offset')
ofs_flags = line.find('Flg')
ofs_align = line.find('Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse readelf -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
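# Illustrative return value (assumes the usual readelf -lW layout, trimmed):
# a hardened binary typically yields something like
#   [('LOAD', 'R E'), ('LOAD', 'RW'), ('GNU_STACK', 'RW'), ('GNU_RELRO', 'R')]
# which is what check_ELF_NX and check_ELF_RELRO below iterate over.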
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker needs to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == 'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>1 and tokens[1] == '(BIND_NOW)' or (len(tokens)>2 and tokens[1] == '(FLAGS)' and 'BIND_NOW' in tokens[2:]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.splitlines():
if '__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.splitlines():
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
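# Worked example (illustrative): a 64-bit PE whose DllCharacteristics value is
# 0x0160 has HIGH_ENTROPY_VA (0x0020), DYNAMIC_BASE (0x0040) and NX_COMPAT
# (0x0100) all set, so each check below reduces to
#   (0x0160 & 0x0020) == 0x0020  ->  True
#   (0x0160 & 0x0040) == 0x0040  ->  True
#   (0x0160 & 0x0100) == 0x0100  ->  True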
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
with open(executable, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
sys.exit(retval)
|
|
"""
Test the hashing module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import time
import hashlib
import sys
import gc
import io
import collections
import itertools
import pickle
import random
from decimal import Decimal
from joblib.hashing import hash
from joblib.func_inspect import filter_args
from joblib.memory import Memory
from joblib.testing import raises, skipif, fixture, parametrize
from joblib.test.common import np, with_numpy
from joblib.my_exceptions import TransportableException
from joblib._compat import PY3_OR_LATER
try:
# Python 2/Python 3 compat
unicode('str')
except NameError:
unicode = lambda s: s
###############################################################################
# Helper functions for the tests
def time_func(func, *args):
""" Time function func on *args.
"""
times = list()
for _ in range(3):
t1 = time.time()
func(*args)
times.append(time.time() - t1)
return min(times)
def relative_time(func1, func2, *args):
""" Return the relative time between func1 and func2 applied on
*args.
"""
time_func1 = time_func(func1, *args)
time_func2 = time_func(func2, *args)
relative_diff = 0.5 * (abs(time_func1 - time_func2)
/ (time_func1 + time_func2))
return relative_diff
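# Usage sketch (illustrative): a value close to 0 means the two callables take
# roughly the same time on the given arguments, e.g.
#   relative_time(hash, hash, list(range(1000)))  # ~ 0.0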
class Klass(object):
def f(self, x):
return x
class KlassWithCachedMethod(object):
def __init__(self, cachedir):
mem = Memory(cachedir=cachedir)
self.f = mem.cache(self.f)
def f(self, x):
return x
###############################################################################
# Tests
input_list = [1, 2, 1., 2., 1 + 1j, 2. + 1j,
'a', 'b',
(1,), (1, 1,), [1, ], [1, 1, ],
{1: 1}, {1: 2}, {2: 1},
None,
gc.collect,
[1, ].append,
# Next 2 sets have unorderable elements in python 3.
set(('a', 1)),
set(('a', 1, ('a', 1))),
# Next 2 dicts have unorderable type of keys in python 3.
{'a': 1, 1: 2},
{'a': 1, 1: 2, 'd': {'a': 1}}]
@parametrize('obj1', input_list)
@parametrize('obj2', input_list)
def test_trivial_hash(obj1, obj2):
"""Smoke test hash on various types."""
# Check that 2 objects have the same hash only if they are the same.
are_hashes_equal = hash(obj1) == hash(obj2)
are_objs_identical = obj1 is obj2
assert are_hashes_equal == are_objs_identical
def test_hash_methods():
# Check that hashing instance methods works
a = io.StringIO(unicode('a'))
assert hash(a.flush) == hash(a.flush)
a1 = collections.deque(range(10))
a2 = collections.deque(range(9))
assert hash(a1.extend) != hash(a2.extend)
@fixture(scope='function')
@with_numpy
def three_np_arrays():
rnd = np.random.RandomState(0)
arr1 = rnd.random_sample((10, 10))
arr2 = arr1.copy()
arr3 = arr2.copy()
arr3[0] += 1
return arr1, arr2, arr3
def test_hash_numpy_arrays(three_np_arrays):
arr1, arr2, arr3 = three_np_arrays
for obj1, obj2 in itertools.product(three_np_arrays, repeat=2):
are_hashes_equal = hash(obj1) == hash(obj2)
are_arrays_equal = np.all(obj1 == obj2)
assert are_hashes_equal == are_arrays_equal
assert hash(arr1) != hash(arr1.T)
def test_hash_numpy_dict_of_arrays(three_np_arrays):
arr1, arr2, arr3 = three_np_arrays
d1 = {1: arr1, 2: arr2}
d2 = {1: arr2, 2: arr1}
d3 = {1: arr2, 2: arr3}
assert hash(d1) == hash(d2)
assert hash(d1) != hash(d3)
@with_numpy
@parametrize('dtype', ['datetime64[s]', 'timedelta64[D]'])
def test_numpy_datetime_array(dtype):
# memoryview is not supported for some dtypes e.g. datetime64
# see https://github.com/joblib/joblib/issues/188 for more details
a_hash = hash(np.arange(10))
array = np.arange(0, 10, dtype=dtype)
assert hash(array) != a_hash
@with_numpy
def test_hash_numpy_noncontiguous():
a = np.asarray(np.arange(6000).reshape((1000, 2, 3)),
order='F')[:, :1, :]
b = np.ascontiguousarray(a)
assert hash(a) != hash(b)
c = np.asfortranarray(a)
assert hash(a) != hash(c)
@with_numpy
@parametrize('coerce_mmap', [True, False])
def test_hash_memmap(tmpdir, coerce_mmap):
"""Check that memmap and arrays hash identically if coerce_mmap is True."""
filename = tmpdir.join('memmap_temp').strpath
try:
m = np.memmap(filename, shape=(10, 10), mode='w+')
a = np.asarray(m)
are_hashes_equal = (hash(a, coerce_mmap=coerce_mmap) ==
hash(m, coerce_mmap=coerce_mmap))
assert are_hashes_equal == coerce_mmap
finally:
if 'm' in locals():
del m
# Force a garbage-collection cycle, to be certain that the
# object is deleted, and we don't run into a problem under
# Windows with a file handle still open.
gc.collect()
@with_numpy
@skipif(sys.platform == 'win32', reason='This test is not stable under windows'
' for some reason')
def test_hash_numpy_performance():
""" Check the performance of hashing numpy arrays:
In [22]: a = np.random.random(1000000)
In [23]: %timeit hashlib.md5(a).hexdigest()
100 loops, best of 3: 20.7 ms per loop
In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
1 loops, best of 3: 73.1 ms per loop
In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
10 loops, best of 3: 53.9 ms per loop
In [26]: %timeit hash(a)
100 loops, best of 3: 20.8 ms per loop
"""
rnd = np.random.RandomState(0)
a = rnd.random_sample(1000000)
if hasattr(np, 'getbuffer'):
# Under python 3, there is no getbuffer
getbuffer = np.getbuffer
else:
getbuffer = memoryview
md5_hash = lambda x: hashlib.md5(getbuffer(x)).hexdigest()
relative_diff = relative_time(md5_hash, hash, a)
assert relative_diff < 0.3
# Check that hashing a tuple of 3 arrays takes approximately
# 3 times as much as hashing one array
time_hashlib = 3 * time_func(md5_hash, a)
time_hash = time_func(hash, (a, a, a))
relative_diff = 0.5 * (abs(time_hash - time_hashlib)
/ (time_hash + time_hashlib))
assert relative_diff < 0.3
def test_bound_methods_hash():
""" Make sure that calling the same method on two different instances
of the same class does resolve to the same hashes.
"""
a = Klass()
b = Klass()
assert (hash(filter_args(a.f, [], (1, ))) ==
hash(filter_args(b.f, [], (1, ))))
def test_bound_cached_methods_hash(tmpdir):
""" Make sure that calling the same _cached_ method on two different
instances of the same class does resolve to the same hashes.
"""
a = KlassWithCachedMethod(tmpdir.strpath)
b = KlassWithCachedMethod(tmpdir.strpath)
assert (hash(filter_args(a.f.func, [], (1, ))) ==
hash(filter_args(b.f.func, [], (1, ))))
@with_numpy
def test_hash_object_dtype():
""" Make sure that ndarrays with dtype `object' hash correctly."""
a = np.array([np.arange(i) for i in range(6)], dtype=object)
b = np.array([np.arange(i) for i in range(6)], dtype=object)
assert hash(a) == hash(b)
@with_numpy
def test_numpy_scalar():
# Numpy scalars are built from compiled functions, and lead to
# strange pickling paths being explored, which can give hash collisions
a = np.float64(2.0)
b = np.float64(3.0)
assert hash(a) != hash(b)
def test_dict_hash(tmpdir):
# Check that dictionaries hash consistently, even though the ordering
# of the keys is not guaranteed
k = KlassWithCachedMethod(tmpdir.strpath)
d = {'#s12069__c_maps.nii.gz': [33],
'#s12158__c_maps.nii.gz': [33],
'#s12258__c_maps.nii.gz': [33],
'#s12277__c_maps.nii.gz': [33],
'#s12300__c_maps.nii.gz': [33],
'#s12401__c_maps.nii.gz': [33],
'#s12430__c_maps.nii.gz': [33],
'#s13817__c_maps.nii.gz': [33],
'#s13903__c_maps.nii.gz': [33],
'#s13916__c_maps.nii.gz': [33],
'#s13981__c_maps.nii.gz': [33],
'#s13982__c_maps.nii.gz': [33],
'#s13983__c_maps.nii.gz': [33]}
a = k.f(d)
b = k.f(a)
assert hash(a) == hash(b)
def test_set_hash(tmpdir):
# Check that sets hash consistently, even though their ordering
# is not guaranteed
k = KlassWithCachedMethod(tmpdir.strpath)
s = set(['#s12069__c_maps.nii.gz',
'#s12158__c_maps.nii.gz',
'#s12258__c_maps.nii.gz',
'#s12277__c_maps.nii.gz',
'#s12300__c_maps.nii.gz',
'#s12401__c_maps.nii.gz',
'#s12430__c_maps.nii.gz',
'#s13817__c_maps.nii.gz',
'#s13903__c_maps.nii.gz',
'#s13916__c_maps.nii.gz',
'#s13981__c_maps.nii.gz',
'#s13982__c_maps.nii.gz',
'#s13983__c_maps.nii.gz'])
a = k.f(s)
b = k.f(a)
assert hash(a) == hash(b)
def test_set_decimal_hash():
# Check that sets containing decimals hash consistently, even though
# ordering is not guaranteed
assert (hash(set([Decimal(0), Decimal('NaN')])) ==
hash(set([Decimal('NaN'), Decimal(0)])))
def test_string():
# Test that we obtain the same hash for objects owning several strings,
# whatever the history of these strings (which are immutable in Python)
string = 'foo'
a = {string: 'bar'}
b = {string: 'bar'}
c = pickle.loads(pickle.dumps(b))
assert hash([a, b]) == hash([a, c])
@with_numpy
def test_dtype():
# Test that we obtain the same hash for objects owning several dtypes,
# whatever the history of these dtypes. Cater for cache invalidation with
# complex dtypes
a = np.dtype([('f1', np.uint), ('f2', np.int32)])
b = a
c = pickle.loads(pickle.dumps(a))
assert hash([a, c]) == hash([a, b])
@parametrize('to_hash,expected',
[('This is a string to hash',
{'py2': '80436ada343b0d79a99bfd8883a96e45',
'py3': '71b3f47df22cb19431d85d92d0b230b2'}),
(u"C'est l\xe9t\xe9",
{'py2': '2ff3a25200eb6219f468de2640913c2d',
'py3': '2d8d189e9b2b0b2e384d93c868c0e576'}),
((123456, 54321, -98765),
{'py2': '50d81c80af05061ac4dcdc2d5edee6d6',
'py3': 'e205227dd82250871fa25aa0ec690aa3'}),
([random.Random(42).random() for _ in range(5)],
{'py2': '1a36a691b2e2ba3a9df72de3dccf17ea',
'py3': 'a11ffad81f9682a7d901e6edc3d16c84'}),
([3, 'abc', None, TransportableException('foo', ValueError)],
{'py2': 'adb6ba84990ee5e462dc138383f11802',
'py3': '994f663c64ba5e64b2a85ebe75287829'}),
({'abcde': 123, 'sadfas': [-9999, 2, 3]},
{'py2': 'fc9314a39ff75b829498380850447047',
'py3': 'aeda150553d4bb5c69f0e69d51b0e2ef'})])
def test_hashes_stay_the_same(to_hash, expected):
# We want to make sure that hashes don't change with joblib
# version. For end users, that would mean that they have to
# regenerate their cache from scratch, which potentially means
# lengthy recomputations.
# Expected results have been generated with joblib 0.9.2
py_version_str = 'py3' if PY3_OR_LATER else 'py2'
assert hash(to_hash) == expected[py_version_str]
@with_numpy
def test_hashes_are_different_between_c_and_fortran_contiguous_arrays():
# We want to be sure that the c-contiguous and f-contiguous versions of the
# same array produce 2 different hashes.
rng = np.random.RandomState(0)
arr_c = rng.random_sample((10, 10))
arr_f = np.asfortranarray(arr_c)
assert hash(arr_c) != hash(arr_f)
@with_numpy
def test_0d_array():
hash(np.array(0))
@with_numpy
def test_0d_and_1d_array_hashing_is_different():
assert hash(np.array(0)) != hash(np.array([0]))
@with_numpy
def test_hashes_stay_the_same_with_numpy_objects():
# We want to make sure that hashes don't change with joblib
# version. For end users, that would mean that they have to
# regenerate their cache from scratch, which potentially means
# lengthy recomputations.
rng = np.random.RandomState(42)
# Being explicit about dtypes in order to avoid
# architecture-related differences. Also using 'f4' rather than
# 'f8' for float arrays because 'f8' arrays generated by
# rng.randn don't seem to be bit-identical on 32bit and
# 64bit machines.
to_hash_list = [
rng.randint(-1000, high=1000, size=50).astype('<i8'),
tuple(rng.randn(3).astype('<f4') for _ in range(5)),
[rng.randn(3).astype('<f4') for _ in range(5)],
{
-3333: rng.randn(3, 5).astype('<f4'),
0: [
rng.randint(10, size=20).astype('<i8'),
rng.randn(10).astype('<f4')
]
},
# Non regression cases for https://github.com/joblib/joblib/issues/308.
# Generated with joblib 0.9.4.
np.arange(100, dtype='<i8').reshape((10, 10)),
# Fortran contiguous array
np.asfortranarray(np.arange(100, dtype='<i8').reshape((10, 10))),
# Non contiguous array
np.arange(100, dtype='<i8').reshape((10, 10))[:, :2],
]
# These expected results have been generated with joblib 0.9.0
expected_dict = {'py2': ['80f2387e7752abbda2658aafed49e086',
'0d700f7f25ea670fd305e4cd93b0e8cd',
'83a2bdf843e79e4b3e26521db73088b9',
'63e0efd43c0a9ad92a07e8ce04338dd3',
'03fef702946b602c852b8b4e60929914',
'07074691e90d7098a85956367045c81e',
'd264cf79f353aa7bbfa8349e3df72d8f'],
'py3': ['10a6afc379ca2708acfbaef0ab676eab',
'988a7114f337f381393025911ebc823b',
'c6809f4b97e35f2fa0ee8d653cbd025c',
'b3ad17348e32728a7eb9cda1e7ede438',
'927b3e6b0b6a037e8e035bda134e0b05',
'108f6ee98e7db19ea2006ffd208f4bf1',
'bd48ccaaff28e16e6badee81041b7180']}
py_version_str = 'py3' if PY3_OR_LATER else 'py2'
expected_list = expected_dict[py_version_str]
for to_hash, expected in zip(to_hash_list, expected_list):
assert hash(to_hash) == expected
def test_hashing_pickling_error():
def non_picklable():
return 42
with raises(pickle.PicklingError) as excinfo:
hash(non_picklable)
excinfo.match('PicklingError while hashing')
|
|
# -*- coding: utf-8 -*-
'''
Module for managing windows systems.
:depends:
- win32net
Support for reboot, shutdown, etc
'''
from __future__ import absolute_import
# Import python libs
import logging
from datetime import datetime
# Import 3rd Party Libs
try:
import pythoncom
import wmi
import win32net
import win32api
import win32con
import pywintypes
from ctypes import windll
HAS_WIN32NET_MODS = True
except ImportError:
HAS_WIN32NET_MODS = False
# Import salt libs
import salt.utils
import salt.utils.locales
from salt.modules.reg import read_value
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
'''
Load the system module only if the kernel is Windows
'''
if HAS_WIN32NET_MODS and salt.utils.is_windows():
return __virtualname__
return False
def _convert_minutes_seconds(timeout, in_seconds=False):
'''
convert timeout to seconds
'''
return timeout if in_seconds else timeout*60
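# Illustrative behaviour: the timeout passed to the functions below is treated
# as minutes unless ``in_seconds`` is True, e.g.
#   _convert_minutes_seconds(5) -> 300
#   _convert_minutes_seconds(5, in_seconds=True) -> 5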
def halt(timeout=5, in_seconds=False):
'''
Halt a running system.
:param int timeout:
Number of minutes before halting the system (or seconds if
``in_seconds`` is True). Default is 5 minutes.
:return: True if successful.
:rtype: bool
timeout
The wait time before the system will be shut down.
in_seconds
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' system.halt 5
'''
return shutdown(timeout=timeout, in_seconds=in_seconds)
def init(runlevel):
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
#cmd = ['init', runlevel]
#ret = __salt__['cmd.run'](cmd, python_shell=False)
#return ret
# TODO: Create a mapping of runlevels to
# corresponding Windows actions
return 'Not implemented on Windows at this time.'
def poweroff(timeout=5, in_seconds=False):
'''
Power off a running system.
:param int timeout:
Number of minutes before powering off the system (or seconds if
``in_seconds`` is True). Default is 5 minutes.
:return: True if successful
:rtype: bool
timeout
The wait time before the system will be shut down.
in_seconds
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' system.poweroff 5
'''
return shutdown(timeout=timeout, in_seconds=in_seconds)
def reboot(timeout=5, in_seconds=False):
'''
Reboot a running system.
:param int timeout:
Number of minutes before rebooting the system (or seconds if
``in_seconds`` is True). Default is 5 minutes.
:param bool in_seconds:
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.reboot 5
'''
return shutdown(timeout=timeout, reboot=True, in_seconds=in_seconds)
def shutdown(message=None, timeout=5, force_close=True, reboot=False, in_seconds=False):
'''
Shutdown a running system.
:param str message:
A message to display to the user before shutting down.
:param int timeout:
The length of time that the shutdown dialog box should be displayed, in
seconds. While this dialog box is displayed, the shutdown can be stopped
by the shutdown_abort function.
If timeout is not zero, InitiateSystemShutdown displays a dialog box on
the specified computer. The dialog box displays the name of the user
who called the function, displays the message specified by the
lpMessage parameter, and prompts the user to log off. The dialog box
beeps when it is created and remains on top of other windows in the
system. The dialog box can be moved but not closed. A timer counts down
the remaining time before a forced shutdown.
If timeout is zero, the computer shuts down without displaying the
dialog box, and the shutdown cannot be stopped by shutdown_abort.
Default is 5 minutes
:param bool in_seconds:
Whether to treat timeout as seconds or minutes.
.. versionadded:: 2015.8.0
:param bool force_close:
True to force close all open applications. False displays a dialog box
instructing the user to close the applications.
:param bool reboot:
True restarts the computer immediately after shutdown.
False caches to disk and safely powers down the system.
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.shutdown 5
'''
seconds = _convert_minutes_seconds(timeout, in_seconds)
if message:
message = message.decode('utf-8')
try:
        win32api.InitiateSystemShutdown('127.0.0.1', message, seconds,
                                        force_close, reboot)
return True
except pywintypes.error as exc:
(number, context, message) = exc
log.error('Failed to shutdown the system')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
def shutdown_hard():
'''
Shutdown a running system with no timeout or warning.
:return: True if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.shutdown_hard
'''
return shutdown(timeout=0)
def shutdown_abort():
'''
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be aborted
:return: True if successful
:rtype: bool
'''
try:
win32api.AbortSystemShutdown('127.0.0.1')
return True
except pywintypes.error as exc:
(number, context, message) = exc
log.error('Failed to abort system shutdown')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
def lock():
'''
Lock the workstation.
:return: True if successful
:rtype: bool
'''
return windll.user32.LockWorkStation()
def set_computer_name(name):
'''
Set the Windows computer name
:param str name:
The new name to give the computer. Requires a reboot to take effect.
:return:
Returns a dictionary containing the old and new names if successful.
False if not.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
'''
if name:
name = name.decode('utf-8')
if windll.kernel32.SetComputerNameExW(win32con.ComputerNamePhysicalDnsHostname,
name):
ret = {'Computer Name': {'Current': get_system_info()['name']}}
pending = get_pending_computer_name()
if pending not in (None, False):
ret['Computer Name']['Pending'] = pending
return ret
return False
def get_pending_computer_name():
'''
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
:return:
        Returns the pending name if a restart is pending. Returns ``None`` if
        no restart is pending.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
'''
current = get_computer_name()
pending = read_value('HKLM',
r'SYSTEM\CurrentControlSet\Control\ComputerName\ComputerName',
'ComputerName')['vdata']
if pending:
return pending if pending != current else None
return False
def get_computer_name():
'''
Get the Windows computer name
:return:
Returns the computer name if found. Otherwise returns False
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_name
'''
name = get_system_info()['name']
return name if name else False
def set_computer_desc(desc=None):
'''
Set the Windows computer description
:param str desc:
The computer description
:return: False if it fails. Description if successful.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
'''
# Make sure the system exists
# Return an object containing current information array for the computer
system_info = win32net.NetServerGetInfo(None, 101)
# If desc is passed, decode it for unicode
if desc:
system_info['comment'] = desc.decode('utf-8')
else:
return False
# Apply new settings
try:
win32net.NetServerSetInfo(None, 101, system_info)
except win32net.error as exc:
(number, context, message) = exc
log.error('Failed to update system')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
return {'Computer Description': get_computer_desc()}
set_computer_description = salt.utils.alias_function(set_computer_desc, 'set_computer_description')
def get_system_info():
'''
Get system information.
:return:
Returns a Dictionary containing information about the system to include
name, description, version, etc...
:rtype: dict
'''
system_info = win32net.NetServerGetInfo(None, 101)
return system_info
def get_computer_desc():
'''
Get the Windows computer description
:return:
Returns the computer description if found. Otherwise returns False
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_desc
'''
desc = get_system_info()['comment']
return desc if desc else False
get_computer_description = salt.utils.alias_function(get_computer_desc, 'get_computer_description')
def _lookup_error(number):
'''
Lookup the error based on the passed number
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:param int number: Number code to lookup
:return: The text that corresponds to the error number
:rtype: str
'''
return_values = {
2: 'Invalid OU or specifying OU is not supported',
5: 'Access is denied',
53: 'The network path was not found',
87: 'The parameter is incorrect',
110: 'The system cannot open the specified object',
1323: 'Unable to update the password',
1326: 'Logon failure: unknown username or bad password',
1355: 'The specified domain either does not exist or could not be contacted',
2224: 'The account already exists',
2691: 'The machine is already joined to the domain',
2692: 'The machine is not currently joined to a domain',
}
return return_values[number]
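# Illustrative example (not part of the original module):
# _lookup_error(5) returns 'Access is denied'; an unknown code raises KeyError.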
def join_domain(domain,
username=None,
password=None,
account_ou=None,
account_exists=False,
restart=False):
'''
Join a computer to an Active Directory domain. Requires reboot.
:param str domain:
The domain to which the computer should be joined, e.g.
``example.com``
:param str username:
Username of an account which is authorized to join computers to the
specified domain. Need to be either fully qualified like
``user@domain.tld`` or simply ``user``
:param str password:
Password of the specified user
:param str account_ou:
The DN of the OU below which the account for this computer should be
created when joining the domain, e.g.
``ou=computers,ou=departm_432,dc=my-company,dc=com``
:param bool account_exists:
Needs to be set to ``True`` to allow re-using an existing account
:param bool restart: Restarts the computer after a successful join
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:returns: Returns a dictionary if successful. False if unsuccessful.
:rtype: dict, bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain domain='domain.tld' \\
username='joinuser' password='joinpassword' \\
account_ou='ou=clients,ou=org,dc=domain,dc=tld' \\
account_exists=False, restart=True
'''
status = get_domain_workgroup()
if 'Domain' in status:
if status['Domain'] == domain:
return 'Already joined to {0}'.format(domain)
if username and '\\' not in username and '@' not in username:
username = '{0}@{1}'.format(username, domain)
if username and password is None:
return 'Must specify a password if you pass a username'
# remove any escape characters
if isinstance(account_ou, str):
account_ou = account_ou.split('\\')
account_ou = ''.join(account_ou)
NETSETUP_JOIN_DOMAIN = 0x1
NETSETUP_ACCOUNT_CREATE = 0x2
NETSETUP_DOMAIN_JOIN_IF_JOINED = 0x20
join_options = 0x0
join_options |= NETSETUP_JOIN_DOMAIN
join_options |= NETSETUP_DOMAIN_JOIN_IF_JOINED
if not account_exists:
join_options |= NETSETUP_ACCOUNT_CREATE
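    # Illustrative note (not in the original source): with the flags above the
    # combined mask is 0x1 | 0x20 | 0x2 == 0x23 when a new machine account is
    # created, or 0x21 when account_exists=True.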
pythoncom.CoInitialize()
c = wmi.WMI()
comp = c.Win32_ComputerSystem()[0]
err = comp.JoinDomainOrWorkgroup(Name=domain,
Password=password,
UserName=username,
AccountOU=account_ou,
FJoinOptions=join_options)
# you have to do this because JoinDomainOrWorkgroup returns a strangely
# formatted value that looks like (0,)
if not err[0]:
ret = {'Domain': domain,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
log.error(_lookup_error(err[0]))
return False
def unjoin_domain(username=None,
password=None,
domain=None,
workgroup='WORKGROUP',
disable=False,
restart=False):
r'''
Unjoin a computer from an Active Directory Domain. Requires restart.
:param username:
Username of an account which is authorized to manage computer accounts
on the domain. Need to be fully qualified like ``user@domain.tld`` or
``domain.tld\user``. If domain not specified, the passed domain will be
used. If computer account doesn't need to be disabled, can be None.
:param str password:
Password of the specified user
:param str domain: The domain from which to unjoin the computer. Can be None
:param str workgroup: The workgroup to join the computer to. Default is
``WORKGROUP``
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:param bool disable:
Disable the user account in Active Directory. True to disable.
:param bool restart: Restart the computer after successful unjoin
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
:returns: Returns a dictionary if successful. False if unsuccessful.
:rtype: dict, bool
CLI Example:
.. code-block:: bash
salt 'minion-id' system.unjoin_domain restart=True
salt 'minion-id' system.unjoin_domain username='unjoinuser' \\
password='unjoinpassword' disable=True \\
restart=True
'''
status = get_domain_workgroup()
if 'Workgroup' in status:
if status['Workgroup'] == workgroup:
return 'Already joined to {0}'.format(workgroup)
if username and '\\' not in username and '@' not in username:
if domain:
username = '{0}@{1}'.format(username, domain)
else:
return 'Must specify domain if not supplied in username'
if username and password is None:
return 'Must specify a password if you pass a username'
NETSETUP_ACCT_DELETE = 0x2
unjoin_options = 0x0
if disable:
unjoin_options |= NETSETUP_ACCT_DELETE
pythoncom.CoInitialize()
c = wmi.WMI()
comp = c.Win32_ComputerSystem()[0]
err = comp.UnjoinDomainOrWorkgroup(Password=password,
UserName=username,
FUnjoinOptions=unjoin_options)
# you have to do this because UnjoinDomainOrWorkgroup returns a
# strangely formatted value that looks like (0,)
if not err[0]:
err = comp.JoinDomainOrWorkgroup(Name=workgroup)
if not err[0]:
ret = {'Workgroup': workgroup,
'Restart': False}
if restart:
ret['Restart'] = reboot()
return ret
else:
log.error(_lookup_error(err[0]))
log.error('Failed to join the computer to {0}'.format(workgroup))
return False
else:
log.error(_lookup_error(err[0]))
log.error('Failed to unjoin computer from {0}'.format(status['Domain']))
return False
def get_domain_workgroup():
'''
Get the domain or workgroup the computer belongs to.
.. versionadded:: 2015.5.7
.. versionadded:: 2015.8.2
    :return: A dictionary with a single key, either ``Domain`` or ``Workgroup``,
        mapped to the name of the domain or workgroup.
    :rtype: dict
'''
pythoncom.CoInitialize()
c = wmi.WMI()
for computer in c.Win32_ComputerSystem():
if computer.PartOfDomain:
return {'Domain': computer.Domain}
else:
return {'Workgroup': computer.Domain}
def _get_date_time_format(dt_string):
'''
Function that detects the date/time format for the string passed.
:param str dt_string:
A date/time string
:return: The format of the passed dt_string
:rtype: str
'''
valid_formats = [
'%I:%M:%S %p',
'%I:%M %p',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d',
'%m-%d-%y',
'%m-%d-%Y',
'%m/%d/%y',
'%m/%d/%Y',
'%Y/%m/%d'
]
for dt_format in valid_formats:
try:
datetime.strptime(dt_string, dt_format)
return dt_format
except ValueError:
continue
return False
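# Illustrative examples (hypothetical inputs, not from the original source):
# _get_date_time_format('11:37 PM') returns '%I:%M %p',
# _get_date_time_format('2015-05-12') returns '%Y-%m-%d', and an
# unrecognised string such as 'noon' returns False.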
def get_system_time():
'''
Get the system time.
:return: Returns the system time in HH:MM AM/PM format.
:rtype: str
'''
return datetime.strftime(datetime.now(), "%I:%M %p")
def set_system_time(newtime):
'''
Set the system time.
:param str newtime:
The time to set. Can be any of the following formats.
- HH:MM:SS AM/PM
- HH:MM AM/PM
- HH:MM:SS (24 hour)
- HH:MM (24 hour)
:return: Returns True if successful. Otherwise False.
:rtype: bool
'''
# Parse time values from new time
time_format = _get_date_time_format(newtime)
dt_obj = datetime.strptime(newtime, time_format)
# Set time using set_system_date_time()
return set_system_date_time(hours=int(dt_obj.strftime('%H')),
minutes=int(dt_obj.strftime('%M')),
seconds=int(dt_obj.strftime('%S')))
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
:param int years: Years digit, ie: 2015
:param int months: Months digit: 1 - 12
:param int days: Days digit: 1 - 31
:param int hours: Hours digit: 0 - 23
:param int minutes: Minutes digit: 0 - 59
:param int seconds: Seconds digit: 0 - 59
:return: True if successful. Otherwise False.
:rtype: bool
CLI Example:
.. code-block:: bash
        salt '*' system.set_system_date_time 2015 5 12 11 37 53
'''
# Get the current date/time
try:
date_time = win32api.GetLocalTime()
except win32api.error as exc:
(number, context, message) = exc
log.error('Failed to get local time')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
# Check for passed values. If not passed, use current values
if not years:
years = date_time[0]
if not months:
months = date_time[1]
if not days:
days = date_time[3]
if not hours:
hours = date_time[4]
if not minutes:
minutes = date_time[5]
if not seconds:
seconds = date_time[6]
# Create the time tuple to be passed to SetLocalTime, including day_of_week
time_tuple = (years, months, days, hours, minutes, seconds, 0)
try:
win32api.SetLocalTime(time_tuple)
except win32api.error as exc:
(number, context, message) = exc
log.error('Failed to set local time')
log.error('nbr: {0}'.format(number))
log.error('ctx: {0}'.format(context))
log.error('msg: {0}'.format(message))
return False
return True
def get_system_date():
'''
Get the Windows system date
:return: Returns the system date.
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' system.get_system_date
'''
return datetime.strftime(datetime.now(), "%a %m/%d/%Y")
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
:param str newdate:
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
# Parse time values from new time
date_format = _get_date_time_format(newdate)
dt_obj = datetime.strptime(newdate, date_format)
# Set time using set_system_date_time()
return set_system_date_time(years=int(dt_obj.strftime('%Y')),
months=int(dt_obj.strftime('%m')),
days=int(dt_obj.strftime('%d')))
def start_time_service():
'''
Start the Windows time service
:return: True if successful. Otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.start_time_service
'''
return __salt__['service.start']('w32time')
def stop_time_service():
'''
Stop the Windows time service
:return: True if successful. Otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' system.stop_time_service
'''
return __salt__['service.stop']('w32time')
|
|
import logging
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
InstallationSubprocessError,
MetadataInconsistent,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.models.wheel import Wheel
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import (
dist_in_site_packages,
dist_in_usersite,
get_installed_distributions,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import running_under_virtualenv
from .base import Constraint
from .candidates import (
AlreadyInstalledCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
)
from .found_candidates import FoundCandidates
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
UnsatisfiableRequirement,
)
if MYPY_CHECK_RUNNING:
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
)
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.pkg_resources import Distribution
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.resolution.base import InstallRequirementProvider
from .base import Candidate, Requirement
from .candidates import BaseCandidate
C = TypeVar("C")
Cache = Dict[Link, C]
VersionCandidates = Dict[_BaseVersion, Candidate]
logger = logging.getLogger(__name__)
class Factory(object):
def __init__(
self,
finder, # type: PackageFinder
preparer, # type: RequirementPreparer
make_install_req, # type: InstallRequirementProvider
wheel_cache, # type: Optional[WheelCache]
use_user_site, # type: bool
force_reinstall, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
py_version_info=None, # type: Optional[Tuple[int, ...]]
):
# type: (...) -> None
self._finder = finder
self.preparer = preparer
self._wheel_cache = wheel_cache
self._python_candidate = RequiresPythonCandidate(py_version_info)
self._make_install_req_from_spec = make_install_req
self._use_user_site = use_user_site
self._force_reinstall = force_reinstall
self._ignore_requires_python = ignore_requires_python
self._build_failures = {} # type: Cache[InstallationError]
self._link_candidate_cache = {} # type: Cache[LinkCandidate]
self._editable_candidate_cache = {} # type: Cache[EditableCandidate]
self._installed_candidate_cache = {
} # type: Dict[str, AlreadyInstalledCandidate]
if not ignore_installed:
self._installed_dists = {
canonicalize_name(dist.project_name): dist
for dist in get_installed_distributions(local_only=False)
}
else:
self._installed_dists = {}
@property
def force_reinstall(self):
# type: () -> bool
return self._force_reinstall
def _make_candidate_from_dist(
self,
dist, # type: Distribution
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
):
# type: (...) -> Candidate
try:
base = self._installed_candidate_cache[dist.key]
except KeyError:
base = AlreadyInstalledCandidate(dist, template, factory=self)
self._installed_candidate_cache[dist.key] = base
if extras:
return ExtrasCandidate(base, extras)
return base
def _make_candidate_from_link(
self,
link, # type: Link
extras, # type: FrozenSet[str]
template, # type: InstallRequirement
name, # type: Optional[str]
version, # type: Optional[_BaseVersion]
):
# type: (...) -> Optional[Candidate]
# TODO: Check already installed candidate, and use it if the link and
# editable flag match.
if link in self._build_failures:
# We already tried this candidate before, and it does not build.
# Don't bother trying again.
return None
if template.editable:
if link not in self._editable_candidate_cache:
try:
self._editable_candidate_cache[link] = EditableCandidate(
link, template, factory=self,
name=name, version=version,
)
except (InstallationSubprocessError, MetadataInconsistent) as e:
logger.warning("Discarding %s. %s", link, e)
self._build_failures[link] = e
return None
base = self._editable_candidate_cache[link] # type: BaseCandidate
else:
if link not in self._link_candidate_cache:
try:
self._link_candidate_cache[link] = LinkCandidate(
link, template, factory=self,
name=name, version=version,
)
except (InstallationSubprocessError, MetadataInconsistent) as e:
logger.warning("Discarding %s. %s", link, e)
self._build_failures[link] = e
return None
base = self._link_candidate_cache[link]
if extras:
return ExtrasCandidate(base, extras)
return base
def _iter_found_candidates(
self,
ireqs, # type: Sequence[InstallRequirement]
specifier, # type: SpecifierSet
hashes, # type: Hashes
prefers_installed, # type: bool
):
# type: (...) -> Iterable[Candidate]
if not ireqs:
return ()
# The InstallRequirement implementation requires us to give it a
# "template". Here we just choose the first requirement to represent
# all of them.
# Hopefully the Project model can correct this mismatch in the future.
template = ireqs[0]
name = canonicalize_name(template.req.name)
extras = frozenset() # type: FrozenSet[str]
for ireq in ireqs:
specifier &= ireq.req.specifier
hashes &= ireq.hashes(trust_internet=False)
extras |= frozenset(ireq.extras)
# Get the installed version, if it matches, unless the user
# specified `--force-reinstall`, when we want the version from
# the index instead.
installed_candidate = None
if not self._force_reinstall and name in self._installed_dists:
installed_dist = self._installed_dists[name]
if specifier.contains(installed_dist.version, prereleases=True):
installed_candidate = self._make_candidate_from_dist(
dist=installed_dist,
extras=extras,
template=template,
)
def iter_index_candidates():
# type: () -> Iterator[Candidate]
result = self._finder.find_best_candidate(
project_name=name,
specifier=specifier,
hashes=hashes,
)
icans = list(result.iter_applicable())
# PEP 592: Yanked releases must be ignored unless only yanked
# releases can satisfy the version range. So if this is false,
# all yanked icans need to be skipped.
all_yanked = all(ican.link.is_yanked for ican in icans)
# PackageFinder returns earlier versions first, so we reverse.
versions_found = set() # type: Set[_BaseVersion]
for ican in reversed(icans):
if not all_yanked and ican.link.is_yanked:
continue
if ican.version in versions_found:
continue
candidate = self._make_candidate_from_link(
link=ican.link,
extras=extras,
template=template,
name=name,
version=ican.version,
)
if candidate is None:
continue
yield candidate
versions_found.add(ican.version)
return FoundCandidates(
iter_index_candidates,
installed_candidate,
prefers_installed,
)
def find_candidates(
self,
requirements, # type: Sequence[Requirement]
constraint, # type: Constraint
prefers_installed, # type: bool
):
# type: (...) -> Iterable[Candidate]
explicit_candidates = set() # type: Set[Candidate]
ireqs = [] # type: List[InstallRequirement]
for req in requirements:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(
ireqs,
constraint.specifier,
constraint.hashes,
prefers_installed,
)
return (
c for c in explicit_candidates
if constraint.is_satisfied_by(c)
and all(req.is_satisfied_by(c) for req in requirements)
)
def make_requirement_from_install_req(self, ireq, requested_extras):
# type: (InstallRequirement, Iterable[str]) -> Optional[Requirement]
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name, ireq.markers,
)
return None
if not ireq.link:
return SpecifierRequirement(ireq)
if ireq.link.is_wheel:
wheel = Wheel(ireq.link.filename)
if not wheel.supported(self._finder.target_python.get_tags()):
msg = "{} is not a supported wheel on this platform.".format(
wheel.filename,
)
raise UnsupportedWheel(msg)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
if cand is None:
# There's no way we can satisfy a URL requirement if the underlying
# candidate fails to build. An unnamed URL must be user-supplied, so
# we fail eagerly. If the URL is named, an unsatisfiable requirement
# can make the resolver do the right thing, either backtrack (and
# maybe find some other requirement that's buildable) or raise a
# ResolutionImpossible eventually.
if not ireq.name:
raise self._build_failures[ireq.link]
return UnsatisfiableRequirement(canonicalize_name(ireq.name))
return self.make_requirement_from_candidate(cand)
def make_requirement_from_candidate(self, candidate):
# type: (Candidate) -> ExplicitRequirement
return ExplicitRequirement(candidate)
def make_requirement_from_spec(
self,
specifier, # type: str
comes_from, # type: InstallRequirement
requested_extras=(), # type: Iterable[str]
):
# type: (...) -> Optional[Requirement]
ireq = self._make_install_req_from_spec(specifier, comes_from)
return self.make_requirement_from_install_req(ireq, requested_extras)
def make_requires_python_requirement(self, specifier):
# type: (Optional[SpecifierSet]) -> Optional[Requirement]
if self._ignore_requires_python or specifier is None:
return None
return RequiresPythonRequirement(specifier, self._python_candidate)
def get_wheel_cache_entry(self, link, name):
# type: (Link, Optional[str]) -> Optional[CacheEntry]
"""Look up the link in the wheel cache.
If ``preparer.require_hashes`` is True, don't use the wheel cache,
because cached wheels, always built locally, have different hashes
than the files downloaded from the index server and thus throw false
hash mismatches. Furthermore, cached wheels at present have
nondeterministic contents due to file modification times.
"""
if self._wheel_cache is None or self.preparer.require_hashes:
return None
return self._wheel_cache.get_cache_entry(
link=link,
package_name=name,
supported_tags=get_supported(),
)
def get_dist_to_uninstall(self, candidate):
# type: (Candidate) -> Optional[Distribution]
# TODO: Are there more cases this needs to return True? Editable?
dist = self._installed_dists.get(candidate.name)
if dist is None: # Not installed, no uninstallation required.
return None
        # We're installing into global site. The current installation must
        # be uninstalled, no matter whether it's in global or user site,
        # because the user site installation has precedence over global.
if not self._use_user_site:
return dist
# We're installing into user site. Remove the user site installation.
if dist_in_usersite(dist):
return dist
# We're installing into user site, but the installed incompatible
        # package is in global site. We can't uninstall that, and would let
        # the new user installation "shadow" it. But shadowing won't work
# in virtual environments, so we error out.
if running_under_virtualenv() and dist_in_site_packages(dist):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to {} in {}".format(
dist.project_name, dist.location,
)
)
return None
def _report_requires_python_error(
self,
requirement, # type: RequiresPythonRequirement
template, # type: Candidate
):
# type: (...) -> UnsupportedPythonVersion
message_format = (
"Package {package!r} requires a different Python: "
"{version} not in {specifier!r}"
)
message = message_format.format(
package=template.name,
version=self._python_candidate.version,
specifier=str(requirement.specifier),
)
return UnsupportedPythonVersion(message)
def get_installation_error(self, e):
# type: (ResolutionImpossible) -> InstallationError
assert e.causes, "Installation error reported with no cause"
# If one of the things we can't solve is "we need Python X.Y",
# that is what we report.
for cause in e.causes:
if isinstance(cause.requirement, RequiresPythonRequirement):
return self._report_requires_python_error(
cause.requirement,
cause.parent,
)
# Otherwise, we have a set of causes which can't all be satisfied
# at once.
# The simplest case is when we have *one* cause that can't be
# satisfied. We just report that case.
if len(e.causes) == 1:
req, parent = e.causes[0]
if parent is None:
req_disp = str(req)
else:
req_disp = '{} (from {})'.format(req, parent.name)
logger.critical(
"Could not find a version that satisfies the requirement %s",
req_disp,
)
return DistributionNotFound(
'No matching distribution found for {}'.format(req)
)
# OK, we now have a list of requirements that can't all be
# satisfied at once.
# A couple of formatting helpers
def text_join(parts):
# type: (List[str]) -> str
if len(parts) == 1:
return parts[0]
return ", ".join(parts[:-1]) + " and " + parts[-1]
def describe_trigger(parent):
# type: (Candidate) -> str
ireq = parent.get_install_requirement()
if not ireq or not ireq.comes_from:
return "{}=={}".format(parent.name, parent.version)
if isinstance(ireq.comes_from, InstallRequirement):
return str(ireq.comes_from.name)
return str(ireq.comes_from)
triggers = set()
for req, parent in e.causes:
if parent is None:
# This is a root requirement, so we can report it directly
trigger = req.format_for_error()
else:
trigger = describe_trigger(parent)
triggers.add(trigger)
if triggers:
info = text_join(sorted(triggers))
else:
info = "the requested packages"
msg = "Cannot install {} because these package versions " \
"have conflicting dependencies.".format(info)
logger.critical(msg)
msg = "\nThe conflict is caused by:"
for req, parent in e.causes:
msg = msg + "\n "
if parent:
msg = msg + "{} {} depends on ".format(
parent.name,
parent.version
)
else:
msg = msg + "The user requested "
msg = msg + req.format_for_error()
msg = msg + "\n\n" + \
"To fix this you could try to:\n" + \
"1. loosen the range of package versions you've specified\n" + \
"2. remove package versions to allow pip attempt to solve " + \
"the dependency conflict\n"
logger.info(msg)
return DistributionNotFound(
"ResolutionImpossible: for help visit "
"https://pip.pypa.io/en/latest/user_guide/"
"#fixing-conflicting-dependencies"
)
|
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module makes HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import http.client
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core
ssl_imported = False
ssl = None
try:
import ssl
ssl_imported = True
except ImportError:
pass
class ProxyError(atom.http_interface.Error):
pass
class TestConfigurationError(Exception):
pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
# Added to allow old v1 HttpClient objects to use the new
# http_code.HttpClient. Used in unit tests to inject a mock client.
v2_http_client = None
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
    Usage example, perform an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
if isinstance(data, str):
all_headers['Content-Length'] = str(len(data))
else:
raise atom.http_interface.ContentLengthRequired('Unable to calculate '
'the length of the data parameter. Specify a value for '
'Content-Length')
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
if self.v2_http_client is not None:
http_request = atom.http_core.HttpRequest(method=operation)
atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
http_request.headers = all_headers
if data:
http_request._body_parts.append(data)
return self.v2_http_client.request(http_request=http_request)
if not isinstance(url, atom.url.Url):
if isinstance(url, str):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
connection = self._prepare_connection(url, all_headers)
if self.debug:
connection.debuglevel = 1
connection.putrequest(operation, self._get_access_url(url),
skip_host=True)
if url.port is not None:
connection.putheader('Host', '%s:%s' % (url.host, url.port))
else:
connection.putheader('Host', url.host)
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', and thus resulting the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (url.protocol == 'https' and int(url.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % url.host
replacement_header_line = 'Host: %s' % url.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name in all_headers:
connection.putheader(header_name, all_headers[header_name])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
_send_data_part(data_part, connection)
else:
_send_data_part(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _prepare_connection(self, url, headers):
if not isinstance(url, atom.url.Url):
if isinstance(url, str):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
if url.protocol == 'https':
if not url.port:
return http.client.HTTPSConnection(url.host)
return http.client.HTTPSConnection(url.host, int(url.port))
else:
if not url.port:
return http.client.HTTPConnection(url.host)
return http.client.HTTPConnection(url.host, int(url.port))
def _get_access_url(self, url):
return url.to_string()
class ProxiedHttpClient(HttpClient):
"""Performs an HTTP request through a proxy.
  The proxy settings are obtained from environment variables. The URL of the
proxy server is assumed to be stored in the environment variables
'https_proxy' and 'http_proxy' respectively. If the proxy server requires
a Basic Auth authorization header, the username and password are expected to
be in the 'proxy-username' or 'proxy_username' variable and the
'proxy-password' or 'proxy_password' variable, or in 'http_proxy' or
'https_proxy' as "protocol://[username:password@]host:port".
After connecting to the proxy server, the request is completed as in
HttpClient.request.
"""
def _prepare_connection(self, url, headers):
proxy_settings = os.environ.get('%s_proxy' % url.protocol)
if not proxy_settings:
# The request was HTTP or HTTPS, but there was no appropriate proxy set.
return HttpClient._prepare_connection(self, url, headers)
else:
proxy_auth = _get_proxy_auth(proxy_settings)
proxy_netloc = _get_proxy_net_location(proxy_settings)
if url.protocol == 'https':
# Set any proxy auth headers
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = url.port
if not port:
port = '443'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
# Set the user agent to send to the proxy
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
else:
user_agent = 'User-Agent: python\r\n'
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
# Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        # The CONNECT preamble was built as text; the socket needs bytes.
        p_sock.sendall(proxy_pieces.encode('utf-8'))
        response = b''
        # Wait for the full response.
        while response.find(b'\r\n\r\n') == -1:
          response += p_sock.recv(8192)
        p_status = response.split()[1]
        if p_status != b'200':
          raise ProxyError('Error status=%s' % p_status.decode('utf-8'))
# Trivial setup for ssl socket.
sslobj = None
if ssl_imported:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
sock_ssl = socket.ssl(p_sock, None, None)
sslobj = http.client.FakeSocket(p_sock, sock_ssl)
        # Initialize httplib and replace with the proxy socket.
connection = http.client.HTTPConnection(proxy_url.host)
connection.sock = sslobj
return connection
else:
# If protocol was not https.
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy_netloc)
if not proxy_url.port:
proxy_url.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return http.client.HTTPConnection(proxy_url.host, int(proxy_url.port))
def _get_access_url(self, url):
return url.to_string()
def _get_proxy_auth(proxy_settings):
"""Returns proxy authentication string for header.
Will check environment variables for proxy authentication info, starting with
proxy(_/-)username and proxy(_/-)password before checking the given
proxy_settings for a [protocol://]username:password@host[:port] string.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Returns:
Authentication string for proxy, or empty string if no proxy username was
found.
"""
proxy_username = None
proxy_password = None
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if not proxy_username:
if '@' in proxy_settings:
protocol_and_proxy_auth = proxy_settings.split('@')[0].split(':')
if len(protocol_and_proxy_auth) == 3:
# 3 elements means we have [<protocol>, //<user>, <password>]
proxy_username = protocol_and_proxy_auth[1].lstrip('/')
proxy_password = protocol_and_proxy_auth[2]
elif len(protocol_and_proxy_auth) == 2:
# 2 elements means we have [<user>, <password>]
proxy_username = protocol_and_proxy_auth[0]
proxy_password = protocol_and_proxy_auth[1]
if proxy_username:
    user_auth = base64.b64encode(('%s:%s' % (proxy_username,
                                             proxy_password)).encode('utf-8'))
    return 'Basic %s\r\n' % (user_auth.decode('utf-8').strip())
else:
return ''
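# Illustrative example (hypothetical value, not from the original source):
# with no proxy_username/proxy_password environment variables set,
# _get_proxy_auth('http://alice:s3cret@proxy.example.com:3128') extracts
# 'alice'/'s3cret' from the URL and returns a 'Basic ...' authorization string.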
def _get_proxy_net_location(proxy_settings):
"""Returns proxy host and port.
Args:
proxy_settings: String from http_proxy or https_proxy environment variable.
Must be in the form of protocol://[username:password@]host:port
Returns:
String in the form of protocol://host:port
"""
if '@' in proxy_settings:
protocol = proxy_settings.split(':')[0]
netloc = proxy_settings.split('@')[1]
return '%s://%s' % (protocol, netloc)
else:
return proxy_settings
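# Illustrative example (hypothetical value, not from the original source):
# _get_proxy_net_location('http://alice:s3cret@proxy.example.com:3128')
# returns 'http://proxy.example.com:3128', while a value without credentials
# is returned unchanged.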
def _send_data_part(data, connection):
# Check to see if data is a file-like object that has a read method.
if hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
    while True:
      binarydata = data.read(100000)
      if not binarydata:  # handles both text ('') and binary (b'') end-of-file
        break
connection.send(binarydata)
return
else:
# The data object was not a file.
    # Try to convert the data to bytes and send it.
connection.send(bytes(data, "UTF-8"))
return
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import wave
from glob import glob
import os
import pyee
from os.path import dirname, join
from speech_recognition import AudioSource
from mycroft.client.speech.listener import RecognizerLoop
from mycroft.client.speech.mic import ResponsiveRecognizer
def to_percent(val):
return "{0:.2f}".format(100.0 * val) + "%"
class FileStream:
MIN_S_TO_DEBUG = 5.0
# How long between printing debug info to screen
UPDATE_INTERVAL_S = 1.0
def __init__(self, file_name):
self.file = wave.open(file_name, 'rb')
self.size = self.file.getnframes()
self.sample_rate = self.file.getframerate()
self.sample_width = self.file.getsampwidth()
self.last_update_time = 0.0
        self.total_s = self.size / self.sample_rate
if self.total_s > self.MIN_S_TO_DEBUG:
self.debug = True
else:
self.debug = False
def calc_progress(self):
return float(self.file.tell()) / self.size
def read(self, chunk_size):
progress = self.calc_progress()
if progress == 1.0:
raise EOFError
if self.debug:
cur_time = time.time()
dt = cur_time - self.last_update_time
if dt > self.UPDATE_INTERVAL_S:
self.last_update_time = cur_time
print(to_percent(progress))
return self.file.readframes(chunk_size)
def close(self):
self.file.close()
class FileMockMicrophone(AudioSource):
def __init__(self, file_name):
self.stream = FileStream(file_name)
self.SAMPLE_RATE = self.stream.sample_rate
self.SAMPLE_WIDTH = self.stream.sample_width
self.CHUNK = 1024
def close(self):
self.stream.close()
class AudioTester:
def __init__(self, samp_rate):
print() # Pad debug messages
self.ww_recognizer = RecognizerLoop().create_mycroft_recognizer(
samp_rate, 'en-us')
self.listener = ResponsiveRecognizer(self.ww_recognizer)
print()
def test_audio(self, file_name):
source = FileMockMicrophone(file_name)
ee = pyee.EventEmitter()
class SharedData:
times_found = 0
def on_found_wake_word():
SharedData.times_found += 1
ee.on('recognizer_loop:record_begin', on_found_wake_word)
try:
while True:
self.listener.listen(source, ee)
except EOFError:
pass
return SharedData.times_found
class Color:
BOLD = '\033[1m'
NORMAL = '\033[0m'
GREEN = '\033[92m'
RED = '\033[91m'
def bold_str(val):
return Color.BOLD + str(val) + Color.NORMAL
def get_root_dir():
return dirname(dirname(__file__))
def get_file_names(folder):
query = join(folder, '*.wav')
root_dir = get_root_dir()
full_path = join(root_dir, query)
file_names = sorted(glob(full_path))
if len(file_names) < 1:
raise IOError
return file_names
def test_audio_files(tester, file_names, on_file_finish):
num_found = 0
for file_name in file_names:
short_name = os.path.basename(file_name)
times_found = tester.test_audio(file_name)
num_found += times_found
on_file_finish(short_name, times_found)
return num_found
def file_frame_rate(file_name):
wf = wave.open(file_name, 'rb')
frame_rate = wf.getframerate()
wf.close()
return frame_rate
def print_ww_found_status(word, short_name):
print("Wake word " + bold_str(word) + " - " + short_name)
def test_false_negative(directory):
file_names = get_file_names(directory)
# Grab audio format info from first file
tester = AudioTester(file_frame_rate(file_names[0]))
def on_file_finish(short_name, times_found):
not_found_str = Color.RED + "Not found"
found_str = Color.GREEN + "Detected "
status_str = not_found_str if times_found == 0 else found_str
print_ww_found_status(status_str, short_name)
num_found = test_audio_files(tester, file_names, on_file_finish)
total = len(file_names)
    print()
    print("Found " + bold_str(num_found) + " out of " + bold_str(total))
    print(bold_str(to_percent(float(num_found) / total)) + " accuracy.")
    print()
def test_false_positive(directory):
file_names = get_file_names(directory)
# Grab audio format info from first file
tester = AudioTester(file_frame_rate(file_names[0]))
def on_file_finish(short_name, times_found):
not_found_str = Color.GREEN + "Not found"
found_str = Color.RED + "Detected "
status_str = not_found_str if times_found == 0 else found_str
print_ww_found_status(status_str, short_name)
num_found = test_audio_files(tester, file_names, on_file_finish)
total = len(file_names)
    print()
    print("Found " + bold_str(num_found) + " false positives")
    print("in " + bold_str(str(total)) + " files")
    print()
def run_test():
directory = join('audio-accuracy-test', 'data')
false_neg_dir = join(directory, 'with_wake_word', 'query_after')
false_pos_dir = join(directory, 'without_wake_word')
try:
test_false_negative(false_neg_dir)
except IOError:
print(bold_str("Warning: No wav files found in " + false_neg_dir))
try:
test_false_positive(false_pos_dir)
except IOError:
print(bold_str("Warning: No wav files found in " + false_pos_dir))
print("Complete!")
if __name__ == "__main__":
run_test()
|
|
import unittest
import numpy as np
from . import defHeaders
from pycqed.analysis.tools import data_manipulation as dm_tools
CBox = None
class CBox_tests(unittest.TestCase):
'''
This is a test suite for testing the QuTech_ControlBox Instrument.
It is designed to provide a test function for each function as well as for
    general things such as testing if the communications are working.
'''
@classmethod
def setUpClass(self):
self.CBox = CBox
def test_firmware_version(self):
v = CBox.get('firmware_version')
self.assertTrue(int(v[1]) == 2) # major version
self.assertTrue(int(int(v[3:5])) > 13) # minor version
def test_setting_mode(self):
for i in range(6):
self.CBox.set('acquisition_mode', i)
self.assertEqual(self.CBox.get('acquisition_mode'),
defHeaders.acquisition_modes[i])
self.CBox.set('acquisition_mode', 0)
self.assertEqual(self.CBox.get('acquisition_mode'),
defHeaders.acquisition_modes[0])
for i in range(2):
self.CBox.set('run_mode', i)
self.assertEqual(self.CBox.get('run_mode'),
defHeaders.run_modes[i])
self.CBox.set('run_mode', 0)
for j in range(3):
for i in range(3):
self.CBox.set('AWG{}_mode'.format(j), i)
self.assertEqual(self.CBox.get('AWG{}_mode'.format(j)),
defHeaders.awg_modes[i])
self.CBox.set('AWG{}_mode'.format(j), 0)
def test_codec(self):
# codec is CBox.c
encoded_128 = self.CBox.c.encode_byte(128, 7)
self.assertTrue(type(encoded_128) == bytes)
self.assertTrue(len(encoded_128) == 2)
self.assertTrue(bytes_to_binary(encoded_128) ==
'1000000110000000')
encoded_128 = self.CBox.c.encode_byte(128, 4)
self.assertTrue(type(encoded_128) == bytes)
self.assertTrue(len(encoded_128) == 2)
self.assertTrue(bytes_to_binary(encoded_128) ==
'1000100010000000')
# Encoding using 4 bits per byte
encoded_546815 = self.CBox.c.encode_byte(546815, 4, 6)
self.assertEqual(type(encoded_546815), bytes)
self.assertEqual(len(encoded_546815), 6)
sub_str = bytes([encoded_546815[0], encoded_546815[-1]])
        self.assertTrue(bytes_to_binary(sub_str) == '1000100010001111')
self.assertEqual(CBox.c.decode_byte(encoded_546815, 4), 546815)
# encoding using 7 bits per byte
encoded_546815 = self.CBox.c.encode_byte(546815, 7, 4)
self.assertEqual(CBox.c.decode_byte(encoded_546815, 7), 546815)
# encoding using 7 bits per byte
encoded_neg235 = self.CBox.c.encode_byte(-235, 7, 4)
self.assertEqual(CBox.c.decode_byte(encoded_neg235, 7), -235)
# Encoding and decoding array
x = np.random.randint(0, 2565, 20)
data_bytes = CBox.c.encode_array(x, 7, 2)
message = CBox.c.create_message(data_bytes=data_bytes)
x_dec = CBox.c.decode_message(message, 7, 2)
self.assertEqual(x.all(), x_dec.all())
def test_sig_del(self):
s_del = self.CBox.get('signal_delay')
self.CBox.set('signal_delay', 0)
self.assertEqual(self.CBox.get('signal_delay'), 0)
self.CBox.set('signal_delay', 124)
self.assertEqual(self.CBox.get('signal_delay'), 124)
self.CBox.set('signal_delay', s_del)
def test_integration_length(self):
s_del = self.CBox.get('integration_length')
self.CBox.set('integration_length', 50)
self.assertEqual(self.CBox.get('integration_length'), 50)
self.CBox.set('integration_length', 124)
self.assertEqual(self.CBox.get('integration_length'), 124)
self.CBox.set('integration_length', s_del)
def test_set_signal_threshold(self):
for i in range(2):
t = self.CBox.get('sig{}_threshold_line'.format(i))
self.CBox.set('sig{}_threshold_line'.format(i), 124)
self.assertEqual(
self.CBox.get('sig{}_threshold_line'.format(i)), 124)
self.CBox.set('sig{}_threshold_line'.format(i), t)
def test_adc_offset(self):
offs = self.CBox.get('adc_offset')
self.CBox.set('adc_offset', 123)
self.assertEqual(self.CBox.get('adc_offset'), 123)
self.CBox.set('adc_offset', -123)
self.assertEqual(self.CBox.get('adc_offset'), -123)
self.CBox.set('adc_offset', offs)
def test_dac_offset(self):
for i in range(3):
for j in range(2):
initial_val = self.CBox.get('AWG{}_dac{}_offset'.format(i, j))
self.CBox.set('AWG{}_dac{}_offset'.format(i, j), 200)
self.assertEqual(
self.CBox.get('AWG{}_dac{}_offset'.format(i, j)), 200)
self.CBox.set('AWG{}_dac{}_offset'.format(i, j), initial_val)
def test_tape(self):
for i in range(3):
tape = [2, 4, 5, 1, 0, 3]
initial_val = self.CBox.get('AWG{}_tape'.format(i))
self.CBox.set('AWG{}_tape'.format(i), tape)
# TODO: This part of test should be rewritten
# stored_conventional_tape = self.CBox.get('AWG{}_tape'.format(i))
# conventional_tape = [sample/2 for sample in
# stored_conventional_tape]
# self.assertEqual(conventional_tape, tape)
self.CBox.set('AWG{}_tape'.format(i), initial_val)
def test_log_length(self):
initial_val = self.CBox.get('log_length')
self.CBox.set('log_length', 2)
self.assertEqual(self.CBox.get('log_length'), 2)
self.CBox.set('log_length', 7500)
self.assertEqual(self.CBox.get('log_length'), 7500)
self.CBox.set('log_length', initial_val)
def test_lin_trans_coeffs(self):
initial_val = self.CBox.get('lin_trans_coeffs')
self.CBox.set('lin_trans_coeffs', [1, .4, 0, 1.33])
self.assertEqual(self.CBox.get('lin_trans_coeffs'), [1, .4, 0, 1.33])
self.CBox.set('lin_trans_coeffs', [1, .4, .2, 1])
self.assertEqual(self.CBox.get('lin_trans_coeffs'), [1, .4, .2, 1])
self.CBox.set('lin_trans_coeffs', initial_val)
def test_averaging_parameters(self):
initial_val = self.CBox.get('nr_samples')
self.CBox.set('nr_samples', 2)
self.assertEqual(self.CBox.get('nr_samples'), 2)
self.CBox.set('nr_samples', 1564)
self.assertEqual(self.CBox.get('nr_samples'), 1564)
self.CBox.set('nr_samples', initial_val)
initial_val = self.CBox.get('nr_averages')
self.CBox.set('nr_averages', 2)
self.assertEqual(self.CBox.get('nr_averages'), 2)
self.CBox.set('nr_averages', 2**15)
self.assertEqual(self.CBox.get('nr_averages'), 2**15)
self.CBox.set('nr_averages', initial_val)
def test_measurement_timeout(self):
initial_val = self.CBox.get('measurement_timeout')
self.CBox.set('measurement_timeout', 123)
self.assertEqual(self.CBox.get('measurement_timeout'), 123)
self.CBox.set('measurement_timeout', -123)
self.assertEqual(self.CBox.get('measurement_timeout'), -123)
self.CBox.set('measurement_timeout', initial_val)
def test_Integration_logging(self):
'''
Test for mode 1 integration logs. Only tests on length of data
'''
log_length = 8000
self.CBox.set('acquisition_mode', 0)
self.CBox.set('log_length', log_length)
self.CBox.set('signal_delay', 20)
self.CBox.set('integration_length', 255)
self.CBox.set('nr_averages', 4)
self.CBox.set('nr_samples', 10)
self.CBox.set('adc_offset', 1)
weights0 = np.ones(512)
weights1 = np.ones(512)
self.CBox.set('sig0_integration_weights', weights0)
self.CBox.set('sig1_integration_weights', weights1)
self.CBox.set('acquisition_mode', 1)
[InputAvgRes0, InputAvgRes1] = self.CBox.get_integration_log_results()
self.CBox.set('acquisition_mode', 0)
self.assertEqual(len(InputAvgRes0), log_length)
self.assertEqual(len(InputAvgRes1), log_length)
def test_state_logging_and_counters(self):
'''
Test uses mode 1 integration logging. Checks if the results
for the integration shots, states and state counters produce
the same results for a given threshold.
Does not do this using a dedicated sequence.
'''
log_length = 50
self.CBox.set('acquisition_mode', 0)
self.CBox.set('log_length', log_length)
weights0 = np.ones(512)
weights1 = np.ones(512)
self.CBox.set('sig0_integration_weights', weights0)
self.CBox.set('sig1_integration_weights', weights1)
self.CBox.set('acquisition_mode', 1)
[IntLog0, IntLog1] = self.CBox.get_integration_log_results()
self.CBox.set('acquisition_mode', 0)
threshold = int(np.mean(IntLog0))
self.CBox.sig0_threshold_line.set(threshold)
self.CBox.sig1_threshold_line.set(threshold)
self.CBox.set('acquisition_mode', 1)
log = self.CBox.get_integration_log_results()
counters = self.CBox.get_qubit_state_log_counters()
self.CBox.set('acquisition_mode', 0)
digi_shots = dm_tools.digitize(
log, threshold=CBox.sig0_threshold_line.get())
software_err_fracs_0 = dm_tools.count_error_fractions(digi_shots[0])
software_err_fracs_1 = dm_tools.count_error_fractions(digi_shots[1])
# Test if software analysis of the counters and CBox counters are the
# same
self.assertTrue((software_err_fracs_0 == counters[0]).all())
self.assertTrue((software_err_fracs_1 == counters[1]).all())
def test_integration_average_mode(self):
self.CBox.set('acquisition_mode', 0)
NoSamples = 60
weights0 = np.ones(512) * 1
weights1 = np.ones(512) * 0
self.CBox.set('sig0_integration_weights', weights0)
self.CBox.set('sig1_integration_weights', weights1)
self.CBox.set('nr_averages', 4)
self.CBox.set('nr_samples', NoSamples)
self.CBox.set('acquisition_mode', 4)
[InputAvgRes0, IntAvgRes1] = self.CBox.get_integrated_avg_results()
self.CBox.set('acquisition_mode', 0)
# Test signal lengths set correctly
self.assertEqual(len(InputAvgRes0), NoSamples)
# Test if setting weights to zero functions correctly
self.assertTrue((IntAvgRes1 == np.zeros(NoSamples)).all())
weights1 = np.ones(512) * 1
self.CBox.set('sig1_integration_weights', weights1)
self.CBox.set('lin_trans_coeffs', [0, 0, 0, 1])
self.CBox.set('acquisition_mode', 4)
[InputAvgRes0, IntAvgRes1] = self.CBox.get_integrated_avg_results()
self.CBox.set('acquisition_mode', 0)
# Test if setting lin trans coeff to zero functions correctly
self.assertTrue((InputAvgRes0 == np.zeros(NoSamples)).all())
self.assertFalse((IntAvgRes1 == np.zeros(NoSamples)).all())
# def test_streaming_mode(self):
# self.CBox.set('acquisition_mode', 0)
# NoSamples = 1e3
# self.CBox.set('acquisition_mode', 5)
# data = self.CBox.get_streaming_results(NoSamples)
# self.CBox.set('acquisition_mode', 0)
# self.assertTrue(len(data[0]) > NoSamples)
# self.assertTrue(len(data[0]) == len(data[1]))
def test_set_awg_lookuptable(self):
length = np.random.randint(1, 128)
random_lut = np.random.randint(-1000, 1000, length)
self.assertTrue(self.CBox.set_awg_lookuptable(0, 4, 0, random_lut))
def test_DacEnable(self):
for awg in range(3):
self.assertTrue(self.CBox.enable_dac(awg, 0, True))
self.assertTrue(self.CBox.enable_dac(awg, 0, False))
self.assertTrue(self.CBox.enable_dac(awg, 1, True))
self.assertTrue(self.CBox.enable_dac(awg, 1, False))
def test_DacOffset(self):
for awg in range(3):
self.assertTrue(self.CBox.set_dac_offset(awg, 0, True))
self.assertTrue(self.CBox.set_dac_offset(awg, 0, False))
self.assertTrue(self.CBox.set_dac_offset(awg, 1, True))
self.assertTrue(self.CBox.set_dac_offset(awg, 1, False))
def test_input_avg_mode(self):
self.CBox.set('acquisition_mode', 0)
NoSamples = 250
self.CBox.set('nr_samples', NoSamples)
self.CBox.set('nr_averages', 2)
self.assertEqual(self.CBox.get('nr_samples'), NoSamples)
self.CBox.set('acquisition_mode', 3)
[InputAvgRes0, InputAvgRes1] = self.CBox.get_input_avg_results()
self.CBox.set('acquisition_mode', 0)
# Only checks the length of the returned signals;
# does not check whether averaging or the signal delay work.
self.assertEqual(len(InputAvgRes0), NoSamples)
self.assertEqual(len(InputAvgRes1), NoSamples)
# def test_SigDelay(self):
# val = np.random.randint(0, 256)
# stat = self.CBox.set_signal_delay(val)
# self.assertTrue(stat)
# self.assertEqual(val, self.CBox.get_signal_delay(val))
# def test_IntegrationLength(self):
# val = np.random.randint(0, 255)
# # Real bound is 512 but currently exception in protocol
# stat = self.CBox.set_integration_length(val)
# self.assertTrue(stat)
# self.assertEqual(val, self.CBox.get_integration_length(val))
def bytes_to_binary(bytestring):
'''
used as a convenience function in codec testing
'''
s = ''
for n in bytestring:
s += ''.join(str((n & (1 << i)) and 1) for i in reversed(range(8)))
return s
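# A quick illustration of the bit order produced above (not part of the test
# suite; the byte values are arbitrary examples):
#
#     bytes_to_binary([1, 2])    ->  '0000000100000010'
#     bytes_to_binary([255, 0])  ->  '1111111100000000'
#
# i.e. each byte is rendered MSB-first and the results are concatenated,
# which is the representation the codec tests compare against.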
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(CBox_tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# encoding: utf-8
import curses
import _curses
import weakref
from .editloop import FormDefaultEditLoop, FormNewEditLoop
from .screen import ScreenArea
from ..widget import button
from ..widget import widget
#import Menu
from .. import util_viewhelp
from ..globals import DISABLE_RESIZE_SYSTEM
from .. import global_options
from ..utils import InputHandler, _LinePrinter
class _FormBase(ScreenArea,
InputHandler,
_LinePrinter):
BLANK_COLUMNS_RIGHT = 2
BLANK_LINES_BASE = 2
OK_BUTTON_TEXT = 'OK'
OK_BUTTON_BR_OFFSET = (2, 6)
OKBUTTON_TYPE = button.MiniButton
DEFAULT_X_OFFSET = 2
DEFAULT_NEXTRELY = 2  # starting row for auto-placed widgets (used by _clear_all_widgets)
#Preserve cursor location between displays?
PRESERVE_SELECTED_WIDGET_DEFAULT = False
FRAMED = True
ALLOW_RESIZE = True
FIX_MINIMUM_SIZE_WHEN_CREATED = True
def __init__(self, name=None, parent_app=None, framed=None, help=None,
color='FORMDEFAULT', widget_list=None, cycle_widgets=False,
*args, **kwargs):
super(_FormBase, self).__init__(*args, **kwargs)
self.preserve_selected_widget = self.__class__.PRESERVE_SELECTED_WIDGET_DEFAULT
if parent_app:
try:
self.parent_app = weakref.proxy(parent_app)
except:
self.parent_app = parent_app
try:
self.keypress_timeout = self.parent_app.keypress_timeout_default
except AttributeError:
pass
if framed is None:
self.framed = self.__class__.FRAMED
else:
self.framed = framed
self.name = name
self.editing = False
## OLD MENU CODE REMOVED self.__menus = []
self._clear_all_widgets()
self.help = help
self.color = color
self.cycle_widgets = cycle_widgets
self.set_up_handlers()
self.set_up_exit_condition_handlers()
if hasattr(self, 'initialWidgets'):
self.create_widgets_from_list(self.__class__.initialWidgets)
if widget_list:
self.create_widgets_from_list(widget_list)
self.create()
if self.FIX_MINIMUM_SIZE_WHEN_CREATED:
self.min_l = self.lines
self.min_c = self.columns
def resize(self):
pass
def _clear_all_widgets(self, ):
self._widgets__ = []
self._widgets_by_id = {}
self._next_w_id = 0
self.nextrely = self.DEFAULT_NEXTRELY
self.nextrelx = self.DEFAULT_X_OFFSET
self.editw = 0 # Index of widget to edit.
def create_widgets_from_list(self, widget_list, **kwargs):
#This code is currently experimental, and the API may change in future
#releases
# (npyscreen.TextBox, {'rely': 2, 'relx': 7, 'editable': False})
for line in widget_list:
w_type = line[0]
kwargs = line[1]
self.add_widget(w_type, **kwargs)
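# Example of the widget_list format consumed above (illustrative only; the
# widget classes shown are assumptions about what the widget package exports):
#
#     widget_list = [
#         (button.MiniButton, {'rely': 2, 'relx': 7, 'name': 'OK'}),
#         (widget.Widget,     {'rely': 4, 'relx': 7, 'editable': False}),
#     ]
#     form = Form(widget_list=widget_list)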
def set_value(self, value):
self.value = value
for _w in self._widgets__:
if hasattr(_w, 'when_parent_changes_value'):
_w.when_parent_changes_value()
def _resize(self, *args):
global DISABLE_RESIZE_SYSTEM
if DISABLE_RESIZE_SYSTEM:
return False
if not self.ALLOW_RESIZE:
return False
if hasattr(self, 'parent_app'):
self.parent_app.resize()
self._create_screen()
self.resize()
for w in self._widgets__:
w._resize()
self.DISPLAY()
def create(self):
"""
Programmers should over-ride this in derived classes, creating widgets
here.
"""
pass
def set_up_handlers(self):
self.complex_handlers = []
self.handlers = {curses.KEY_F1: self.h_display_help,
"KEY_F(1)": self.h_display_help,
"^O": self.h_display_help,
"^L": self.h_display,
curses.KEY_RESIZE: self._resize}
def set_up_exit_condition_handlers(self):
# What happens when widgets exit?
# each widget will set its how_exited value; this is used
# to look up the handler in the following table.
self.how_exited_handers = {
widget.EXITED_DOWN: self.find_next_editable,
widget.EXITED_RIGHT: self.find_next_editable,
widget.EXITED_UP: self.find_previous_editable,
widget.EXITED_LEFT: self.find_previous_editable,
widget.EXITED_ESCAPE: self.do_nothing,
True: self.find_next_editable, # A default value
widget.EXITED_MOUSE: self.get_and_use_mouse_event,
False: self.do_nothing,
None: self.do_nothing,
}
def handle_exiting_widgets(self, condition):
self.how_exited_handers[condition]()
def do_nothing(self, *args, **kwargs):
pass
def exit_editing(self, *args, **kwargs):
self.editing = False
try:
self._widgets__[self.editw].entry_widget.editing = False
except:
pass
try:
self._widgets__[self.editw].editing = False
except:
pass
def adjust_widgets(self):
"""
This method is called whenever the user is editing any widget (as opposed
to `while_editing`, which is only called when moving between widgets).
Override it to add custom behaviour.
It is called on every keypress, and possibly more often, so keep the work
done here short or the interface will become sluggish.
"""
pass
def while_editing(self, *args, **kwargs):
"""
This function gets called once during each iteration of the `edit` loop.
Override this function to add custom loop activity. A proxy to the
currently selected widget is passed to the function.
"""
pass
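# A minimal sketch of how these two hooks are typically overridden
# (illustrative only; the status widget and its class are assumptions,
# not part of this module):
#
#     class MyForm(Form):
#         def create(self):
#             self.status = self.add_widget(widget.Widget, editable=False)
#
#         def adjust_widgets(self):
#             # runs on every keypress while a widget is edited; keep it cheap
#             self.status.value = 'editing widget %d' % self.editw
#             self.status.display()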
def on_screen(self):
# is the widget at editw on screen at the moment?
# if not, alter screen so that it is.
w = weakref.proxy(self._widgets__[self.editw])
max_y, max_x = self._max_physical()
w_my, w_mx = w.calculate_area_needed()
# always try to show the top of the screen.
self.show_from_y = 0
self.show_from_x = 0
while w.rely + w_my - 1 > self.show_from_y + max_y:
self.show_from_y += 1
while w.rely < self.show_from_y:
self.show_from_y -= 1
while w.relx + w_mx - 1 > self.show_from_x + max_x:
self.show_from_x += 1
while w.relx < self.show_from_x:
self.show_from_x -= 1
def h_display_help(self, input):
if self.help is None:
return
if self.name:
help_name = "%s Help" %(self.name)
else:
help_name = None
curses.flushinp()
util_viewhelp.view_help(self.help, title=help_name)
#select.ViewText(self.help, name=help_name)
self.display()
return True
def DISPLAY(self):
self.curses_pad.redrawwin()
self.erase()
self.display()
self.display(clear=False)
if self.editing and self.editw is not None:
self._widgets__[self.editw].display()
def h_display(self, input):
self._resize()
self.DISPLAY()
def safe_get_mouse_event(self):
try:
mouse_event = curses.getmouse()
return mouse_event
except _curses.error:
return None
def get_and_use_mouse_event(self):
mouse_event = self.safe_get_mouse_event()
if mouse_event:
self.use_mouse_event(mouse_event)
def use_mouse_event(self, mouse_event):
wg = self.find_mouse_handler(mouse_event)
if wg:
self.set_editing(wg)
if hasattr(wg, 'handle_mouse_event'):
wg.handle_mouse_event(mouse_event)
else:
curses.beep()
def find_mouse_handler(self, mouse_event):
#mouse_id, x, y, z, bstate = mouse_event
for wd in self._widgets__:
try:
if wd.intersted_in_mouse_event(mouse_event) == True:
return wd
except AttributeError:
pass
return None
def set_editing(self, wdg):
try:
self.editw = self._widgets__.index(wdg)
except ValueError:
pass
def find_next_editable(self, *args):
if not self.editw == len(self._widgets__):
if not self.cycle_widgets:
r = list(range(self.editw + 1, len(self._widgets__)))
else:
r = list(range(self.editw + 1, len(self._widgets__))) + list(range(0, self.editw))
for n in r:
if self._widgets__[n].editable and not self._widgets__[n].hidden:
self.editw = n
break
self.display()
def find_previous_editable(self, *args):
if not self.editw == 0:
# remember that xrange does not return the 'last' value,
# so go to -1, not 0! (fence post error in reverse)
for n in range(self.editw - 1, -1, -1):
if self._widgets__[n].editable and not self._widgets__[n].hidden:
self.editw = n
break
#def widget_useable_space(self, rely=0, relx=0):
# #Slightly misreports space available.
# mxy, mxx = self.lines-1, self.columns-1
# return (mxy-1-rely, mxx-1-relx)
def center_on_display(self):
my, mx = self._max_physical()
if self.lines < my:
self.show_aty = (my - self.lines) // 2
else:
self.show_aty = 0
if self.columns < mx:
self.show_atx = (mx - self.columns) // 2
else:
self.show_atx = 0
def display(self, clear=False):
#APPLICATION_THEME_MANAGER.set_theme(self)
if curses.has_colors() and not global_options.DISABLE_ALL_COLORS:
self.curses_pad.attrset(0)
color_attribute = self.theme_manager.findPair(self, self.color)
self.curses_pad.bkgdset(' ', color_attribute)
self.curses_pad.attron(color_attribute)
self.curses_pad.erase()
self.draw_form()
for w in [wg for wg in self._widgets__ if wg.hidden]:
w.clear()
for w in [wg for wg in self._widgets__ if not wg.hidden]:
w.update(clear=clear)
self.refresh()
def draw_title_and_help(self):
try:
if self.name:
_title = self.name[:(self.columns - 4)]
_title = ' ' + str(_title) + ' '
#self.curses_pad.addstr(0,1, ' '+str(_title)+' ')
if isinstance(_title, bytes):
_title = _title.decode('utf-8', 'replace')
self.add_line(0, 1, _title,
self.make_attributes_list(_title,
curses.A_NORMAL),
self.columns - 4)
except:
pass
if self.help and self.editing:
try:
help_advert = " Help: F1 or ^O "
if isinstance(help_advert, bytes):
help_advert = help_advert.decode('utf-8', 'replace')
self.add_line(
0, self.curses_pad.getmaxyx()[1]-len(help_advert)-2,
help_advert,
self.make_attributes_list(help_advert, curses.A_NORMAL),
len(help_advert)
)
except:
pass
def draw_form(self):
if self.framed:
if curses.has_colors() and not global_options.DISABLE_ALL_COLORS:
self.curses_pad.attrset(0)
self.curses_pad.bkgdset(' ', curses.A_NORMAL | self.theme_manager.findPair(self, self.color))
self.curses_pad.border()
self.draw_title_and_help()
def add_widget(self,
widget_class,
w_id=None,
max_height=None,
rely=None,
relx=None,
*args, **kwargs):
"""
Add a widget to the form. The form will do its best to decide on
placing, unless you override it.
The form of this function is add_widget(widget_class, ....) with any
arguments or keywords supplied to the widget. The widget will be added
to self._widgets__
It is safe to use the return value of this function to keep hold of the
widget, since that is a weak reference proxy, but it is not safe to keep
hold of self._widgets__
"""
if rely is None:
rely = self.nextrely
if relx is None:
relx = self.nextrelx
if max_height is False:
max_height = self.curses_pad.getmaxyx()[0] - rely - 1
_w = widget_class(self,
rely=rely,
relx=relx,
max_height=max_height,
*args, **kwargs)
self.nextrely = _w.height + _w.rely
self._widgets__.append(_w)
w_proxy = weakref.proxy(_w)
if not w_id:
w_id = self._next_w_id
self._next_w_id += 1
self._widgets_by_id[w_id] = w_proxy
return w_proxy
def get_widget(self, w_id):
return self._widgets_by_id[w_id]
add = add_widget
class FormBaseNew(FormNewEditLoop, _FormBase):
# use the new-style edit loop.
pass
class Form(FormDefaultEditLoop, _FormBase):
#use the old-style edit loop
def resize(self):
super(Form, self).resize()
self.move_ok_button()
class FormBaseNewExpanded(FormNewEditLoop, _FormBase):
BLANK_LINES_BASE = 1
OK_BUTTON_BR_OFFSET = (1, 6)
# use the new-style edit loop.
pass
class FormExpanded(FormDefaultEditLoop, _FormBase):
BLANK_LINES_BASE = 1
OK_BUTTON_BR_OFFSET = (1, 6)
#use the old-style edit loop
pass
class TitleForm(Form):
"""A form without a box, just a title line"""
BLANK_LINES_BASE = 1
DEFAULT_X_OFFSET = 1
DEFAULT_NEXTRELY = 1
BLANK_COLUMNS_RIGHT = 0
OK_BUTTON_BR_OFFSET = (1, 6)
#OKBUTTON_TYPE = button.MiniButton
#DEFAULT_X_OFFSET = 1
def draw_form(self):
MAXY, MAXX = self.curses_pad.getmaxyx()
self.curses_pad.hline(0, 0, curses.ACS_HLINE, MAXX)
self.draw_title_and_help()
class TitleFooterForm(TitleForm):
BLANK_LINES_BASE = 1
def draw_form(self):
MAXY, MAXX = self.curses_pad.getmaxyx()
if self.editing:
self.curses_pad.hline(MAXY - 1, 0, curses.ACS_HLINE,
MAXX - self.__class__.OK_BUTTON_BR_OFFSET[1] - 1)
else:
self.curses_pad.hline(MAXY - 1, 0, curses.ACS_HLINE, MAXX - 1)
super(TitleFooterForm, self).draw_form()
class SplitForm(Form):
"""Just the same as the Form class, but with a horizontal line across it"""
MOVE_LINE_ON_RESIZE = False
def __init__(self, draw_line_at=None, *args, **kwargs):
super(SplitForm, self).__init__(*args, **kwargs)
if not hasattr(self, 'draw_line_at'):
#if draw_line_at != None:
if draw_line_at is not None:
self.draw_line_at = draw_line_at
else:
self.draw_line_at = self.get_half_way()
def draw_form(self,):
MAXY, MAXX = self.curses_pad.getmaxyx()
super(SplitForm, self).draw_form()
self.curses_pad.hline(self.draw_line_at, 1, curses.ACS_HLINE, MAXX - 2)
def get_half_way(self):
return self.curses_pad.getmaxyx()[0] // 2
def resize(self):
super(SplitForm, self).resize()
if self.MOVE_LINE_ON_RESIZE:
self.draw_line_at = self.get_half_way()
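# Usage sketch for SplitForm (illustrative; 'ReportForm' and its widgets are
# made-up names, and the surrounding application wrapper is assumed):
#
#     class ReportForm(SplitForm):
#         MOVE_LINE_ON_RESIZE = True   # recompute the divider on terminal resize
#
#         def create(self):
#             # place widgets above and below self.get_half_way() as needed
#             pass
#
#     # alternatively, fix the divider at a specific row:
#     #     form = ReportForm(draw_line_at=10)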
|
|
#!/usr/bin/env python
#
# Sybil - Python Profile Manager
# Copyright (c) 2008, Patrick Kennedy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import datetime
import logging
import os
import random
import sys
import urllib
import wsgiref.handlers
from google.appengine.api import users, memcache
from google.appengine.ext import gql, db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from common import counter, stores, utils, framework
from common.stores import UserData
from common.stores import Profile, World, Comment, WorldMember
class Index(framework.BaseRequestHandler):
def get(self):
len_ = self.request.get_range('length', min_value=3,
max_value=10, default=6)
names = self.get_name_list(5, len_)
get = self.request.get
refresh_cache = get('refresh_cache', False) is not False
if get('logit', False) is not False:
from google.appengine.api.labs import taskqueue
taskqueue.add(url='/tasks/logging/')
context = self.context
context['len_'] = len_
context['names'] = names
sterile_url = framework.sterilize_url(self.request.url)
@framework.memoize(sterile_url, 'profile_listing', refresh=refresh_cache)
def __fetch_profile_data():
query = stores.Profile.all()
query.filter('public =', True)
query.order('-updated')
return query.fetch(5)
@framework.memoize(sterile_url, 'world_listing', refresh=refresh_cache)
def __fetch_world_data():
query = World.all()
query.filter('public =', True)
query.order('-created')
return query.fetch(5)
context['profile_data'] = {
'profiles': __fetch_profile_data(),
'list_author': True
}
context['world_data'] = {
'worlds': __fetch_world_data(),
'list_author': True,
}
profile_count = counter.Counter('TotalProfiles').get_count(refresh_cache)
world_count = counter.Counter('TotalWorlds').get_count(refresh_cache)
user_count = counter.Counter('TotalUsers').get_count(refresh_cache)
context['profile_count'] = profile_count
context['world_count'] = world_count
context['user_count'] = user_count
self.render(['index', 'index'])
def post(self):
self.redirect('/' + self.args_to_url())
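# The @framework.memoize(url, label, refresh=...) decorators used throughout
# this module come from `common.framework`, which is not shown here. A rough
# sketch of the pattern they are assumed to implement (memcache-backed,
# keyed on the sterilized URL plus a label) -- illustrative only:
#
#     def memoize(url, label, refresh=False):
#         def decorator(fn):
#             def wrapper(*args, **kwargs):
#                 key = '%s:%s' % (label, url)
#                 value = None if refresh else memcache.get(key)
#                 if value is None:
#                     value = fn(*args, **kwargs)
#                     memcache.set(key, value)
#                 return value
#             return wrapper
#         return decorator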
class PrintEnviron(webapp.RequestHandler):
def get(self):
for name in os.environ.keys():
self.response.out.write("<b>%s</b> = %s<br />\n" % (name, os.environ[name]))
class ChangeLog(framework.BaseRequestHandler):
def get(self):
self.render(['index', 'changelog'])
class EditProfile(framework.BaseRequestHandler):
def get(self, username, name):
adata = UserData.load_from_nickname(username)
author = adata.user
page_admin = self.page_admin(author)
name = urllib.unquote_plus(urllib.unquote(name))
unix_name = utils.unix_string(name)
context = self.context
context['author'] = author
context['page_author'] = adata
context['page_admin'] = page_admin
# Check to see if we already have a profile for that character.
# profile = Profile.gql('WHERE unix_name = :name AND author = :author',
# name=unix_name, author=adata).get()
profile = Profile.get_by_key_name(
stores.Profile.key_name_form % (unix_name, adata.key_name)
)
# If we can't find that character and we're the author we may want to
# make it; otherwise we should move the user back to the user page.
if not profile or (not profile.public and not page_admin):
self.flash.msg = "Unknown Profile: %s" % name
if author == self.user:
self.redirect('/create/profile/?name=%s' % name)
else:
self.redirect(adata.url)
return
context['profile'] = profile
self.render(['edit', 'editprofile'])
def post(self, username, name):
adata = UserData.load_from_nickname(username)
author = adata.user
get = self.request.get
public = get('public', 'True') == 'True'
markup = get('markup', 'Textile')
key_name = get('key_name')
profile = stores.Profile.get_by_key_name(key_name)
if not profile:
self.redirect('/create/profile/%s' % self.args_to_url())
return
if get("submit_action", "Cancel") == "Cancel":
self.flash.msg = '%s: Not Updated' % profile.name
self.redirect(profile.url)
return
# Only let admins and the author edit a profile
if not self.page_admin(author):
self.flash.msg = "Access Denied."
self.redirect(profile.url)
return
changed = []
def txn():
new_args = self.args_to_dict()
new_args.update({
'public':public,'markup':markup,
'updated':datetime.datetime.now()
})
if 'name' in new_args:
del new_args['name'] # Make it impossible to change the name.
for arg in profile.properties():
if arg not in new_args:
continue
new_arg = new_args.get(arg)
if new_arg == getattr(profile, arg):
continue
changed.append(arg)
setattr(profile, arg, new_arg)
profile.word_count = utils.word_count(
profile.apperence, profile.background, profile.extra_info
)
profile.put()
db.run_in_transaction(txn)
logging.info("User (%s) has made changes (%s) to a Profile (%s)" %
(self.user.email(), ' | '.join(changed), profile.name))
# Clear the profile from the memcache.
#Profile.unload(adata.key_name, profile.unix_name)
# Update the latest profiles list on the front page.
framework.unmemoize('/', 'profile_listing')
self.flash.msg = "%s: Updated" % profile.name
self.redirect(profile.url)
class DeleteProfile(framework.BaseRequestHandler):
def get(self, username, name):
adata = UserData.load_from_nickname(username)
author = adata.user
page_admin = self.page_admin(author)
name = urllib.unquote_plus(urllib.unquote(name))
unix_name = utils.unix_string(name)
context = self.context
context['author'] = author
context['page_author'] = adata
context['page_admin'] = page_admin
# Check to see if we already have a profile for that character.
profile = Profile.get_by_key_name(
stores.Profile.key_name_form % (unix_name, adata.key_name)
)
# If we can't find that character and we're the author we may want to
# make it; otherwise we should move the user back to the user page.
if not profile or (not profile.public and not page_admin):
self.flash.msg = "Unknown Profile: %s" % name
self.redirect(Profile.get_url(username))
return
context['profile'] = profile
self.render(['delete', 'deleteprofile'], context)
def post(self, username, name):
adata = UserData.load_from_nickname(username)
author = adata.user
choice = self.request.get('choice')
name_check = self.request.get('name_check')
profile_key = self.request.get('profile_key', '')
profile = Profile.get(profile_key)
if not profile:
self.flash.msg = "Unknown Profile"
self.redirect(adata.url)
return
# Only let admins and the author delete a profile
if not self.page_admin(self.user):
self.flash.msg = "Access Denied."
self.redirect(profile.url)
return
if name_check != profile.name or choice != 'Confirm':
self.flash.msg = "%s: Preserved" % profile.name
self.redirect(profile.url)
return
query = Comment.all()
query.filter('host =', profile)
for comment in query:
comment.delete()
for conn in profile.worldconnection_set:
conn.delete()
# Clear the profile from the memcache.
#Profile.unload(adata.key_name, profile.unix_name)
profile.delete()
c = counter.Counter('TotalProfiles')
c.increment(-1)
c = counter.Counter('%sProfiles' % profile.author.key_name, 1)
c.increment(-1)
logging.info("%s(%s) deleted %s's Profile (%s)." % (
self.user.email(), self.udata.nickname,
profile.author.user.email(), profile.name
))
framework.unmemoize('/manage/', 'profile_listing', adata.nickname)
framework.unmemoize('/', 'profile_listing')
framework.unmemoize('/discover/', 'profile_listing')
framework.unmemoize('/discover/', 'profile_feed')
framework.unmemoize(profile.author.url, 'profile_listing')
framework.unmemoize(profile.author.url, 'profile_feed')
self.flash.msg = "%s Deleted Successfully" % profile.name
self.redirect(profile.author.url)
class ViewProfile(framework.BaseRequestHandler):
def get(self, username, name):
adata = UserData.load_from_nickname(username)
author = adata.user
get = self.request.get
if name is None:
name = get('name', '')
output = get('output', '')
# If we're loading the user's public page and not a profile
if not name:
if output in ["rss", "atom"]:
self.render_feed(self.user, author, adata, output)
else:
self.render_user(self.user, author, adata)
return
name = urllib.unquote_plus(urllib.unquote(name))
unix_name = utils.unix_string(name)
page_admin = self.page_admin(author)
action = get('action')
refresh_cache = get('refresh_cache', False) is not False
sterile_url = framework.sterilize_url(self.request.url)
context = self.context
context['author'] = author
context['page_author'] = adata
context['page_admin'] = page_admin
# Check to see if we already have a profile for that character.
# profile = Profile.gql('WHERE unix_name = :name AND author = :author',
# name=unix_name, author=adata).get()
profile = stores.Profile.get_by_key_name(
stores.Profile.key_name_form % (unix_name, adata.key_name)
)
# If we can't find that character and we're the author we may want to
# make it; otherwise we should move the user back to the user page.
if not profile or (not profile.public and not page_admin):
self.flash.msg = "Unknown Profile: %s" % name
if author == self.user:
self.redirect('/create/profile/?name=%s' % name)
else:
self.redirect(adata.url)
return
# Check for actions
if action:
if action == 'edit':
self.render(['edit', 'editprofile'], locals())
elif action == 'delete':
self.render(['delete', 'deleteprofile'], locals())
return
@framework.memoize(sterile_url, 'world_listing', refresh=refresh_cache)
def __fetch_world_data():
# This bit of hackery is used to fetch the actual world objects
# as opposed to the connections, which don't fetch their references
# when used inside the HTML templates.
return [conn.world for conn in profile.worldconnection_set.fetch(5)]
def __build_comment_data():
page = self.request.get_range('comments_page', min_value=1, default=1)
items_per_page = self.request.get_range(
'comments_items', min_value=1, max_value=25, default=6
)
offset = ((page - 1) * items_per_page)
last_page = True
key = profile.key()
q = Comment.all()
q.filter('host =', key)
q.order('-created')
comments = q.fetch((items_per_page + 1), offset)
if len(comments) > items_per_page:
last_page = False
comments.pop()
@framework.memoize(sterile_url, 'comment_listing', refresh=refresh_cache)
def fetch():
return comments
return {'comments': fetch(), 'host': key, 'host_type': 'profile',
'page': page, 'last_page': last_page}
context['world_data'] = {
'worlds': __fetch_world_data(),
'list_author': True,
}
context['comment_data'] = __build_comment_data()
if refresh_cache:
memcache.delete('markup:%s' % profile.key_name)
self.render(['view', 'viewProfile'], locals())
class ViewUser(framework.BaseRequestHandler):
def get(self, username):
get = self.request.get
name = get('name')
if name:
# Redirect people to the updated URL format.
self.redirect(Profile.get_url(username, urllib.quote_plus(name)),
permanent=True)
return
adata = UserData.load_from_nickname(username)
if not adata:
self.redirect('/404/')
return
author = adata.user
page_admin = self.page_admin(author)
get = self.request.get
action = get('action')
refresh_cache = get('refresh_cache', False) is not False
sterile_url = framework.sterilize_url(self.request.url)
context = self.context
context['author'] = author
context['adata'] = adata
context['page_admin'] = page_admin
def __build_profile_data():
order = get('order', 'created').lower()
page = self.request.get_range('profiles_page', min_value=1, default=1)
# Allow page numbers to be more natural
items_per_page = self.request.get_range('profiles_items', min_value=1,
max_value=25, default=10)
offset = ((page - 1) * items_per_page)
last_page = True
# Orders the profiles most recently created first.
query = Profile.all()
query.filter('author =', adata)
if not page_admin:
query.filter('public =', True)
if order == 'alpha':
query.order('name')
else:
query.order('-created')
profiles = query.fetch((items_per_page + 1), offset)
# We fetch one more item than fits on a page; if we get it back,
# we know that there is at least one more page.
if len(profiles) > items_per_page:
last_page = False
profiles.pop()
@framework.memoize(sterile_url, 'profile_listing', refresh=refresh_cache)
def fetch():
return profiles
return {'profiles':fetch(), 'page':page, 'last_page':last_page}
@framework.memoize(sterile_url, 'world_listing', refresh=refresh_cache)
def __fetch_world_memberships():
query = WorldMember.all()
query.filter('user =', adata)
return [membership.world for membership in query.fetch(10)]
context['profile_data'] = __build_profile_data()
context['profile_data']['partial_listing'] = True
context['profile_data']['list_edit'] = True
context['profile_data']['list_pages'] = True
context['world_data'] = {
'worlds': __fetch_world_memberships(),
'list_author': True,
}
c = counter.Counter('%sProfiles' % adata.key_name, 1)
context['profile_count'] = c.get_count(refresh_cache)
c = counter.Counter('%sWorlds' % adata.key_name, 1)
context['world_count'] = c.get_count(refresh_cache)
c = counter.Counter('%sTotalWords' % adata.key_name, 1)
context['total_word_count'] = c.get_count(refresh_cache)
self.render(['view', 'viewUser'])
return
class UserFeed(framework.BaseRequestHandler):
def get(self, username, output):
adata = UserData.load_from_nickname(username)
author = adata.user
page_admin = self.page_admin(author)
get = self.request.get
refresh_cache = get('refresh_cache', False) is not False
sterile_url = framework.sterilize_url(self.request.url)
self.context['author'] = author
self.context['adata'] = adata
self.context['page_admin'] = page_admin
@framework.memoize(sterile_url, 'profile_feed', refresh=refresh_cache)
def __fetch_feed_data():
# Orders the profiles most recently created first.
q = adata.profile_set
q.order('-created')
return q.fetch(12)
self.context['profile_data'] = {'profiles': __fetch_feed_data()}
self.render(['feed', 'userprofiles'], output=output)
return
# Map URLs to our RequestHandler classes above
_URLS = [
('^/', Index),
('^/changelog/', ChangeLog),
('^/env/', PrintEnviron),
('^/([^/]+)/(rss|atom|feed)/?', UserFeed),
('^/([^/]+)/([^/]+)/edit/?', EditProfile),
('^/([^/]+)/([^/]+)/delete/?', DeleteProfile),
('^/([^/]+)/([^/]+)/?', ViewProfile),
('^/([^/]+)/?', ViewUser),
]
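# Examples of how the patterns above resolve (hypothetical user and profile
# names, for illustration only):
#
#     /                          -> Index
#     /changelog/                -> ChangeLog
#     /alice/atom/               -> UserFeed   (username='alice', output='atom')
#     /alice/Some%20Hero/edit/   -> EditProfile
#     /alice/Some%20Hero/        -> ViewProfile
#     /alice/                    -> ViewUser
#
# Order matters: the feed pattern must come before the generic ViewProfile
# pattern, or '/alice/atom/' would be treated as a profile named 'atom'.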
def main():
if not random.randint(0, 25):
framework.profile_main(_URLS)
else:
framework.real_main(_URLS)
if __name__ == '__main__':
main()
|
|
from typing import TYPE_CHECKING
from eth_utils import to_bytes, to_canonical_address, to_checksum_address, to_hex
from raiden.constants import UINT256_MAX
from raiden.transfer.architecture import (
ContractSendEvent,
ContractSendExpirableEvent,
Event,
SendMessageEvent,
)
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.utils import pex, serialization, sha3
from raiden.utils.serialization import deserialize_bytes, serialize_bytes
from raiden.utils.typing import (
Address,
Any,
BlockExpiration,
BlockHash,
ChannelID,
Dict,
InitiatorAddress,
MessageID,
Optional,
PaymentAmount,
PaymentID,
PaymentNetworkID,
Secret,
SecretHash,
T_Secret,
TargetAddress,
TokenAmount,
TokenNetworkAddress,
TokenNetworkID,
)
if TYPE_CHECKING:
# pylint: disable=unused-import
from raiden.transfer.state import BalanceProofSignedState
# pylint: disable=too-many-arguments,too-few-public-methods
class ContractSendChannelClose(ContractSendEvent):
""" Event emitted to close the netting channel.
This event is used when a node needs to prepare the channel to unlock
on-chain.
"""
def __init__(
self,
canonical_identifier: CanonicalIdentifier,
balance_proof: Optional["BalanceProofSignedState"],
triggered_by_block_hash: BlockHash,
) -> None:
super().__init__(triggered_by_block_hash)
self.canonical_identifier = canonical_identifier
self.balance_proof = balance_proof
def __repr__(self) -> str:
return (
"<ContractSendChannelClose channel:{} token:{} token_network:{} "
"balance_proof:{} triggered_by_block_hash:{}>"
).format(
self.canonical_identifier.channel_identifier,
pex(self.canonical_identifier.token_network_address),
self.balance_proof,
pex(self.triggered_by_block_hash),
)
def __eq__(self, other: Any) -> bool:
return (
super().__eq__(other)
and isinstance(other, ContractSendChannelClose)
and self.canonical_identifier == other.canonical_identifier
and self.balance_proof == other.balance_proof
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
@property
def token_network_identifier(self) -> TokenNetworkID:
return TokenNetworkID(self.canonical_identifier.token_network_address)
@property
def channel_identifier(self) -> ChannelID:
return self.canonical_identifier.channel_identifier
def to_dict(self) -> Dict[str, Any]:
result = {
"canonical_identifier": self.canonical_identifier.to_dict(),
"balance_proof": self.balance_proof,
"triggered_by_block_hash": serialize_bytes(self.triggered_by_block_hash),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ContractSendChannelClose":
restored = cls(
canonical_identifier=CanonicalIdentifier.from_dict(data["canonical_identifier"]),
balance_proof=data["balance_proof"],
triggered_by_block_hash=BlockHash(deserialize_bytes(data["triggered_by_block_hash"])),
)
return restored
class ContractSendChannelSettle(ContractSendEvent):
""" Event emitted if the netting channel must be settled. """
def __init__(
self, canonical_identifier: CanonicalIdentifier, triggered_by_block_hash: BlockHash
):
super().__init__(triggered_by_block_hash)
canonical_identifier.validate()
self.canonical_identifier = canonical_identifier
@property
def token_network_identifier(self) -> TokenNetworkAddress:
return TokenNetworkAddress(self.canonical_identifier.token_network_address)
@property
def channel_identifier(self) -> ChannelID:
return self.canonical_identifier.channel_identifier
def __repr__(self) -> str:
return (
"<ContractSendChannelSettle channel:{} token_network:{} "
"triggered_by_block_hash:{}>".format(
self.channel_identifier,
pex(self.token_network_identifier),
pex(self.triggered_by_block_hash),
)
)
def __eq__(self, other: Any) -> bool:
return (
super().__eq__(other)
and isinstance(other, ContractSendChannelSettle)
and self.canonical_identifier == other.canonical_identifier
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"canonical_identifier": self.canonical_identifier.to_dict(),
"triggered_by_block_hash": serialize_bytes(self.triggered_by_block_hash),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ContractSendChannelSettle":
restored = cls(
canonical_identifier=CanonicalIdentifier.from_dict(data["canonical_identifier"]),
triggered_by_block_hash=BlockHash(deserialize_bytes(data["triggered_by_block_hash"])),
)
return restored
class ContractSendChannelUpdateTransfer(ContractSendExpirableEvent):
""" Event emitted if the netting channel balance proof must be updated. """
def __init__(
self,
expiration: BlockExpiration,
balance_proof: "BalanceProofSignedState",
triggered_by_block_hash: BlockHash,
) -> None:
super().__init__(triggered_by_block_hash, expiration)
self.balance_proof = balance_proof
@property
def token_network_identifier(self) -> TokenNetworkAddress:
return TokenNetworkAddress(self.balance_proof.canonical_identifier.token_network_address)
@property
def channel_identifier(self) -> ChannelID:
return self.balance_proof.channel_identifier
def __repr__(self) -> str:
return (
"<ContractSendChannelUpdateTransfer channel:{} token_network:{} "
"balance_proof:{} triggered_by_block_hash:{}>"
).format(
self.channel_identifier,
pex(self.token_network_identifier),
self.balance_proof,
pex(self.triggered_by_block_hash),
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, ContractSendChannelUpdateTransfer)
and self.balance_proof == other.balance_proof
and super().__eq__(other)
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"expiration": str(self.expiration),
"balance_proof": self.balance_proof,
"triggered_by_block_hash": serialize_bytes(self.triggered_by_block_hash),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ContractSendChannelUpdateTransfer":
restored = cls(
expiration=BlockExpiration(int(data["expiration"])),
balance_proof=data["balance_proof"],
triggered_by_block_hash=BlockHash(deserialize_bytes(data["triggered_by_block_hash"])),
)
return restored
class ContractSendChannelBatchUnlock(ContractSendEvent):
""" Event emitted when the lock must be claimed on-chain. """
def __init__(
self,
canonical_identifier: CanonicalIdentifier,
participant: Address,
triggered_by_block_hash: BlockHash,
) -> None:
super().__init__(triggered_by_block_hash)
self.canonical_identifier = canonical_identifier
self.participant = participant
@property
def token_network_identifier(self) -> TokenNetworkAddress:
return TokenNetworkAddress(self.canonical_identifier.token_network_address)
@property
def channel_identifier(self) -> ChannelID:
return self.canonical_identifier.channel_identifier
def __repr__(self) -> str:
return (
"<ContractSendChannelBatchUnlock token_network_id:{} "
"channel:{} participant:{} triggered_by_block_hash:{}"
">"
).format(
pex(self.token_network_identifier),
self.channel_identifier,
pex(self.participant),
pex(self.triggered_by_block_hash),
)
def __eq__(self, other: Any) -> bool:
return (
super().__eq__(other)
and isinstance(other, ContractSendChannelBatchUnlock)
and self.canonical_identifier == other.canonical_identifier
and self.participant == other.participant
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"canonical_identifier": self.canonical_identifier.to_dict(),
"participant": to_checksum_address(self.participant),
"triggered_by_block_hash": serialize_bytes(self.triggered_by_block_hash),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ContractSendChannelBatchUnlock":
restored = cls(
canonical_identifier=CanonicalIdentifier.from_dict(data["canonical_identifier"]),
participant=to_canonical_address(data["participant"]),
triggered_by_block_hash=BlockHash(deserialize_bytes(data["triggered_by_block_hash"])),
)
return restored
class ContractSendSecretReveal(ContractSendExpirableEvent):
""" Event emitted when the lock must be claimed on-chain. """
def __init__(
self, expiration: BlockExpiration, secret: Secret, triggered_by_block_hash: BlockHash
) -> None:
if not isinstance(secret, T_Secret):
raise ValueError("secret must be a Secret instance")
super().__init__(triggered_by_block_hash, expiration)
self.secret = secret
def __repr__(self) -> str:
secrethash: SecretHash = SecretHash(sha3(self.secret))
return ("<ContractSendSecretReveal secrethash:{} triggered_by_block_hash:{}>").format(
secrethash, pex(self.triggered_by_block_hash)
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, ContractSendSecretReveal)
and self.secret == other.secret
and super().__eq__(other)
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"expiration": str(self.expiration),
"secret": serialization.serialize_bytes(self.secret),
"triggered_by_block_hash": serialize_bytes(self.triggered_by_block_hash),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "ContractSendSecretReveal":
restored = cls(
expiration=BlockExpiration(int(data["expiration"])),
secret=Secret(serialization.deserialize_bytes(data["secret"])),
triggered_by_block_hash=BlockHash(deserialize_bytes(data["triggered_by_block_hash"])),
)
return restored
class EventPaymentSentSuccess(Event):
""" Event emitted by the initiator when a transfer is considered successful.
A transfer is considered successful when the initiator's payee hop sends the
reveal secret message, assuming that each hop in the mediator chain has
also learned the secret and unlocked its token off-chain or on-chain.
This definition of successful is used to avoid the following corner case:
- The reveal secret message is sent, since the network is unreliable and we
assume byzantine behavior the message is considered delivered without an
acknowledgement.
- The transfer is considered successful because of the above.
- The reveal secret message was not delivered because of actual network
problems.
- The lock expires and an EventUnlockFailed follows, contradicting the
EventPaymentSentSuccess.
Note:
Mediators cannot use this event, since an off-chain unlock may be locally
successful but there is no knowledge about the global transfer.
"""
def __init__(
self,
payment_network_identifier: PaymentNetworkID,
token_network_identifier: TokenNetworkID,
identifier: PaymentID,
amount: PaymentAmount,
target: TargetAddress,
secret: Optional[Secret] = None,
) -> None:
self.payment_network_identifier = payment_network_identifier
self.token_network_identifier = token_network_identifier
self.identifier = identifier
self.amount = amount
self.target = target
self.secret = secret
def __repr__(self) -> str:
return (
"<"
"EventPaymentSentSuccess payment_network_identifier:{} "
"token_network_identifier:{} "
"identifier:{} amount:{} "
"target:{} secret:{} "
">"
).format(
pex(self.payment_network_identifier),
pex(self.token_network_identifier),
self.identifier,
self.amount,
pex(self.target),
to_hex(self.secret),
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventPaymentSentSuccess)
and self.identifier == other.identifier
and self.amount == other.amount
and self.target == other.target
and self.payment_network_identifier == other.payment_network_identifier
and self.token_network_identifier == other.token_network_identifier
and self.secret == other.secret
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"payment_network_identifier": to_checksum_address(self.payment_network_identifier),
"token_network_identifier": to_checksum_address(self.token_network_identifier),
"identifier": str(self.identifier),
"amount": str(self.amount),
"target": to_checksum_address(self.target),
}
if self.secret is not None:
result["secret"] = to_hex(self.secret)
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventPaymentSentSuccess":
if "secret" in data:
secret = to_bytes(hexstr=data["secret"])
else:
secret = None
restored = cls(
payment_network_identifier=to_canonical_address(data["payment_network_identifier"]),
token_network_identifier=to_canonical_address(data["token_network_identifier"]),
identifier=PaymentID(int(data["identifier"])),
amount=PaymentAmount(int(data["amount"])),
target=to_canonical_address(data["target"]),
secret=secret,
)
return restored
class EventPaymentSentFailed(Event):
""" Event emitted by the payer when a transfer has failed.
Note:
Mediators cannot use this event since they don't know when a transfer
has failed; they can only infer lock successes and failures.
"""
def __init__(
self,
payment_network_identifier: PaymentNetworkID,
token_network_identifier: TokenNetworkID,
identifier: PaymentID,
target: TargetAddress,
reason: str,
) -> None:
self.payment_network_identifier = payment_network_identifier
self.token_network_identifier = token_network_identifier
self.identifier = identifier
self.target = target
self.reason = reason
def __repr__(self) -> str:
return (
"<"
"EventPaymentSentFailed payment_network_identifier:{} "
"token_network_identifier:{} "
"id:{} target:{} reason:{} "
">"
).format(
pex(self.payment_network_identifier),
pex(self.token_network_identifier),
self.identifier,
pex(self.target),
self.reason,
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventPaymentSentFailed)
and self.payment_network_identifier == other.payment_network_identifier
and self.token_network_identifier == other.token_network_identifier
and self.identifier == other.identifier
and self.target == other.target
and self.reason == other.reason
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"payment_network_identifier": to_checksum_address(self.payment_network_identifier),
"token_network_identifier": to_checksum_address(self.token_network_identifier),
"identifier": str(self.identifier),
"target": to_checksum_address(self.target),
"reason": self.reason,
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventPaymentSentFailed":
restored = cls(
payment_network_identifier=to_canonical_address(data["payment_network_identifier"]),
token_network_identifier=to_canonical_address(data["token_network_identifier"]),
identifier=PaymentID(int(data["identifier"])),
target=to_canonical_address(data["target"]),
reason=data["reason"],
)
return restored
class EventPaymentReceivedSuccess(Event):
""" Event emitted when a payee has received a payment.
Note:
A payee knows if a lock claim has failed, but this is not sufficient
information to deduce when a transfer has failed, because the initiator may
try again at a different time and/or with different routes; for this reason
there is no corresponding `EventTransferReceivedFailed`.
"""
def __init__(
self,
payment_network_identifier: PaymentNetworkID,
token_network_identifier: TokenNetworkID,
identifier: PaymentID,
amount: TokenAmount,
initiator: InitiatorAddress,
) -> None:
if amount < 0:
raise ValueError("transferred_amount cannot be negative")
if amount > UINT256_MAX:
raise ValueError("transferred_amount is too large")
self.identifier = identifier
self.amount = amount
self.initiator = initiator
self.payment_network_identifier = payment_network_identifier
self.token_network_identifier = token_network_identifier
def __repr__(self) -> str:
return (
"<"
"EventPaymentReceivedSuccess payment_network_identifier:{} "
"token_network_identifier:{} identifier:{} "
"amount:{} initiator:{} "
">"
).format(
pex(self.payment_network_identifier),
pex(self.token_network_identifier),
self.identifier,
self.amount,
pex(self.initiator),
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventPaymentReceivedSuccess)
and self.identifier == other.identifier
and self.amount == other.amount
and self.initiator == other.initiator
and self.payment_network_identifier == other.payment_network_identifier
and self.token_network_identifier == other.token_network_identifier
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"payment_network_identifier": to_checksum_address(self.payment_network_identifier),
"token_network_identifier": to_checksum_address(self.token_network_identifier),
"identifier": str(self.identifier),
"amount": str(self.amount),
"initiator": to_checksum_address(self.initiator),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventPaymentReceivedSuccess":
restored = cls(
payment_network_identifier=to_canonical_address(data["payment_network_identifier"]),
token_network_identifier=to_canonical_address(data["token_network_identifier"]),
identifier=PaymentID(int(data["identifier"])),
amount=TokenAmount(int(data["amount"])),
initiator=to_canonical_address(data["initiator"]),
)
return restored
class EventInvalidReceivedTransferRefund(Event):
""" Event emitted when an invalid refund transfer is received. """
def __init__(self, payment_identifier: PaymentID, reason: str) -> None:
self.payment_identifier = payment_identifier
self.reason = reason
def __repr__(self) -> str:
return (
f"<"
f"EventInvalidReceivedTransferRefund "
f"payment_identifier:{self.payment_identifier} "
f"reason:{self.reason}"
f">"
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventInvalidReceivedTransferRefund)
and self.payment_identifier == other.payment_identifier
and self.reason == other.reason
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {"payment_identifier": str(self.payment_identifier), "reason": self.reason}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventInvalidReceivedTransferRefund":
restored = cls(
payment_identifier=PaymentID(int(data["payment_identifier"])), reason=data["reason"]
)
return restored
class EventInvalidReceivedLockExpired(Event):
""" Event emitted when an invalid lock expired message is received. """
def __init__(self, secrethash: SecretHash, reason: str) -> None:
self.secrethash = secrethash
self.reason = reason
def __repr__(self) -> str:
return (
f"<"
f"EventInvalidReceivedLockExpired "
f"secrethash:{pex(self.secrethash)} "
f"reason:{self.reason}"
f">"
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventInvalidReceivedLockExpired)
and self.secrethash == other.secrethash
and self.reason == other.reason
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"secrethash": serialization.serialize_bytes(self.secrethash),
"reason": self.reason,
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventInvalidReceivedLockExpired":
restored = cls(
secrethash=serialization.deserialize_secret_hash(data["secrethash"]),
reason=data["reason"],
)
return restored
class EventInvalidReceivedLockedTransfer(Event):
""" Event emitted when an invalid locked transfer is received. """
def __init__(self, payment_identifier: PaymentID, reason: str) -> None:
self.payment_identifier = payment_identifier
self.reason = reason
def __repr__(self) -> str:
return (
f"<"
f"EventInvalidReceivedLockedTransfer "
f"payment_identifier:{self.payment_identifier} "
f"reason:{self.reason}"
f">"
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventInvalidReceivedLockedTransfer)
and self.payment_identifier == other.payment_identifier
and self.reason == other.reason
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {"payment_identifier": str(self.payment_identifier), "reason": self.reason}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventInvalidReceivedLockedTransfer":
restored = cls(
payment_identifier=PaymentID(int(data["payment_identifier"])), reason=data["reason"]
)
return restored
class EventInvalidReceivedUnlock(Event):
""" Event emitted when an invalid unlock message is received. """
def __init__(self, secrethash: SecretHash, reason: str) -> None:
self.secrethash = secrethash
self.reason = reason
def __repr__(self) -> str:
return (
f"<"
f"EventInvalidReceivedUnlock "
f"secrethash:{pex(self.secrethash)} "
f"reason:{self.reason}"
f">"
)
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, EventInvalidReceivedUnlock)
and self.secrethash == other.secrethash
and self.reason == other.reason
)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"secrethash": serialization.serialize_bytes(self.secrethash),
"reason": self.reason,
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "EventInvalidReceivedUnlock":
restored = cls(
secrethash=serialization.deserialize_secret_hash(data["secrethash"]),
reason=data["reason"],
)
return restored
class SendProcessed(SendMessageEvent):
def __repr__(self) -> str:
return ("<SendProcessed confirmed_msgid:{} recipient:{}>").format(
self.message_identifier, pex(self.recipient)
)
def __eq__(self, other: Any) -> bool:
return isinstance(other, SendProcessed) and super().__eq__(other)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def to_dict(self) -> Dict[str, Any]:
result = {
"recipient": to_checksum_address(self.recipient),
"channel_identifier": str(self.queue_identifier.channel_identifier),
"message_identifier": str(self.message_identifier),
}
return result
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "SendProcessed":
restored = cls(
recipient=to_canonical_address(data["recipient"]),
channel_identifier=ChannelID(int(data["channel_identifier"])),
message_identifier=MessageID(int(data["message_identifier"])),
)
return restored
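# All event classes above follow the same serialization contract: to_dict()
# produces a JSON-friendly dict and from_dict() restores an equal instance.
# A minimal illustration using SendProcessed (the address and identifiers are
# made-up values):
#
#     event = SendProcessed(
#         recipient=Address(b'\x11' * 20),
#         channel_identifier=ChannelID(1),
#         message_identifier=MessageID(42),
#     )
#     assert SendProcessed.from_dict(event.to_dict()) == event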
|
|
#
# This file is part of GreatFET
#
from enum import IntEnum
from warnings import warn
from ..interface import GreatFETInterface
# TODOs:
# - XXX: Overhaul the GPIO(Collection) class to be more efficient
# - More cleanup to use the GPIOPin model.
# - Support ranges of pins from the same port (GPIOPort objects?)
# - Implement a release function so if e.g. an I2C device is no longer in use
# it releases its pins back to the GPIO pool.
class Directions(IntEnum):
IN = 0
OUT = 1
# Legacy convenience constants.
DIRECTION_IN = Directions.IN
DIRECTION_OUT = Directions.OUT
class GPIOProvider(GreatFETInterface):
""" Base class for an object that provides access to GPIO pins. """
# For convenience.
DIRECTION_IN = Directions.IN
DIRECTION_OUT = Directions.OUT
# If the subclass has a fixed set of pins, it can override this mapping to
# specify the fixed pin names to be automatically registered.
FIXED_GPIO_PINS = {}
# If the subclass doesn't want to allow external sources to register GPIO pins, it can set this to False.
ALLOW_EXTERNAL_REGISTRATION = True
def __init__(self, name_mappings=None):
""" Sets up the basic fields for a GPIOProvider.
Parameters:
name_mappings -- Allows callers to rename the local / fixed GPIO pin names.
Optional; accepts a dictionary mapping their fixed names to their new names, or
to None to remove the relevant pin from the list of available pins.
This allows instantiators to give a given GPIO collection more specific names, or
to hide them from general API display/usage.
"""
if name_mappings is None:
name_mappings = {}
# Set up our basic tracking parameters, which track which GPIO pins
# are available and in use.
self.pin_mappings = {}
self.active_gpio = {}
self.available_pins = []
# Add all of our fixed pins as acceptable GPIO.
for name, line in self.FIXED_GPIO_PINS.items():
# If we've been asked to rename the given pin, register it under
# the new name, rather than under the provided name,
if name in name_mappings:
name = name_mappings[name]
# If our name field winds up remapping to 'None', the instantiator
# is trying to hide the relevant pin. Skip registering it.
if name is None:
continue
# Register each fixed GPIO.
self.__register_gpio(name, line)
def register_gpio(self, name, line, used=False):
"""
Registers a GPIO pin for later use. Usually only used in board setup.
Args:
name -- The name for the GPIO, usually expressed as a position on
a GreatFET header.
line -- An abstract argument passed to subclass methods that serves
to identify the pin. Subclasses often use this to store e.g. port and pin
numbers.
used -- If True, register the pin as already claimed by another peripheral.
"""
# If this class doesn't allow pin registration, raise an error.
if not self.ALLOW_EXTERNAL_REGISTRATION:
raise NotImplementedError("This GPIO collection does not allow registration of new pins.")
# Otherwise, delegate to our internal registration method.
self.__register_gpio(name, line, used)
def __register_gpio(self, name, line, used=False):
"""
Registers a GPIO pin for later use. Usually only used in board setup.
Args:
name -- The name for the GPIO, usually expressed as a position on
a GreatFET header.
line -- An abstract argument passed to subclass methods that serves
to identify the pin. Subclasses often use this to store e.g. port and pin
numbers.
"""
# Store the full name in our pin mappings.
self.pin_mappings[name] = line
if not used:
self.mark_pin_as_unused(name)
def mark_pin_as_used(self, name):
""" Marks a pin as used by another peripheral. """
if name not in self.pin_mappings:
raise ValueError("Unknown GPIO pin {}".format(name))
self.available_pins.remove(name)
def mark_pin_as_unused(self, name):
""" Mark a pin as no longer used by another peripheral. """
if name not in self.pin_mappings:
raise ValueError("Unknown GPIO pin {}".format(name))
if name not in self.available_pins:
self.available_pins.append(name)
def get_available_pins(self, include_active=True):
""" Returns a list of available GPIO names. """
available = self.available_pins[:]
if include_active:
    available.extend(self.active_gpio.keys())
return available
def get_pin(self, name, unique=False):
"""
Returns a GPIOPin object by which a given pin can be controlled.
Args:
name -- The GPIO name to be used.
unique -- True if this should fail if a GPIO object for this pin
already exists.
"""
# If we already have an active GPIO pin for the relevant name, return it.
if name in self.active_gpio and not unique:
return self.active_gpio[name]
# If the pin's available for GPIO use, grab it.
if name in self.available_pins:
port = self.pin_mappings[name]
self.active_gpio[name] = GPIOPin(self, name, port)
self.mark_pin_as_used(name)
return self.active_gpio[name]
# If we couldn't create the GPIO pin, fail out.
raise ValueError("No available GPIO pin {}".format(name))
def get_port(self, *pin_names):
""" Creates a GPIOPort object that can set multiple pins to a binary value.
Arguments are a list of pin names to conglomerate into a port, MSB first. This may result in a GPIOPort
object, or in a derivative class such as a VirtualGPIOPort, depending on the pin locations.
"""
pins = []
# Convert each of the header pin names to a GPIOPin object.
for name in pin_names:
pins.append(self.get_pin(name))
# FIXME: apply an optimization for when each pin is on the same logical port:
return VirtualGPIOPort(pins)
def release_pin(self, gpio_pin):
"""
Releases a GPIO pin back to the system for re-use, potentially
not as a GPIO.
"""
if gpio_pin.name not in self.active_gpio:
raise ValueError("Trying to release a pin we don't own!")
# Mark the pin as an input, placing it into High-Z mode.
# TODO: Disable any pull-ups present on the pin.
gpio_pin.set_direction(DIRECTION_IN)
# Remove the GPIO pin from our active array, and add it back to the
# available pool.
del self.active_gpio[gpio_pin.name]
self.mark_pin_as_unused(gpio_pin.name)
def set_up_pin(self, line, direction, initial_value=False):
"""
Configure a GPIO line for use as an input or output. This must be
called before the line can be used by other functions.
Parameters:
line -- A unique identifier for the given pin that has meaning to the subclass.
direction -- Directions.IN (input) or Directions.OUT (output)
"""
pass
def set_pin_state(self, line, state):
"""
Set the state of an output line. The line must have previously been
        configured as an output using set_up_pin().
Parameters:
line -- A unique identifier for the given pin that has meaning to the subclass.
state -- True sets line high, False sets line low
"""
pass
def read_pin_state(self, line):
"""
Get the state of an input line. The line must have previously been
        configured as an input using set_up_pin().
Args:
line -- A unique identifier for the given pin that has meaning to the subclass.
Return:
bool -- True if line is high, False if line is low
"""
pass
def get_pin_direction(self, line):
"""
Gets the direction of a GPIO pin.
Args:
line -- A unique identifier for the given pin that has meaning to the subclass.
Return:
bool -- True if line is an output, False if line is an input
"""
pass
def get_pin_port(self, line):
""" Returns the 'port number' for a given GPIO pin.
For providers for which 'port' isn't a valid semantic concept, this should return
the same identifier for every pin that can be logically written in a single operation.
"""
pass
def get_pin_identifier(self, line):
""" Returns the 'pin number' for a given GPIO pin.
This number is typically the 'bit number' in a larger, organized port. For providers
in which this isn't a valid semantic concept, any convenient semantic identifier (or None)
is acceptable.
"""
pass
class GPIO(GPIOProvider):
""" Work with the GPIO directly present on the GreatFET board. """
def __init__(self, board):
"""
Args:
board -- GreatFET board whose GPIO lines are to be controlled
"""
# Set up our basic fields...
super(GPIO, self).__init__()
        # ... and store information about our low-level connection.
self.board = board
self.api = self.board.apis.gpio
# TODO: provide functionality to restore GPIO state on reconnect?
def set_up_pin(self, line, direction, initial_value=False):
"""
Configure a GPIO line for use as an input or output. This must be
called before the line can be used by other functions.
Args:
line -- (port, pin); typically a tuple from J1, J2, J7 below
direction -- Directions.IN (input) or Directions.OUT (output)
TODO: allow pull-up/pull-down resistors to be configured for inputs
"""
self.api.set_up_pin(line[0], line[1], direction, initial_value)
def set_pin_state(self, line, state):
"""
Set the state of an output line. The line must have previously been
        configured as an output using set_up_pin().
Args:
line -- (port, pin); typically a tuple from J1, J2, J7 below
state -- True sets line high, False sets line low
"""
# TODO: validate GPIO direction?
single_write = (line[0], line[1], state,)
self.api.write_pins(single_write)
def read_pin_state(self, line):
"""
Get the state of an input line. The line must have previously been
        configured as an input using set_up_pin().
Args:
line -- (port, pin); typically a tuple from J1, J2, J7 below
Return:
bool -- True if line is high, False if line is low
"""
values = self.api.read_pins(line)
return values[0]
def get_pin_direction(self, line):
"""
Gets the direction of a GPIO pin.
Args:
line -- (port, pin); typically a tuple from J1, J2, J7 below
Return:
bool -- True if line is an output, False if line is an input
"""
directions = self.api.get_pin_directions(line)
return directions[0]
def get_pin_port(self, line):
""" Returns the 'port number' for a given GPIO pin."""
return line[0]
def get_pin_identifier(self, line):
""" Returns the 'pin number' for a given GPIO pin. """
return line[1]
#
# Deprecated methods.
#
def output(self, line, state):
warn("GPIO.output is deprecated; prefer set_pin_state.", DeprecationWarning)
self.set_pin_state(line, state)
def input(self, line):
warn("GPIO.input is deprecated; prefer read_pin_state.", DeprecationWarning)
return self.read_pin_state(line)
def setup(self, line, direction):
warn("GPIO.setup is deprecated; prefer set_up_pin.", DeprecationWarning)
self.set_up_pin(line, direction)
class GPIOPin(object):
"""
Class representing a single GPIO pin.
"""
def __init__(self, gpio_provider, name, line):
"""
Creates a new object representing a GPIO Pin. Usually instantiated via
a GPIO object.
Args:
gpio_provider -- The GPIO object to which this pin belongs.
name -- The name of the given pin. Should match a name registered
in its GPIO collection.
line -- The pin's 'line' information, as defined by the object that created
                this GPIO pin. This variable has semantic meaning to the GPIO collection,
                but doesn't have any semantic meaning to this class.
"""
self.name = name
self._parent = gpio_provider
self._line = line
# For convenience:
self.DIRECTION_IN = Directions.IN
self.DIRECTION_OUT = Directions.OUT
# Set up the pin for use. Idempotent.
self._parent.set_up_pin(self._line, self.get_direction(), self.read())
def set_direction(self, direction, initial_value=False):
"""
Sets the GPIO pin to use a given direction.
"""
self._parent.set_up_pin(self._line, direction, initial_value)
def get_direction(self):
""" Returns the pin's direction; will be either Directions.IN or Directions.OUT """
return self._parent.get_pin_direction(self._line)
def is_input(self):
""" Convenience function that returns True iff the pin is configured as an input. """
return (self.get_direction() == self.DIRECTION_IN)
def is_output(self):
""" Convenience function that returns True iff the pin is configured as an output. """
return (self.get_direction() == self.DIRECTION_OUT)
def read(self, high_value=True, low_value=False, check_pin_direction=False, set_pin_direction=False):
""" Convenience alias for get_state."""
return self.get_state(high_value, low_value, check_pin_direction, set_pin_direction)
def input(self, high_value=True, low_value=False):
""" Convenience function that sets the pin to an input and reads its value. """
return self.read(set_pin_direction=True, high_value=high_value, low_value=low_value)
def get_state(self, high_value=True, low_value=False, check_pin_direction=False, set_pin_direction=False):
""" Returns the value of a GPIO pin. """
# If we're setting the pin direction while we're getting the state, set it.
if set_pin_direction:
self.set_direction(self.DIRECTION_IN)
# Otherwise, enforce the direction, if desired.
elif check_pin_direction and not self.is_input():
raise ValueError("Trying to read from a non-input pin {}! Set up the pin first with set_direction.".format(self.name))
# Finally, read the pin's state.
raw = self._parent.read_pin_state(self._line)
return high_value if raw else low_value
def write(self, high, check_direction=False):
""" Convenience alias for set_state."""
self.set_state(high, check_direction)
def set_state(self, high, check_direction=True):
""" Write a given value to the GPIO port.
Args:
high -- True iff the pin should be set to high; the pin will be set
to low otherwise.
"""
if check_direction and not self.is_output():
raise ValueError("Trying to write to a non-output pin {}! Set up the pin first with set_direction.".format(self.name))
self._parent.set_pin_state(self._line, high)
def high(self):
""" Convenience function that sets the given GPIO pin to both output mode and high, at once. """
        # Note that we can't rely on set_direction's initial_value to set the actual
        # pin value, as some GPIOProviders may not support that.
self.set_direction(self.DIRECTION_OUT, True)
self.write(True)
def low(self):
""" Convenience function that sets the given GPIO pin to both output mode and low, at once. """
        # Note that we can't rely on set_direction's initial_value to set the actual
        # pin value, as some GPIOProviders may not support that.
self.set_direction(self.DIRECTION_OUT, False)
self.write(False)
def get_port(self):
""" Returns device's port number, if possible. """
return self._parent.get_pin_port(self._line)
def get_pin(self):
""" Returns pin's pin number within its port, if possible. """
return self._parent.get_pin_identifier(self._line)
# TODO: Toggle-- we have the hardware for this :)
# TODO: handle pulldowns/pull-ups, etc.
class VirtualGPIOPort(object):
""" An object that represents a "virtually contiguous" group of GPIO pins. """
def __init__(self, *pin_arguments):
""" Creates a virtual GPIO Port from GPIOPin-compatible objects.
pins -- A list of pins to be coalesced into a virtual port;
with the MSB first. For convenience, pins (or lists) can
also be provided as variadic arguments. Pins should already
have their directions / resistors set.
"""
pins = []
# Take each of our passed in pins/objects, and add them to our ordered list.
for pin in pin_arguments:
if isinstance(pin, list):
pins.extend(pin)
else:
pins.append(pin)
# Reverse the order of our list, so element 0 corresponds to bit zero.
self.pins = pins[::-1]
def set_direction(self, word, initial_value=0):
""" Sets the direction of each individual pin.
Parameters:
word -- A number whose bits contain 1s for each bit that should be an output,
and zeroes for each bit that should be an input.
"""
        for bit, pin in enumerate(self.pins):
            direction = DIRECTION_OUT if (word & (1 << bit)) else DIRECTION_IN
            pin_initial_value = bool(initial_value & (1 << bit))
            pin.set_direction(direction, initial_value=pin_initial_value)
def all_output(self, initial_value=False):
""" Sets all of the pins in this port to output mode.
Parameters:
initial_value -- Optional; the start value to apply to each pin.
"""
for pin in self.pins:
pin.set_direction(DIRECTION_OUT, initial_value)
def all_input(self):
""" Sets all of the pins in this port to output mode. """
for pin in self.pins:
pin.set_direction(DIRECTION_IN)
def read(self):
""" Returns the integer value of the relevant port. """
value = 0
# Iterate over each of the contained pins, and add it to our value.
for bit, pin in enumerate(self.pins):
# If this pin reads as true, add it to our composite.
if pin.read():
value |= (1 << bit)
return value
def write(self, value):
""" Writes a given integer value to the port. """
for bit, pin in enumerate(self.pins):
new_value = bool(value & (1 << bit))
pin.write(new_value)
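
# ---------------------------------------------------------------------------
# Usage sketch (not part of the library): a minimal example of the pin/port
# API above. It assumes the host-side `greatfet` package is installed, a
# board is attached, and that 'J1_P4' / 'J1_P6' are valid names in the
# board's pin mappings -- adjust both to match your hardware.
if __name__ == '__main__':
    from greatfet import GreatFET

    gf = GreatFET()

    # Drive a single pin: high() forces output mode and drives the pin to 1.
    led = gf.gpio.get_pin('J1_P4')
    led.high()

    # Read a single pin: input() forces input mode and returns its level.
    level = gf.gpio.get_pin('J1_P6').input()
    print("J1_P6 reads {}".format(level))

    # Group pins into a virtual port (MSB first) and write a value to it.
    port = gf.gpio.get_port('J1_P4', 'J1_P6')
    port.all_output()
    port.write(0b10)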
|
|
# This file includes the definition of a mix-in class that provides the low-
# and high-level WCS API to the astropy.wcs.WCS object. We keep this code
# isolated in this mix-in class to avoid making the main wcs.py file too
# long.
import warnings
import numpy as np
from astropy import units as u
from .low_level_api import BaseLowLevelWCS
from .high_level_api import HighLevelWCSMixin
from .sliced_low_level_wcs import SlicedLowLevelWCS
__all__ = ['custom_ctype_to_ucd_mapping', 'SlicedFITSWCS', 'FITSWCSAPIMixin']
# Mapping from CTYPE axis name to UCD1
CTYPE_TO_UCD1 = {
# Celestial coordinates
'RA': 'pos.eq.ra',
'DEC': 'pos.eq.dec',
'GLON': 'pos.galactic.lon',
'GLAT': 'pos.galactic.lat',
'ELON': 'pos.ecliptic.lon',
'ELAT': 'pos.ecliptic.lat',
'TLON': 'pos.bodyrc.lon',
'TLAT': 'pos.bodyrc.lat',
'HPLT': 'custom:pos.helioprojective.lat',
'HPLN': 'custom:pos.helioprojective.lon',
'HGLN': 'custom:pos.heliographic.stonyhurst.lon',
'HGLT': 'custom:pos.heliographic.stonyhurst.lat',
'CRLN': 'custom:pos.heliographic.carrington.lon',
'CRLT': 'custom:pos.heliographic.carrington.lat',
# Spectral coordinates (WCS paper 3)
'FREQ': 'em.freq', # Frequency
'ENER': 'em.energy', # Energy
'WAVN': 'em.wavenumber', # Wavenumber
'WAVE': 'em.wl', # Vacuum wavelength
'VRAD': 'spect.dopplerVeloc.radio', # Radio velocity
'VOPT': 'spect.dopplerVeloc.opt', # Optical velocity
'ZOPT': 'src.redshift', # Redshift
'AWAV': 'em.wl', # Air wavelength
'VELO': 'spect.dopplerVeloc', # Apparent radial velocity
'BETA': 'custom:spect.doplerVeloc.beta', # Beta factor (v/c)
# Time coordinates (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
'TIME': 'time',
'TAI': 'time',
'TT': 'time',
'TDT': 'time',
'ET': 'time',
'IAT': 'time',
'UT1': 'time',
'UTC': 'time',
'GMT': 'time',
'GPS': 'time',
'TCG': 'time',
'TCB': 'time',
'TDB': 'time',
'LOCAL': 'time'
# UT() and TT() are handled separately in world_axis_physical_types
}
# Keep a list of additional custom mappings that have been registered. This
# is kept as a list in case nested context managers are used
CTYPE_TO_UCD1_CUSTOM = []
class custom_ctype_to_ucd_mapping:
"""
A context manager that makes it possible to temporarily add new CTYPE to
UCD1+ mapping used by :attr:`FITSWCSAPIMixin.world_axis_physical_types`.
Parameters
----------
mapping : dict
A dictionary mapping a CTYPE value to a UCD1+ value
Examples
--------
Consider a WCS with the following CTYPE::
>>> from astropy.wcs import WCS
>>> wcs = WCS(naxis=1)
>>> wcs.wcs.ctype = ['SPAM']
By default, :attr:`FITSWCSAPIMixin.world_axis_physical_types` returns `None`,
but this can be overridden::
>>> wcs.world_axis_physical_types
[None]
>>> with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
... wcs.world_axis_physical_types
['food.spam']
"""
def __init__(self, mapping):
CTYPE_TO_UCD1_CUSTOM.insert(0, mapping)
self.mapping = mapping
def __enter__(self):
pass
def __exit__(self, type, value, tb):
CTYPE_TO_UCD1_CUSTOM.remove(self.mapping)
class SlicedFITSWCS(SlicedLowLevelWCS, HighLevelWCSMixin):
pass
class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin):
"""
A mix-in class that is intended to be inherited by the
:class:`~astropy.wcs.WCS` class and provides the low- and high-level WCS API
"""
@property
def pixel_n_dim(self):
return self.naxis
@property
def world_n_dim(self):
return len(self.wcs.ctype)
@property
def array_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis[::-1])
@array_shape.setter
def array_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)[::-1]
@property
def pixel_shape(self):
if self._naxis == [0, 0]:
return None
else:
return tuple(self._naxis)
@pixel_shape.setter
def pixel_shape(self, value):
if value is None:
self._naxis = [0, 0]
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the "
"shape {}.".format(self.naxis, len(value)))
self._naxis = list(value)
@property
def pixel_bounds(self):
return self._pixel_bounds
@pixel_bounds.setter
def pixel_bounds(self, value):
if value is None:
self._pixel_bounds = value
else:
if len(value) != self.naxis:
raise ValueError("The number of data axes, "
"{}, does not equal the number of "
"pixel bounds {}.".format(self.naxis, len(value)))
self._pixel_bounds = list(value)
@property
def world_axis_physical_types(self):
types = []
# TODO: need to support e.g. TT(TAI)
for ctype in self.wcs.ctype:
if ctype.startswith(('UT(', 'TT(')):
types.append('time')
else:
ctype_name = ctype.split('-')[0]
for custom_mapping in CTYPE_TO_UCD1_CUSTOM:
if ctype_name in custom_mapping:
types.append(custom_mapping[ctype_name])
break
else:
types.append(CTYPE_TO_UCD1.get(ctype_name, None))
return types
@property
def world_axis_units(self):
units = []
for unit in self.wcs.cunit:
if unit is None:
unit = ''
elif isinstance(unit, u.Unit):
unit = unit.to_string(format='vounit')
else:
try:
unit = u.Unit(unit).to_string(format='vounit')
except u.UnitsError:
unit = ''
units.append(unit)
return units
@property
def world_axis_names(self):
return list(self.wcs.cname)
@property
def axis_correlation_matrix(self):
# If there are any distortions present, we assume that there may be
# correlations between all axes. Maybe if some distortions only apply
# to the image plane we can improve this?
if self.has_distortion:
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
# Assuming linear world coordinates along each axis, the correlation
# matrix would be given by whether or not the PC matrix is zero
matrix = self.wcs.get_pc() != 0
# We now need to check specifically for celestial coordinates since
# these can assume correlations because of spherical distortions. For
# each celestial coordinate we copy over the pixel dependencies from
# the other celestial coordinates.
celestial = (self.wcs.axis_types // 1000) % 10 == 2
celestial_indices = np.nonzero(celestial)[0]
for world1 in celestial_indices:
for world2 in celestial_indices:
if world1 != world2:
matrix[world1] |= matrix[world2]
matrix[world2] |= matrix[world1]
return matrix
def pixel_to_world_values(self, *pixel_arrays):
world = self.all_pix2world(*pixel_arrays, 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def array_index_to_world_values(self, *indices):
world = self.all_pix2world(*indices[::-1], 0)
return world[0] if self.world_n_dim == 1 else tuple(world)
def world_to_pixel_values(self, *world_arrays):
pixel = self.all_world2pix(*world_arrays, 0)
return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel)
def world_to_array_index_values(self, *world_arrays):
pixel_arrays = self.all_world2pix(*world_arrays, 0)[::-1]
array_indices = tuple(np.asarray(np.floor(pixel + 0.5), dtype=np.int_) for pixel in pixel_arrays)
return array_indices[0] if self.pixel_n_dim == 1 else array_indices
@property
def world_axis_object_components(self):
return self._get_components_and_classes()[0]
@property
def world_axis_object_classes(self):
return self._get_components_and_classes()[1]
@property
def serialized_classes(self):
return False
def _get_components_and_classes(self):
# The aim of this function is to return whatever is needed for
# world_axis_object_components and world_axis_object_classes. It's easier
# to figure it out in one go and then return the values and let the
# properties return part of it.
# Since this method might get called quite a few times, we need to cache
# it. We start off by defining a hash based on the attributes of the
# WCS that matter here (we can't just use the WCS object as a hash since
# it is mutable)
wcs_hash = (self.naxis,
list(self.wcs.ctype),
list(self.wcs.cunit),
self.wcs.radesys,
self.wcs.equinox,
self.wcs.dateobs,
self.wcs.lng,
self.wcs.lat)
# If the cache is present, we need to check that the 'hash' matches.
if getattr(self, '_components_and_classes_cache', None) is not None:
cache = self._components_and_classes_cache
if cache[0] == wcs_hash:
return cache[1]
else:
self._components_and_classes_cache = None
# Avoid circular imports by importing here
from astropy.wcs.utils import wcs_to_celestial_frame
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.time.formats import FITS_DEPRECATED_SCALES
from astropy.time import Time, TimeDelta
components = [None] * self.naxis
classes = {}
# Let's start off by checking whether the WCS has a pair of celestial
# components
if self.has_celestial:
try:
frame = wcs_to_celestial_frame(self)
except ValueError:
# Some WCSes, e.g. solar, can be recognized by WCSLIB as being
# celestial but we don't necessarily have frames for them.
pass
else:
kwargs = {}
kwargs['frame'] = frame
kwargs['unit'] = u.deg
classes['celestial'] = (SkyCoord, (), kwargs)
components[self.wcs.lng] = ('celestial', 0, 'spherical.lon.degree')
components[self.wcs.lat] = ('celestial', 1, 'spherical.lat.degree')
# We can then make sure we correctly return Time objects where appropriate
# (https://www.aanda.org/articles/aa/pdf/2015/02/aa24653-14.pdf)
if 'time' in self.world_axis_physical_types:
multiple_time = self.world_axis_physical_types.count('time') > 1
for i in range(self.naxis):
if self.world_axis_physical_types[i] == 'time':
if multiple_time:
name = f'time.{i}'
else:
name = 'time'
# Initialize delta
reference_time_delta = None
# Extract time scale
scale = self.wcs.ctype[i].lower()
if scale == 'time':
if self.wcs.timesys:
scale = self.wcs.timesys.lower()
else:
scale = 'utc'
# Drop sub-scales
if '(' in scale:
pos = scale.index('(')
scale, subscale = scale[:pos], scale[pos+1:-1]
warnings.warn(f'Dropping unsupported sub-scale '
f'{subscale.upper()} from scale {scale.upper()}',
UserWarning)
# TODO: consider having GPS as a scale in Time
# For now GPS is not a scale, we approximate this by TAI - 19s
if scale == 'gps':
reference_time_delta = TimeDelta(19, format='sec')
scale = 'tai'
elif scale.upper() in FITS_DEPRECATED_SCALES:
scale = FITS_DEPRECATED_SCALES[scale.upper()]
elif scale not in Time.SCALES:
raise ValueError(f'Unrecognized time CTYPE={self.wcs.ctype[i]}')
# Determine location
trefpos = self.wcs.trefpos.lower()
if trefpos.startswith('topocent'):
# Note that some headers use TOPOCENT instead of TOPOCENTER
if np.any(np.isnan(self.wcs.obsgeo[:3])):
warnings.warn('Missing or incomplete observer location '
'information, setting location in Time to None',
UserWarning)
location = None
else:
location = EarthLocation(*self.wcs.obsgeo[:3], unit=u.m)
elif trefpos == 'geocenter':
location = EarthLocation(0, 0, 0, unit=u.m)
elif trefpos == '':
location = None
else:
# TODO: implement support for more locations when Time supports it
warnings.warn(f"Observation location '{trefpos}' is not "
"supported, setting location in Time to None", UserWarning)
location = None
reference_time = Time(np.nan_to_num(self.wcs.mjdref[0]),
np.nan_to_num(self.wcs.mjdref[1]),
format='mjd', scale=scale,
location=location)
if reference_time_delta is not None:
reference_time = reference_time + reference_time_delta
def time_from_reference_and_offset(offset):
if isinstance(offset, Time):
return offset
return reference_time + TimeDelta(offset, format='sec')
def offset_from_time_and_reference(time):
return (time - reference_time).sec
classes[name] = (Time, (), {}, time_from_reference_and_offset)
components[i] = (name, 0, offset_from_time_and_reference)
# Fallback: for any remaining components that haven't been identified, just
# return Quantity as the class to use
for i in range(self.naxis):
if components[i] is None:
name = self.wcs.ctype[i].split('-')[0].lower()
if name == '':
name = 'world'
while name in classes:
name += "_"
classes[name] = (u.Quantity, (), {'unit': self.wcs.cunit[i]})
components[i] = (name, 0, 'value')
# Keep a cached version of result
self._components_and_classes_cache = wcs_hash, (components, classes)
return components, classes
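
# ---------------------------------------------------------------------------
# Usage sketch (not part of the module): the mix-in is normally exercised
# through astropy.wcs.WCS, which inherits FITSWCSAPIMixin. The CTYPE/CUNIT
# values below are made up purely for illustration.
if __name__ == '__main__':
    from astropy.wcs import WCS

    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.cunit = ['deg', 'deg']

    print(wcs.world_axis_physical_types)   # ['pos.eq.ra', 'pos.eq.dec']
    print(wcs.world_axis_units)            # ['deg', 'deg']
    print(wcs.pixel_to_world_values(10, 20))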
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import functools
import sys
import weakref
from keystoneclient import auth
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient import exceptions
from keystoneclient import session
from oslo_config import cfg
import six
from heat.common import config
from heat.common.i18n import _
class ExceptionFilter(object):
"""A context manager that prevents some exceptions from being raised.
For backwards compatibility, these objects can also be called with the
exception value as an argument - any non-matching exception will be
re-raised from this call. We attempt but cannot guarantee to keep the same
traceback; the context manager method is preferred for this reason except
in cases where the ignored exception affects control flow.
Use this class as a decorator for a function that returns whether a given
exception should be ignored. e.g.
>>> @ExceptionFilter
>>> def ignore_assertions(ex):
... return isinstance(ex, AssertionError)
and then use it as a context manager:
>>> with ignore_assertions:
... assert False
or call it:
>>> try:
... assert False
... except Exception as ex:
... ignore_assertions(ex)
"""
def __init__(self, should_ignore_ex):
self._should_ignore_ex = should_ignore_ex
functools.update_wrapper(self, should_ignore_ex)
def __get__(self, obj, owner):
return type(self)(six.create_bound_method(self._should_ignore_ex, obj))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val is not None:
return self._should_ignore_ex(exc_val)
def __call__(self, ex):
"""Re-raise any exception value not being filtered out.
If the exception was the last to be raised, it will be re-raised with
its original traceback.
"""
if not self._should_ignore_ex(ex):
exc_type, exc_val, traceback = sys.exc_info()
if exc_val is ex:
six.reraise(exc_type, exc_val, traceback)
else:
raise ex
@six.add_metaclass(abc.ABCMeta)
class ClientPlugin(object):
    # Module which contains all exception classes which the client
    # may emit
exceptions_module = None
    # Supported service types; services like cinder support multiple
    # service types, so this is stored as a list
service_types = []
def __init__(self, context):
self._context = weakref.ref(context)
self._clients = weakref.ref(context.clients)
self._client = None
self._keystone_session_obj = None
@property
def context(self):
ctxt = self._context()
assert ctxt is not None, "Need a reference to the context"
return ctxt
@property
def clients(self):
return self._clients()
_get_client_option = staticmethod(config.get_client_option)
@property
def _keystone_session(self):
# FIXME(jamielennox): This session object is essentially static as the
# options won't change. Further it is allowed to be shared by multiple
# authentication requests so there is no reason to construct it fresh
# for every client plugin. It should be global and shared amongst them.
if not self._keystone_session_obj:
o = {'cacert': self._get_client_option('keystone', 'ca_file'),
'insecure': self._get_client_option('keystone', 'insecure'),
'cert': self._get_client_option('keystone', 'cert_file'),
'key': self._get_client_option('keystone', 'key_file')}
self._keystone_session_obj = session.Session.construct(o)
return self._keystone_session_obj
def invalidate(self):
"""Invalidate/clear any cached client."""
self._client = None
def client(self):
if not self._client:
self._client = self._create()
elif (cfg.CONF.reauthentication_auth_method == 'trusts'
and self.context.auth_plugin.auth_ref.will_expire_soon(
cfg.CONF.stale_token_duration)):
# If the token is near expiry, force creating a new client,
# which will get a new token via another call to auth_token
# We also have to invalidate all other cached clients
self.clients.invalidate_plugins()
self._client = self._create()
return self._client
@abc.abstractmethod
def _create(self):
"""Return a newly created client."""
pass
@property
def auth_token(self):
# NOTE(jamielennox): use the session defined by the keystoneclient
# options as traditionally the token was always retrieved from
# keystoneclient.
return self.context.auth_plugin.get_token(self._keystone_session)
def url_for(self, **kwargs):
def get_endpoint():
auth_plugin = self.context.auth_plugin
return auth_plugin.get_endpoint(self._keystone_session, **kwargs)
# NOTE(jamielennox): use the session defined by the keystoneclient
# options as traditionally the token was always retrieved from
# keystoneclient.
try:
kwargs.setdefault('interface', kwargs.pop('endpoint_type'))
except KeyError:
pass
reg = self.context.region_name or cfg.CONF.region_name_for_services
kwargs.setdefault('region_name', reg)
url = None
try:
url = get_endpoint()
except exceptions.EmptyCatalog:
kc = self.clients.client('keystone').client
auth_plugin = self.context.auth_plugin
endpoint = auth_plugin.get_endpoint(None,
interface=auth.AUTH_INTERFACE)
token = auth_plugin.get_token(None)
project_id = auth_plugin.get_project_id(None)
if kc.version == 'v3':
token_obj = v3.Token(endpoint, token, project_id=project_id)
catalog_key = 'catalog'
access_key = 'token'
elif kc.version == 'v2.0':
endpoint = endpoint.replace('v3', 'v2.0')
token_obj = v2.Token(endpoint, token, tenant_id=project_id)
catalog_key = 'serviceCatalog'
access_key = 'access'
else:
raise exceptions.Error(_("Unknown Keystone version"))
auth_ref = token_obj.get_auth_ref(self._keystone_session)
if catalog_key in auth_ref:
access_info = self.context.auth_token_info[access_key]
access_info[catalog_key] = auth_ref[catalog_key]
self.context.reload_auth_plugin()
url = get_endpoint()
# NOTE(jamielennox): raising exception maintains compatibility with
# older keystoneclient service catalog searching.
if url is None:
raise exceptions.EndpointNotFound()
return url
def is_client_exception(self, ex):
"""Returns True if the current exception comes from the client."""
if self.exceptions_module:
if isinstance(self.exceptions_module, list):
for m in self.exceptions_module:
if type(ex) in six.itervalues(m.__dict__):
return True
else:
return type(ex) in six.itervalues(
self.exceptions_module.__dict__)
return False
def is_not_found(self, ex):
"""Returns True if the exception is a not-found."""
return False
def is_over_limit(self, ex):
"""Returns True if the exception is an over-limit."""
return False
def is_conflict(self, ex):
"""Returns True if the exception is a conflict."""
return False
@ExceptionFilter
def ignore_not_found(self, ex):
"""Raises the exception unless it is a not-found."""
return self.is_not_found(ex)
@ExceptionFilter
def ignore_conflict_and_not_found(self, ex):
"""Raises the exception unless it is a conflict or not-found."""
return self.is_conflict(ex) or self.is_not_found(ex)
def _get_client_args(self,
service_name,
service_type):
endpoint_type = self._get_client_option(service_name,
'endpoint_type')
endpoint = self.url_for(service_type=service_type,
endpoint_type=endpoint_type)
args = {
'auth_url': self.context.auth_url,
'service_type': service_type,
'project_id': self.context.tenant_id,
'token': lambda: self.auth_token,
'endpoint_type': endpoint_type,
'os_endpoint': endpoint,
'cacert': self._get_client_option(service_name, 'ca_file'),
'cert_file': self._get_client_option(service_name, 'cert_file'),
'key_file': self._get_client_option(service_name, 'key_file'),
'insecure': self._get_client_option(service_name, 'insecure')
}
return args
# FIXME(kanagaraj-manickam) Update other client plugins to leverage
# this method (bug 1461041)
def does_endpoint_exist(self,
service_type,
service_name):
endpoint_type = self._get_client_option(service_name,
'endpoint_type')
try:
self.url_for(service_type=service_type,
endpoint_type=endpoint_type)
return True
except exceptions.EndpointNotFound:
return False
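
# ---------------------------------------------------------------------------
# Subclassing sketch (illustrative only). The module, client class and
# exception names below are hypothetical placeholders -- real plugins live
# under heat/engine/clients/os/ and wrap an actual python-*client library.
#
#     class ExampleClientPlugin(ClientPlugin):
#
#         exceptions_module = exampleclient.exceptions
#         service_types = [EXAMPLE] = ['example-service']
#
#         def _create(self):
#             args = self._get_client_args(service_name='example',
#                                          service_type=self.EXAMPLE)
#             return exampleclient.Client('1', **args)
#
#         def is_not_found(self, ex):
#             return isinstance(ex, exampleclient.exceptions.NotFound)
#
# Resource code can then ignore a missing backend object on delete:
#
#     with self.client_plugin().ignore_not_found:
#         self.client().things.delete(thing_id)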
|
|
import os
from collections import OrderedDict
from typing import Union
import casadi as ca
# noinspection PyPackageRequirements
from lxml import etree
from .model import HybridDae
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
SCHEMA_DIR = os.path.join(FILE_PATH, 'ModelicaXML', 'schemas')
class XMLParser:
def __init__(self, schema_dir, schema_file):
orig_path = os.path.abspath(os.curdir)
os.chdir(schema_dir)
with open(schema_file, 'r') as f:
schema = etree.XMLSchema(etree.XML(f.read().encode('utf-8')))
os.chdir(orig_path)
self._parser = etree.XMLParser(
schema=schema,
remove_comments=True,
remove_blank_text=False)
def parse(self, txt: str):
if not isinstance(txt, str):
raise ValueError('txt must be a str')
xml_file = txt.encode('utf-8')
return etree.fromstring(xml_file, self._parser)
Sym = Union[ca.MX, ca.SX]
# noinspection PyProtectedMember,PyPep8Naming
class ModelListener:
""" Converts ModelicaXML file to Hybrid DAE"""
def __init__(self, sym: Sym = ca.SX, verbose=False):
self.depth = 0
self.model = {}
self.scope_stack = []
self.verbose = verbose
self.sym = sym
# Define an operator map that can be used as
# self.op_map[n_operations][operator](*args)
self.op_map = {
1: {
'der': self.der,
'-': lambda x: -1 * x,
'abs': ca.fabs,
'sin': ca.sin,
'cos': ca.cos,
'tan': ca.tan,
},
2: {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y,
'^': lambda x, y: ca.power(x, y),
'>': lambda x, y: x > y,
'<': lambda x, y: x < y,
'<=': lambda x, y: x <= y,
'>=': lambda x, y: x >= y,
'reinit': self.reinit,
'sample': self.sample,
'and': ca.logic_and,
'or': ca.logic_or,
'not': ca.logic_not,
'noise_gaussian': lambda mean, std: self.noise_gaussian(mean, std),
'noise_uniform': lambda lower, upper: self.noise_uniform(lower, upper),
},
}
@property
def scope(self):
return self.scope_stack[-1]
def call(self, tag_name: str, *args, **kwargs):
"""Convenience method for calling methods with walker."""
if hasattr(self, tag_name):
getattr(self, tag_name)(*args, **kwargs)
# ------------------------------------------------------------------------
# OPERATORS
# ------------------------------------------------------------------------
def der(self, x: Sym):
"""Get the derivative of the variable, create it if it doesn't exist."""
name = 'der({:s})'.format(x.name())
if name not in self.scope['dvar'].keys():
self.scope['dvar'][name] = self.sym.sym(name, *x.shape)
self.scope['states'].append(x.name())
return self.scope['dvar'][name]
def cond(self, expr):
c = self.sym.sym('c_{:d}'.format(len(self.scope['c'])))
self.scope['c'][c] = expr
return c
def pre_cond(self, x: Sym):
name = 'pre({:s})'.format(x.name())
if name not in self.scope['pre_c'].keys():
self.scope['pre_c'][name] = self.sym.sym(name, *x.shape)
return self.scope['pre_c'][name]
def edge(self, c):
"""rising edge"""
return ca.logic_and(c, ca.logic_not(self.pre_cond(c)))
@staticmethod
def reinit(x_old, x_new):
return 'reinit', x_old, x_new
@staticmethod
def sample(t_start, period):
print('sample', t_start, period)
return 'sample', t_start, period
def noise_gaussian(self, mean, std):
"""Create a gaussian noise variable"""
assert std > 0
ng = self.sym.sym('ng_{:d}'.format(len(self.scope['ng'])))
self.scope['ng'].append(ng)
return mean + std*ng
def noise_uniform(self, lower_bound, upper_bound):
"""Create a uniform noise variable"""
assert upper_bound > lower_bound
nu = self.sym.sym('nu_{:d}'.format(len(self.scope['nu'])))
self.scope['nu'].append(nu)
return lower_bound + nu*(upper_bound - lower_bound)
# ------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------
@staticmethod
def get_attr(e, name, default):
if name in e.attrib.keys():
return e.attrib[name]
else:
return default
def get_var(self, name):
"""Get the variable in the current scope"""
if name == 'time':
return self.scope['time']
else:
return self.scope['var'][name]
def log(self, *args, **kwargs):
"""Convenience function for printing indenting debug output."""
if self.verbose:
print(' ' * self.depth, *args, **kwargs)
# ------------------------------------------------------------------------
# Listener Methods
# ------------------------------------------------------------------------
def enter_every_before(self, tree: etree._Element):
# initialize model to None
self.model[tree] = None
# print name to log
self.log(tree.tag, '{')
# increment depth
self.depth += 1
def exit_every_after(self, tree: etree._Element):
# decrement depth
self.depth -= 1
# self.log('tree:', etree.tostring(tree))
# print model
if self.model[tree] is not None:
self.log('model:', self.model[tree])
# print name to log
self.log('}', tree.tag)
# noinspection PyUnusedLocal
def enter_classDefinition(self, tree: etree._Element):
# we don't know if variables are states
# yet, we need to wait until equations are parsed
self.scope_stack.append({
'time': self.sym.sym('time'),
'sample_times': [],
'var': OrderedDict(), # variables
'states': [], # list of which variables are states (based on der call)
'dvar': OrderedDict(), # derivative of variables
'eqs': [], # equations
'when_eqs': [], # when equations
'c': {}, # conditions
'pre_c': {}, # pre conditions
'p': [], # parameters and constants
'prop': {}, # properties for variables
'ng': [], # gaussian
'nu': [], # uniform
})
def exit_classDefinition(self, tree: etree._Element): # noqa: too-complex
dae = HybridDae()
dae.t = self.scope['time']
self.model[tree] = dae
# handle component declarations
for var_name, v in self.scope['var'].items():
variability = self.scope['prop'][var_name]['variability']
if variability == 'continuous':
if var_name in self.scope['states']:
dae.x = ca.vertcat(dae.x, v)
dae.dx = ca.vertcat(dae.dx, self.der(v))
else:
dae.y = ca.vertcat(dae.y, v)
elif variability == 'discrete':
dae.m = ca.vertcat(dae.m, v)
elif variability == 'parameter':
dae.p = ca.vertcat(dae.p, v)
elif variability == 'constant':
dae.p = ca.vertcat(dae.p, v)
else:
raise ValueError('unknown variability', variability)
for eq in self.scope['eqs']:
if isinstance(eq, self.sym):
dae.f_x = ca.vertcat(dae.f_x, eq)
# build reinit expression and discrete equations
dae.f_i = dae.x
dae.f_m = dae.m
for eq in self.scope['when_eqs']:
w = eq['cond']
for then_eq in eq['then']:
if isinstance(then_eq, tuple):
if then_eq[0] == 'reinit':
sub_var = then_eq[1]
sub_expr = ca.if_else(self.edge(w), then_eq[2], sub_var)
dae.f_i = ca.substitute(dae.f_i, sub_var, sub_expr)
elif isinstance(then_eq, self.sym):
# this is a discrete variable assignment
# so it should be a casadi subtraction y = x
assert then_eq.is_op(ca.OP_SUB) and then_eq.n_dep() == 2
sub_var = then_eq.dep(0)
sub_expr = ca.if_else(self.edge(w), then_eq.dep(1), sub_var)
dae.f_m = ca.substitute(dae.f_m, sub_var, sub_expr)
dae.t = self.scope['time']
dae.prop.update(self.scope['prop'])
c_dict = self.scope['c']
for k in c_dict.keys():
dae.c = ca.vertcat(dae.c, k)
dae.pre_c = ca.vertcat(dae.pre_c, self.pre_cond(k))
dae.f_c = ca.vertcat(dae.f_c, c_dict[k])
for l, r in [('f_c', 'c'), ('c', 'pre_c'), ('dx', 'x'), ('f_m', 'm')]:
vl = getattr(dae, l)
vr = getattr(dae, r)
if vl.shape != vr.shape:
raise ValueError(
'{:s} and {:s} must have the same shape:'
'\n{:s}: {:s}\t{:s}: {:s}'.format(
                        l, r, l, str(vl), r, str(vr)))
dae.ng = ca.vertcat(*self.scope['ng'])
dae.nu = ca.vertcat(*self.scope['nu'])
n_eq = dae.f_x.shape[0] + dae.f_m.shape[0]
n_var = dae.x.shape[0] + dae.m.shape[0] + dae.y.shape[0]
if n_eq != n_var:
raise ValueError(
'must have equal number of equations '
'{:d} and unknowns {:d}\n:{:s}'.format(
n_eq, n_var, str(dae)))
self.scope_stack.pop()
def enter_component(self, tree: etree._Element):
self.model[tree] = {
'start': None,
'fixed': None,
'value': None,
'variability': self.get_attr(tree, 'variability', 'continuous'),
'visibility': self.get_attr(tree, 'visibility', 'public'),
}
self.scope_stack.append(self.model[tree])
def exit_component(self, tree: etree._Element):
var_scope = self.scope_stack.pop()
name = tree.attrib['name']
shape = (1, 1)
sym = self.sym.sym(name, *shape)
self.scope['prop'][name] = var_scope
self.scope['var'][name] = sym
def exit_local(self, tree: etree._Element):
name = tree.attrib['name']
self.model[tree] = self.get_var(name)
def exit_operator(self, tree: etree._Element):
op = tree.attrib['name']
self.model[tree] = self.op_map[len(tree)][op](*[self.model[e] for e in tree])
def exit_if(self, tree: etree._Element):
assert len(tree) == 3
cond = self.model[tree[0]]
then_eq = self.model[tree[1]]
else_eq = self.model[tree[2]]
c = self.cond(cond)
if len(then_eq) != len(else_eq):
raise SyntaxError("then and else equations must have same number of statements")
self.model[tree] = ca.if_else(c, then_eq[0], else_eq[0])
def exit_apply(self, tree: etree._Element):
op = tree.attrib['builtin']
self.model[tree] = self.op_map[len(tree)][op](*[self.model[e] for e in tree])
def exit_equal(self, tree: etree._Element):
assert len(tree) == 2
self.model[tree] = self.model[tree[0]] - self.model[tree[1]]
def exit_equation(self, tree: etree._Element):
self.model[tree] = [self.model[c] for c in tree]
self.scope['eqs'].extend(self.model[tree])
def exit_modifier(self, tree: etree._Element):
props = {}
for e in tree:
props.update(self.model[e])
self.model[tree] = props
self.scope.update(props)
def exit_item(self, tree: etree._Element):
assert len(tree) == 1
self.model[tree] = {
tree.attrib['name']: self.model[tree[0]]
}
def exit_real(self, tree: etree._Element):
self.model[tree] = float(tree.attrib["value"])
def exit_true(self, tree: etree._Element):
self.model[tree] = True
def exit_false(self, tree: etree._Element):
self.model[tree] = False
def exit_modelica(self, tree: etree._Element):
# get all class definitions as a list
self.model[tree] = [self.model[c] for c in tree[0]]
def exit_when(self, tree: etree._Element):
assert len(tree) == 2
cond = self.model[tree[0]]
then = self.model[tree[1]]
self.model[tree] = {
'cond': self.cond(cond),
'then': then
}
self.scope['when_eqs'].append(self.model[tree])
def exit_cond(self, tree: etree._Element):
assert len(tree) == 1
self.model[tree] = self.model[tree[0]]
def exit_then(self, tree: etree._Element):
self.model[tree] = [self.model[c] for c in tree]
def exit_else(self, tree: etree._Element):
self.model[tree] = [self.model[c] for c in tree]
# noinspection PyProtectedMember
def walk(e: etree._Element, l: ModelListener) -> None:
tag = e.tag
l.call('enter_every_before', e)
l.call('enter_' + tag, e)
l.call('enter_every_after', e)
for c in e.getchildren():
walk(c, l)
l.call('exit_every_before', e)
l.call('exit_' + tag, e)
l.call('exit_every_after', e)
def parse(model_txt: str, verbose: bool = False) -> HybridDae:
parser = XMLParser(SCHEMA_DIR, 'Modelica.xsd')
root = parser.parse(model_txt)
listener = ModelListener(verbose=verbose)
walk(root, listener)
return listener.model[root][0]
def parse_file(file_path: str, verbose: bool = False) -> HybridDae:
with open(file_path, 'r') as f:
txt = f.read()
return parse(txt, verbose)
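
# ---------------------------------------------------------------------------
# Usage sketch (not part of the parser). It assumes a ModelicaXML export of a
# model is available on disk; 'BouncingBall.xml' is only an example file name.
if __name__ == '__main__':
    dae = parse_file('BouncingBall.xml', verbose=True)
    print('states:     ', dae.x)
    print('algebraics: ', dae.y)
    print('parameters: ', dae.p)
    print('equations:  ', dae.f_x)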
|
|
"""Generic resource pool implementation."""
from __future__ import absolute_import, unicode_literals
import os
from collections import deque
from . import exceptions
from .five import Empty, LifoQueue as _LifoQueue
from .utils.compat import register_after_fork
from .utils.functional import lazy
def _after_fork_cleanup_resource(resource):
try:
resource.force_close_all()
except Exception:
pass
class LifoQueue(_LifoQueue):
"""Last in first out version of Queue."""
def _init(self, maxsize):
self.queue = deque()
class Resource(object):
"""Pool of resources."""
LimitExceeded = exceptions.LimitExceeded
close_after_fork = False
def __init__(self, limit=None, preload=None, close_after_fork=None):
self._limit = limit
self.preload = preload or 0
self._closed = False
self.close_after_fork = (
close_after_fork
if close_after_fork is not None else self.close_after_fork
)
self._resource = LifoQueue()
self._dirty = set()
if self.close_after_fork and register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_resource)
self.setup()
def setup(self):
raise NotImplementedError('subclass responsibility')
def _add_when_empty(self):
if self.limit and len(self._dirty) >= self.limit:
raise self.LimitExceeded(self.limit)
# All taken, put new on the queue and
# try get again, this way the first in line
# will get the resource.
self._resource.put_nowait(self.new())
def acquire(self, block=False, timeout=None):
"""Acquire resource.
Arguments:
block (bool): If the limit is exceeded,
then block until there is an available item.
timeout (float): Timeout to wait
if ``block`` is true. Default is :const:`None` (forever).
Raises:
LimitExceeded: if block is false and the limit has been exceeded.
"""
if self._closed:
raise RuntimeError('Acquire on closed pool')
if self.limit:
while 1:
try:
R = self._resource.get(block=block, timeout=timeout)
except Empty:
self._add_when_empty()
else:
try:
R = self.prepare(R)
except BaseException:
if isinstance(R, lazy):
# not evaluated yet, just put it back
self._resource.put_nowait(R)
else:
                            # evaluated, so we must try to release/close it first.
self.release(R)
raise
self._dirty.add(R)
break
else:
R = self.prepare(self.new())
def release():
"""Release resource so it can be used by another thread.
Warnings:
The caller is responsible for discarding the object,
and to never use the resource again. A new resource must
be acquired if so needed.
"""
self.release(R)
R.release = release
return R
def prepare(self, resource):
return resource
def close_resource(self, resource):
resource.close()
def release_resource(self, resource):
pass
def replace(self, resource):
"""Replace existing resource with a new instance.
This can be used in case of defective resources.
"""
if self.limit:
self._dirty.discard(resource)
self.close_resource(resource)
def release(self, resource):
if self.limit:
self._dirty.discard(resource)
self._resource.put_nowait(resource)
self.release_resource(resource)
else:
self.close_resource(resource)
def collect_resource(self, resource):
pass
def force_close_all(self):
"""Close and remove all resources in the pool (also those in use).
Used to close resources from parent processes after fork
(e.g. sockets/connections).
"""
if self._closed:
return
self._closed = True
dirty = self._dirty
resource = self._resource
while 1: # - acquired
try:
dres = dirty.pop()
except KeyError:
break
try:
self.collect_resource(dres)
except AttributeError: # Issue #78
pass
while 1: # - available
# deque supports '.clear', but lists do not, so for that
# reason we use pop here, so that the underlying object can
# be any object supporting '.pop' and '.append'.
try:
res = resource.queue.pop()
except IndexError:
break
try:
self.collect_resource(res)
except AttributeError:
pass # Issue #78
def resize(self, limit, force=False, ignore_errors=False, reset=False):
prev_limit = self._limit
if (self._dirty and 0 < limit < self._limit) and not ignore_errors:
if not force:
raise RuntimeError(
"Can't shrink pool when in use: was={0} now={1}".format(
self._limit, limit))
reset = True
self._limit = limit
if reset:
try:
self.force_close_all()
except Exception:
pass
self.setup()
if limit < prev_limit:
self._shrink_down(collect=limit > 0)
def _shrink_down(self, collect=True):
class Noop:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
resource = self._resource
# Items to the left are last recently used, so we remove those first.
with getattr(resource, 'mutex', Noop()):
while len(resource.queue) > self.limit:
R = resource.queue.popleft()
if collect:
self.collect_resource(R)
@property
def limit(self):
return self._limit
@limit.setter
def limit(self, limit):
self.resize(limit)
if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover
_orig_acquire = acquire
_orig_release = release
_next_resource_id = 0
def acquire(self, *args, **kwargs): # noqa
import traceback
id = self._next_resource_id = self._next_resource_id + 1
print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
r = self._orig_acquire(*args, **kwargs)
r._resource_id = id
print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
if not hasattr(r, 'acquired_by'):
r.acquired_by = []
r.acquired_by.append(traceback.format_stack())
return r
def release(self, resource): # noqa
id = resource._resource_id
print('+{0} RELEASE {1}'.format(id, self.__class__.__name__))
r = self._orig_release(resource)
print('-{0} RELEASE {1}'.format(id, self.__class__.__name__))
self._next_resource_id -= 1
return r
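
# ---------------------------------------------------------------------------
# Usage sketch (not part of kombu): a minimal, self-contained Resource
# subclass. Real users are kombu's connection and producer pools; here the
# pooled object is a trivial placeholder so the sketch needs no broker.
if __name__ == '__main__':
    class _Token(object):
        """Trivial stand-in for a pooled resource (e.g. a connection)."""

    class TokenPool(Resource):

        def new(self):
            return _Token()

        def setup(self):
            # Nothing to pre-populate for this toy pool.
            pass

    pool = TokenPool(limit=2)
    first = pool.acquire()
    second = pool.acquire()
    try:
        pool.acquire(block=False)       # a third acquire exceeds the limit
    except pool.LimitExceeded:
        print('pool exhausted, as expected')
    first.release()                     # hand the resource back to the pool
    pool.release(second)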
|
|
#!/usr/bin/env python
import numpy as np
import time
import json
import copy
from sklearn.metrics import confusion_matrix
from multiclass_confidence_weighted_var_diag import MCWVarDiag
from multiclass_soft_confidence_weighted_1_diag import MSCWIDiag
from multiclass_soft_confidence_weighted_2_diag import MSCWIIDiag
from sklearn.svm import LinearSVC
# file path
filepath = "./evaluate_small_data_results.json"
# data configuration
data_config = {
"abalone": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/abalone.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/abalone.csv",
},
"transfusion": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/transfusion.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/transfusion.csv",
},
"gisette": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/gisette.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/gisette.csv",
},
"iris": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/iris.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/iris.csv",
},
"glass": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/glass.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/glass.csv",
},
"breast_cancer": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/breast_cancer.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/breast_cancer.csv",
},
"car": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/car.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/car.csv",
},
"creadit": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/credit.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/credit.csv",
},
"usps": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/usps.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/usps.csv",
},
"liver": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/liver.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/liver.csv",
},
"haberman": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/haberman.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/haberman.csv",
},
"pima": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/pima.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/pima.csv",
},
"ionosphere": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/ionosphere.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/ionosphere.csv",
},
"isolet": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/isolet.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/isolet.csv",
},
"magicGamaTelescope": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/magicGamaTelescope.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/magicGamaTelescope.csv",
},
"mammographic": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/mammographic.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/mammographic.csv",
},
"yeast": {
"train": "/home/k_yoshiyama/datasets/uci_csv_train/yeast.csv",
"test": "/home/k_yoshiyama/datasets/uci_csv_test/yeast.csv",
},
}
# results
results = {
"abalone": {
},
"transfusion": {
},
"gisette": {
},
"iris": {
},
"glass": {
},
"breast_cancer": {
},
"car": {
},
"creadit": {
},
"usps": {
},
"liver": {
},
"haberman": {
},
"pima": {
},
"ionosphere": {
},
"isolet": {
},
"magicGamaTelescope": {
},
"mammographic": {
},
"yeast": {
},
}
# model config
models = [MCWVarDiag, MSCWIDiag, MSCWIIDiag]
model_class_name_map = {
MCWVarDiag: "MCWVarDiag",
MSCWIDiag: "MSCWIDiag",
MSCWIIDiag: "MSCWIIDiag",
}
# results
result_per_data = {
"MCWVarDiag": {
"acc": [], # per epoch
"elapsed": [], # per epoch
},
"MSCWIDiag": {
"acc": [],
"elapsed": [],
},
"MSCWIIDiag": {
"acc": [],
"elapsed": [],
},
"LinearSVC": {
"acc": [],
"elapsed": [],
},
}
# results for each data
for data in results:
results[data] = copy.deepcopy(result_per_data)
pass
# run experiment
epochs = xrange(1, 51)
for data in data_config:
print "data %s is processing..." % data
# train/test
data_train = np.loadtxt(data_config[data]["train"], delimiter=" ")
X_train = data_train[:, 1:]
y_train = data_train[:, 0]
data_test = np.loadtxt(data_config[data]["test"], delimiter=" ")
X_test = data_test[:, 1:]
y_test = data_test[:, 0]
# evaluate
for model in models: # foreach __main__.class
# init
print "model is %s" % str(model)
model_ = model(epochs=1)
print "model is %s." % model_class_name_map[model]
# epoch
for epoch in epochs:
print "the number of epochs is %d" % epoch
# warm start
if not epoch == 1:
mu = model_.model["mu"]
S = model_.model["S"]
model_.init_params(mu, S)
pass
# learn
st = time.time()
model_.epochs = 1
model_.learn(X_train, y_train)
et = time.time()
# elapsed time
results[data][model_class_name_map[model]]["elapsed"].append(et - st)
# predict
y_pred = []
for x in X_test:
y_pred.append(model_.predict(x))
pass
cm = confusion_matrix(y_test, y_pred)
# accuracy
results[data][model_class_name_map[model]]["acc"].append(np.sum(cm.diagonal()) * 100.0 / np.sum(cm))
pass
pass
# Linear SVC
print "model is LinearSVC."
model_ = LinearSVC()
st = time.time()
model_.fit(X_train, y_train)
et = time.time()
y_pred = model_.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
acc = np.sum(cm.diagonal()) * 100.0 / np.sum(cm)
elapsed_time = et - st
for epoch in epochs: # add the same results to all epochs
results[data]["LinearSVC"]["acc"].append(acc)
results[data]["LinearSVC"]["elapsed"].append(elapsed_time)
pass
with open(filepath, "w") as fpout:
json.dump(results, fpout)
pass
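
# post-processing sketch (illustrative): summarize the JSON written above.
# Assumes the results file already exists at `filepath`.
def summarize_results(path=filepath):
    with open(path) as fpin:
        res = json.load(fpin)
    for data_ in sorted(res):
        for model_name in sorted(res[data_]):
            accs = res[data_][model_name]["acc"]
            if accs:
                print("%s / %s: best acc = %.2f%%" % (data_, model_name, max(accs)))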
|
|
"""Functions to generate example data, e.g. example images or segmaps.
Added in 0.5.0.
"""
from __future__ import print_function, division, absolute_import
import os
import json
import imageio
import numpy as np
# filepath to the quokka image, its annotations and depth map
# Added in 0.5.0.
_FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# Added in 0.5.0.
_QUOKKA_FP = os.path.join(_FILE_DIR, "quokka.jpg")
# Added in 0.5.0.
_QUOKKA_ANNOTATIONS_FP = os.path.join(_FILE_DIR, "quokka_annotations.json")
# Added in 0.5.0.
_QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(
_FILE_DIR, "quokka_depth_map_halfres.png")
def _quokka_normalize_extract(extract):
"""Generate a normalized rectangle for the standard quokka image.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
extract : 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
Unnormalized representation of the image subarea to be extracted.
* If ``str`` ``square``, then a squared area
``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from
the image.
* If a ``tuple``, then expected to contain four ``number`` s
denoting ``(x1, y1, x2, y2)``.
* If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that
bounding box's area will be extracted from the image.
* If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,
then expected to contain exactly one bounding box and a shape
matching the full image dimensions (i.e. ``(643, 960, *)``).
Then the one bounding box will be used similar to
``BoundingBox`` above.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Normalized representation of the area to extract from the standard
quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
if extract == "square":
bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)
elif isinstance(extract, tuple) and len(extract) == 4:
bb = BoundingBox(x1=extract[0], y1=extract[1],
x2=extract[2], y2=extract[3])
elif isinstance(extract, BoundingBox):
bb = extract
elif isinstance(extract, BoundingBoxesOnImage):
assert len(extract.bounding_boxes) == 1, (
"Provided BoundingBoxesOnImage instance may currently only "
"contain a single bounding box.")
assert extract.shape[0:2] == (643, 960), (
"Expected BoundingBoxesOnImage instance on an image of shape "
"(643, 960, ?). Got shape %s." % (extract.shape,))
bb = extract.bounding_boxes[0]
else:
raise Exception(
"Expected 'square' or tuple of four entries or BoundingBox or "
"BoundingBoxesOnImage for parameter 'extract', "
"got %s." % (type(extract),)
)
return bb
# TODO is this the same as the project functions in augmentables?
def _compute_resized_shape(from_shape, to_shape):
"""Compute the intended new shape of an image-like array after resizing.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
from_shape : tuple or ndarray
Old shape of the array. Usually expected to be a ``tuple`` of form
``(H, W)`` or ``(H, W, C)`` or alternatively an array with two or
three dimensions.
to_shape : None or tuple of ints or tuple of floats or int or float or ndarray
New shape of the array.
* If ``None``, then `from_shape` will be used as the new shape.
* If an ``int`` ``V``, then the new shape will be ``(V, V, [C])``,
where ``C`` will be added if it is part of `from_shape`.
* If a ``float`` ``V``, then the new shape will be
``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old
height/width.
* If a ``tuple`` ``(H', W', [C'])`` of ints, then ``H'`` and ``W'``
will be used as the new height and width.
* If a ``tuple`` ``(H', W', [C'])`` of floats (except ``C``), then
``H'`` and ``W'`` will be used as the new height and width.
* If a numpy array, then the array's shape will be used.
Returns
-------
tuple of int
New shape.
"""
from . import imgaug as ia
if ia.is_np_array(from_shape):
from_shape = from_shape.shape
if ia.is_np_array(to_shape):
to_shape = to_shape.shape
to_shape_computed = list(from_shape)
if to_shape is None:
pass
elif isinstance(to_shape, tuple):
assert len(from_shape) in [2, 3]
assert len(to_shape) in [2, 3]
if len(from_shape) == 3 and len(to_shape) == 3:
assert from_shape[2] == to_shape[2]
elif len(to_shape) == 3:
to_shape_computed.append(to_shape[2])
is_to_s_valid_values = all(
[v is None or ia.is_single_number(v) for v in to_shape[0:2]])
assert is_to_s_valid_values, (
"Expected the first two entries in to_shape to be None or "
"numbers, got types %s." % (
str([type(v) for v in to_shape[0:2]]),))
for i, from_shape_i in enumerate(from_shape[0:2]):
if to_shape[i] is None:
to_shape_computed[i] = from_shape_i
elif ia.is_single_integer(to_shape[i]):
to_shape_computed[i] = to_shape[i]
else: # float
to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))
elif ia.is_single_integer(to_shape) or ia.is_single_float(to_shape):
to_shape_computed = _compute_resized_shape(
from_shape, (to_shape, to_shape))
else:
raise Exception(
"Expected to_shape to be None or ndarray or tuple of floats or "
"tuple of ints or single int or single float, "
"got %s." % (type(to_shape),))
return tuple(to_shape_computed)
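# A minimal sketch (illustration only; assumes numpy is imported as ``np``,
# as elsewhere in this module) of how the different ``to_shape`` forms are
# resolved by _compute_resized_shape():
def _example_compute_resized_shape():  # hypothetical helper, not part of imgaug
    assert _compute_resized_shape((100, 200, 3), None) == (100, 200, 3)        # unchanged
    assert _compute_resized_shape((100, 200, 3), 50) == (50, 50, 3)            # int -> square, C kept
    assert _compute_resized_shape((100, 200, 3), 0.5) == (50, 100, 3)          # float -> relative resize
    assert _compute_resized_shape((100, 200, 3), (50, None)) == (50, 200, 3)   # None keeps the old axis
    assert _compute_resized_shape((100, 200), np.zeros((25, 30))) == (25, 30)  # ndarray -> its shape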
def quokka(size=None, extract=None):
"""Return an image of a quokka as a numpy array.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into
:func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a
``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is
the width. If ``None``, then the image will not be resized.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
Subarea of the quokka image to extract:
* If ``None``, then the whole image will be used.
* If ``str`` ``square``, then a squared area
``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from
the image.
* If a ``tuple``, then expected to contain four ``number`` s
denoting ``(x1, y1, x2, y2)``.
* If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that
bounding box's area will be extracted from the image.
* If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,
then expected to contain exactly one bounding box and a shape
matching the full image dimensions (i.e. ``(643, 960, *)``).
Then the one bounding box will be used similar to
``BoundingBox`` above.
Returns
-------
(H,W,3) ndarray
The image array of dtype ``uint8``.
"""
from . import imgaug as ia
img = imageio.imread(_QUOKKA_FP, pilmode="RGB")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is not None:
shape_resized = _compute_resized_shape(img.shape, size)
img = ia.imresize_single_image(img, shape_resized[0:2])
return img
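# A minimal usage sketch (illustration only; loading requires the bundled
# quokka image referenced by _QUOKKA_FP, as in a regular imgaug checkout):
def _example_quokka_usage():  # hypothetical helper, not part of imgaug
    img_full = quokka()                      # full (643, 960, 3) uint8 image
    img_half = quokka(size=0.5)              # relative resize to half the size
    img_crop = quokka(size=(128, 128),       # extract the square area, then resize
                      extract="square")
    return img_full, img_half, img_crop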
def quokka_square(size=None):
"""Return an (square) image of a quokka as a numpy array.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int, optional
Size of the output image. Input into
:func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a
``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is
the width. If ``None``, then the image will not be resized.
Returns
-------
(H,W,3) ndarray
The image array of dtype ``uint8``.
"""
return quokka(size=size, extract="square")
def quokka_heatmap(size=None, extract=None):
"""Return a heatmap (here: depth map) for the standard example quokka image.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int, optional
See :func:`~imgaug.imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
See :func:`~imgaug.imgaug.quokka`.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
        Depth map as a heatmap object. Values close to ``0.0`` denote objects
that are close to the camera. Values close to ``1.0`` denote objects
that are furthest away (among all shown objects).
"""
# TODO get rid of this deferred import
from . import imgaug as ia
from imgaug.augmentables.heatmaps import HeatmapsOnImage
img = imageio.imread(_QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
img = ia.imresize_single_image(img, (643, 960), interpolation="cubic")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is None:
size = img.shape[0:2]
shape_resized = _compute_resized_shape(img.shape, size)
img = ia.imresize_single_image(img, shape_resized[0:2])
img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
img_0to1 = img_0to1.astype(np.float32) / 255.0
img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))
def quokka_segmentation_map(size=None, extract=None):
"""Return a segmentation map for the standard example quokka image.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int, optional
See :func:`~imgaug.imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
See :func:`~imgaug.imgaug.quokka`.
Returns
-------
imgaug.augmentables.segmaps.SegmentationMapsOnImage
Segmentation map object.
"""
# pylint: disable=invalid-name
import skimage.draw
# TODO get rid of this deferred import
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
with open(_QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
xx = []
yy = []
for kp_dict in json_dict["polygons"][0]["keypoints"]:
x = kp_dict["x"]
y = kp_dict["y"]
xx.append(x)
yy.append(y)
img_seg = np.zeros((643, 960, 1), dtype=np.int32)
rr, cc = skimage.draw.polygon(
np.array(yy), np.array(xx), shape=img_seg.shape)
img_seg[rr, cc, 0] = 1
if extract is not None:
bb = _quokka_normalize_extract(extract)
img_seg = bb.extract_from_image(img_seg)
segmap = SegmentationMapsOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))
if size is not None:
shape_resized = _compute_resized_shape(img_seg.shape, size)
segmap = segmap.resize(shape_resized[0:2])
segmap.shape = tuple(shape_resized[0:2]) + (3,)
return segmap
def quokka_keypoints(size=None, extract=None):
"""Return example keypoints on the standard example quokke image.
The keypoints cover the eyes, ears, nose and paws.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the keypoints are placed. If
``None``, then the keypoints are not projected to any new size
(positions on the original image are used). ``float`` s lead to
relative size changes, ``int`` s to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
Example keypoints on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(_QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
keypoints = []
for kp_dict in json_dict["keypoints"]:
keypoints.append(Keypoint(x=kp_dict["x"] - left, y=kp_dict["y"] - top))
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
kpsoi = KeypointsOnImage(keypoints, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
kpsoi = kpsoi.on(shape_resized)
return kpsoi
def quokka_bounding_boxes(size=None, extract=None):
"""Return example bounding boxes on the standard example quokke image.
Currently only a single bounding box is returned that covers the quokka.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the BBs are placed. If ``None``, then
the BBs are not projected to any new size (positions on the original
image are used). ``float`` s lead to relative size changes, ``int`` s
to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Example BBs on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(_QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
bbs = []
for bb_dict in json_dict["bounding_boxes"]:
bbs.append(
BoundingBox(
x1=bb_dict["x1"] - left,
y1=bb_dict["y1"] - top,
x2=bb_dict["x2"] - left,
y2=bb_dict["y2"] - top
)
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
bbsoi = BoundingBoxesOnImage(bbs, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
bbsoi = bbsoi.on(shape_resized)
return bbsoi
def quokka_polygons(size=None, extract=None):
"""
    Return example polygons on the standard example quokka image.
The result contains one polygon, covering the quokka's outline.
Added in 0.5.0. (Moved from ``imgaug.imgaug``.)
Parameters
----------
size : None or float or tuple of int or tuple of float, optional
Size of the output image on which the polygons are placed. If ``None``,
then the polygons are not projected to any new size (positions on the
original image are used). ``float`` s lead to relative size changes,
``int`` s to absolute sizes in pixels.
extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage
Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.
Returns
-------
imgaug.augmentables.polys.PolygonsOnImage
Example polygons on the quokka image.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
left, top = 0, 0
if extract is not None:
bb_extract = _quokka_normalize_extract(extract)
left = bb_extract.x1
top = bb_extract.y1
with open(_QUOKKA_ANNOTATIONS_FP, "r") as f:
json_dict = json.load(f)
polygons = []
for poly_json in json_dict["polygons"]:
polygons.append(
Polygon([(point["x"] - left, point["y"] - top)
for point in poly_json["keypoints"]])
)
if extract is not None:
shape = (bb_extract.height, bb_extract.width, 3)
else:
shape = (643, 960, 3)
psoi = PolygonsOnImage(polygons, shape=shape)
if size is not None:
shape_resized = _compute_resized_shape(shape, size)
psoi = psoi.on(shape_resized)
return psoi
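# A minimal sketch (illustration only) tying the annotation helpers above
# together: they accept the same ``size``/``extract`` arguments as quokka(),
# so the image and its annotations stay aligned when both are resized.
def _example_quokka_annotations():  # hypothetical helper, not part of imgaug
    img = quokka(size=0.5)
    kps = quokka_keypoints(size=0.5)        # keypoints projected onto the resized image
    bbs = quokka_bounding_boxes(size=0.5)   # bounding boxes projected the same way
    polys = quokka_polygons(size=0.5)       # polygons projected the same way
    assert kps.shape[0:2] == bbs.shape[0:2] == polys.shape[0:2] == img.shape[0:2]
    return img, kps, bbs, polys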
|
|
# -*- coding: utf-8 -*-
import logging
import re
import sys
import pytest
import cloudpickle
import six
from google.protobuf import json_format
import verta
from verta.environment import (
Python,
)
from verta._internal_utils._pip_requirements_utils import (
SPACY_MODEL_PATTERN,
get_pip_freeze,
pin_verta_and_cloudpickle,
)
def assert_parsed_reqs_match(parsed_reqs, original_reqs):
"""Assert that requirements match as expected
``pip freeze`` can return ``black==21.6b0`` while our parsing yields
``black==21.6.0b0``, though these are equivalent.
Parameters
----------
parsed_reqs : list of str
e.g. ``Python.requirements``
original_reqs : list of str
e.g. ``Python.read_pip_environment()``
"""
parsed_reqs = set(parsed_reqs)
original_reqs = set(original_reqs)
parsed_mapping = {
req.split("==")[0]: Python._req_spec_to_msg(req)
for req in parsed_reqs
}
original_mapping = {
req.split("==")[0]: Python._req_spec_to_msg(req)
for req in original_reqs
}
assert parsed_mapping == original_mapping
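# The "black==21.6b0" vs "black==21.6.0b0" note above follows PEP 440:
# trailing zero release segments are insignificant when comparing versions.
# A minimal sketch (illustration only; assumes the third-party ``packaging``
# distribution is installed):
def _example_equivalent_versions():  # hypothetical helper, not part of this test suite
    from packaging.version import Version
    assert Version("21.6b0") == Version("21.6.0b0")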
class TestObject:
def test_repr(self):
requirements = ["pytest=={}".format(pytest.__version__)]
constraints = ["six=={}".format(six.__version__)]
env_vars = ["HOME"]
env = Python(
requirements=requirements,
constraints=constraints,
env_vars=env_vars,
)
requirements = pin_verta_and_cloudpickle(requirements)
for line in requirements:
assert line in repr(env)
for line in constraints:
assert line in repr(env)
for line in env_vars:
assert line in repr(env)
def test_raw_repr(self):
requirements = [
"-e git+https://github.com/matplotlib/matplotlib.git@master#egg=matplotlib",
]
constraints = ["pytest > 6; python_version >= '2.7'"]
env = Python(
requirements=requirements,
constraints=constraints,
)
assert env._msg.python.raw_requirements
assert env._msg.python.raw_constraints
requirements = pin_verta_and_cloudpickle(requirements)
for line in requirements:
assert line in repr(env)
for line in constraints:
assert line in repr(env)
def test_no_autocapture(self):
env_ver = Python(requirements=[], _autocapture=False)
# protobuf message is empty
assert not json_format.MessageToDict(
env_ver._msg,
including_default_value_fields=False,
)
class TestReadPipEnvironment:
@pytest.mark.skipif(
not any(re.match(SPACY_MODEL_PATTERN + "==", req) for req in get_pip_freeze()),
reason="requires spaCy model pinned in environment (`python -m spacy download en_core_web_sm` with pip<20)",
)
def test_skip_spacy_models(self):
pattern = SPACY_MODEL_PATTERN + "=="
requirements = Python.read_pip_environment()
assert not any(re.match(pattern, req) for req in requirements)
class TestPythonVersion:
def test_py_ver(self):
env = Python(requirements=[])
assert env._msg.python.version.major == sys.version_info.major
assert env._msg.python.version.minor == sys.version_info.minor
assert env._msg.python.version.patch == sys.version_info.micro
class TestAptPackages:
def test_apt_packages(self):
env = Python([])
assert len(env.apt_packages) == 0
proto_with_empty_apt = env._as_env_proto()
assert len(proto_with_empty_apt.apt.packages) == 0
env.apt_packages = ["opencv"]
assert env.apt_packages == ["opencv"]
proto = env._as_env_proto()
assert list(proto.apt.packages) == ["opencv"]
env.apt_packages = None
proto_with_empty_apt = env._as_env_proto()
assert len(proto_with_empty_apt.apt.packages) == 0
env_initialized = Python([], apt_packages=["opencv"])
assert env_initialized.apt_packages == ["opencv"]
class TestParsedRequirements:
def test_from_env(self):
reqs = Python.read_pip_environment(
skip_options=True,
)
env = Python(requirements=reqs)
assert env._msg.python.requirements
assert not env._msg.python.raw_requirements
reqs = pin_verta_and_cloudpickle(reqs)
assert_parsed_reqs_match(env.requirements, reqs)
def test_from_files(self, requirements_file):
reqs = Python.read_pip_file(requirements_file.name)
env = Python(requirements=reqs)
assert env._msg.python.requirements
assert not env._msg.python.raw_requirements
reqs = pin_verta_and_cloudpickle(reqs)
assert_parsed_reqs_match(env.requirements, reqs)
def test_legacy_no_unsupported_lines(self, requirements_file_with_unsupported_lines):
"""Unsupported lines are filtered out with legacy `skip_options=True`"""
reqs = Python.read_pip_file(
requirements_file_with_unsupported_lines.name,
skip_options=True,
)
env = Python(requirements=reqs)
requirements = {req.library for req in env._msg.python.requirements}
# only has injected requirements
assert requirements == {"verta", "cloudpickle"}
def test_from_file_no_versions(self, requirements_file_without_versions):
reqs = Python.read_pip_file(requirements_file_without_versions.name)
env = Python(requirements=reqs)
assert env._msg.python.requirements
assert not env._msg.python.raw_requirements
parsed_libraries = set(req.split("==")[0] for req in env.requirements)
assert parsed_libraries == set(reqs) | {"verta", "cloudpickle"}
def test_torch_no_suffix(self):
# NOTE: this test takes too long for Hypothesis
requirement = "torch==1.8.1+cu102"
env_ver = Python([requirement])
assert requirement not in env_ver.requirements
assert requirement.split("+")[0] in env_ver.requirements
def test_torch_no_suffix_autocapture(self):
torch = pytest.importorskip("torch")
version = torch.__version__
if "+" not in version:
pytest.skip("no metadata on version number")
requirement = "torch=={}".format(version)
env_ver = Python(["torch"])
assert requirement not in env_ver.requirements
assert requirement.split("+")[0] in env_ver.requirements
def test_inject_verta_cloudpickle(self):
env = Python(requirements=["pytest"])
requirements = {req.library for req in env._msg.python.requirements}
assert "verta" in requirements
assert "cloudpickle" in requirements
class TestRawRequirements:
def test_unsupported_lines(
self, requirements_file_with_unsupported_lines, caplog
):
"""Requirements with unsupported lines get logged raw."""
reqs = Python.read_pip_file(requirements_file_with_unsupported_lines.name)
# each line gets logged raw
for req in reqs:
with caplog.at_level(logging.INFO, logger="verta"):
env = Python(requirements=[req])
assert "failed to manually parse requirements; falling back to capturing raw contents" in caplog.text
caplog.clear()
assert not env._msg.python.requirements
assert env._msg.python.raw_requirements
expected_reqs = pin_verta_and_cloudpickle([req])
assert env.requirements == expected_reqs
def test_inject_verta_cloudpickle(self):
reqs = [
"--no-binary :all:",
]
env = Python(requirements=reqs)
assert not env._msg.python.requirements
assert env._msg.python.raw_requirements
assert env.requirements == reqs + [
"verta=={}".format(verta.__version__),
"cloudpickle=={}".format(cloudpickle.__version__),
]
class TestParsedConstraints:
def test_from_file(self, requirements_file):
reqs = Python.read_pip_file(requirements_file.name)
env = Python(requirements=[], constraints=reqs)
assert env._msg.python.constraints
assert not env._msg.python.raw_constraints
assert_parsed_reqs_match(env.constraints, reqs)
class TestRawConstraints:
def test_unsupported_lines(
self, requirements_file_with_unsupported_lines, caplog
):
"""Constraints with unsupported lines get logged raw."""
constraints = Python.read_pip_file(requirements_file_with_unsupported_lines.name)
# each line gets logged raw
for constraint in constraints:
with caplog.at_level(logging.INFO, logger="verta"):
env = Python(requirements=[], constraints=[constraint])
assert "failed to manually parse constraints; falling back to capturing raw contents" in caplog.text
caplog.clear()
assert not env._msg.python.constraints
assert env._msg.python.raw_constraints
expected_constraints = [constraint]
assert env.constraints == expected_constraints
def test_from_file_no_versions(
self, requirements_file_without_versions, caplog
):
constraints = Python.read_pip_file(requirements_file_without_versions.name)
with caplog.at_level(logging.INFO, logger="verta"):
env = Python(requirements=[], constraints=constraints)
assert "failed to manually parse constraints; falling back to capturing raw contents" in caplog.text
assert "missing its version specifier" in caplog.text
assert not env._msg.python.constraints
assert env._msg.python.raw_constraints
assert env._msg.python.raw_constraints == requirements_file_without_versions.read()
assert set(env.constraints) == set(constraints)
class TestVCSInstalledVerta:
@pytest.mark.parametrize(
"requirements",
[
["-e git+git@github.com:VertaAI/modeldb.git@master#egg=verta&subdirectory=client/verta"],
["-e git+https://github.com/VertaAI/modeldb.git@master#egg=verta&subdirectory=client/verta"],
["-e git+ssh://git@github.com/VertaAI/modeldb.git@master#egg=verta&subdirectory=client/verta"],
],
)
def test_vcs_installed_verta(self, requirements):
vcs_verta_req = requirements[0]
pinned_verta_req = "verta=={}".format(verta.__version__)
env = Python(requirements=requirements)
assert vcs_verta_req not in env.requirements
assert pinned_verta_req in env.requirements
|
|
from JumpScale import j
import os
import time
import stat
from tarfile import filemode as _filemode
_months_map = {1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun', 7: 'Jul',
8: 'Aug', 9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
class FilesystemBase(object):
def __init__(self, root, cmd_channel):
"""
- (str) root: the user "real" home directory (e.g. '/home/user')
- (instance) cmd_channel: the FTPHandler class instance
"""
# Set initial current working directory.
# By default initial cwd is set to "/" to emulate a chroot jail.
# If a different behavior is desired (e.g. initial cwd = root,
# to reflect the real filesystem) users overriding this class
# are responsible to set _cwd attribute as necessary.
self.cwd = '/'
self.root = root
self.ftproot = root
self.cmd_channel = cmd_channel
self.handler = None
# --- Pathname / conversion utilities
def realpath(self, path):
return self.ftp2fs(path)
def joinPaths(self, *args):
out = ""
for arg in args:
out += "%s/" % arg
out = out.replace("//", "/")
out = out.replace("//", "/")
if len(out) > 2:
out = out.rstrip("/")
return out
def ftpnorm(self, ftppath):
if ftppath.strip() == "":
ftppath = self.joinPaths(self.ftproot, self.cwd)
ftppathn = os.path.normpath(ftppath)
ftppathn = ftppathn.replace("\\", "/").strip()
# print "%s -> %s" % (ftppath,ftppathn)
if ftppathn[0] != "/":
ftppathn = self.joinPaths(self.ftproot, self.cwd, ftppathn)
return ftppathn
def _removeFtproot(self, ftppath):
if ftppath.find(self.ftproot) == 0:
ftppath = ftppath[len(self.ftproot) + 1:]
elif ftppath.find(self.ftproot) != -1:
ftppath = self.fs2ftp(ftppath)
ftppath = ftppath[len(self.ftproot) + 1:]
else:
raise RuntimeError("ftppath needs to start with self.ftproot")
return ftppath
def ftp2fs(self, ftppath):
ftppath = self.ftpnorm(ftppath)
ftppath = self._removeFtproot(ftppath)
# print "REMOVEROOT:%s"%ftppath
result = j.system.fs.joinPaths(self.root, ftppath)
# print "ISDIRPOST:%s"%result
return result
# if os.path.normpath(self.root) == os.sep:
# return os.path.normpath(self.ftpnorm(ftppath))
# else:
# p = self.ftpnorm(ftppath)[1:]
# return os.path.normpath(os.path.join(self.root, p))
# def ftpnorm(self, ftppath):
# """Normalize a "virtual" ftp pathname (tipically the raw string
# coming from client) depending on the current working directory.
# Example (having "/foo" as current working directory):
# >>> ftpnorm('bar')
# '/foo/bar'
# Note: directory separators are system independent ("/").
# Pathname returned is always absolutized.
# """
# if os.path.isabs(ftppath):
# p = os.path.normpath(ftppath)
# else:
# p = os.path.normpath(os.path.join(self.cwd, ftppath))
# normalize string in a standard web-path notation having '/'
# as separator.
# p = p.replace("\\", "/")
# os.path.normpath supports UNC paths (e.g. "//a/b/c") but we
# don't need them. In case we get an UNC path we collapse
# redundant separators appearing at the beginning of the string
# while p[:2] == '//':
# p = p[1:]
# Anti path traversal: don't trust user input, in the event
# that self.cwd is not absolute, return "/" as a safety measure.
# This is for extra protection, maybe not really necessary.
# if not os.path.isabs(p):
# raise RuntimeError("ftpnorm error, possible security breach")
# p = "/"
# return p
# def ftp2fs(self, ftppath):
# """Translate a "virtual" ftp pathname (tipically the raw string
# coming from client) into equivalent absolute "real" filesystem
# pathname.
# Example (having "/home/user" as root directory):
# >>> ftp2fs("foo")
# '/home/user/foo'
# Note: directory separators are system dependent.
# """
# raise NotImplementedError
# def fs2ftp(self, fspath):
# """Translate a "real" filesystem pathname into equivalent
# absolute "virtual" ftp pathname depending on the user's
# root directory.
# Example (having "/home/user" as root directory):
# >>> fs2ftp("/home/user/foo")
# '/foo'
# As for ftpnorm, directory separators are system independent
# ("/") and pathname returned is always absolutized.
# """
# raise NotImplementedError
def validpath(self, path):
"""
"""
raise RuntimeError("not implemented")
def openfile(self, filename, mode):
"""Open a file returning its handler"""
raise RuntimeError("not implemented")
def mkstemp(self, suffix='', prefix='', dir=None, mode='wb'):
"""A wrap around tempfile.mkstemp creating a file with a unique
name. Unlike mkstemp it returns an object with a file-like
interface.
"""
raise RuntimeError("not implemented")
def chdir(self, ftppath):
"""Change the current directory."""
raise RuntimeError("not implemented")
def mkdir(self, ftppath):
"""Create the specified directory."""
raise RuntimeError("not implemented")
def listdir(self, path):
"""List the content of a directory."""
raise RuntimeError("not implemented")
def rmdir(self, path):
"""Remove the specified directory."""
raise RuntimeError("not implemented")
def remove(self, path):
"""Remove the specified file."""
raise RuntimeError("not implemented")
def rename(self, src, dst):
"""Rename the specified src file to the dst filename."""
raise RuntimeError("not implemented")
def chmod(self, path, mode):
"""Change file/directory mode."""
raise NotImplementedError
def stat(self, path):
"""Perform a stat() system call on the given path."""
return os.stat(path)
def lstat(self, path):
"""Like stat but does not follow symbolic links."""
raise NotImplementedError
if not hasattr(os, 'lstat'):
lstat = stat
# --- Wrapper methods around os.path.* calls
def isfile(self, path):
"""Return True if path is a file."""
raise NotImplementedError
def islink(self, path):
"""Return True if path is a symbolic link."""
raise NotImplementedError
def isdir(self, path):
"""Return True if path is a directory."""
raise NotImplementedError
def getsize(self, path):
"""Return the size of the specified file in bytes."""
raise NotImplementedError
def getmtime(self, path):
"""Return the last modified time as a number of seconds since
the epoch."""
raise NotImplementedError
# def realpath(self, path):
#"""Return the canonical version of path eliminating any
# symbolic links encountered in the path (if they are
# supported by the operating system).
#"""
# return os.path.realpath(path)
def lexists(self, path):
"""Return True if path refers to an existing path, including
a broken or circular symbolic link.
"""
return os.path.lexists(path)
def get_user_by_uid(self, uid):
"""Return the username associated with user id.
If this can't be determined return raw uid instead.
On Windows just return "owner".
"""
raise NotImplementedError
def get_group_by_gid(self, gid):
"""Return the groupname associated with group id.
If this can't be determined return raw gid instead.
On Windows just return "group".
"""
raise NotImplementedError
def readlink(self, path):
"""Return a string representing the path to which a
symbolic link points.
"""
raise NotImplementedError
def get_list_dir(self, path):
""""Return an iterator object that yields a directory listing
in a form suitable for LIST command.
"""
if self.isdir(path):
listing = sorted(self.listdir(path))
return self.format_list(path, listing)
# if path is a file or a symlink we return information about it
else:
basedir, filename = os.path.split(path)
self.lstat(path) # raise exc in case of problems
return self.format_list(basedir, [filename])
def format_list(self, basedir, listing, ignore_err=True):
"""Return an iterator object that yields the entries of given
directory emulating the "/bin/ls -lA" UNIX command output.
- (str) basedir: the absolute dirname.
- (list) listing: the names of the entries in basedir
- (bool) ignore_err: when False raise exception if os.lstat()
call fails.
On platforms which do not support the pwd and grp modules (such
as Windows), ownership is printed as "owner" and "group" as a
default, and number of hard links is always "1". On UNIX
systems, the actual owner, group, and number of links are
printed.
This is how output appears to client:
-rw-rw-rw- 1 owner group 7045120 Sep 02 3:47 music.mp3
drwxrwxrwx 1 owner group 0 Aug 31 18:50 e-books
-rw-rw-rw- 1 owner group 380 Sep 02 3:40 module.py
"""
if self.cmd_channel.use_gmt_times:
timefunc = time.gmtime
else:
timefunc = time.localtime
now = time.time()
for basename in listing:
file = os.path.join(basedir, basename)
try:
st = self.lstat(file)
except OSError:
if ignore_err:
continue
raise
perms = _filemode(st.st_mode) # permissions
nlinks = st.st_nlink # number of links to inode
if not nlinks: # non-posix system, let's use a bogus value
nlinks = 1
size = st.st_size # file size
uname = self.get_user_by_uid(st.st_uid)
gname = self.get_group_by_gid(st.st_gid)
mtime = timefunc(st.st_mtime)
            # if modification time > 6 months shows "month year"
# else "month hh:mm"; this matches proftpd format, see:
# http://code.google.com/p/pyftpdlib/issues/detail?id=187
if (now - st.st_mtime) > 180 * 24 * 60 * 60:
fmtstr = "%d %Y"
else:
fmtstr = "%d %H:%M"
try:
mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
time.strftime(fmtstr, mtime))
except ValueError:
# It could be raised if last mtime happens to be too
# old (prior to year 1900) in which case we return
# the current time as last mtime.
mtime = timefunc()
mtimestr = "%s %s" % (_months_map[mtime.tm_mon],
time.strftime("%d %H:%M", mtime))
# if the file is a symlink, resolve it, e.g. "symlink -> realfile"
if stat.S_ISLNK(st.st_mode) and hasattr(self, 'readlink'):
basename = basename + " -> " + self.readlink(file)
# formatting is matched with proftpd ls output
yield "%s %3s %-8s %-8s %8s %s %s\r\n" % (perms, nlinks, uname, gname,
size, mtimestr, basename)
def format_mlsx(self, basedir, listing, perms, facts, ignore_err=False):
"""Return an iterator object that yields the entries of a given
directory or of a single file in a form suitable with MLSD and
MLST commands.
Every entry includes a list of "facts" referring the listed
element. See RFC-3659, chapter 7, to see what every single
fact stands for.
- (str) basedir: the absolute dirname.
- (list) listing: the names of the entries in basedir
- (str) perms: the string referencing the user permissions.
- (str) facts: the list of "facts" to be returned.
- (bool) ignore_err: when False raise exception if os.stat()
call fails.
Note that "facts" returned may change depending on the platform
and on what user specified by using the OPTS command.
This is how output could appear to the client issuing
a MLSD request:
type=file;size=156;perm=r;modify=20071029155301;unique=801cd2; music.mp3
type=dir;size=0;perm=el;modify=20071127230206;unique=801e33; ebooks
type=file;size=211;perm=r;modify=20071103093626;unique=801e32; module.py
"""
try:
# print "format_mlsx:%s"%basedir
if self.cmd_channel.use_gmt_times:
timefunc = time.gmtime
else:
timefunc = time.localtime
permdir = ''.join([x for x in perms if x not in 'arw'])
permfile = ''.join([x for x in perms if x not in 'celmp'])
if ('w' in perms) or ('a' in perms) or ('f' in perms):
permdir += 'c'
if 'd' in perms:
permdir += 'p'
basedir2 = self.ftp2fs(basedir)
for basename in listing:
file = os.path.join(basedir2, basename)
retfacts = dict()
# in order to properly implement 'unique' fact (RFC-3659,
# chapter 7.5.2) we are supposed to follow symlinks, hence
try:
st = os.stat(file)
except OSError:
if ignore_err:
print "error for %s, cannot list (stat)" % file
continue
raise
# type + perm
if stat.S_ISDIR(st.st_mode):
if 'type' in facts:
if basename == '.':
retfacts['type'] = 'cdir'
elif basename == '..':
retfacts['type'] = 'pdir'
else:
retfacts['type'] = 'dir'
if 'perm' in facts:
retfacts['perm'] = permdir
else:
if 'type' in facts:
retfacts['type'] = 'file'
if 'perm' in facts:
retfacts['perm'] = permfile
if 'size' in facts:
retfacts['size'] = st.st_size # file size
# last modification time
if 'modify' in facts:
try:
retfacts['modify'] = time.strftime("%Y%m%d%H%M%S",
timefunc(st.st_mtime))
# it could be raised if last mtime happens to be too old
# (prior to year 1900)
except ValueError:
pass
if 'create' in facts:
# on Windows we can provide also the creation time
try:
retfacts['create'] = time.strftime("%Y%m%d%H%M%S",
timefunc(st.st_ctime))
except ValueError:
pass
# UNIX only
if 'unix.mode' in facts:
retfacts['unix.mode'] = oct(st.st_mode & 0o777)
if 'unix.uid' in facts:
retfacts['unix.uid'] = st.st_uid
if 'unix.gid' in facts:
retfacts['unix.gid'] = st.st_gid
# We provide unique fact (see RFC-3659, chapter 7.5.2) on
# posix platforms only; we get it by mixing st_dev and
                # st_ino values which should be enough to guarantee
                # uniqueness for the file listed.
# The same approach is used by pure-ftpd.
# Implementors who want to provide unique fact on other
# platforms should use some platform-specific method (e.g.
                # on Windows NTFS filesystems MFT records could be used).
if 'unique' in facts:
retfacts['unique'] = "%xg%x" % (st.st_dev, st.st_ino)
# facts can be in any order but we sort them by name
factstring = "".join(["%s=%s;" % (x, retfacts[x])
for x in sorted(retfacts.keys())])
# print "FACT:%s" % factstring+" "+basename
yield "%s %s\r\n" % (factstring, basename)
except Exception as err:
print err
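# A minimal sketch (illustration only) of a concrete subclass wiring the
# abstract hooks above to the local filesystem so that get_list_dir() /
# format_list() can emit "/bin/ls -lA"-style rows. _DummyChannel and
# _LocalFilesystem are hypothetical names used purely for this example and
# assume a POSIX host (pwd/grp available).
class _DummyChannel(object):
    use_gmt_times = False

class _LocalFilesystem(FilesystemBase):
    def isdir(self, path):
        return os.path.isdir(path)
    def listdir(self, path):
        return os.listdir(path)
    def lstat(self, path):
        return os.lstat(path)
    def readlink(self, path):
        return os.readlink(path)
    def get_user_by_uid(self, uid):
        import pwd
        try:
            return pwd.getpwuid(uid).pw_name
        except KeyError:
            return uid
    def get_group_by_gid(self, gid):
        import grp
        try:
            return grp.getgrgid(gid).gr_name
        except KeyError:
            return gid

# Example (not executed here):
#   fs = _LocalFilesystem('/tmp', _DummyChannel())
#   for row in fs.get_list_dir('/tmp'):
#       print row,   # one ls-style line per entry, CRLF-terminated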
|
|
# Copyright (c) 2004-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""diagram objects
"""
from logilab import astng
from pylint.pyreverse.utils import is_interface, FilterMixIn
def set_counter(value):
"""Figure counter (re)set"""
Figure._UID_COUNT = value
class Figure:
"""base class for counter handling"""
_UID_COUNT = 0
def __init__(self):
Figure._UID_COUNT += 1
self.fig_id = Figure._UID_COUNT
class Relationship(Figure):
"""a relation ship from an object in the diagram to another
"""
def __init__(self, from_object, to_object, relation_type, name=None):
Figure.__init__(self)
self.from_object = from_object
self.to_object = to_object
self.type = relation_type
self.name = name
class DiagramEntity(Figure):
"""a diagram object, i.e. a label associated to an astng node
"""
def __init__(self, title='No name', node=None):
Figure.__init__(self)
self.title = title
self.node = node
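# A minimal sketch (illustration only; real callers pass astng nodes) of the
# shared figure counter handled by the classes above:
def _example_figure_ids():  # hypothetical helper, not part of pyreverse
    set_counter(0)
    source = DiagramEntity('ClassA')
    target = DiagramEntity('ClassB')
    link = Relationship(source, target, 'specialization')
    assert (source.fig_id, target.fig_id, link.fig_id) == (1, 2, 3)
    return link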
class ClassDiagram(Figure, FilterMixIn):
"""main class diagram handling
"""
TYPE = 'class'
def __init__(self, title, mode):
FilterMixIn.__init__(self, mode)
Figure.__init__(self)
self.title = title
self.objects = []
self.relationships = {}
self._nodes = {}
self.depends = []
def add_relationship(self, from_object, to_object,
relation_type, name=None):
"""create a relation ship
"""
rel = Relationship(from_object, to_object, relation_type, name)
self.relationships.setdefault(relation_type, []).append(rel)
def get_relationship(self, from_object, relation_type):
"""return a relation ship or None
"""
for rel in self.relationships.get(relation_type, ()):
if rel.from_object is from_object:
return rel
raise KeyError(relation_type)
def get_attrs(self, node):
"""return visible attributes, possibly with class name"""
attrs = []
for node_name, ass_nodes in node.instance_attrs_type.items() + \
node.locals_type.items():
if not self.show_attr(node_name):
continue
names = self.class_names(ass_nodes)
if names:
node_name = "%s : %s" % (node_name, ", ".join(names))
attrs.append(node_name)
return attrs
def get_methods(self, node):
"""return visible methods"""
return [m for m in node.values()
if isinstance(m, astng.Function) and self.show_attr(m.name)]
def add_object(self, title, node):
"""create a diagram object
"""
assert node not in self._nodes
ent = DiagramEntity(title, node)
self._nodes[node] = ent
self.objects.append(ent)
def class_names(self, nodes):
"""return class names if needed in diagram"""
names = []
for ass_node in nodes:
if isinstance(ass_node, astng.Instance):
ass_node = ass_node._proxied
if isinstance(ass_node, astng.Class) \
and hasattr(ass_node, "name") and not self.has_node(ass_node):
if ass_node.name not in names:
ass_name = ass_node.name
names.append(ass_name)
return names
def nodes(self):
"""return the list of underlying nodes
"""
return self._nodes.keys()
def has_node(self, node):
"""return true if the given node is included in the diagram
"""
return node in self._nodes
def object_from_node(self, node):
"""return the diagram object mapped to node
"""
return self._nodes[node]
def classes(self):
"""return all class nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astng.Class)]
def classe(self, name):
"""return a class by its name, raise KeyError if not found
"""
for klass in self.classes():
if klass.node.name == name:
return klass
raise KeyError(name)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram
"""
for obj in self.classes():
node = obj.node
obj.attrs = self.get_attrs(node)
obj.methods = self.get_methods(node)
# shape
if is_interface(node):
obj.shape = 'interface'
else:
obj.shape = 'class'
# inheritance link
for par_node in node.ancestors(recurs=False):
try:
par_obj = self.object_from_node(par_node)
self.add_relationship(obj, par_obj, 'specialization')
except KeyError:
continue
# implements link
for impl_node in node.implements:
try:
impl_obj = self.object_from_node(impl_node)
self.add_relationship(obj, impl_obj, 'implements')
except KeyError:
continue
# associations link
for name, values in node.instance_attrs_type.items() + \
node.locals_type.items():
for value in values:
if value is astng.YES:
continue
if isinstance( value, astng.Instance):
value = value._proxied
try:
ass_obj = self.object_from_node(value)
self.add_relationship(ass_obj, obj, 'association', name)
except KeyError:
continue
class PackageDiagram(ClassDiagram):
"""package diagram handling
"""
TYPE = 'package'
def modules(self):
"""return all module nodes in the diagram"""
return [o for o in self.objects if isinstance(o.node, astng.Module)]
def module(self, name):
"""return a module by its name, raise KeyError if not found
"""
for mod in self.modules():
if mod.node.name == name:
return mod
raise KeyError(name)
def get_module(self, name, node):
"""return a module by its name, looking also for relative imports;
raise KeyError if not found
"""
for mod in self.modules():
mod_name = mod.node.name
if mod_name == name:
return mod
#search for fullname of relative import modules
package = node.root().name
if mod_name == "%s.%s" % (package, name):
return mod
if mod_name == "%s.%s" % (package.rsplit('.', 1)[0], name):
return mod
raise KeyError(name)
def add_from_depend(self, node, from_module):
"""add dependencies created by from-imports
"""
mod_name = node.root().name
obj = self.module( mod_name )
if from_module not in obj.node.depends:
obj.node.depends.append(from_module)
def extract_relationships(self):
"""extract relation ships between nodes in the diagram
"""
ClassDiagram.extract_relationships(self)
for obj in self.classes():
# ownership
try:
mod = self.object_from_node(obj.node.root())
self.add_relationship(obj, mod, 'ownership')
except KeyError:
continue
for obj in self.modules():
obj.shape = 'package'
# dependencies
for dep_name in obj.node.depends:
try:
dep = self.get_module(dep_name, obj.node)
except KeyError:
continue
self.add_relationship(obj, dep, 'depends')
|
|
from sympy import AccumBounds, Symbol, floor, nan, oo, E, symbols, ceiling, pi, \
Rational, Float, I, sin, exp, log, factorial, frac
from sympy.utilities.pytest import XFAIL
x = Symbol('x')
i = Symbol('i', imaginary=True)
y = Symbol('y', real=True)
k, n = symbols('k,n', integer=True)
def test_floor():
assert floor(nan) == nan
assert floor(oo) == oo
assert floor(-oo) == -oo
assert floor(0) == 0
assert floor(1) == 1
assert floor(-1) == -1
assert floor(E) == 2
assert floor(-E) == -3
assert floor(2*E) == 5
assert floor(-2*E) == -6
assert floor(pi) == 3
assert floor(-pi) == -4
assert floor(Rational(1, 2)) == 0
assert floor(-Rational(1, 2)) == -1
assert floor(Rational(7, 3)) == 2
assert floor(-Rational(7, 3)) == -3
assert floor(Float(17.0)) == 17
assert floor(-Float(17.0)) == -17
assert floor(Float(7.69)) == 7
assert floor(-Float(7.69)) == -8
assert floor(I) == I
assert floor(-I) == -I
e = floor(i)
assert e.func is floor and e.args[0] == i
assert floor(oo*I) == oo*I
assert floor(-oo*I) == -oo*I
assert floor(2*I) == 2*I
assert floor(-2*I) == -2*I
assert floor(I/2) == 0
assert floor(-I/2) == -I
assert floor(E + 17) == 19
assert floor(pi + 2) == 5
assert floor(E + pi) == floor(E + pi)
assert floor(I + pi) == floor(I + pi)
assert floor(floor(pi)) == 3
assert floor(floor(y)) == floor(y)
assert floor(floor(x)) == floor(floor(x))
assert floor(x) == floor(x)
assert floor(2*x) == floor(2*x)
assert floor(k*x) == floor(k*x)
assert floor(k) == k
assert floor(2*k) == 2*k
assert floor(k*n) == k*n
assert floor(k/2) == floor(k/2)
assert floor(x + y) == floor(x + y)
assert floor(x + 3) == floor(x + 3)
assert floor(x + k) == floor(x + k)
assert floor(y + 3) == floor(y) + 3
assert floor(y + k) == floor(y) + k
assert floor(3 + I*y + pi) == 6 + floor(y)*I
assert floor(k + n) == k + n
assert floor(x*I) == floor(x*I)
assert floor(k*I) == k*I
assert floor(Rational(23, 10) - E*I) == 2 - 3*I
assert floor(sin(1)) == 0
assert floor(sin(-1)) == -1
assert floor(exp(2)) == 7
assert floor(log(8)/log(2)) != 2
assert int(floor(log(8)/log(2)).evalf(chop=True)) == 3
assert floor(factorial(50)/exp(1)) == \
11188719610782480504630258070757734324011354208865721592720336800
assert (floor(y) <= y) == True
assert (floor(y) > y) == False
assert (floor(x) <= x).is_Relational # x could be non-real
assert (floor(x) > x).is_Relational
assert (floor(x) <= y).is_Relational # arg is not same as rhs
assert (floor(x) > y).is_Relational
def test_ceiling():
assert ceiling(nan) == nan
assert ceiling(oo) == oo
assert ceiling(-oo) == -oo
assert ceiling(0) == 0
assert ceiling(1) == 1
assert ceiling(-1) == -1
assert ceiling(E) == 3
assert ceiling(-E) == -2
assert ceiling(2*E) == 6
assert ceiling(-2*E) == -5
assert ceiling(pi) == 4
assert ceiling(-pi) == -3
assert ceiling(Rational(1, 2)) == 1
assert ceiling(-Rational(1, 2)) == 0
assert ceiling(Rational(7, 3)) == 3
assert ceiling(-Rational(7, 3)) == -2
assert ceiling(Float(17.0)) == 17
assert ceiling(-Float(17.0)) == -17
assert ceiling(Float(7.69)) == 8
assert ceiling(-Float(7.69)) == -7
assert ceiling(I) == I
assert ceiling(-I) == -I
e = ceiling(i)
assert e.func is ceiling and e.args[0] == i
assert ceiling(oo*I) == oo*I
assert ceiling(-oo*I) == -oo*I
assert ceiling(2*I) == 2*I
assert ceiling(-2*I) == -2*I
assert ceiling(I/2) == I
assert ceiling(-I/2) == 0
assert ceiling(E + 17) == 20
assert ceiling(pi + 2) == 6
assert ceiling(E + pi) == ceiling(E + pi)
assert ceiling(I + pi) == ceiling(I + pi)
assert ceiling(ceiling(pi)) == 4
assert ceiling(ceiling(y)) == ceiling(y)
assert ceiling(ceiling(x)) == ceiling(ceiling(x))
assert ceiling(x) == ceiling(x)
assert ceiling(2*x) == ceiling(2*x)
assert ceiling(k*x) == ceiling(k*x)
assert ceiling(k) == k
assert ceiling(2*k) == 2*k
assert ceiling(k*n) == k*n
assert ceiling(k/2) == ceiling(k/2)
assert ceiling(x + y) == ceiling(x + y)
assert ceiling(x + 3) == ceiling(x + 3)
assert ceiling(x + k) == ceiling(x + k)
assert ceiling(y + 3) == ceiling(y) + 3
assert ceiling(y + k) == ceiling(y) + k
assert ceiling(3 + pi + y*I) == 7 + ceiling(y)*I
assert ceiling(k + n) == k + n
assert ceiling(x*I) == ceiling(x*I)
assert ceiling(k*I) == k*I
assert ceiling(Rational(23, 10) - E*I) == 3 - 2*I
assert ceiling(sin(1)) == 1
assert ceiling(sin(-1)) == 0
assert ceiling(exp(2)) == 8
assert ceiling(-log(8)/log(2)) != -2
assert int(ceiling(-log(8)/log(2)).evalf(chop=True)) == -3
assert ceiling(factorial(50)/exp(1)) == \
11188719610782480504630258070757734324011354208865721592720336801
assert (ceiling(y) >= y) == True
assert (ceiling(y) < y) == False
assert (ceiling(x) >= x).is_Relational # x could be non-real
assert (ceiling(x) < x).is_Relational
assert (ceiling(x) >= y).is_Relational # arg is not same as rhs
assert (ceiling(x) < y).is_Relational
def test_frac():
assert isinstance(frac(x), frac)
assert frac(oo) == AccumBounds(0, 1)
assert frac(-oo) == AccumBounds(0, 1)
assert frac(n) == 0
assert frac(nan) == nan
assert frac(Rational(4, 3)) == Rational(1, 3)
assert frac(-Rational(4, 3)) == Rational(2, 3)
r = Symbol('r', real=True)
assert frac(I*r) == I*frac(r)
assert frac(1 + I*r) == I*frac(r)
assert frac(0.5 + I*r) == 0.5 + I*frac(r)
assert frac(n + I*r) == I*frac(r)
assert frac(n + I*k) == 0
assert frac(x + I*x) == frac(x + I*x)
assert frac(x + I*n) == frac(x)
assert frac(x).rewrite(floor) == x - floor(x)
def test_series():
x, y = symbols('x,y')
assert floor(x).nseries(x, y, 100) == floor(y)
assert ceiling(x).nseries(x, y, 100) == ceiling(y)
assert floor(x).nseries(x, pi, 100) == 3
assert ceiling(x).nseries(x, pi, 100) == 4
assert floor(x).nseries(x, 0, 100) == 0
assert ceiling(x).nseries(x, 0, 100) == 1
assert floor(-x).nseries(x, 0, 100) == -1
assert ceiling(-x).nseries(x, 0, 100) == 0
@XFAIL
def test_issue_4149():
assert floor(3 + pi*I + y*I) == 3 + floor(pi + y)*I
assert floor(3*I + pi*I + y*I) == floor(3 + pi + y)*I
assert floor(3 + E + pi*I + y*I) == 5 + floor(pi + y)*I
|
|
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
    Replaces the longest continuous zero-sequence with "::", removes
    leading zeroes, and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
        A compressed IPv6 address, or the unpacked IPv4 address if
        unpack_ipv4 is set and the value is an IPv4-mapped address.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
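# A minimal usage sketch (illustration only) of the compression performed above:
def _example_clean_ipv6_address():  # hypothetical helper, not part of Django
    assert clean_ipv6_address('2001:0::0:01') == '2001::1'
    assert clean_ipv6_address('::ffff:0a0a:0a0a') == '::ffff:10.10.10.10'
    assert clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True) == '10.10.10.10'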
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
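# A minimal sketch (illustration only) of the expansion helpers above:
def _example_explode_shorthand():  # hypothetical helper, not part of Django
    assert _is_shorthand_ip('2001:db8::1') is True
    assert _explode_shorthand_ip_string('2001:db8::1') == \
        '2001:0db8:0000:0000:0000:0000:0000:0001'
    assert _is_shorthand_ip('2001:0db8:0000:0000:0000:0000:0000:0001') is False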
|
|
from __future__ import print_function
import os
import sys
from rosdistro import get_distribution_cache
from rosdistro import get_index
from ros_buildfarm.common import get_binarydeb_job_name
from ros_buildfarm.common import get_debian_package_name
from ros_buildfarm.common import get_github_project_url
from ros_buildfarm.common import get_release_binary_view_name
from ros_buildfarm.common import get_release_binary_view_prefix
from ros_buildfarm.common import get_release_job_prefix
from ros_buildfarm.common import get_release_source_view_name
from ros_buildfarm.common import get_release_view_name
from ros_buildfarm.common \
import get_repositories_and_script_generating_key_files
from ros_buildfarm.common import get_sourcedeb_job_name
from ros_buildfarm.common import JobValidationError
from ros_buildfarm.common import write_groovy_script_and_configs
from ros_buildfarm.config import get_distribution_file
from ros_buildfarm.config import get_index as get_config_index
from ros_buildfarm.config import get_release_build_files
from ros_buildfarm.git import get_repository
from ros_buildfarm.jenkins import configure_job
from ros_buildfarm.jenkins import configure_management_view
from ros_buildfarm.jenkins import configure_view
from ros_buildfarm.jenkins import connect
from ros_buildfarm.jenkins import remove_jobs
from ros_buildfarm.templates import expand_template
def configure_release_jobs(
config_url, rosdistro_name, release_build_name, groovy_script=None):
"""
Configure all Jenkins release jobs.
L{configure_release_job} will be invoked for every released package and
target which matches the build file criteria.
Additionally a job to import Debian packages into the Debian repository is
created.
"""
config = get_config_index(config_url)
build_files = get_release_build_files(config, rosdistro_name)
build_file = build_files[release_build_name]
index = get_index(config.rosdistro_index_url)
# get targets
platforms = []
for os_name in build_file.targets.keys():
for os_code_name in build_file.targets[os_name].keys():
platforms.append((os_name, os_code_name))
print('The build file contains the following targets:')
for os_name, os_code_name in platforms:
print(' - %s %s: %s' % (os_name, os_code_name, ', '.join(
build_file.targets[os_name][os_code_name])))
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
print('No distribution file matches the build file')
return
pkg_names = dist_file.release_packages.keys()
filtered_pkg_names = build_file.filter_packages(pkg_names)
explicitly_ignored_pkg_names = set(pkg_names) - set(filtered_pkg_names)
if explicitly_ignored_pkg_names:
print(('The following packages are being %s because of ' +
'white-/blacklisting:') %
('ignored' if build_file.skip_ignored_packages else 'disabled'))
for pkg_name in sorted(explicitly_ignored_pkg_names):
print(' -', pkg_name)
dist_cache = None
if build_file.notify_maintainers or \
build_file.abi_incompatibility_assumed or \
explicitly_ignored_pkg_names:
dist_cache = get_distribution_cache(index, rosdistro_name)
if explicitly_ignored_pkg_names:
# get direct dependencies from distro cache for each package
direct_dependencies = {}
for pkg_name in pkg_names:
direct_dependencies[pkg_name] = _get_direct_dependencies(
pkg_name, dist_cache, pkg_names) or set([])
# find recursive downstream deps for all explicitly ignored packages
ignored_pkg_names = set(explicitly_ignored_pkg_names)
while True:
implicitly_ignored_pkg_names = _get_downstream_package_names(
ignored_pkg_names, direct_dependencies)
if implicitly_ignored_pkg_names - ignored_pkg_names:
ignored_pkg_names |= implicitly_ignored_pkg_names
continue
break
implicitly_ignored_pkg_names = \
ignored_pkg_names - explicitly_ignored_pkg_names
if implicitly_ignored_pkg_names:
print(('The following packages are being %s because their ' +
'dependencies are being ignored:') % ('ignored'
if build_file.skip_ignored_packages else 'disabled'))
for pkg_name in sorted(implicitly_ignored_pkg_names):
print(' -', pkg_name)
filtered_pkg_names = \
set(filtered_pkg_names) - implicitly_ignored_pkg_names
jenkins = connect(config.jenkins_url)
configure_import_package_job(
config_url, rosdistro_name, release_build_name,
config=config, build_file=build_file, jenkins=jenkins)
configure_sync_packages_to_main_job(
config_url, rosdistro_name, release_build_name,
config=config, build_file=build_file, jenkins=jenkins)
for os_name, os_code_name in platforms:
for arch in sorted(build_file.targets[os_name][os_code_name]):
configure_sync_packages_to_testing_job(
config_url, rosdistro_name, release_build_name,
os_code_name, arch,
config=config, build_file=build_file, jenkins=jenkins)
targets = []
for os_name, os_code_name in platforms:
targets.append((os_name, os_code_name, 'source'))
for arch in build_file.targets[os_name][os_code_name]:
targets.append((os_name, os_code_name, arch))
views = configure_release_views(
jenkins, rosdistro_name, release_build_name, targets)
if groovy_script is not None:
# all further configuration will be handled by the groovy script
jenkins = False
all_source_job_names = []
all_binary_job_names = []
all_job_configs = {}
for pkg_name in sorted(pkg_names):
pkg = dist_file.release_packages[pkg_name]
repo_name = pkg.repository_name
repo = dist_file.repositories[repo_name]
is_disabled = pkg_name not in filtered_pkg_names
if is_disabled and build_file.skip_ignored_packages:
print("Skipping ignored package '%s' in repository '%s'" %
(pkg_name, repo_name), file=sys.stderr)
continue
if not repo.release_repository:
print(("Skipping package '%s' in repository '%s': no release " +
"section") % (pkg_name, repo_name), file=sys.stderr)
continue
if not repo.release_repository.version:
print(("Skipping package '%s' in repository '%s': no release " +
"version") % (pkg_name, repo_name), file=sys.stderr)
continue
for os_name, os_code_name in platforms:
try:
source_job_names, binary_job_names, job_configs = \
configure_release_job(
config_url, rosdistro_name, release_build_name,
pkg_name, os_name, os_code_name,
config=config, build_file=build_file,
index=index, dist_file=dist_file,
dist_cache=dist_cache,
jenkins=jenkins, views=views,
generate_import_package_job=False,
generate_sync_packages_jobs=False,
is_disabled=is_disabled,
groovy_script=groovy_script)
all_source_job_names += source_job_names
all_binary_job_names += binary_job_names
if groovy_script is not None:
print('Configuration for jobs: ' +
', '.join(source_job_names + binary_job_names))
all_job_configs.update(job_configs)
except JobValidationError as e:
print(e.message, file=sys.stderr)
groovy_data = {
'expected_num_jobs': len(all_job_configs),
'job_prefixes_and_names': {},
}
# delete obsolete binary jobs
for os_name, os_code_name in platforms:
for arch in build_file.targets[os_name][os_code_name]:
binary_view = get_release_binary_view_name(
rosdistro_name, release_build_name,
os_name, os_code_name, arch)
binary_job_prefix = '%s__' % binary_view
excluded_job_names = set([
j for j in all_binary_job_names
if j.startswith(binary_job_prefix)])
if groovy_script is None:
print("Removing obsolete binary jobs with prefix '%s'" %
binary_job_prefix)
remove_jobs(
jenkins, binary_job_prefix, excluded_job_names)
else:
binary_key = 'binary_%s_%s_%s' % (os_name, os_code_name, arch)
groovy_data['job_prefixes_and_names'][binary_key] = \
(binary_job_prefix, excluded_job_names)
# delete obsolete source jobs
# requires knowledge about all other release build files
for os_name, os_code_name in platforms:
other_source_job_names = []
# get source job names for all other release build files
for other_release_build_name in [
k for k in build_files.keys() if k != release_build_name]:
other_build_file = build_files[other_release_build_name]
other_dist_file = get_distribution_file(
index, rosdistro_name, other_build_file)
if not other_dist_file:
continue
if os_name not in other_build_file.targets or \
os_code_name not in other_build_file.targets[os_name]:
continue
if other_build_file.skip_ignored_packages:
filtered_pkg_names = other_build_file.filter_packages(
pkg_names)
else:
filtered_pkg_names = pkg_names
for pkg_name in sorted(filtered_pkg_names):
pkg = other_dist_file.release_packages[pkg_name]
repo_name = pkg.repository_name
repo = other_dist_file.repositories[repo_name]
if not repo.release_repository:
continue
if not repo.release_repository.version:
continue
other_job_name = get_sourcedeb_job_name(
rosdistro_name, other_release_build_name,
pkg_name, os_name, os_code_name)
other_source_job_names.append(other_job_name)
source_view_prefix = get_release_source_view_name(
rosdistro_name, os_name, os_code_name)
source_job_prefix = '%s__' % source_view_prefix
excluded_job_names = set([
j for j in (all_source_job_names + other_source_job_names)
if j.startswith(source_job_prefix)])
if groovy_script is None:
print("Removing obsolete source jobs with prefix '%s'" %
source_job_prefix)
remove_jobs(jenkins, source_job_prefix, excluded_job_names)
else:
source_key = 'source_%s_%s' % (os_name, os_code_name)
groovy_data['job_prefixes_and_names'][source_key] = (
source_job_prefix, excluded_job_names)
if groovy_script is not None:
print("Writing groovy script '%s' to reconfigure %d jobs" %
(groovy_script, len(all_job_configs)))
content = expand_template(
'snippet/reconfigure_jobs.groovy.em', groovy_data)
write_groovy_script_and_configs(
groovy_script, content, all_job_configs)
def _get_downstream_package_names(pkg_names, dependencies):
downstream_pkg_names = set([])
for pkg_name, deps in dependencies.items():
if deps.intersection(pkg_names):
downstream_pkg_names.add(pkg_name)
return downstream_pkg_names
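# Illustrative sketch (not part of the original build configuration code): how
# the fixed-point loop above combines with _get_downstream_package_names() to
# grow an ignore set until no further downstream packages are found. The
# package names and dependency mapping are made up.
def _example_downstream_closure():
    direct_dependencies = {
        'pkg_a': set([]),
        'pkg_b': set(['pkg_a']),
        'pkg_c': set(['pkg_b']),
    }
    ignored_pkg_names = set(['pkg_a'])
    while True:
        downstream = _get_downstream_package_names(
            ignored_pkg_names, direct_dependencies)
        if downstream - ignored_pkg_names:
            ignored_pkg_names |= downstream
            continue
        break
    assert ignored_pkg_names == set(['pkg_a', 'pkg_b', 'pkg_c'])
    return ignored_pkg_names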
# Configure a Jenkins release job which consists of
# - a source deb job
# - N binary debs, one for each architecture
def configure_release_job(
config_url, rosdistro_name, release_build_name,
pkg_name, os_name, os_code_name,
config=None, build_file=None,
index=None, dist_file=None, dist_cache=None,
jenkins=None, views=None,
generate_import_package_job=True,
generate_sync_packages_jobs=True,
is_disabled=False,
groovy_script=None,
filter_arches=None):
"""
Configure a Jenkins release job.
The following jobs are created for each package:
    - M source jobs, one for each OS code name
- M * N binary jobs, one for each combination of OS code name and arch
"""
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_release_build_files(config, rosdistro_name)
build_file = build_files[release_build_name]
if index is None:
index = get_index(config.rosdistro_index_url)
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError(
'No distribution file matches the build file')
pkg_names = dist_file.release_packages.keys()
if pkg_name not in pkg_names:
raise JobValidationError(
"Invalid package name '%s' " % pkg_name +
'choose one of the following: ' + ', '.join(sorted(pkg_names)))
pkg = dist_file.release_packages[pkg_name]
repo_name = pkg.repository_name
repo = dist_file.repositories[repo_name]
if not repo.release_repository:
raise JobValidationError(
"Repository '%s' has no release section" % repo_name)
if not repo.release_repository.version:
raise JobValidationError(
"Repository '%s' has no release version" % repo_name)
if os_name not in build_file.targets.keys():
raise JobValidationError(
"Invalid OS name '%s' " % os_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets.keys())))
if os_code_name not in build_file.targets[os_name].keys():
raise JobValidationError(
"Invalid OS code name '%s' " % os_code_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets[os_name].keys())))
if dist_cache is None and \
(build_file.notify_maintainers or
build_file.abi_incompatibility_assumed):
dist_cache = get_distribution_cache(index, rosdistro_name)
if jenkins is None:
jenkins = connect(config.jenkins_url)
if views is None:
targets = []
targets.append((os_name, os_code_name, 'source'))
for arch in build_file.targets[os_name][os_code_name]:
targets.append((os_name, os_code_name, arch))
configure_release_views(
jenkins, rosdistro_name, release_build_name, targets)
if generate_import_package_job:
configure_import_package_job(
config_url, rosdistro_name, release_build_name,
config=config, build_file=build_file, jenkins=jenkins)
if generate_sync_packages_jobs:
configure_sync_packages_to_main_job(
config_url, rosdistro_name, release_build_name,
config=config, build_file=build_file, jenkins=jenkins)
for arch in build_file.targets[os_name][os_code_name]:
configure_sync_packages_to_testing_job(
config_url, rosdistro_name, release_build_name,
os_code_name, arch,
config=config, build_file=build_file, jenkins=jenkins)
source_job_names = []
binary_job_names = []
job_configs = {}
# sourcedeb job
source_job_name = get_sourcedeb_job_name(
rosdistro_name, release_build_name,
pkg_name, os_name, os_code_name)
job_config = _get_sourcedeb_job_config(
config_url, rosdistro_name, release_build_name,
config, build_file, os_name, os_code_name,
pkg_name, repo_name, repo.release_repository, dist_cache=dist_cache,
is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
configure_job(jenkins, source_job_name, job_config)
source_job_names.append(source_job_name)
job_configs[source_job_name] = job_config
dependency_names = []
if build_file.abi_incompatibility_assumed:
dependency_names = _get_direct_dependencies(
pkg_name, dist_cache, pkg_names)
# if dependencies are not yet available in rosdistro cache
# skip binary jobs
if dependency_names is None:
print(("Skipping binary jobs for package '%s' because it is not " +
"yet in the rosdistro cache") % pkg_name, file=sys.stderr)
return source_job_names, binary_job_names, job_configs
# binarydeb jobs
for arch in build_file.targets[os_name][os_code_name]:
if filter_arches and arch not in filter_arches:
continue
job_name = get_binarydeb_job_name(
rosdistro_name, release_build_name,
pkg_name, os_name, os_code_name, arch)
upstream_job_names = [source_job_name] + [
get_binarydeb_job_name(
rosdistro_name, release_build_name,
dependency_name, os_name, os_code_name, arch)
for dependency_name in dependency_names]
job_config = _get_binarydeb_job_config(
config_url, rosdistro_name, release_build_name,
config, build_file, os_name, os_code_name, arch,
pkg_name, repo_name, repo.release_repository,
dist_cache=dist_cache, upstream_job_names=upstream_job_names,
is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
configure_job(jenkins, job_name, job_config)
binary_job_names.append(job_name)
job_configs[job_name] = job_config
return source_job_names, binary_job_names, job_configs
def configure_release_views(
jenkins, rosdistro_name, release_build_name, targets):
views = []
# generate view aggregating all binary views
if len([t for t in targets if t[2] != 'source']) > 1:
view_prefix = get_release_binary_view_prefix(
rosdistro_name, release_build_name)
views.append(configure_view(
jenkins, view_prefix, include_regex='%s_.+__.+' % view_prefix,
template_name='dashboard_view_all_jobs.xml.em'))
for os_name, os_code_name, arch in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name, os_name, os_code_name,
arch)
if arch == 'source':
include_regex = '%s__.+__%s_%s__source' % \
(view_name, os_name, os_code_name)
else:
include_regex = '%s__.+__%s_%s_%s__binary' % \
(view_name, os_name, os_code_name, arch)
views.append(configure_view(
jenkins, view_name, include_regex=include_regex,
template_name='dashboard_view_all_jobs.xml.em'))
return views
def _get_direct_dependencies(pkg_name, dist_cache, pkg_names):
from catkin_pkg.package import parse_package_string
if pkg_name not in dist_cache.release_package_xmls:
return None
pkg_xml = dist_cache.release_package_xmls[pkg_name]
pkg = parse_package_string(pkg_xml)
depends = set([
d.name for d in (
pkg.buildtool_depends +
pkg.build_depends +
pkg.buildtool_export_depends +
pkg.build_export_depends +
pkg.exec_depends +
pkg.test_depends)
if d.name in pkg_names])
return depends
def _get_sourcedeb_job_config(
config_url, rosdistro_name, release_build_name,
config, build_file, os_name, os_code_name,
pkg_name, repo_name, release_repository, dist_cache=None,
is_disabled=False):
template_name = 'release/sourcedeb_job.xml.em'
repository_args, script_generating_key_files = \
get_repositories_and_script_generating_key_files(build_file=build_file)
sourcedeb_files = [
'sourcedeb/*.debian.tar.gz',
'sourcedeb/*.debian.tar.xz',
'sourcedeb/*.dsc',
'sourcedeb/*.orig.tar.gz',
'sourcedeb/*_source.changes',
]
maintainer_emails = get_maintainer_emails(dist_cache, repo_name) \
if build_file.notify_maintainers \
else set([])
job_data = {
'github_url': get_github_project_url(release_repository.url),
'job_priority': build_file.jenkins_source_job_priority,
'node_label': build_file.jenkins_source_job_label,
'disabled': is_disabled,
'ros_buildfarm_repository': get_repository(),
'script_generating_key_files': script_generating_key_files,
'rosdistro_index_url': config.rosdistro_index_url,
'rosdistro_name': rosdistro_name,
'release_build_name': release_build_name,
'pkg_name': pkg_name,
'os_name': os_name,
'os_code_name': os_code_name,
'repository_args': repository_args,
'sourcedeb_files': sourcedeb_files,
'import_package_job_name': get_import_package_job_name(rosdistro_name),
'debian_package_name': get_debian_package_name(
rosdistro_name, pkg_name),
'notify_emails': build_file.notify_emails,
'maintainer_emails': maintainer_emails,
'notify_maintainers': build_file.notify_maintainers,
'timeout_minutes': build_file.jenkins_source_job_timeout,
'credential_id': build_file.upload_credential_id,
}
job_config = expand_template(template_name, job_data)
return job_config
def _get_binarydeb_job_config(
config_url, rosdistro_name, release_build_name,
config, build_file, os_name, os_code_name, arch,
pkg_name, repo_name, release_repository,
dist_cache=None, upstream_job_names=None,
is_disabled=False):
template_name = 'release/binarydeb_job.xml.em'
repository_args, script_generating_key_files = \
get_repositories_and_script_generating_key_files(build_file=build_file)
binarydeb_files = [
'binarydeb/*.changes',
'binarydeb/*.deb',
]
sync_to_testing_job_name = [get_sync_packages_to_testing_job_name(
rosdistro_name, os_code_name, arch)]
maintainer_emails = get_maintainer_emails(dist_cache, repo_name) \
if build_file.notify_maintainers \
else set([])
job_data = {
'github_url': get_github_project_url(release_repository.url),
'job_priority': build_file.jenkins_binary_job_priority,
'node_label': build_file.jenkins_binary_job_label,
'disabled': is_disabled,
'upstream_projects': upstream_job_names,
'ros_buildfarm_repository': get_repository(),
'script_generating_key_files': script_generating_key_files,
'rosdistro_index_url': config.rosdistro_index_url,
'rosdistro_name': rosdistro_name,
'release_build_name': release_build_name,
'pkg_name': pkg_name,
'os_name': os_name,
'os_code_name': os_code_name,
'arch': arch,
'repository_args': repository_args,
'append_timestamp': build_file.abi_incompatibility_assumed,
'binarydeb_files': binarydeb_files,
'import_package_job_name': get_import_package_job_name(rosdistro_name),
'debian_package_name': get_debian_package_name(
rosdistro_name, pkg_name),
'child_projects': sync_to_testing_job_name,
'notify_emails': build_file.notify_emails,
'maintainer_emails': maintainer_emails,
'notify_maintainers': build_file.notify_maintainers,
'timeout_minutes': build_file.jenkins_binary_job_timeout,
'credential_id': build_file.upload_credential_id,
}
job_config = expand_template(template_name, job_data)
return job_config
def configure_import_package_job(
config_url, rosdistro_name, release_build_name,
config=None, build_file=None, jenkins=None):
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_release_build_files(config, rosdistro_name)
build_file = build_files[release_build_name]
if jenkins is None:
jenkins = connect(config.jenkins_url)
job_name = get_import_package_job_name(rosdistro_name)
job_config = _get_import_package_job_config(build_file)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
configure_management_view(jenkins)
configure_job(jenkins, job_name, job_config)
def get_import_package_job_name(rosdistro_name):
view_name = get_release_job_prefix(rosdistro_name)
return '%s_import-package' % view_name
def _get_import_package_job_config(build_file):
template_name = 'release/import_package_job.xml.em'
job_data = {
'abi_incompatibility_assumed': build_file.abi_incompatibility_assumed,
'notify_emails': build_file.notify_emails,
}
job_config = expand_template(template_name, job_data)
return job_config
def configure_sync_packages_to_testing_job(
config_url, rosdistro_name, release_build_name, os_code_name, arch,
config=None, build_file=None, jenkins=None):
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_release_build_files(config, rosdistro_name)
build_file = build_files[release_build_name]
if jenkins is None:
jenkins = connect(config.jenkins_url)
job_name = get_sync_packages_to_testing_job_name(
rosdistro_name, os_code_name, arch)
job_config = _get_sync_packages_to_testing_job_config(
config_url, rosdistro_name, release_build_name, os_code_name, arch,
config, build_file)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
configure_management_view(jenkins)
configure_job(jenkins, job_name, job_config)
def get_sync_packages_to_testing_job_name(
rosdistro_name, os_code_name, arch):
view_name = get_release_job_prefix(rosdistro_name)
return '%s_sync-packages-to-testing_%s_%s' % \
(view_name, os_code_name, arch)
def _get_sync_packages_to_testing_job_config(
config_url, rosdistro_name, release_build_name, os_code_name, arch,
config, build_file):
template_name = 'release/sync_packages_to_testing_job.xml.em'
repository_args, script_generating_key_files = \
get_repositories_and_script_generating_key_files(build_file=build_file)
job_data = {
'ros_buildfarm_repository': get_repository(),
'script_generating_key_files': script_generating_key_files,
'config_url': config_url,
'rosdistro_name': rosdistro_name,
'release_build_name': release_build_name,
'os_code_name': os_code_name,
'arch': arch,
'repository_args': repository_args,
'notify_emails': build_file.notify_emails,
}
job_config = expand_template(template_name, job_data)
return job_config
def configure_sync_packages_to_main_job(
config_url, rosdistro_name, release_build_name,
config=None, build_file=None, jenkins=None):
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_release_build_files(config, rosdistro_name)
build_file = build_files[release_build_name]
if jenkins is None:
jenkins = connect(config.jenkins_url)
job_name = get_sync_packages_to_main_job_name(
rosdistro_name)
job_config = _get_sync_packages_to_main_job_config(
rosdistro_name, build_file)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
configure_management_view(jenkins)
configure_job(jenkins, job_name, job_config)
def get_sync_packages_to_main_job_name(rosdistro_name):
view_name = get_release_job_prefix(rosdistro_name)
return '%s_sync-packages-to-main' % view_name
def _get_sync_packages_to_main_job_config(rosdistro_name, build_file):
template_name = 'release/sync_packages_to_main_job.xml.em'
job_data = {
'rosdistro_name': rosdistro_name,
'notify_emails': build_file.notify_emails,
}
job_config = expand_template(template_name, job_data)
return job_config
def get_maintainer_emails(dist_cache, repo_name):
maintainer_emails = set([])
if dist_cache and repo_name in dist_cache.distribution_file.repositories:
from catkin_pkg.package import parse_package_string
# add maintainers listed in latest release to recipients
repo = dist_cache.distribution_file.repositories[repo_name]
if repo.release_repository:
for pkg_name in repo.release_repository.package_names:
if pkg_name not in dist_cache.release_package_xmls:
continue
pkg_xml = dist_cache.release_package_xmls[pkg_name]
pkg = parse_package_string(pkg_xml)
for m in pkg.maintainers:
maintainer_emails.add(m.email)
return maintainer_emails
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class IsolatedCreds(object):
def __init__(self, name, tempest_client=True, interface='json',
password='pass', network_resources=None):
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
self.name = name
self.tempest_client = tempest_client
self.interface = interface
self.password = password
self.identity_admin_client, self.network_admin_client = (
self._get_admin_clients())
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
order):
identity
network
"""
if self.tempest_client:
os = clients.AdminManager(interface=self.interface)
else:
os = clients.OfficialClientManager(
auth.get_default_credentials('identity_admin')
)
return os.identity_client, os.network_client
def _create_tenant(self, name, description):
if self.tempest_client:
resp, tenant = self.identity_admin_client.create_tenant(
name=name, description=description)
else:
tenant = self.identity_admin_client.tenants.create(
name,
description=description)
return tenant
def _get_tenant_by_name(self, name):
if self.tempest_client:
resp, tenant = self.identity_admin_client.get_tenant_by_name(name)
else:
tenants = self.identity_admin_client.tenants.list()
for ten in tenants:
if ten['name'] == name:
tenant = ten
break
else:
raise exceptions.NotFound('No such tenant')
return tenant
def _create_user(self, username, password, tenant, email):
if self.tempest_client:
resp, user = self.identity_admin_client.create_user(username,
password,
tenant['id'],
email)
else:
user = self.identity_admin_client.users.create(username, password,
email,
tenant_id=tenant.id)
return user
def _get_user(self, tenant, username):
if self.tempest_client:
resp, user = self.identity_admin_client.get_user_by_username(
tenant['id'],
username)
else:
user = self.identity_admin_client.users.get(username)
return user
def _list_roles(self):
if self.tempest_client:
resp, roles = self.identity_admin_client.list_roles()
else:
roles = self.identity_admin_client.roles.list()
return roles
def _assign_user_role(self, tenant, user, role):
if self.tempest_client:
self.identity_admin_client.assign_user_role(tenant, user, role)
else:
self.identity_admin_client.roles.add_user_role(user,
role, tenant=tenant)
def _delete_user(self, user):
if self.tempest_client:
self.identity_admin_client.delete_user(user)
else:
self.identity_admin_client.users.delete(user)
def _delete_tenant(self, tenant):
if self.tempest_client:
self.identity_admin_client.delete_tenant(tenant)
else:
self.identity_admin_client.tenants.delete(tenant)
def _create_creds(self, suffix="", admin=False):
"""Create random credentials under the following schema.
        If the name contains a '.', it is the full class path of something,
        and we don't really care. If it isn't, it's probably a meaningful
        name, so use it.
        For logging purposes, -user and -tenant are long and redundant, so
        don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
tenant_name = data_utils.rand_name(root) + suffix
tenant_desc = tenant_name + "-desc"
tenant = self._create_tenant(name=tenant_name,
description=tenant_desc)
username = data_utils.rand_name(root) + suffix
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self._create_user(username, self.password,
tenant, email)
if admin:
role = None
try:
roles = self._list_roles()
admin_role = CONF.identity.admin_role
if self.tempest_client:
role = next(r for r in roles if r['name'] == admin_role)
else:
role = next(r for r in roles if r.name == admin_role)
except StopIteration:
msg = "No admin role found"
raise exceptions.NotFound(msg)
if self.tempest_client:
self._assign_user_role(tenant['id'], user['id'], role['id'])
else:
self._assign_user_role(tenant.id, user.id, role.id)
return self._get_credentials(user, tenant)
def _get_credentials(self, user, tenant):
if self.tempest_client:
user_get = user.get
tenant_get = tenant.get
else:
user_get = user.__dict__.get
tenant_get = tenant.__dict__.get
return auth.get_credentials(
username=user_get('name'), user_id=user_get('id'),
tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
password=self.password)
def _create_network_resources(self, tenant_id):
network = None
subnet = None
router = None
        # Make sure the requested network resource settings are consistent
if self.network_resources:
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
raise exceptions.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
raise exceptions.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
raise exceptions.InvalidConfiguration('DHCP requires a subnet')
data_utils.rand_name_root = data_utils.rand_name(self.name)
if not self.network_resources or self.network_resources['network']:
network_name = data_utils.rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
subnet_name = data_utils.rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
router_name = data_utils.rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
if router:
self._clear_isolated_router(router['id'], router['name'])
if subnet:
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if network:
self._clear_isolated_network(network['id'], network['name'])
raise
return network, subnet, router
def _create_network(self, name, tenant_id):
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_network(
name=name, tenant_id=tenant_id)
else:
body = {'network': {'tenant_id': tenant_id, 'name': name}}
resp_body = self.network_admin_client.create_network(body)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
if not self.tempest_client:
body = {'subnet': {'name': subnet_name, 'tenant_id': tenant_id,
'network_id': network_id, 'ip_version': 4}}
if self.network_resources:
                body['subnet']['enable_dhcp'] = self.network_resources['dhcp']
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.tempest_client:
if self.network_resources:
resp, resp_body = self.network_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp, resp_body = self.network_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
else:
body['subnet']['cidr'] = str(subnet_cidr)
resp_body = self.network_admin_client.create_subnet(body)
break
except exceptions.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
e = exceptions.BuildErrorException()
e.message = 'Available CIDR for subnet creation could not be found'
raise e
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
else:
body = {'router': {'name': router_name, 'tenant_id': tenant_id,
'external_gateway_info': external_net_id,
'admin_state_up': True}}
resp_body = self.network_admin_client.create_router(body)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
if self.tempest_client:
self.network_admin_client.add_router_interface_with_subnet_id(
router_id, subnet_id)
else:
body = {'subnet_id': subnet_id}
self.network_admin_client.add_interface_router(router_id, body)
def get_primary_network(self):
return self.isolated_net_resources.get('primary')[0]
def get_primary_subnet(self):
return self.isolated_net_resources.get('primary')[1]
def get_primary_router(self):
return self.isolated_net_resources.get('primary')[2]
def get_admin_network(self):
return self.isolated_net_resources.get('admin')[0]
def get_admin_subnet(self):
return self.isolated_net_resources.get('admin')[1]
def get_admin_router(self):
return self.isolated_net_resources.get('admin')[2]
def get_alt_network(self):
return self.isolated_net_resources.get('alt')[0]
def get_alt_subnet(self):
return self.isolated_net_resources.get('alt')[1]
def get_alt_router(self):
return self.isolated_net_resources.get('alt')[2]
def get_credentials(self, credential_type):
if self.isolated_creds.get(credential_type):
credentials = self.isolated_creds[credential_type]
else:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
self.isolated_creds[credential_type] = credentials
# Maintained until tests are ported
LOG.info("Acquired isolated creds:\n credentials: %s"
% credentials)
if CONF.service_available.neutron:
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
self.isolated_net_resources[credential_type] = (
network, subnet, router,)
LOG.info("Created isolated network resources for : \n"
+ " credentials: %s" % credentials)
return credentials
def get_primary_creds(self):
return self.get_credentials('primary')
def get_admin_creds(self):
return self.get_credentials('admin')
def get_alt_creds(self):
return self.get_credentials('alt')
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
try:
net_client.delete_router(router_id)
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
try:
net_client.delete_subnet(subnet_id)
except exceptions.NotFound:
LOG.warn('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
try:
net_client.delete_network(network_id)
except exceptions.NotFound:
LOG.warn('network with name: %s not found for delete' %
network_name)
def _cleanup_ports(self, network_id):
# TODO(mlavalle) This method will be removed once patch
# https://review.openstack.org/#/c/46563/ merges in Neutron
if not self.ports:
if self.tempest_client:
resp, resp_body = self.network_admin_client.list_ports()
else:
resp_body = self.network_admin_client.list_ports()
self.ports = resp_body['ports']
ports_to_delete = [
port
for port in self.ports
if (port['network_id'] == network_id and
port['device_owner'] != 'network:router_interface' and
port['device_owner'] != 'network:dhcp')
]
for port in ports_to_delete:
try:
LOG.info('Cleaning up port id %s, name %s' %
(port['id'], port['name']))
self.network_admin_client.delete_port(port['id'])
except exceptions.NotFound:
LOG.warn('Port id: %s, name %s not found for clean-up' %
(port['id'], port['name']))
def _clear_isolated_net_resources(self):
net_client = self.network_admin_client
for cred in self.isolated_net_resources:
network, subnet, router = self.isolated_net_resources.get(cred)
LOG.debug("Clearing network: %(network)s, "
"subnet: %(subnet)s, router: %(router)s",
{'network': network, 'subnet': subnet, 'router': router})
if (not self.network_resources or
self.network_resources.get('router')):
try:
if self.tempest_client:
net_client.remove_router_interface_with_subnet_id(
router['id'], subnet['id'])
else:
body = {'subnet_id': subnet['id']}
net_client.remove_interface_router(router['id'], body)
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router['name'])
self._clear_isolated_router(router['id'], router['name'])
if (not self.network_resources or
self.network_resources.get('network')):
# TODO(mlavalle) This method call will be removed once patch
# https://review.openstack.org/#/c/46563/ merges in Neutron
self._cleanup_ports(network['id'])
if (not self.network_resources or
self.network_resources.get('subnet')):
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if (not self.network_resources or
self.network_resources.get('network')):
self._clear_isolated_network(network['id'], network['name'])
def clear_isolated_creds(self):
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
for creds in self.isolated_creds.itervalues():
try:
self._delete_user(creds.user_id)
except exceptions.NotFound:
LOG.warn("user with name: %s not found for delete" %
creds.username)
try:
self._delete_tenant(creds.tenant_id)
except exceptions.NotFound:
LOG.warn("tenant with name: %s not found for delete" %
creds.tenant_name)
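# Illustrative sketch (not part of the original tempest module): _create_subnet
# above carves candidate tenant subnets out of the configured CIDR with netaddr
# and keeps trying until Neutron accepts one that does not overlap. The CIDR
# and mask bits below are made-up values rather than
# CONF.network.tenant_network_cidr / tenant_network_mask_bits.
def _example_subnet_candidates():
    base_cidr = netaddr.IPNetwork('10.100.0.0/16')
    mask_bits = 24
    candidates = [str(cidr) for cidr in base_cidr.subnet(mask_bits)]
    # The first attempts would be 10.100.0.0/24, 10.100.1.0/24, 10.100.2.0/24, ...
    return candidates[:3]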
|
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import fnmatch
import os
import os.path
import re
import sys
import urllib.parse
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, date, timedelta
from io import StringIO
from typing import Union, Tuple, Sequence, Optional, Iterable, Any
import numpy as np
from .sround import sround
from .undefined import UNDEFINED
__author__ = "Norman Fomferra (Brockmann Consult GmbH)"
def qualified_name_to_object(qualified_name: str, default_module_name='builtins'):
"""
Convert a fully qualified name into a Python object.
It is true that ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.
>>> qualified_name_to_object('unittest.TestCase')
<class 'unittest.case.TestCase'>
See also :py:func:`object_to_qualified_name`.
:param qualified_name: fully qualified name of the form [<module>'.'{<name>'.'}]<name>
:param default_module_name: default module name to be used if the name does not contain one
:return: the Python object
:raise ImportError: If the module could not be imported
:raise AttributeError: If the name could not be found
"""
parts = qualified_name.split('.')
if len(parts) == 1:
module_name = default_module_name
else:
module_name = parts[0]
parts = parts[1:]
value = __import__(module_name)
for name in parts:
value = getattr(value, name)
return value
def object_to_qualified_name(value, fail=False, default_module_name='builtins') -> Union[str, None]:
"""
Get the fully qualified name of a Python object.
It is true that ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.
>>> from unittest import TestCase
>>> object_to_qualified_name(TestCase)
'unittest.case.TestCase'
See also :py:func:`qualified_name_to_object`.
:param value: some Python object
:param fail: raise ``ValueError`` if name cannot be derived.
:param default_module_name: if this is the *value*'s module name, no module name will be returned.
:return: fully qualified name if it can be derived, otherwise ``None`` if *fail* is ``False``.
:raise ValueError: if *fail* is ``True`` and the name cannot be derived.
"""
try:
module_name = value.__module__
except AttributeError:
module_name = None
if module_name == default_module_name:
module_name = None
try:
name = value.__name__
except AttributeError:
name = None
if name:
return module_name + '.' + name if module_name else name
elif fail:
raise ValueError("missing attribute '__name__'")
else:
return str(value)
@contextmanager
def fetch_std_streams():
"""
A context manager which can be used to temporarily fetch the standard output streams
``sys.stdout`` and ``sys.stderr``.
Usage:::
        with fetch_std_streams() as (stdout, stderr):
sys.stdout.write('yes')
sys.stderr.write('oh no')
print('fetched', stdout.getvalue())
print('fetched', stderr.getvalue())
:return: yields ``sys.stdout`` and ``sys.stderr`` redirected into buffers of type ``StringIO``
"""
sys.stdout.flush()
sys.stderr.flush()
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
try:
yield sys.stdout, sys.stderr
finally:
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def encode_url_path(path_pattern: str, path_args: dict = None, query_args: dict = None) -> str:
"""
    Return a URL path with an optional query string which is composed of a *path_pattern* that may contain
placeholders of the form ``{name}`` which will be replaced by URL-encoded versions of the
corresponding values in *path_args*, i.e. ``urllib.parse.quote_plus(path_args[name])``.
An optional query string is composed of the URL-encoded key-value pairs given in *query_args*, i.e.
``urllib.parse.urlencode(query_args)``.
:param path_pattern: The path pattern which may include any number of placeholders of the form ``{name}``
:param path_args: The values for the placeholders in *path_pattern*
:param query_args: The query arguments
    :return: a URL-encoded path
"""
path = path_pattern
if path_args:
quoted_pattern_args = dict(path_args)
for name, value in path_args.items():
quoted_pattern_args[name] = urllib.parse.quote_plus(str(value)) if value is not None else ''
path = path_pattern.format(**quoted_pattern_args)
query_string = ''
if query_args:
query_string = '?' + urllib.parse.urlencode(query_args)
return path + query_string
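# Illustrative sketch (not part of the original module): placeholder values are
# URL-quoted individually, query arguments are encoded as a whole. The path
# pattern and argument values below are made up.
def _example_encode_url_path():
    url = encode_url_path('/ws/res/{base_dir}/{res_name}',
                          path_args=dict(base_dir='/home/norman data',
                                         res_name='SST'),
                          query_args=dict(var='analysed sst', index=2))
    assert url == '/ws/res/%2Fhome%2Fnorman+data/SST?var=analysed+sst&index=2'
    return url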
def to_datetime_range(start_datetime_or_str: Union[datetime, date, str, None],
end_datetime_or_str: Union[datetime, date, str, None],
default=None) -> Tuple[datetime, datetime]:
if not start_datetime_or_str and not end_datetime_or_str:
return default
if not end_datetime_or_str:
if not start_datetime_or_str:
raise ValueError('start_datetime_or_str argument must be given')
end_datetime_or_str = start_datetime_or_str
start_datetime = to_datetime(start_datetime_or_str, upper_bound=False)
end_datetime = to_datetime(end_datetime_or_str, upper_bound=True)
return start_datetime, end_datetime
def to_datetime(datetime_or_str: Union[datetime, date, str, None], upper_bound=False, default=None) -> datetime:
if datetime_or_str is None:
return default
elif isinstance(datetime_or_str, str):
if datetime_or_str.strip() == '':
return default
format_to_timedelta = [("%Y-%m-%dT%H:%M:%S", timedelta()),
("%Y-%m-%d %H:%M:%S", timedelta()),
("%Y-%m-%d", timedelta(hours=24, seconds=-1)),
("%Y-%m", timedelta(weeks=4, seconds=-1)),
("%Y", timedelta(days=365, seconds=-1)),
]
for f, td in format_to_timedelta:
try:
dt = datetime.strptime(datetime_or_str, f)
return dt + td if upper_bound else dt
except ValueError:
pass
raise ValueError('Invalid date/time value: "%s"' % datetime_or_str)
elif isinstance(datetime_or_str, datetime):
return datetime_or_str
elif isinstance(datetime_or_str, date):
return datetime(datetime_or_str.year, datetime_or_str.month, datetime_or_str.day, 12)
else:
raise TypeError('datetime_or_str argument must be a string or instance of datetime.date')
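# Illustrative sketch (not part of the original module): partial date strings
# resolve to the start of the period, or roughly to its end when
# upper_bound=True. The dates below are made up.
def _example_to_datetime():
    assert to_datetime('2010-02-01') == datetime(2010, 2, 1)
    assert to_datetime('2010-02-01', upper_bound=True) == \
        datetime(2010, 2, 1, 23, 59, 59)
    assert to_datetime_range('2010-01', '2010-02') == \
        (datetime(2010, 1, 1), datetime(2010, 2, 28, 23, 59, 59))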
def to_list(value,
dtype: type = str,
name: str = None,
nullable: bool = True,
csv: bool = True,
strip: bool = True):
"""
Convert *value* into a list of items of type *dtype*.
:param value: Some value that may be a sequence or a scalar
:param dtype: The desired target type
:param name: An (argument) name used for ``ValueError`` messages
:param nullable: Whether *value* can be None.
:param csv: Whether to split *value* if it is a string containing commas.
:param strip: Whether to strip CSV string values, used only if *csv* is True.
:return: A list with elements of type *dtype* or None if *value* is None and *nullable* is True
"""
if value is None:
if not nullable:
raise ValueError('%s argument must not be None' % (name or 'some'))
return value
if csv and isinstance(value, str):
items = value.split(',')
return [dtype(item.strip() if strip else item) for item in items]
if isinstance(value, dtype):
return [value]
# noinspection PyBroadException
try:
return [dtype(item) for item in value]
except Exception:
return [dtype(value)]
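# Illustrative sketch (not part of the original module): to_list() normalises
# None, scalars, CSV strings, and general sequences into a typed list.
def _example_to_list():
    assert to_list(None) is None
    assert to_list('a, b, c') == ['a', 'b', 'c']
    assert to_list('8, 9, 10', dtype=int) == [8, 9, 10]
    assert to_list(42, dtype=int) == [42]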
_PYTHON_QUOTE_CHARS = ['"', "'"]
def to_str_constant(s: str, quote="'") -> str:
"""
Convert a given string into another string that is a valid Python representation of a string constant.
:param s: the string
:param quote: the quote character, either a single or double quote
    :return: the Python string constant representation of *s*
"""
    if s is None:
        raise ValueError('s argument must not be None')
    if quote not in _PYTHON_QUOTE_CHARS:
        raise ValueError('quote must be a single or double quote character')
return quote + s.replace('\\', '\\\\').replace(quote, "\\%s" % quote) + quote
def is_str_constant(s: str) -> bool:
"""
Test whether a given string is a Python representation of a string constant.
:param s: the string
:return: True, if so.
"""
    return bool(s) and len(s) >= 2 and s[0] == s[-1] and s[0] in _PYTHON_QUOTE_CHARS
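# Illustrative sketch (not part of the original module): to_str_constant()
# escapes backslashes and the chosen quote character; is_str_constant()
# recognises the result.
def _example_str_constant():
    s = to_str_constant("it's")
    assert s == "'it\\'s'"
    assert is_str_constant(s)
    assert is_str_constant('"hello"')
    assert not is_str_constant('hello')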
@contextmanager
def cwd(path: str):
"""
A context manager which can be used to temporarily change the current working directory to *path*.
Usage:::
print(os.getcwd())
with cwd('./test'):
print(os.getcwd())
print(os.getcwd())
:return: yields the new working directory (absolute *path* passed in)
"""
if path is None:
raise ValueError('path argument must be given')
old_dir = os.getcwd()
try:
os.chdir(path)
yield os.getcwd()
finally:
os.chdir(old_dir)
_DATETIME64 = np.dtype('datetime64')
_ZERO_THMS_POSTFIX = 'T00:00:00'
_ZERO_MICR_POSTFIX = '.000000000'
def date_to_simple_str(v):
time_str = str(v)
if time_str.endswith(_ZERO_MICR_POSTFIX):
time_str = time_str[0: -len(_ZERO_MICR_POSTFIX)]
if time_str.endswith(_ZERO_THMS_POSTFIX):
time_str = time_str[0: -len(_ZERO_THMS_POSTFIX)]
return time_str
def to_json(v):
if v is None:
return v
t = type(v)
if t in {bool, int, float, str}:
return v
if t == complex:
return [v.real, v.imag]
if isinstance(v, type):
return object_to_qualified_name(v)
# TODO (forman): handle dtype=uint64/int64 here, as JSON does not support 64-bit ints
is_datetime64 = False
try:
is_datetime64 = np.issubdtype(v.dtype, np.datetime64)
except AttributeError:
pass
if is_datetime64:
# Convert time values to time strings
is_scalar = False
try:
is_scalar = v.size == 1 and len(v.shape) == 0
except AttributeError:
pass
if is_scalar:
return date_to_simple_str(v)
else:
li = []
for vi in v:
li.append(date_to_simple_str(vi))
return li
if isinstance(v, np.ndarray) and not np.issubdtype(v.dtype, np.datetime64):
try:
return v.tolist()
except AttributeError:
pass
try:
return v.item()
except (AttributeError, ValueError):
pass
try:
d = OrderedDict()
for ki, vi in v.items():
d[str(ki)] = to_json(vi)
return d
except AttributeError:
pass
try:
li = []
for vi in v:
li.append(to_json(vi))
return li
except TypeError:
pass
return str(v)
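# Illustrative sketch (not part of the original module): to_json() maps numpy
# and plain Python values onto JSON-serialisable equivalents.
def _example_to_json():
    assert to_json(1.5) == 1.5
    assert to_json(2 + 3j) == [2.0, 3.0]
    assert to_json(np.array([1, 2, 3])) == [1, 2, 3]
    assert to_json({'a': np.int32(7)}) == {'a': 7}
    assert to_json(np.datetime64('2010-01-01')) == '2010-01-01'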
def filter_fileset(names: Sequence[str],
includes: Optional[Sequence[str]] = None,
excludes: Optional[Sequence[str]] = None) -> Sequence[str]:
"""
Filter a fileset given by the sequence *names* using the wildcard patterns in *includes* and *excludes*.
:param names: The names of the fileset
:param includes: Wildcard patterns that select the file names to be included,
:param excludes: Wildcard patterns that select the file names to be excluded,
:return: The filtered fileset
"""
if includes is not None:
filtered_names = set()
for pattern in includes:
filtered_names.update(fnmatch.filter(names, pattern))
if excludes is not None:
filtered_names_old = filtered_names
filtered_names = set(filtered_names)
for pattern in excludes:
filtered_names.difference_update(fnmatch.filter(filtered_names_old, pattern))
elif excludes is not None:
filtered_names = set(names)
for pattern in excludes:
filtered_names.difference_update(fnmatch.filter(filtered_names, pattern))
else:
filtered_names = names
return filtered_names
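# Illustrative sketch (not part of the original module): includes are applied
# first, then excludes are removed from the included set. The file names are
# made up.
def _example_filter_fileset():
    names = ['a.nc', 'b.nc', 'b.txt', 'readme.md']
    assert filter_fileset(names, includes=['*.nc']) == {'a.nc', 'b.nc'}
    assert filter_fileset(names, includes=['*.nc'], excludes=['b.*']) == {'a.nc'}
    assert filter_fileset(names, excludes=['*.md']) == {'a.nc', 'b.nc', 'b.txt'}
    assert filter_fileset(names) == names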
def new_indexed_name(names: Iterable[str], pattern: str) -> str:
"""
Return a new name that is unique in *names* and that conforms to *pattern*. The argument
*pattern* must contain a single ``"{index}"`` substring.
:param names: Sequence of names
:param pattern: Naming pattern, e.g. "var_{index}"
:return: a new name, e.g. "var_3"
"""
if "{index}" not in pattern:
raise ValueError('pattern must contain "{index}"')
    re_pattern = re.compile(pattern.replace("{index}", r"(\d+)"))
max_index = 0
for name in names:
match_result = re_pattern.match(name)
if match_result and match_result.group(1):
max_index = max(max_index, int(match_result.group(1)))
new_index = max_index + 1
while True:
new_name = pattern.replace("{index}", str(new_index))
if not new_name.isidentifier():
raise ValueError('pattern does not yield a valid name')
if new_name not in names:
return new_name
new_index += 1
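# Illustrative sketch (not part of the original module): the new index is one
# above the highest index already used by names matching the pattern.
def _example_new_indexed_name():
    assert new_indexed_name(['var_1', 'var_3', 'other'], 'var_{index}') == 'var_4'
    assert new_indexed_name([], 'res_{index}') == 'res_1'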
NoneType = type(None)
# noinspection PyBroadException
def to_scalar(value: Any, nchars=None, ndigits=None, stringify=False) -> Any:
"""
Convert the given *value* into a JSON-serializable, scalar value.
If the conversion fails, UNDEFINED is returned.
:param value: Any value.
    :param nchars: If not None and greater than zero, text values will be limited to *nchars* characters.
:param ndigits: If not None, floating point values will be rounded to *ndigits* significant digits.
:param stringify: If True, non-primitive values will be converted to strings.
:return: A JSON-serializable, scalar value or UNDEFINED, if the conversion fails.
"""
is_float = False
is_str = False
if isinstance(value, (int, bool, NoneType)):
return value
elif isinstance(value, float):
is_float = True
elif isinstance(value, str):
is_str = True
elif hasattr(value, 'shape') and hasattr(value, 'dtype'):
try:
shape = value.shape
dtype = value.dtype
ndim = len(shape)
size = 1
for dim in shape:
size *= dim
if size > 1:
return UNDEFINED
if ndim >= 1:
index = 0 if ndim == 1 else (0,) * ndim
try:
value = value[index]
except BaseException:
pass
if np.issubdtype(dtype, np.integer):
return int(value)
elif np.issubdtype(dtype, np.bool_):
return bool(value)
elif np.issubdtype(dtype, np.floating):
value = float(value)
is_float = True
elif np.issubdtype(dtype, np.str_) or stringify:
value = str(value)
is_str = True
else:
return UNDEFINED
except BaseException:
return UNDEFINED
elif stringify:
value = str(value)
is_str = True
else:
return UNDEFINED
if is_float:
if ndigits is not None:
return sround(value, ndigits=ndigits)
return value
if is_str:
if nchars is not None and len(value) > nchars:
return value[0: nchars] + '...'
return value
return UNDEFINED
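# Illustrative sketch (not part of the original module): to_scalar() reduces
# values to JSON-friendly scalars, truncating long strings and rounding floats,
# and yields UNDEFINED for anything it cannot reduce.
def _example_to_scalar():
    assert to_scalar(42) == 42
    assert to_scalar('abcdef', nchars=3) == 'abc...'
    assert to_scalar(np.float64(3.14159), ndigits=3) == sround(3.14159, ndigits=3)
    assert to_scalar(np.array([1, 2])) is UNDEFINED
    assert to_scalar(object()) is UNDEFINED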
|
|
"""HTML5 Push Messaging notification service."""
from datetime import datetime, timedelta
from functools import partial
from urllib.parse import urlparse
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_UNAUTHORIZED,
URL_ROOT,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
from homeassistant.util.json import load_json, save_json
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
DOMAIN,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = "html5_push_registrations.conf"
SERVICE_DISMISS = "html5_dismiss"
ATTR_GCM_SENDER_ID = "gcm_sender_id"
ATTR_GCM_API_KEY = "gcm_api_key"
ATTR_VAPID_PUB_KEY = "vapid_pub_key"
ATTR_VAPID_PRV_KEY = "vapid_prv_key"
ATTR_VAPID_EMAIL = "vapid_email"
def gcm_api_deprecated(value):
"""Warn user that GCM API config is deprecated."""
if value:
_LOGGER.warning(
"Configuring html5_push_notifications via the GCM api"
" has been deprecated and will stop working after April 11,"
" 2019. Use the VAPID configuration instead. For instructions,"
" see https://www.home-assistant.io/components/notify.html5/"
)
return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_GCM_SENDER_ID): vol.All(cv.string, gcm_api_deprecated),
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
}
)
ATTR_SUBSCRIPTION = "subscription"
ATTR_BROWSER = "browser"
ATTR_NAME = "name"
ATTR_ENDPOINT = "endpoint"
ATTR_KEYS = "keys"
ATTR_AUTH = "auth"
ATTR_P256DH = "p256dh"
ATTR_EXPIRATIONTIME = "expirationTime"
ATTR_TAG = "tag"
ATTR_ACTION = "action"
ATTR_ACTIONS = "actions"
ATTR_TYPE = "type"
ATTR_URL = "url"
ATTR_DISMISS = "dismiss"
ATTR_PRIORITY = "priority"
DEFAULT_PRIORITY = "normal"
ATTR_TTL = "ttl"
DEFAULT_TTL = 86400
ATTR_JWT = "jwt"
WS_TYPE_APPKEY = "notify/html5/appkey"
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_APPKEY}
)
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict,
vol.Schema(
{vol.Required(ATTR_AUTH): cv.string, vol.Required(ATTR_P256DH): cv.string}
),
)
SUBSCRIPTION_SCHEMA = vol.All(
dict,
vol.Schema(
{
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
}
),
)
DISMISS_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
}
)
REGISTER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(["chrome", "firefox"]),
vol.Optional(ATTR_NAME): cv.string,
}
)
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(["received", "clicked", "closed"]),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
}
)
NOTIFY_CALLBACK_EVENT = "html5_notification"
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
"actions",
"badge",
"body",
"dir",
"icon",
"image",
"lang",
"renotify",
"requireInteraction",
"tag",
"timestamp",
"vibrate",
)
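# Illustrative sketch (not part of this integration): send_message() below
# routes keys listed in HTML5_SHOWNOTIFICATION_PARAMETERS straight into the
# push payload and everything else into the payload's data dictionary. The
# helper and sample values here are made up for illustration only.
def _example_split_notification_data(data):
    payload = {ATTR_DATA: {}}
    for key, val in data.items():
        if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
            payload[key] = val
        else:
            payload[ATTR_DATA][key] = val
    # e.g. {"tag": "alert-1", "url": "/lovelace"} ->
    #      {"data": {"url": "/lovelace"}, "tag": "alert-1"}
    return payload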
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
vapid_email = config.get(ATTR_VAPID_EMAIL)
def websocket_appkey(hass, connection, msg):
connection.send_message(websocket_api.result_message(msg["id"], vapid_pub_key))
hass.components.websocket_api.async_register_command(
WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
)
hass.http.register_view(HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(
hass, gcm_api_key, vapid_prv_key, vapid_email, registrations, json_path
)
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = "/api/notify.html5"
name = "api:notify.html5"
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
return self.json_message("Push notification subscriber registered.")
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or "unnamed device", self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message("Registration not found.")
reg = self.registrations.pop(found)
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
return self.json_message("Push notification subscriber unregistered.")
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = "/api/notify.html5/callback"
name = "api:notify.html5/callback"
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message(
"No target found in JWT", status_code=HTTP_UNAUTHORIZED
)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message(
"Authorization header is expected", status_code=HTTP_UNAUTHORIZED
)
parts = auth.split()
if parts[0].lower() != "bearer":
return self.json_message(
"Authorization header must " "start with Bearer",
status_code=HTTP_UNAUTHORIZED,
)
if len(parts) != 2:
return self.json_message(
"Authorization header must " "be Bearer token",
status_code=HTTP_UNAUTHORIZED,
)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message("token is invalid", status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning(
"Callback event payload is not valid: %s",
humanize_error(event_payload, ex),
)
event_name = "{}.{}".format(NOTIFY_CALLBACK_EVENT, event_payload[ATTR_TYPE])
request.app["hass"].bus.fire(event_name, event_payload)
return self.json({"status": "ok", "event": event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations, json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self._vapid_prv = vapid_prv
self._vapid_email = vapid_email
self.registrations = registrations
self.registrations_json_path = json_path
async def async_dismiss_message(service):
"""Handle dismissing notification message service calls."""
kwargs = {}
if self.targets is not None:
kwargs[ATTR_TARGET] = self.targets
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
await self.async_dismiss(**kwargs)
hass.services.async_register(
DOMAIN,
SERVICE_DISMISS,
async_dismiss_message,
schema=DISMISS_SERVICE_SCHEMA,
)
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def dismiss(self, **kwargs):
"""Dismisses a notification."""
data = kwargs.get(ATTR_DATA)
tag = data.get(ATTR_TAG) if data else ""
payload = {ATTR_TAG: tag, ATTR_DISMISS: True, ATTR_DATA: {}}
self._push_message(payload, **kwargs)
async def async_dismiss(self, **kwargs):
"""Dismisses a notification.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.dismiss, **kwargs))
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
tag = str(uuid.uuid4())
payload = {
"badge": "/static/images/notification-badge.png",
"body": message,
ATTR_DATA: {},
"icon": "/static/icons/favicon-192x192.png",
ATTR_TAG: tag,
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (
payload[ATTR_DATA].get(ATTR_URL) is None
and payload.get(ATTR_ACTIONS) is None
):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
self._push_message(payload, **kwargs)
def _push_message(self, payload, **kwargs):
"""Send the message."""
from pywebpush import WebPusher
timestamp = int(time.time())
ttl = int(kwargs.get(ATTR_TTL, DEFAULT_TTL))
priority = kwargs.get(ATTR_PRIORITY, DEFAULT_PRIORITY)
if priority not in ["normal", "high"]:
priority = DEFAULT_PRIORITY
payload["timestamp"] = timestamp * 1000 # Javascript ms since epoch
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
try:
info = REGISTER_SCHEMA(info)
except vol.Invalid:
_LOGGER.error(
"%s is not a valid HTML5 push notification" " target", target
)
continue
payload[ATTR_DATA][ATTR_JWT] = add_jwt(
timestamp,
target,
payload[ATTR_TAG],
info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH],
)
webpusher = WebPusher(info[ATTR_SUBSCRIPTION])
if self._vapid_prv and self._vapid_email:
vapid_headers = create_vapid_headers(
self._vapid_email, info[ATTR_SUBSCRIPTION], self._vapid_prv
)
vapid_headers.update({"urgency": priority, "priority": priority})
response = webpusher.send(
data=json.dumps(payload), headers=vapid_headers, ttl=ttl
)
else:
# Only pass the gcm key if we're actually using GCM
                # If we don't, notifications break on Firefox
gcm_key = (
self._gcm_key
if "googleapis.com" in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT]
else None
)
response = webpusher.send(json.dumps(payload), gcm_key=gcm_key, ttl=ttl)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path, self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
def add_jwt(timestamp, target, tag, jwt_secret):
"""Create JWT json to put into payload."""
import jwt
jwt_exp = datetime.fromtimestamp(timestamp) + timedelta(days=JWT_VALID_DAYS)
jwt_claims = {
"exp": jwt_exp,
"nbf": timestamp,
"iat": timestamp,
ATTR_TARGET: target,
ATTR_TAG: tag,
}
return jwt.encode(jwt_claims, jwt_secret).decode("utf-8")
def create_vapid_headers(vapid_email, subscription_info, vapid_private_key):
"""Create encrypted headers to send to WebPusher."""
from py_vapid import Vapid
if vapid_email and vapid_private_key and ATTR_ENDPOINT in subscription_info:
url = urlparse(subscription_info.get(ATTR_ENDPOINT))
vapid_claims = {
"sub": "mailto:{}".format(vapid_email),
"aud": "{}://{}".format(url.scheme, url.netloc),
}
vapid = Vapid.from_string(private_key=vapid_private_key)
return vapid.sign(vapid_claims)
return None
|
|
#!/usr/bin/env python
# Source : Werkov, Github, PyQt4 examples
# URL : https://github.com/Werkov/PyQt4/tree/master/examples/dbus/chat
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QString', 2)
from PyQt4 import QtCore, QtGui, QtDBus
import time
import tensorflow as tf
import os
import sys
sys.path.append(os.path.join('.', 'GUI'))
sys.path.append(os.path.join('.', 'Model_ForChat'))
sys.path.append(os.path.join('..', 'Model'))
from ui_chatmainwindow import Ui_ChatMainWindow
from ui_chatsetnickname import Ui_NicknameDialog
import model_forChatBox
from model_forChatBox import seq2seq_chat
import utils
class ChatAdaptor(QtDBus.QDBusAbstractAdaptor):
QtCore.Q_CLASSINFO("D-Bus Interface", 'com.trolltech.chat')
QtCore.Q_CLASSINFO("D-Bus Introspection", ''
' <interface name="com.trolltech.chat">\n'
' <signal name="message">\n'
' <arg direction="out" type="s" name="nickname"/>\n'
' <arg direction="out" type="s" name="text"/>\n'
' </signal>\n'
' <signal name="action">\n'
' <arg direction="out" type="s" name="nickname"/>\n'
' <arg direction="out" type="s" name="text"/>\n'
' </signal>\n'
' </interface>\n'
'')
action = QtCore.pyqtSignal(str, str)
message = QtCore.pyqtSignal(str, str)
def __init__(self, parent):
super(ChatAdaptor, self).__init__(parent)
self.setAutoRelaySignals(True)
class ChatInterface(QtDBus.QDBusAbstractInterface):
action = QtCore.pyqtSignal(str, str)
message = QtCore.pyqtSignal(str, str)
def __init__(self, service, path, connection, parent=None):
super(ChatInterface, self).__init__(service, path,
'com.trolltech.chat', connection, parent)
class ChatMainWindow(QtGui.QMainWindow, Ui_ChatMainWindow):
action = QtCore.pyqtSignal(str, str)
message = QtCore.pyqtSignal(str, str)
def __init__(self):
super(ChatMainWindow, self).__init__()
self.m_nickname = "nickname"
self.m_messages = []
self.setupUi(self)
self.sendButton.setEnabled(False)
# Connects
self.messageLineEdit.textChanged.connect(self.textChangedSlot)
self.sendButton.clicked.connect(self.sendClickedSlot)
self.actionChangeNickname.triggered.connect(self.changeNickname)
self.actionAboutQt.triggered.connect(self.aboutQt)
QtGui.qApp.lastWindowClosed.connect(self.exiting)
# Add our D-Bus interface and connect to D-Bus.
ChatAdaptor(self)
QtDBus.QDBusConnection.sessionBus().registerObject('/', self)
iface = ChatInterface('', '', QtDBus.QDBusConnection.sessionBus(),
self)
QtDBus.QDBusConnection.sessionBus().connect('', '',
'com.trolltech.chat', 'message', self.messageSlot)
iface.action.connect(self.actionSlot)
dialog = NicknameDialog()
dialog.cancelButton.setVisible(False)
dialog.exec_()
self.m_nickname = dialog.nickname.text().strip()
self.action.emit(self.m_nickname, "joins the chat")
#--------------------------------
# CHAT-BOT INITIALIZATION
#--------------------------------
        # Instantiates and builds the model for feedforward (inference) only
self.seq2seq_bot = seq2seq_chat(buckets=[(25, 25)], forward_only=True)
self.seq2seq_bot.build()
# Restore the trained model's parameters from checkpoint file
self.sess = tf.Session()
if model_forChatBox.MODE == 'WORDS2WORDS': directory = 'model_word2word'
elif model_forChatBox.MODE == 'CHARS2CHARS': directory = 'model_char2char'
saver, summary_writer = utils.restore(self.seq2seq_bot,self.sess,save_name=os.path.join('..', 'Model', directory))
def rebuildHistory(self):
history = '\n'.join(self.m_messages)
self.chatHistory.setPlainText(history)
@QtCore.pyqtSlot(str, str)
def messageSlot(self, nickname, text):
# User's message
self.m_messages.append("[%s] - %s" % (nickname.upper(), text))
if len(self.m_messages) > 100:
self.m_messages.pop(0)
self.rebuildHistory()
QtGui.QApplication.processEvents()
        # Bot's message: compute the reply
theReply = self.seq2seq_bot.reply(text, self.sess)
self.m_messages.append("[%s] - %s" % ("BOT", theReply))
self.m_messages.append("")
if len(self.m_messages) > 100:
self.m_messages.pop(0)
self.rebuildHistory()
@QtCore.pyqtSlot(str, str)
def actionSlot(self, nickname, text):
self.m_messages.append("--------\n* %s %s\n--------" % (nickname, text))
if len(self.m_messages) > 100:
self.m_messages.pop(0)
self.rebuildHistory()
@QtCore.pyqtSlot(str)
def textChangedSlot(self, newText):
self.sendButton.setEnabled(newText != '')
@QtCore.pyqtSlot()
def sendClickedSlot(self):
msg = QtDBus.QDBusMessage.createSignal('/', 'com.trolltech.chat',
'message')
msg << self.m_nickname << self.messageLineEdit.text()
QtDBus.QDBusConnection.sessionBus().send(msg)
self.messageLineEdit.setText('')
@QtCore.pyqtSlot()
def changeNickname(self):
dialog = NicknameDialog(self)
if dialog.exec_() == QtGui.QDialog.Accepted:
old = self.m_nickname
self.m_nickname = dialog.nickname.text().strip()
self.action.emit(old, "is now known as %s" % self.m_nickname)
@QtCore.pyqtSlot()
def aboutQt(self):
QtGui.QMessageBox.aboutQt(self)
@QtCore.pyqtSlot()
def exiting(self):
self.action.emit(self.m_nickname, "leaves the chat")
class NicknameDialog(QtGui.QDialog, Ui_NicknameDialog):
def __init__(self, parent=None):
super(NicknameDialog, self).__init__(parent)
self.setupUi(self)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
if not QtDBus.QDBusConnection.sessionBus().isConnected():
sys.stderr.write("Cannot connect to the D-Bus session bus.\n"
"Please check your system settings and try again.\n")
sys.exit(1)
chat = ChatMainWindow()
chat.show()
sys.exit(app.exec_())
|
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for dictionary manipulation."""
import collections
import copy
def IsNestedField(d, field_list):
"""Examine a dict of dicts, to see if the nested field name exists."""
for f in field_list:
try:
d = d[f]
except (KeyError, ValueError):
return False
return True
def GetByPath(d, field_list):
"""Returns the value of a nested field of a dict of dicts."""
for i, f in enumerate(field_list):
try:
d = d[f]
except (KeyError, ValueError):
# Re-raise with a more sensible error message.
raise KeyError('Can\'t find field %s.' % '.'.join(field_list[:i+1]))
return d
def SetByPath(d, field_list, value):
"""Sets the value of a nested field of a dict of dicts."""
GetByPath(d, field_list[:-1])[field_list[-1]] = value
def OrderDict(d):
"""Recursively construct an OrderedDict with alphabetically sorted keys.
Recursion will occur on dictionary values that are themselves dict or
list objects.
Args:
d: A dictionary.
Returns:
A new data structure where the encountered dict objects are replaced
by collections.OrderedDict structures with keys sorted by sorted().
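
  Example (illustrative):
    OrderDict({'b': 2, 'a': {'d': 4, 'c': 3}})
    -> OrderedDict([('a', OrderedDict([('c', 3), ('d', 4)])), ('b', 2)])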
"""
if isinstance(d, dict):
return collections.OrderedDict([
(k, OrderDict(v)) for k, v in sorted(d.items())
])
elif isinstance(d, list):
return [OrderDict(v) for v in d]
else:
return d
class DictMergeError(Exception):
pass
def MergeNestedDicts(dict_a, dict_b):
"""Merge two nested dictionaries.
No actual leaves can be common to the two dictionaries.
Args:
dict_a: First dictionary to merge.
dict_b: Second dictionary to merge.
Returns:
A new nested dict combining the inputs.
Raises:
DictMergeError: If there is a conflict in combining the dictionaries.
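
  Example (illustrative):
    MergeNestedDicts({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
    -> {'a': {'b': 1, 'c': 2}, 'd': 3}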
"""
if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
raise DictMergeError('Both arguments must be dictionaries.')
merged = {}
all_keys = set(dict_a.keys()) | set(dict_b.keys())
for key in all_keys:
if key in dict_a and key in dict_b:
if (isinstance(dict_a[key], dict)
and isinstance(dict_b[key], dict)):
merged[key] = MergeNestedDicts(dict_a[key], dict_b[key])
else:
raise DictMergeError('Inputs cannot both set the same value %s.' % key)
elif key in dict_a:
merged[key] = copy.deepcopy(dict_a[key])
else:
merged[key] = copy.deepcopy(dict_b[key])
return merged
def UpdateNestedDict(d, u):
"""Returns a new updated nested dictionary."""
d = copy.deepcopy(d)
for k, v in u.iteritems():
if isinstance(v, dict):
d[k] = UpdateNestedDict(d.get(k, {}), v)
else:
d[k] = v
return d
class UnreadKeysError(Exception):
pass
class MustConsumeAllDictEntries(dict):
"""Context manager requiring that all keys in a dict be accessed.
Args:
d: Dictionary to guard.
Returns:
A new dictionary that records which fields are accessed.
Raises:
UnreadKeysError: if, upon leaving the protected context, there
      exist keys in the underlying dictionary that have not been read.
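
  Example usage (illustrative):
    with MustConsumeAllDictEntries({'a': 1, 'b': 2}) as d:
      total = d['a'] + d['b']  # Every key is read, so no UnreadKeysError on exit.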
"""
def __init__(self, d):
super(MustConsumeAllDictEntries, self).__init__(d)
self._access_log = set()
def __getitem__(self, key):
# Try __getitem__ before setting access log, in case key is not found.
result = dict.__getitem__(self, key)
self._access_log.add(key)
return result
def GetUnreadKeys(self):
return set(self.keys()) - self._access_log
def __enter__(self):
return self
def __exit__(self, *args):
if args[0]:
# An exception occurred.
return False
if self.GetUnreadKeys():
raise UnreadKeysError(self.GetUnreadKeys())
def GetAllDictPaths(tree_dict):
"""Obtain list of paths to all leaves in dictionary.
The last item in each list entry is the value at the leaf. For items in
  the dictionary that are a list of dictionaries, each list entry is indexed by a
  string representing its position in the list.
Implementation inspired by https://stackoverflow.com/a/40555856.
Args:
tree_dict: Input dictionary.
Returns:
List of lists with all paths to leaf items in dictionary.
Example:
>> a = {'a':[1 , 2, 3],
'b':[{'c': 10},
{'d': 20}]
}
    >> print GetAllDictPaths(a)
[['a', [1, 2, 3]],
['b', '0', 'c', 10],
['b', '1', 'd', 20]]
"""
if isinstance(tree_dict, list):
if isinstance(tree_dict[0], dict):
return [[str(i)] + path
for i, value in enumerate(tree_dict)
for path in GetAllDictPaths(value)]
else:
return [[tree_dict]]
elif not isinstance(tree_dict, dict):
return [[tree_dict]]
return [[key] + path
for key, value in tree_dict.items()
for path in GetAllDictPaths(value)]
|
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount/getaddressesbylabel
- listaddressgroupings
- setlabel
- sendfrom (with account arguments)
- move (with account arguments)
Run the test twice - once using the accounts API and once using the labels API.
The accounts API test can be removed in V0.18.
"""
from collections import defaultdict
from test_framework.test_framework import VergeTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class WalletLabelsTest(VergeTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [['-deprecatedrpc=accounts'], []]
def setup_network(self):
"""Don't connect nodes."""
self.setup_nodes()
def run_test(self):
"""Run the test twice - once using the accounts API and once using the labels API."""
self.log.info("Test accounts API")
self._run_subtest(True, self.nodes[0])
self.log.info("Test labels API")
self._run_subtest(False, self.nodes[1])
def _run_subtest(self, accounts_api, node):
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
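        # With 102 blocks mined, the first two coinbase rewards (50 each) have
        # matured (coinbase maturity is typically 100 blocks), hence the balance
        # of 100 checked below.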
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Verges
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name, accounts_api) for name in ("a", "b", "c", "d", "e")]
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), [label.name for label in labels])
# Send a transaction to each label, and make sure this forces
# getaccountaddress to generate a new receiving address.
for label in labels:
if accounts_api:
node.sendtoaddress(label.receive_address, amount_to_send)
label.add_receive_address(node.getaccountaddress(label.name))
else:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
# Check that sendfrom label reduces listaccounts balances.
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
if accounts_api:
node.sendfrom(label.name, to_label.receive_address, amount_to_send)
else:
node.sendfrom(label.name, to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
if accounts_api:
address = node.getaccountaddress(label.name)
else:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
if accounts_api:
node.move(label.name, "", node.getbalance(label.name))
label.verify(node)
node.generate(101)
expected_account_balances = {"": 5200}
for label in labels:
expected_account_balances[label.name] = 0
if accounts_api:
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
if accounts_api:
assert address not in node.getaddressesbyaccount("")
else:
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
node.sendfrom("", multisig_address, 50)
node.generate(101)
if accounts_api:
for label in labels:
assert_equal(node.getbalance(label.name), 50)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1], accounts_api)
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2], accounts_api)
if accounts_api:
# Check that setaccount can change the label of an address which
# is the receiving address of a different label.
change_label(node, labels[0].receive_address, labels[0], labels[1], accounts_api)
# Check that setaccount can set the label of an address which is
# already the receiving address of the label. This is a no-op.
change_label(node, labels[2].receive_address, labels[2], labels[2], accounts_api)
class Label:
def __init__(self, name, accounts_api):
# Label name
self.name = name
self.accounts_api = accounts_api
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
if self.accounts_api:
self.receive_address = address
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
if self.accounts_api:
assert_equal(node.getaccountaddress(self.name), self.receive_address)
for address in self.addresses:
assert_equal(
node.getaddressinfo(address)['labels'][0],
{"name": self.name,
"purpose": self.purpose[address]})
if self.accounts_api:
assert_equal(node.getaccount(address), self.name)
else:
assert_equal(node.getaddressinfo(address)['label'], self.name)
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
if self.accounts_api:
assert_equal(set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_label(node, address, old_label, new_label, accounts_api):
assert_equal(address in old_label.addresses, True)
if accounts_api:
node.setaccount(address, new_label.name)
else:
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
# Calling setaccount on an address which was previously the receiving
# address of a different account should reset the receiving address of
# the old account, causing getaccountaddress to return a brand new
# address.
if accounts_api:
if old_label.name != new_label.name and address == old_label.receive_address:
new_address = node.getaccountaddress(old_label.name)
assert_equal(new_address not in old_label.addresses, True)
assert_equal(new_address not in new_label.addresses, True)
old_label.add_receive_address(new_address)
old_label.verify(node)
new_label.verify(node)
if __name__ == '__main__':
WalletLabelsTest().main()
|
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 Abram Hindle
# Copyright 2016 Boyan Peychoff
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib
def help():
print "httpclient.py [URL] [GET/POST] \n"
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
def __str__(self):
return "code: " + str(self.code) + "\n body :" + self.body
class HTTPClient(object):
def parseURL(self,url):
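        # Splits a URL into [path, host, port], e.g. (illustrative):
        #   parseURL("http://example.com:8080/index.html") -> ['/index.html', 'example.com', 8080]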
stripped = re.sub('(http://|https://)?',"",url)
path = ""
host = ""
port = 80
maybePort=""
lookForPathFlag = False
for char in stripped:
if char != '/' and lookForPathFlag == False:
host+=char
else:
lookForPathFlag = True
if(lookForPathFlag):
path+=char
try:
host,maybePort = host.split(':')
        except ValueError:
print "port not specified using default: 80"
if path == '':
path = '/'
if maybePort != "":
port = int(maybePort)
return [path,host,int(port)]
def createRequestHeader(self,path,host,command="GET"):
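        # Builds the request line plus minimal headers, e.g. (illustrative):
        #   "GET /index.html HTTP/1.1 \r\nAccept: */* \r\nHost: example.com\r\nConnection: close \r\n"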
header = command + " " + path + " HTTP/1.1 \r\n"
header+= "Accept: */* \r\n"
header+= "Host: " + host + "\r\n"
header+= "Connection: close \r\n"
return header
def parseResponse(self,response):
code = ""
body = ""
responseParts = response.split('\n')
bodyFlag = False
body = []
for element in responseParts:
if element == "":
bodyFlag = True
continue # skip \n
if bodyFlag:
body.append(element)
joinedBody = "".join(body)
code = responseParts[0].split(" ")[1]
return [code,joinedBody]
def connect(self, host, port):
clientSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
clientSocket.connect((host,port))
return clientSocket
def get_code(self, data):
code = '500'
response = data.split('\n')
try:
code = response[0].split(" ")[1]
except IndexError:
            pass  # no body returned
return code
def get_headers(self,data):
response = data.split("\r\n")
headers=[]
for element in response:
if element in ['','\r']:
break
else:
headers.append(element)
return headers
def get_body(self, data):
response = data.split('\n')
bodyFlag = False
body = []
for element in response:
if element in ["",'\r']:
bodyFlag = True
continue # skip \n
if bodyFlag:
body.append(element)
joinedBody = "".join(body)
return joinedBody
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
#print str(buffer)
else:
done = not part
return str(buffer)
def GET(self, url, args=None):
path,host,port = self.parseURL(url)
if args != None:
if "?" in path:
encodedQuery = urllib.urlencode(args)
path+= ("&" +encodedQuery)
else:
query = "?" + urllib.urlencode(args)
path += query
headerToSend = self.createRequestHeader(path,host,"GET")
clientSocket = self.connect(host,port)
clientSocket.sendall(headerToSend + "\r\n\r\n")
httpResponse = self.recvall(clientSocket)
code = self.get_code(httpResponse)
headers= self.get_headers(httpResponse)
body = self.get_body(httpResponse)
return HTTPResponse(int(code), body)
def POST(self, url, args=None):
contentLength = 0
postBody = None
if (args != None):
postBody = urllib.urlencode(args)
contentLength = len(postBody)
path,host,port = self.parseURL(url)
headerToSend = self.createRequestHeader(path,host,"POST")
headerToSend+=('Content-Length: '+ str(contentLength) + '\r\n')
headerToSend+=('Content-Type: application/x-www-form-urlencoded\r\n')
clientSocket = self.connect(host,port)
if (postBody != None):
clientSocket.sendall(headerToSend + "\r\n" + postBody)
else:
clientSocket.sendall(headerToSend + "\r\n\r\n")
httpResponse = self.recvall(clientSocket)
code = self.get_code(httpResponse)
headers= self.get_headers(httpResponse)
body = self.get_body(httpResponse)
return HTTPResponse(int(code), body)
def command(self,url ,command="GET" ,args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print client.command( sys.argv[1], sys.argv[2] )
else:
        print client.command( sys.argv[1], command )
|
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Author: Alta Fang (altafang @caltech and alta @princeton)
# Copyright (c) 2008-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
# The following code attempts to construct something like:
# >>> from sympy import Eq, Symbol
# >>> from sympy import solve as symsol
# >>> x0 = Symbol('x0')
# >>> x1 = Symbol('x1')
# >>> x2 = Symbol('x2')
# >>> eq1 = Eq(x1, x0 - 2.)
# >>> eq2 = Eq(x1, x2*2.)
# >>> soln = symsol([eq2, eq1], [x0, x1, x2])
from mystic.tools import permutations
from mystic.tools import list_or_tuple_or_ndarray
def _classify_variables(constraints, variables='x', nvars=None):
"""Takes a string of constraint equations and determines which variables
are dependent, independent, and unconstrained. Assumes there are no duplicate
equations. Returns a dictionary with keys: 'dependent', 'independent', and
'unconstrained', and with values that enumerate the variables that match
each variable type.
Inputs:
constraints -- a string of symbolic constraints, with one constraint
equation per line. Constraints must be equality constraints only.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> constraints = '''
... x0 = x4**2
... x2 = x3 + x4'''
>>> _classify_variables(constraints, nvars=5)
{'dependent':['x0','x2'], 'independent':['x3','x4'], 'unconstrained':['x1']}
>>> constraints = '''
... x0 = x4**2
... x4 - x3 = 0.
... x4 - x0 = x2'''
>>> _classify_variables(constraints, nvars=5)
{'dependent': ['x0','x2','x4'], 'independent': ['x3'], 'unconstrained': ['x1']}
Additional Inputs:
nvars -- number of variables. Includes variables not explicitly
given by the constraint equations (e.g. 'x1' in the example above).
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
"""
if ">" in constraints or "<" in constraints:
raise NotImplementedError, "cannot classify inequalities"
from mystic.symbolic import replace_variables, get_variables
#XXX: use solve? or first if not in form xi = ... ?
if list_or_tuple_or_ndarray(variables):
if nvars is not None: variables = variables[:nvars]
constraints = replace_variables(constraints, variables)
varname = '$'
ndim = len(variables)
else:
varname = variables # varname used below instead of variables
myvar = get_variables(constraints, variables)
if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1
else: ndim = 0
if nvars is not None: ndim = nvars
eqns = constraints.splitlines()
indices = range(ndim)
dep = []
indep = []
for eqn in eqns: # find which variables are used
if eqn:
for var in range(ndim):
if indices.count(var) != 0:
if eqn.find(varname + str(var)) != -1:
indep.append(var)
indices.remove(var)
indep.sort()
_dep = []
for eqn in eqns: # find which variables are on the LHS
if eqn:
split = eqn.split('=')
for var in indep:
if split[0].find(varname + str(var)) != -1:
_dep.append(var)
indep.remove(var)
break
_dep.sort()
indep = _dep + indep # prefer variables found on LHS
for eqn in eqns: # find one dependent variable per equation
_dep = []
_indep = indep[:]
if eqn:
for var in _indep:
if eqn.find(varname + str(var)) != -1:
_dep.append(var)
_indep.remove(var)
if _dep:
dep.append(_dep[0])
indep.remove(_dep[0])
#FIXME: 'equivalent' equations not ignored (e.g. x2=x2; or x2=1, 2*x2=2)
"""These are good:
>>> constraints = '''
... x0 = x4**2
... x2 - x4 - x3 = 0.'''
>>> _classify_variables(constraints, nvars=5)
{'dependent': ['x0','x2'], 'independent': ['x3','x4'], 'unconstrained': ['x1']}
>>> constraints = '''
... x0 + x2 = 0.
... x0 + 2*x2 = 0.'''
>>> _classify_variables(constraints, nvars=5)
{'dependent': ['x0','x2'], 'independent': [], 'unconstrained': ['x1','x3','x4']}
This is a bug:
>>> constraints = '''
... x0 + x2 = 0.
... 2*x0 + 2*x2 = 0.'''
>>> _classify_variables(constraints, nvars=5)
{'dependent': ['x0','x2'], 'independent': [], 'unconstrained': ['x1','x3','x4']}
""" #XXX: should simplify first?
dep.sort()
indep.sort()
    # return the actual variable names (not the indices)
if varname == variables: # then was single variable
variables = [varname+str(i) for i in range(ndim)]
dep = [variables[i] for i in dep]
indep = [variables[i] for i in indep]
indices = [variables[i] for i in indices]
d = {'dependent':dep, 'independent':indep, 'unconstrained':indices}
return d
def _prepare_sympy(constraints, variables='x', nvars=None):
"""Parse an equation string and prepare input for sympy. Returns a tuple
of sympy-specific input: (code for variable declaration, left side of equation
string, right side of equation string, list of variables, and the number of
sympy equations).
Inputs:
constraints -- a string of symbolic constraints, with one constraint
equation per line. Constraints must be equality constraints only.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> constraints = '''
... x0 = x4**2
... x4 - x3 = 0.
... x4 - x0 = x2'''
>>> code, lhs, rhs, vars, neqn = _prepare_sympy(constraints, nvars=5)
>>> print code
x0=Symbol('x0')
x1=Symbol('x1')
x2=Symbol('x2')
x3=Symbol('x3')
x4=Symbol('x4')
rand = Symbol('rand')
>>> print lhs, rhs
['x0 ', 'x4 - x3 ', 'x4 - x0 '] [' x4**2', ' 0.', ' x2']
print "%s in %s eqns" % (vars, neqn)
x0,x1,x2,x3,x4, in 3 eqns
Additional Inputs:
nvars -- number of variables. Includes variables not explicitly
given by the constraint equations (e.g. 'x1' in the example above).
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
"""
if ">" in constraints or "<" in constraints:
raise NotImplementedError, "cannot simplify inequalities"
from mystic.symbolic import replace_variables, get_variables
#XXX: if constraints contain x0,x1,x3 for 'x', should x2 be in code,xlist?
if list_or_tuple_or_ndarray(variables):
if nvars is not None: variables = variables[:nvars]
constraints = replace_variables(constraints, variables, markers='_')
varname = '_'
ndim = len(variables)
else:
varname = variables # varname used below instead of variables
myvar = get_variables(constraints, variables)
if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1
else: ndim = 0
if nvars is not None: ndim = nvars
# split constraints_str into lists of left hand sides and right hand sides
eacheqn = constraints.splitlines()
neqns = 0
left = []
right = []
for eq in eacheqn: #XXX: Le/Ge instead of Eq; Max/Min... (NotImplemented ?)
splitlist = eq.replace('==','=').split('=') #FIXME: no inequalities
if len(splitlist) == 2: #FIXME: first convert >/< to min/max ?
# If equation is blank on one side, raise error.
if len(splitlist[0].strip()) == 0 or len(splitlist[1].strip()) == 0:
print eq, "is not an equation!" # Raise exception?
else:
left.append(splitlist[0])
right.append(splitlist[1])
neqns += 1
# If equation doesn't have one equal sign, raise error.
if len(splitlist) != 2 and len(splitlist) != 1:
print eq, "is not an equation!" # Raise exception?
# First create list of x variables
xlist = ""
for i in range(ndim):
xn = varname + str(i)
xlist += xn + ","
# Start constructing the code string
code = ""
for i in range(ndim):
xn = varname + str(i)
code += xn + '=' + "Symbol('" + xn + "')\n"
code += "rand = Symbol('rand')\n"
return code, left, right, xlist, neqns
def _solve_single(constraint, variables='x', target=None, **kwds):
"""Solve a symbolic constraints equation for a single variable.
Inputs:
constraint -- a string of symbolic constraints. Only a single constraint
equation should be provided, and must be an equality constraint.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> equation = "x1 - 3. = x0*x2"
>>> print _solve_single(equation)
x0 = -(3.0 - x1)/x2
Additional Inputs:
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
target -- list providing the order for which the variables will be solved.
By default, increasing order is used.
For example:
>>> equation = "x1 - 3. = x0*x2"
>>> print _solve_single(equation, target='x1')
x1 = 3.0 + x0*x2
Further Inputs:
locals -- a dictionary of additional variables used in the symbolic
constraints equations, and their desired values.
""" #XXX: an very similar version of this code is found in _solve_linear XXX#
# for now, we abort on multi-line equations or inequalities
if len(constraint.replace('==','=').split('=')) != 2:
raise NotImplementedError, "requires a single valid equation"
if ">" in constraint or "<" in constraint:
raise NotImplementedError, "cannot simplify inequalities"
nvars = None
permute = False # if True, return all permutations
    warn = True # if True, don't suppress warnings
verbose = False # if True, print debug info
#-----------------------undocumented-------------------------------
permute = kwds['permute'] if 'permute' in kwds else permute
warn = kwds['warn'] if 'warn' in kwds else warn
verbose = kwds['verbose'] if 'verbose' in kwds else verbose
#------------------------------------------------------------------
if target in [None, False]:
target = []
elif isinstance(target, str):
target = target.split(',')
else:
target = list(target) # not the best for ndarray, but should work
from mystic.symbolic import replace_variables, get_variables
if list_or_tuple_or_ndarray(variables):
if nvars is not None: variables = variables[:nvars]
constraints = replace_variables(constraint, variables, markers='_')
varname = '_'
ndim = len(variables)
for i in range(len(target)):
if variables.count(target[i]):
target[i] = replace_variables(target[i],variables,markers='_')
else:
constraints = constraint # constraints used below
varname = variables # varname used below instead of variables
myvar = get_variables(constraint, variables)
if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1
else: ndim = 0
if nvars is not None: ndim = nvars
# create function to replace "_" with original variables
def restore(variables, mystring):
if list_or_tuple_or_ndarray(variables):
vars = get_variables(mystring,'_')
indices = [int(v.strip('_')) for v in vars]
for i in range(len(vars)):
mystring = mystring.replace(vars[i],variables[indices[i]])
return mystring
# default is _locals with sympy imported
_locals = {}
locals = kwds['locals'] if 'locals' in kwds else None
if locals is None: locals = {}
try:
code = """from sympy import Eq, Symbol;"""
code += """from sympy import solve as symsol;"""
code = compile(code, '<string>', 'exec')
exec code in _locals
    except ImportError: # Equation will not be simplified.
if warn: print "Warning: sympy not installed."
return constraint
# default is _locals with numpy and math imported
# numpy throws an 'AttributeError', but math passes error to sympy
code = """from numpy import *; from math import *;""" # prefer math
code += """from numpy import mean as average;""" # use np.mean not average
code += """from numpy import var as variance;""" # look like mystic.math
code += """from numpy import ptp as spread;""" # look like mystic.math
code = compile(code, '<string>', 'exec')
exec code in _locals
_locals.update(locals) #XXX: allow this?
code,left,right,xlist,neqns = _prepare_sympy(constraints, varname, ndim)
eqlist = ""
for i in range(1, neqns+1):
eqn = 'eq' + str(i)
eqlist += eqn + ","
code += eqn + '= Eq(' + left[i-1] + ',' + right[i-1] + ')\n'
eqlist = eqlist.rstrip(',')
# get full list of variables in 'targeted' order
xperms = xlist.split(',')[:-1]
targeted = target[:]
[targeted.remove(i) for i in targeted if i not in xperms]
[targeted.append(i) for i in xperms if i not in targeted]
_target = []
[_target.append(i) for i in targeted if i not in _target]
targeted = _target
targeted = tuple(targeted)
########################################################################
# solve each xi: symsol(single_equation, [x0,x1,...,xi,...,xn])
# returns: {x0: f(xn,...), x1: f(xn,...), ..., xn: f(...,x0)}
if permute or not target: #XXX: the goal is solving *only one* equation
code += '_xlist = %s\n' % ','.join(targeted)
code += '_elist = [symsol(['+eqlist+'], [i]) for i in _xlist]\n'
code += '_elist = [i if isinstance(i, dict) else {j:i[-1][-1]} for j,i in zip(_xlist,_elist)]\n'
code += 'soln = {}\n'
code += '[soln.update(i) for i in _elist if i]\n'
else:
code += 'soln = symsol([' + eqlist + '], [' + target[0] + '])\n'
#code += 'soln = symsol([' + eqlist + '], [' + targeted[0] + '])\n'
code += 'soln = soln if isinstance(soln, dict) else {' + target[0] + ': soln[-1][-1]}\n'
########################################################################
if verbose: print code
code = compile(code, '<string>', 'exec')
try:
exec code in globals(), _locals
soln = _locals['soln']
if not soln:
if warn: print "Warning: target variable is not valid"
soln = {}
except NotImplementedError: # catch 'multivariate' error for older sympy
if warn: print "Warning: could not simplify equation."
return constraint #FIXME: resolve diff with _solve_linear
except NameError, error: # catch when variable is not defined
if warn: print "Warning:", error
soln = {}
if verbose: print soln
#XXX handles multiple solutions?
soln = dict([(str(key),str(value)) for key, value in soln.iteritems()])
soln = [(i,soln[i]) for i in targeted if i in soln] #XXX: order as targeted?
solns = []; solved = ""
for key,value in soln:
solved = str(key) + ' = ' + str(value) + '\n'
if solved: solns.append( restore(variables, solved.rstrip()) )
if not permute:
return None if not solns[:1] else solns[0]
return tuple(solns)
def _solve_linear(constraints, variables='x', target=None, **kwds):
"""Solve a system of symbolic linear constraints equations.
Inputs:
constraints -- a string of symbolic constraints, with one constraint
equation per line. Constraints must be equality constraints only.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> constraints = '''
... x0 - x2 = 2.
... x2 = x3*2.'''
>>> print _solve_linear(constraints)
x2 = 2.0*x3
x0 = 2.0 + 2.0*x3
Additional Inputs:
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
target -- list providing the order for which the variables will be solved.
If there are "N" constraint equations, the first "N" variables given
will be selected as the dependent variables. By default, increasing
order is used.
For example:
>>> constraints = '''
... x0 - x2 = 2.
... x2 = x3*2.'''
>>> print _solve_linear(constraints, target=['x3','x2'])
x3 = -1.0 + 0.5*x0
x2 = -2.0 + x0
Further Inputs:
locals -- a dictionary of additional variables used in the symbolic
constraints equations, and their desired values.
"""
nvars = None
permute = False # if True, return all permutations
    warn = True # if True, don't suppress warnings
verbose = False # if True, print debug info
#-----------------------undocumented-------------------------------
permute = kwds['permute'] if 'permute' in kwds else permute
warn = kwds['warn'] if 'warn' in kwds else warn
verbose = kwds['verbose'] if 'verbose' in kwds else verbose
#------------------------------------------------------------------
if target in [None, False]:
target = []
elif isinstance(target, str):
target = target.split(',')
else:
target = list(target) # not the best for ndarray, but should work
from mystic.symbolic import replace_variables, get_variables
if list_or_tuple_or_ndarray(variables):
if nvars is not None: variables = variables[:nvars]
_constraints = replace_variables(constraints, variables, '_')
varname = '_'
ndim = len(variables)
for i in range(len(target)):
if variables.count(target[i]):
target[i] = replace_variables(target[i],variables,markers='_')
else:
_constraints = constraints
varname = variables # varname used below instead of variables
myvar = get_variables(constraints, variables)
if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1
else: ndim = 0
if nvars is not None: ndim = nvars
# create function to replace "_" with original variables
def restore(variables, mystring):
if list_or_tuple_or_ndarray(variables):
vars = get_variables(mystring,'_')
indices = [int(v.strip('_')) for v in vars]
for i in range(len(vars)):
mystring = mystring.replace(vars[i],variables[indices[i]])
return mystring
# default is _locals with sympy imported
_locals = {}
locals = kwds['locals'] if 'locals' in kwds else None
if locals is None: locals = {}
# if sympy not installed, return original constraints
try:
code = """from sympy import Eq, Symbol;"""
code += """from sympy import solve as symsol;"""
code = compile(code, '<string>', 'exec')
exec code in _locals
    except ImportError: # Equation will not be simplified.
if warn: print "Warning: sympy not installed."
return constraints
# default is _locals with numpy and math imported
# numpy throws an 'AttributeError', but math passes error to sympy
code = """from numpy import *; from math import *;""" # prefer math
code += """from numpy import mean as average;""" # use np.mean not average
code += """from numpy import var as variance;""" # look like mystic.math
code += """from numpy import ptp as spread;""" # look like mystic.math
code = compile(code, '<string>', 'exec')
exec code in _locals
_locals.update(locals) #XXX: allow this?
code,left,right,xlist,neqns = _prepare_sympy(_constraints, varname, ndim)
eqlist = ""
for i in range(1, neqns+1):
eqn = 'eq' + str(i)
eqlist += eqn + ","
code += eqn + '= Eq(' + left[i-1] + ',' + right[i-1] + ')\n'
eqlist = eqlist.rstrip(',')
# get full list of variables in 'targeted' order
xperms = xlist.split(',')[:-1]
targeted = target[:]
[targeted.remove(i) for i in targeted if i not in xperms]
[targeted.append(i) for i in xperms if i not in targeted]
_target = []
[_target.append(i) for i in targeted if i not in _target]
targeted = _target
targeted = tuple(targeted)
if permute:
# Form constraints equations for each permutation.
# This will change the order of the x variables passed to symsol()
# to get different variables solved for.
xperms = list(permutations(xperms)) #XXX: takes a while if nvars is ~10
if target: # put the tuple with the 'targeted' order first
xperms.remove(targeted)
xperms.insert(0, targeted)
else:
xperms = [tuple(targeted)]
solns = []
for perm in xperms:
_code = code
xlist = ','.join(perm).rstrip(',') #XXX: if not all, use target ?
# solve dependent xi: symsol([linear_system], [x0,x1,...,xi,...,xn])
# returns: {x0: f(xn,...), x1: f(xn,...), ...}
_code += 'soln = symsol([' + eqlist + '], [' + xlist + '])'
#XXX: need to convert/check soln similarly as in _solve_single ?
if verbose: print _code
_code = compile(_code, '<string>', 'exec')
try:
exec _code in globals(), _locals
soln = _locals['soln']
if not soln:
if warn: print "Warning: could not simplify equation."
soln = {}
except NotImplementedError: # catch 'multivariate' error
if warn: print "Warning: could not simplify equation."
soln = {}
except NameError, error: # catch when variable is not defined
if warn: print "Warning:", error
soln = {}
if verbose: print soln
solved = ""
for key, value in soln.iteritems():
solved += str(key) + ' = ' + str(value) + '\n'
if solved: solns.append( restore(variables, solved.rstrip()) )
if not permute:
return None if not solns[:1] else solns[0]
# Remove duplicates
filter = []; results = []
for i in solns:
_eqs = '\n'.join(sorted(i.split('\n')))
if _eqs not in filter:
filter.append(_eqs)
results.append(i)
return tuple(results)
# Create strings of all permutations of the solved equations.
# Remove duplicates, then take permutations of the lines of equations
# to create equations in different orders.
# noduplicates = []
# [noduplicates.append(i) for i in solns if i not in noduplicates]
# stringperms = []
# for item in noduplicates:
# spl = item.splitlines()
# for perm in permutations(spl):
# permstring = ""
# for line in perm:
# permstring += line + '\n'
# stringperms.append(permstring.rstrip())
# return tuple(stringperms)
def solve(constraints, variables='x', target=None, **kwds):
"""Solve a system of symbolic constraints equations.
Inputs:
constraints -- a string of symbolic constraints, with one constraint
equation per line. Constraints must be equality constraints only.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> constraints = '''
... x0 - x2 = 2.
... x2 = x3*2.'''
>>> print solve(constraints)
x2 = 2.0*x3
x0 = 2.0 + 2.0*x3
>>> constraints = '''
... spread([x0,x1]) - 1.0 = mean([x0,x1])
... mean([x0,x1,x2]) = x2'''
>>> print solve(constraints)
x0 = -0.5 + 0.5*x2
x1 = 0.5 + 1.5*x2
Additional Inputs:
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
target -- list providing the order for which the variables will be solved.
If there are "N" constraint equations, the first "N" variables given
will be selected as the dependent variables. By default, increasing
order is used.
For example:
>>> constraints = '''
... x0 - x2 = 2.
... x2 = x3*2.'''
>>> print solve(constraints, target=['x3','x2'])
x3 = -1.0 + 0.5*x0
x2 = -2.0 + x0
Further Inputs:
locals -- a dictionary of additional variables used in the symbolic
constraints equations, and their desired values.
"""
#-----------------------undocumented-------------------------------
#kwds['permute'] = False # if True, return all permutations
kwds['warn'] = False # if True, don't supress warnings
kwds['verbose'] = False # if True, print debug info
#------------------------------------------------------------------
try:
if len(constraints.replace('==','=').split('=')) <= 2:
soln = _solve_single(constraints, variables=variables, \
target=target, **kwds)
else:
soln = _solve_linear(constraints, variables=variables, \
target=target, **kwds)
if not soln: raise ValueError
except:
soln = _solve_nonlinear(constraints, variables=variables, \
target=target, **kwds)
return soln
def _solve_nonlinear(constraints, variables='x', target=None, **kwds):
"""Build a constraints function given a string of nonlinear constraints.
Returns a constraints function.
Inputs:
constraints -- a string of symbolic constraints, with one constraint
equation per line. Constraints must be equality constraints only.
Standard python syntax should be followed (with the math and numpy
modules already imported).
For example:
>>> constraints = '''x1 = x3*3. + x0*x2'''
>>> print _solve_nonlinear(constraints)
x0 = (x1 - 3.0*x3)/x2
>>> constraints = '''
... spread([x0,x1]) - 1.0 = mean([x0,x1])
... mean([x0,x1,x2]) = x2'''
>>> print _solve_nonlinear(constraints)
x0 = -0.5 + 0.5*x2
x1 = 0.5 + 1.5*x2
Additional Inputs:
variables -- desired variable name. Default is 'x'. A list of variable
name strings is also accepted for when desired variable names
don't have the same base, and can include variables that are not
found in the constraints equation string.
target -- list providing the order for which the variables will be solved.
If there are "N" constraint equations, the first "N" variables given
will be selected as the dependent variables. By default, increasing
order is used.
For example:
>>> constraints = '''
... spread([x0,x1]) - 1.0 = mean([x0,x1])
... mean([x0,x1,x2]) = x2'''
>>> print _solve_nonlinear(constraints, target=['x1'])
x1 = -0.833333333333333 + 0.166666666666667*x2
x0 = -0.5 + 0.5*x2
Further Inputs:
locals -- a dictionary of additional variables used in the symbolic
constraints equations, and their desired values.
"""
nvars = None
permute = False # if True, return all permutations
    warn = True # if True, don't suppress warnings
verbose = False # if True, print details from _classify_variables
#-----------------------undocumented-------------------------------
permute = kwds['permute'] if 'permute' in kwds else permute
warn = kwds['warn'] if 'warn' in kwds else warn
verbose = kwds['verbose'] if 'verbose' in kwds else verbose
#------------------------------------------------------------------
if target in [None, False]:
target = []
elif isinstance(target, str):
target = target.split(',')
else:
target = list(target) # not the best for ndarray, but should work
from mystic.symbolic import replace_variables, get_variables
if list_or_tuple_or_ndarray(variables):
if nvars is not None: variables = variables[:nvars]
constraints = replace_variables(constraints, variables, '_')
varname = '_'
ndim = len(variables)
else:
varname = variables # varname used below instead of variables
myvar = get_variables(constraints, variables)
if myvar: ndim = max([int(v.strip(varname)) for v in myvar]) + 1
else: ndim = 0
if nvars is not None: ndim = nvars
# create function to replace "_" with original variables
def restore(variables, mystring):
if list_or_tuple_or_ndarray(variables):
vars = get_variables(mystring,'_')
indices = [int(v.strip('_')) for v in vars]
for i in range(len(vars)):
mystring = mystring.replace(vars[i],variables[indices[i]])
return mystring
locals = kwds['locals'] if 'locals' in kwds else None
if locals is None: locals = {}
eqns = constraints.splitlines()
# Remove empty strings:
actual_eqns = []
for j in range(len(eqns)):
if eqns[j].strip():
actual_eqns.append(eqns[j].strip())
orig_eqns = actual_eqns[:]
neqns = len(actual_eqns)
xperms = [varname+str(i) for i in range(ndim)]
if target:
[target.remove(i) for i in target if i not in xperms]
[target.append(i) for i in xperms if i not in target]
_target = []
[_target.append(i) for i in target if i not in _target]
target = _target
target = tuple(target)
xperms = list(permutations(xperms)) #XXX: takes a while if nvars is ~10
if target: # Try the suggested order first.
xperms.remove(target)
xperms.insert(0, target)
complete_list = []
constraints_function_list = []
# Some of the permutations will give the same answer;
# look into reducing the number of repeats?
for perm in xperms:
# Sort the list actual_eqns so any equation containing x0 is first, etc.
sorted_eqns = []
actual_eqns_copy = orig_eqns[:]
usedvars = []
for variable in perm: # range(ndim):
for eqn in actual_eqns_copy:
if eqn.find(variable) != -1:
sorted_eqns.append(eqn)
actual_eqns_copy.remove(eqn)
usedvars.append(variable)
break
if actual_eqns_copy: # Append the remaining equations
for item in actual_eqns_copy:
sorted_eqns.append(item)
actual_eqns = sorted_eqns
# Append the remaining variables to usedvars
tempusedvar = usedvars[:]
tempusedvar.sort()
nmissing = ndim - len(tempusedvar)
for m in range(nmissing):
usedvars.append(varname + str(len(tempusedvar) + m))
for i in range(neqns):
# Trying to use xi as a pivot. Loop through the equations
# looking for one containing xi.
_target = usedvars[i]
for eqn in actual_eqns[i:]:
invertedstring = _solve_single(eqn, variables=varname, target=_target, warn=warn)
if invertedstring:
warn = False
break
# substitute into the remaining equations. the equations' order
# in the list newsystem is like in a linear coefficient matrix.
newsystem = ['']*neqns
j = actual_eqns.index(eqn)
newsystem[j] = eqn
othereqns = actual_eqns[:j] + actual_eqns[j+1:]
for othereqn in othereqns:
expression = invertedstring.split("=")[1]
fixed = othereqn.replace(_target, '(' + expression + ')')
k = actual_eqns.index(othereqn)
newsystem[k] = fixed
actual_eqns = newsystem
# Invert so that it can be fed properly to generate_constraint
simplified = []
for eqn in actual_eqns:
_target = usedvars[actual_eqns.index(eqn)]
mysoln = _solve_single(eqn, variables=varname, target=_target, warn=warn)
if mysoln: simplified.append(mysoln)
simplified = restore(variables, '\n'.join(simplified).rstrip())
if permute:
complete_list.append(simplified)
continue
if verbose:
print _classify_variables(simplified, variables, ndim)
return simplified
warning='Warning: an error occurred in building the constraints.'
if warn: print warning
if verbose:
print _classify_variables(simplified, variables, ndim)
if permute: #FIXME: target='x3,x1' may order correct, while 'x1,x3' doesn't
filter = []; results = []
for i in complete_list:
_eqs = '\n'.join(sorted(i.split('\n')))
if _eqs and (_eqs not in filter):
filter.append(_eqs)
results.append(i)
return tuple(results) #FIXME: somehow 'rhs = xi' can be in results
return simplified
# EOF
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 09:17:36 2018
@author: barnhark
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal # , assert_array_almost_equal
from landlab import RasterModelGrid
from landlab.bmi import wrap_as_bmi
from landlab.components import LithoLayers, Lithology
def test_lithology_as_bmi():
"""Test Lithology can be wrapped with a BMI."""
wrap_as_bmi(Lithology)
def test_bad_layer_method():
"""Test passing a bad name for the layer method."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1]
ids = [1, 2, 1, 2]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs, layer_type="spam")
def test_no_topographic__elevation():
"""Test init with no topo__elevation."""
mg = RasterModelGrid((3, 3))
thicknesses = [1, 2, 4, 1]
ids = [1, 2, 1, 2]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_thickness_ids_wrong_shape():
"""Test wrong size thickness and id shapes."""
# first with thicknesses and IDs both as ndim = 1 arrays
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
# next as both as ndim = 2 arrays
ones = np.ones(mg.number_of_nodes)
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1 * ones, 2 * ones, 4 * ones, 1 * ones, 5 * ones]
ids = [1 * ones, 2 * ones, 1 * ones, 2 * ones]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
# now with thickness as ndim 2 and id as ndim 1
ones = np.ones(mg.number_of_nodes)
mg = RasterModelGrid((3, 3))
thicknesses = [1 * ones, 2 * ones, 4 * ones, 1 * ones, 5 * ones]
ids = [1, 2, 1, 2]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_thickness_ndim3():
"""Test too many ndim for thickness."""
# next as both as ndim = 3 arrays
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
mg = RasterModelGrid((3, 3))
ones = np.ones((mg.number_of_nodes, 2))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1 * ones, 2 * ones, 4 * ones, 1 * ones, 5 * ones]
ids = [1, 2, 1, 2]
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_id_ndim3():
"""Test too many ndim for ids."""
# next as both as ndim = 3 arrays
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
mg = RasterModelGrid((3, 3))
ones = np.ones(mg.number_of_nodes)
extra_ones = np.ones((mg.number_of_nodes, 2))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1 * ones, 2 * ones, 4 * ones, 1 * ones, 5 * ones]
ids = [1 * extra_ones, 2 * extra_ones, 1 * extra_ones, 2 * extra_ones]
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_thickness_nodes_wrong_shape():
"""Test wrong size thickness and id shapes."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
ones = np.ones(mg.number_of_nodes + 1)
thicknesses = [1 * ones, 2 * ones, 4 * ones, 1 * ones, 5 * ones]
ids = [1 * ones, 2 * ones, 1 * ones, 2 * ones, 1 * ones]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_atts_lack_ids():
"""Test Lithology missing ID."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {2: 0.0001}, "age": {1: 100, 2: 300}}
with pytest.raises(ValueError):
Lithology(mg, thicknesses, ids, attrs)
def test_erode_to_zero_thickness():
"""Test that eroding Lithology to zero thickness raises an error."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
with pytest.raises(ValueError):
lith.add_layer(-100)
def test_deposit_with_no_rock_id():
"""Test that adding a deposit to Lithology with no id raises an error."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
with pytest.raises(ValueError):
lith.add_layer(100)
def test_deposit_with_bad_rock_id():
"""Test that adding a deposit to Lithology with no id raises an error."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
with pytest.raises(ValueError):
lith.add_layer(100, rock_id=3)
ones = np.ones(mg.number_of_nodes)
new_ids = [0, 1, 3, 4, 0, 1, 0, 1, 5]
with pytest.raises(ValueError):
lith.add_layer(ones, rock_id=new_ids)
def test_adding_existing_attribute():
"""Test adding an existing attribute."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"K_sp": {1: 0.001, 2: 0.0001}}
with pytest.raises(ValueError):
lith.add_property(new_attr)
def test_adding_new_attribute_missing_rock_id():
"""Test adding an new attribute missing an existing rock id."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"D": {2: 0.0001}}
with pytest.raises(ValueError):
lith.add_property(new_attr)
def test_adding_new_attribute_extra_rock_id():
"""Test adding an new attribute with an extra rock id."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"D": {1: 0.001, 2: 0.0001, 3: 5.3}}
with pytest.raises(ValueError):
lith.add_property(new_attr)
def test_adding_new_id_existing_rock_type():
"""Test adding an rock type that already exists."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"K_sp": {1: 0.001, 5: 0.0001}}
with pytest.raises(ValueError):
lith.add_rock_type(new_attr)
def test_adding_new_id_extra_attribute():
"""Test adding an new rock type with an extra attribute."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"K_sp": {4: 0.001, 5: 0.0001}, "D": {4: 0.001, 5: 0.0001}}
with pytest.raises(ValueError):
lith.add_rock_type(new_attr)
def test_adding_new_id_missing_attribute():
"""Test adding an new rock type with an extra attribute."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
new_attr = {"D": {4: 0.001, 5: 0.0001}}
with pytest.raises(ValueError):
lith.add_rock_type(new_attr)
def test_updating_attribute_that_doesnt_exist():
"""Test updating an attribute that doesn't exist."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
with pytest.raises(ValueError):
lith.update_rock_properties("spam", 1, 4)
def test_updating_rock_type_that_doesnt_exist():
"""Test adding an new rock type with an extra attribute."""
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
with pytest.raises(ValueError):
lith.update_rock_properties("K_sp", 3, 4)
def test_run_one_step_deposit_no_id_raises_error():
"""Test that giving the run one step method a deposit with no id raises an error."""
mg = RasterModelGrid((3, 3))
z = mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
z += 1
with pytest.raises(ValueError):
lith.run_one_step()
def test_run_one_step_erodes_all_raises_error():
"""Test that eroding all material with the run one step method raises an error."""
mg = RasterModelGrid((3, 3))
z = mg.add_zeros("topographic__elevation", at="node")
thicknesses = [1, 2, 4, 1, 5]
ids = [1, 2, 1, 2, 1]
attrs = {"K_sp": {1: 0.001, 2: 0.0001}}
lith = Lithology(mg, thicknesses, ids, attrs)
z -= 30
with pytest.raises(ValueError):
lith.run_one_step()
def test_rock_block_xarray():
"""Test that the xarray method works as expected."""
sample_depths = np.arange(0, 10, 1)
mg = RasterModelGrid((3, 3))
mg.add_zeros("topographic__elevation", at="node")
layer_ids = np.tile([0, 1, 2, 3], 5)
layer_elevations = 3.0 * np.arange(-10, 10)
layer_elevations[-1] = layer_elevations[-2] + 100
attrs = {"K_sp": {0: 0.0003, 1: 0.0001, 2: 0.0002, 3: 0.0004}}
lith = LithoLayers(
mg, layer_elevations, layer_ids, function=lambda x, y: x + y, attrs=attrs
)
ds = lith.rock_cube_to_xarray(sample_depths)
expected_array = np.array(
[
[[3.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 1.0]],
[[3.0, 3.0, 2.0], [3.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
[[3.0, 3.0, 3.0], [3.0, 3.0, 2.0], [3.0, 2.0, 2.0]],
[[0.0, 3.0, 3.0], [3.0, 3.0, 3.0], [3.0, 3.0, 2.0]],
[[0.0, 0.0, 3.0], [0.0, 3.0, 3.0], [3.0, 3.0, 3.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 3.0], [0.0, 3.0, 3.0]],
[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
[[2.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 0.0]],
]
)
assert_array_equal(ds.rock_type__id.values, expected_array)
|
|
import os
from dvc.logger import Logger
from dvc.config import Config
from dvc.executor import Executor, ExecutorError
from dvc.system import System
class GitWrapperI(object):
COMMIT_LEN = 7
def __init__(self, git_dir=None, commit=None):
self._git_dir = git_dir
self._commit = commit
@property
def git_dir(self):
return self._git_dir
@property
def lock_file(self):
return os.path.join(self.git_dir_abs, '.' + Config.CONFIG + '.lock')
@property
def git_dir_abs(self):
return System.realpath(self.git_dir)
@property
def curr_dir_abs(self):
return os.path.abspath(os.curdir)
@property
def curr_commit(self):
return self._commit
def is_ready_to_go(self):
return True
@staticmethod
def git_file_statuses():
Logger.debug('[dvc-git] Getting file statuses. Command: git status --porcelain')
code, out, err = Executor.exec_cmd(['git', 'status', '--porcelain'])
if code != 0:
raise ExecutorError('[dvc-git] File status command error - {}'.format(err))
Logger.debug('[dvc-git] Getting file statuses. Success.')
return GitWrapper.parse_porcelain_files(out)
@staticmethod
def git_config_get(name):
code, out, err = Executor.exec_cmd(['git', 'config', '--get', name])
Logger.debug('[dvc-git] "git config --get {}": code({}), out({}), err({})'.format(
name, code, out, err))
if code != 0:
return None
return out
@staticmethod
def git_path_to_system_path(path):
if os.name == 'nt':
return path.replace('/', '\\')
return path
@staticmethod
def parse_porcelain_files(out):
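        # `git status --porcelain` emits one line per path: a two-character
        # status code, a space, then the path. For example (illustrative,
        # not taken from the original), " M dvc/git_wrapper.py" is parsed
        # into (' M', 'dvc/git_wrapper.py'), with separators converted to
        # backslashes on Windows.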
result = []
if len(out) > 0:
lines = out.split('\n')
for line in lines:
status = line[:2]
file = GitWrapperI.git_path_to_system_path(line[3:])
result.append((status, file))
return result
def abs_paths_to_dvc(self, files):
result = []
for file in files:
result.append(os.path.relpath(os.path.abspath(file), self.git_dir_abs))
return result
def commit_all_changes_and_log_status(self, message):
pass
class GitWrapper(GitWrapperI):
def __init__(self):
super(GitWrapper, self).__init__()
def is_ready_to_go(self):
statuses = self.git_file_statuses()
if len(statuses) > 0:
            Logger.error('[dvc-git] Commit all changed files before running a reproducible command. Changed files:')
for status, file in statuses:
Logger.error("{} {}".format(status, file))
return False
# Sanity check to make sure we will be able to commit
name = self.git_config_get('user.name')
        if name is None:
Logger.error('[dvc-git] Please setup user.name in git config')
return False
email = self.git_config_get('user.email')
        if email is None:
Logger.error('[dvc-git] Please setup user.email in git config')
return False
return True
@property
def curr_dir_dvc(self):
return os.path.relpath(os.path.abspath(os.curdir), self.git_dir_abs)
@property
def git_dir(self):
if self._git_dir:
return self._git_dir
try:
Logger.debug('[dvc-git] Getting git directory. Command: git rev-parse --show-toplevel')
code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--show-toplevel'])
if code != 0:
raise ExecutorError('[dvc-git] Git directory command error - {}'.format(err))
Logger.debug('[dvc-git] Getting git directory. Success.')
self._git_dir = out
return self._git_dir
except ExecutorError:
raise
except Exception as e:
raise ExecutorError('Unable to run git command: {}'.format(e))
pass
@property
def curr_commit(self):
Logger.debug('[dvc-git] Getting current git commit. Command: git rev-parse --short HEAD')
code, out, err = Executor.exec_cmd(['git', 'rev-parse', '--short', 'HEAD'])
if code != 0:
raise ExecutorError('[dvc-git] Commit command error - {}'.format(err))
Logger.debug('[dvc-git] Getting current git commit. Success.')
return out
@staticmethod
def commit_all_changes(message):
Logger.debug('[dvc-git] Commit all changes. Commands: {} && {} && {}'.format(
'git add --all', 'git status --porcelain', 'git commit -m'))
Executor.exec_cmd_only_success(['git', 'add', '--all'])
out_status = Executor.exec_cmd_only_success(['git', 'status', '--porcelain'])
Executor.exec_cmd_only_success(['git', 'commit', '-m', message])
Logger.debug('[dvc-git] Commit all changes. Success.')
return GitWrapper.parse_porcelain_files(out_status)
def commit_all_changes_and_log_status(self, message):
statuses = self.commit_all_changes(message)
Logger.debug('[dvc-git] A new commit {} was made in the current branch. Added files:'.format(
self.curr_commit))
for status, file in statuses:
Logger.debug('[dvc-git]\t{} {}'.format(status, file))
pass
@staticmethod
def abs_paths_to_relative(files):
cur_dir = System.realpath(os.curdir)
result = []
for file in files:
result.append(os.path.relpath(System.realpath(file), cur_dir))
return result
def dvc_paths_to_abs(self, files):
results = []
for file in files:
results.append(os.path.join(self.git_dir_abs, file))
return results
def were_files_changed(self, code_dependencies, path_factory, changed_files):
code_files, code_dirs = self.separate_dependency_files_and_dirs(code_dependencies)
code_files_set = set([path_factory.path(x).dvc for x in code_files])
for changed_file in changed_files:
if changed_file in code_files_set:
return True
for dir in code_dirs:
if changed_file.startswith(dir):
return True
return False
@staticmethod
def get_changed_files(target_commit):
Logger.debug('[dvc-git] Identify changes. Command: git diff --name-only HEAD {}'.format(
target_commit))
changed_files_str = Executor.exec_cmd_only_success(['git', 'diff', '--name-only', 'HEAD', target_commit])
changed_files = changed_files_str.strip('"').split('\n')
Logger.debug('[dvc-git] Identify changes. Success. Changed files: {}'.format(
', '.join(changed_files)))
return changed_files
@staticmethod
def get_target_commit(file):
try:
commit = Executor.exec_cmd_only_success(['git', 'log', '-1', '--pretty=format:"%h"', file])
return commit.strip('"')
except ExecutorError:
return None
def separate_dependency_files_and_dirs(self, code_dependencies):
code_files = []
code_dirs = []
code_dependencies_abs = self.dvc_paths_to_abs(code_dependencies)
for code in code_dependencies_abs:
if os.path.isdir(code):
code_dirs.append(code)
else:
code_files.append(code)
return code_files, code_dirs
|
|
from tkinter import *
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk
from endeavour import endeavour_functions
import csv
import os
# Location of csv file to save result to.
log_filename = r'results_log.csv'
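# Each saved row holds: the predicted digit, the ten class probabilities, the
# classifier name, the input file name, its parent directory name, and the
# full input directory (see save() below).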
master = Tk()
master.title("Machine Learning for Image Recognition Purposes")
master.geometry("450x500")
global source_filename
source_filename = None
softmax_dir = r'..\mnist_softmax_models\softmax_alpha=0.1/'
softmax_name = 'model-999999.meta'
mlp_dir = r'..\mnist_mlp_models_regularised\mlp_nlayers=1_nunits=200/'
mlp_name = 'model.meta'
conv_dir = r'..\mnist_conv_models\conv_nlayers=1_nfeatures=32/'
conv_name = 'model.meta'
def load():
global source_filename
source_filename = askopenfilename()
image = Image.open(source_filename).resize((200,200), Image.ANTIALIAS)
photo = ImageTk.PhotoImage(image)
lab_source.configure(image=photo)
lab_source.image = photo
def convert():
threshold = slide_threshold.get()
endeavour_functions.process_image(source_filename, 'processed.jpg', threshold=threshold)
image = Image.open('processed.jpg').resize((200,200))
photo = ImageTk.PhotoImage(image)
lab_dest.configure(image=photo)
lab_dest.image = photo
#lab_dest.pack()
def classify():
if var.get() == 'Softmax Classifier':
model_dir = softmax_dir
model_name = softmax_name
elif var.get() == 'MLP Classifier':
model_dir = mlp_dir
model_name = mlp_name
else:
model_dir = conv_dir
model_name = conv_name
print(var.get())
print("Using classifier ", model_dir+model_name)
digit = endeavour_functions.classify_image('processed.jpg', model_dir, model_name)
ent_class.delete(0, END)
ent_class.insert(0, str(digit[0]))
    # Update all ten probability entries in one pass.
    dist_entries = [ent_dist_0, ent_dist_1, ent_dist_2, ent_dist_3, ent_dist_4,
                    ent_dist_5, ent_dist_6, ent_dist_7, ent_dist_8, ent_dist_9]
    for i, entry in enumerate(dist_entries):
        entry.delete(0, END)
        entry.insert(0, '{0:.6f}'.format(digit[1][i]))
def save():
classification = ent_class.get()
probs = [ent_dist_0.get(), ent_dist_1.get(), ent_dist_2.get(), ent_dist_3.get(), ent_dist_4.get(),
ent_dist_5.get(), ent_dist_6.get(), ent_dist_7.get(), ent_dist_8.get(), ent_dist_9.get()]
classifier = var.get()
input_dir, input_file = os.path.split(source_filename)
input_par_dir = os.path.split(input_dir)[1]
fields = [classification, *probs, classifier, input_file, input_par_dir, input_dir]
with open(log_filename, 'a') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(fields)
print('Saved results to log')
# Place all image operations in left grid
left_frame = Frame(master)
left_frame.pack(side=LEFT)
btn_load = Button(left_frame, text="Load Image", command=load)
btn_load.grid(row=0)
lab_source = Label(left_frame)
lab_source.grid(row=1)
thresh_frame = Frame(left_frame)
thresh_frame.grid(row=2)
btn_convert = Button(left_frame, text="Convert Image", command=convert)
btn_convert.grid(row=3)
lab_dest = Label(left_frame)
lab_dest.grid(row=4)
# Threshold label and slider in thresh frame
lab_thresh = Label(thresh_frame, text="Threshold:")
lab_thresh.pack(side=LEFT)
thresh_val = IntVar()
slide_threshold = Scale(thresh_frame, from_=0, to=255, variable=thresh_val, showvalue=0,
orient=HORIZONTAL, resolution=5)
slide_threshold.set(150)
slide_threshold.pack(side=LEFT)
lab_thresh_val = Label(thresh_frame, textvariable=thresh_val)
lab_thresh_val.pack(side=LEFT)
# Place all classification operations in right grid
right_frame = Frame(master)
right_frame.pack(side=RIGHT)
model_frame = Frame(right_frame)
model_frame.grid(row=0)
btn_classify = Button(right_frame, text="Classify Image", command=classify)
btn_classify.grid(row=1)
class_frame = Frame(right_frame)
class_frame.grid(row=2, pady=10)
lab_prob = Label(right_frame, text="Output Probability Distribution:")
lab_prob.grid(row=3)
dist_frame = Frame(right_frame)
dist_frame.grid(row=4)
btn_save = Button(right_frame, text="Save Results", command=save)
btn_save.grid(row=5, pady=10)
# Model label and selector in model frame
lab_model = Label(model_frame, text="Model:")
lab_model.pack(side=LEFT)
var = StringVar(master)
var.set('Softmax Classifier')
opt_model = OptionMenu(model_frame, var, 'Softmax Classifier', 'MLP Classifier', 'Convolutional Classifier')
opt_model.pack(side=LEFT)
# Classification label and value in class frame
lab_class = Label(class_frame, text="Predicted digit:")
lab_class.pack(side=LEFT)
ent_class = Entry(class_frame, width=5)
ent_class.pack(side=LEFT)
# Distribution frame
lab_dist_0 = Label(dist_frame, text="0:").grid(row=0)
lab_dist_1 = Label(dist_frame, text="1:").grid(row=1)
lab_dist_2 = Label(dist_frame, text="2:").grid(row=2)
lab_dist_3 = Label(dist_frame, text="3:").grid(row=3)
lab_dist_4 = Label(dist_frame, text="4:").grid(row=4)
lab_dist_5 = Label(dist_frame, text="5:").grid(row=0, column=2)
lab_dist_6 = Label(dist_frame, text="6:").grid(row=1, column=2)
lab_dist_7 = Label(dist_frame, text="7:").grid(row=2, column=2)
lab_dist_8 = Label(dist_frame, text="8:").grid(row=3, column=2)
lab_dist_9 = Label(dist_frame, text="9:").grid(row=4, column=2)
ent_dist_0 = Entry(dist_frame, width=8)
ent_dist_1 = Entry(dist_frame, width=8)
ent_dist_2 = Entry(dist_frame, width=8)
ent_dist_3 = Entry(dist_frame, width=8)
ent_dist_4 = Entry(dist_frame, width=8)
ent_dist_5 = Entry(dist_frame, width=8)
ent_dist_6 = Entry(dist_frame, width=8)
ent_dist_7 = Entry(dist_frame, width=8)
ent_dist_8 = Entry(dist_frame, width=8)
ent_dist_9 = Entry(dist_frame, width=8)
ent_dist_0.grid(row=0, column=1)
ent_dist_1.grid(row=1, column=1)
ent_dist_2.grid(row=2, column=1)
ent_dist_3.grid(row=3, column=1)
ent_dist_4.grid(row=4, column=1)
ent_dist_5.grid(row=0, column=3)
ent_dist_6.grid(row=1, column=3)
ent_dist_7.grid(row=2, column=3)
ent_dist_8.grid(row=3, column=3)
ent_dist_9.grid(row=4, column=3)
"""
opt_model.pack()
slide_threshold.pack()
btn_load.pack()
btn_convert.pack()
btn_classify.pack()
lab_source.pack()
lab_dest.pack()
lab_class.pack()
lab_prob.pack()
"""
mainloop()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import six
import os, sys
import re
import inspect
import pkgutil
import importlib
import argparse
import json
import logging
import arrow
import requests
import getpass
from collections import OrderedDict
from natsort import natsorted
from .models import *
from .util import *
from .roles import Role
from . import __version__
from . import cache
from . import config as g_config
# Figure out where this script is, and change the PATH appropriately
BASE = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
sys.path.insert(0, BASE)
# Create an argument parser
parser = argparse.ArgumentParser(description=u"Easily manage Halon nodes and clusters.")
subparsers = parser.add_subparsers(title='subcommands', dest='_mod_name', metavar='cmd')
subparsers.required = True
# All available modules and output formatters
modules = {}
formatters = {}
# Loaded configuration, configured nodes and clusters
config = {}
nodes = {}
clusters = {}
# Disable unverified HTTPS warnings - we know what we're doing
requests.packages.urllib3.disable_warnings()
# Regex that matches quick-connect nodes
quick_node_re = re.compile(r'^(?:(?P<name>[a-zA-Z0-9_-]+)=)?(?:(?P<protocol>https?)://)?(?P<data>(?P<username>[^@]+)@(?P<host>[a-zA-Z0-9\-\.]+)(?::(?P<port>[0-9]+))?)$')
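# Illustrative strings the pattern above accepts (hypothetical examples):
#   "admin@mail01.example.com"
#   "mx1=https://admin@mail01.example.com:443"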
def load_modules(modules_path):
'''Load all modules from the 'modules' directory.'''
# Figure out where all the modules are, then treat it as a package,
# iterating over its modules and attempting to load each of them. The
# 'module' variable in the imported module is the module instance to use -
# register that with the application. Yay, dynamic loading abuse.
for loader, name, ispkg in pkgutil.iter_modules(path=[modules_path]):
mod = loader.find_module(name).load_module(name)
if hasattr(mod, 'module'):
register_module(name.rstrip('_'), mod.module)
else:
print(u"Ignoring invalid module (missing 'module' variable): {name}".format(name=name), file=sys.stderr)
def load_formatters(formatters_path):
'''Load all formatters from the 'formatters' directory.'''
for loader, name, ispkg in pkgutil.iter_modules(path=[formatters_path]):
fmt = loader.find_module(name).load_module(name)
if hasattr(fmt, 'formatter'):
formatters[name.rstrip('_')] = fmt.formatter
else:
print(u"Ignoring invalid formatter (missing 'formatter' member): {name}".format(name=name), file=sys.stderr)
def register_module(name, mod):
'''Registers a loaded module instance'''
p = subparsers.add_parser(name, help=mod.__doc__)
p.set_defaults(_mod=mod)
mod.register_arguments(p)
modules[name] = mod
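# A minimal sketch of what a loadable module is expected to look like, inferred
# from how register_module() and main() use it; the names below are purely
# illustrative, and only the 'module' attribute, register_arguments(), run(),
# exitcode and partial are actually assumed by this file:
#
#     class PingModule(object):
#         '''Check that nodes respond.'''
#         exitcode = 0
#         partial = False
#
#         def register_arguments(self, parser):
#             parser.add_argument('--timeout', type=int, default=5)
#
#         def run(self, nodes, args):
#             for node in nodes:
#                 yield (node.name, 'ok')
#
#     module = PingModule()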
def open_config():
'''Opens a configuration file from the first found default location.'''
config_paths = [
os.path.abspath(os.path.join(BASE, '..', 'halonctl.json')),
os.path.expanduser('~/.config/halonctl.json'),
os.path.expanduser('~/halonctl.json'),
os.path.expanduser('~/.halonctl.json'),
'/etc/halonctl.json',
]
config_path = None
for p in config_paths:
if os.path.exists(p):
config_path = p
break
if not config_path:
print(u"No configuration file found!", file=sys.stderr)
print(u"", file=sys.stderr)
print(u"Please create one in one of the following locations:", file=sys.stderr)
print(u"", file=sys.stderr)
for p in config_paths:
print(" - {0}".format(p), file=sys.stderr)
print(u"", file=sys.stderr)
print(u"Or use the -C/--config flag to specify a path.", file=sys.stderr)
print(u"A sample config can be found at:", file=sys.stderr)
print(u"", file=sys.stderr)
print(u" - {0}".format(os.path.abspath(os.path.join(BASE, 'halonctl.sample.json'))), file=sys.stderr)
print(u"", file=sys.stderr)
sys.exit(1)
return open(config_path, 'rU')
def load_config(f):
'''Loads configuration data from a given file.'''
try:
conf = json.load(f, encoding='utf-8', object_pairs_hook=OrderedDict)
except ValueError as e:
sys.exit(u"Configuration Syntax Error: {0}".format(e))
f.close()
g_config.config.update(conf)
return conf
def process_config(config):
'''Processes a configuration dictionary into usable components.'''
nodes = OrderedDict([(name, Node(data, name)) for name, data in six.iteritems(config.get('nodes', {}))])
clusters = {}
for name, data in six.iteritems(config.get('clusters', {})):
cluster = NodeList()
cluster.name = name
cluster.load_data(data)
for nid in data['nodes'] if isinstance(data, dict) else data:
if not nid in nodes:
sys.exit(u"Configuration Error: Cluster '{cid}' references nonexistent node '{nid}'".format(cid=cluster.name, nid=nid))
node = nodes[nid]
node.cluster = cluster
cluster.append(node)
clusters[cluster.name] = cluster
return (nodes, clusters)
def apply_slice(list_, slice_):
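    # Slices are given in a 1-based, end-inclusive notation and translated to
    # Python slicing below. Illustrative examples (not from the original):
    #   apply_slice(['a', 'b', 'c', 'd'], '2:3') -> ['b', 'c']
    #   apply_slice(['a', 'b', 'c', 'd'], '2')   -> ['b']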
if not slice_ or not list_:
return list_
offsets = [-1, 0, 0]
parts = [int(p) + offsets[i] if p else None for i, p in enumerate(slice_.split(':'))]
try:
return list_[slice(*parts)] if len(parts) > 1 else [list_[parts[0]]]
except IndexError:
return []
def apply_filter(available_nodes, available_clusters, nodes, clusters, slice_=''):
targets = OrderedDict()
if len(nodes) == 0 and len(clusters) == 0:
for node in six.itervalues(apply_slice(available_nodes, slice_)):
targets[node.name] = node
else:
for cid in clusters:
for node in apply_slice(available_clusters[cid], slice_):
targets[node.name] = node
for nid in apply_slice(nodes, slice_):
targets[nid] = available_nodes[nid]
return NodeList(targets.values())
def download_wsdl(nodes, verify):
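    # Re-download the shared WSDL at most every ~12 hours, trying each target
    # node in turn until one of them serves it successfully.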
path = cache.get_path(u"wsdl.xml")
min_mtime = arrow.utcnow().replace(hours=-12)
if not os.path.exists(path) or arrow.get(os.path.getmtime(path)) < min_mtime:
has_been_downloaded = False
for node in nodes:
try:
r = requests.get(u"{scheme}://{host}/remote/?wsdl".format(scheme=node.scheme, host=node.host), stream=True, verify=False if node.no_verify else verify)
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r.iter_content(256):
f.write(chunk)
has_been_downloaded = True
break
except requests.exceptions.SSLError:
print_ssl_error(node)
sys.exit(1)
except:
pass
if not has_been_downloaded:
sys.exit("None of your nodes are available, can't download WSDL")
def main():
# Configure logging
logging.basicConfig(level=logging.ERROR)
logging.getLogger('suds.client').setLevel(logging.CRITICAL)
# Load modules and formatters
base_paths = [BASE, os.path.expanduser('~/halonctl'), os.path.expanduser('~/.halonctl')]
for path in base_paths:
load_modules(os.path.join(path, 'modules'))
load_formatters(os.path.join(path, 'formatters'))
# Figure out the program version
version = __version__
try:
head_path = os.path.join(os.path.dirname(__file__), '..', '.git', 'refs', 'heads', 'master')
with open(head_path) as f:
revision = f.read().strip()[:7]
version = "{version} ({revision})".format(version=__version__, revision=revision)
except IOError:
pass
# Add parser arguments
parser.add_argument('-V', '--version', action='version', version=u"halonctl {version}".format(version=version),
help=u"print version information and exit")
parser.add_argument('-C', '--config', type=argparse.FileType('rU'),
help="use specified configuration file")
parser.add_argument('-n', '--node', dest='nodes', action='append', metavar="NODES",
default=[], help=u"target nodes")
parser.add_argument('-c', '--cluster', dest='clusters', action='append', metavar="CLUSTERS",
default=[], help=u"target clusters")
parser.add_argument('-s', '--slice', dest='slice',
default='', help=u"slicing, as a Python slice expression")
parser.add_argument('-d', '--dry', dest='dry_run', action='store_true',
help=u"only list the nodes that would be affected")
parser.add_argument('-i', '--ignore-partial', action='store_true',
help=u"exit normally even for partial results")
parser.add_argument('-f', '--format', choices=list(formatters.keys()), default='table',
help=u"use the specified output format (default: table)")
parser.add_argument('-r', '--raw', action='store_true',
help=u"don't humanize the output, output it as raw as possible")
parser.add_argument('-g', '--group-by', metavar="KEY",
help=u"group output; ignored for table-like formats")
parser.add_argument('-k', '--key', dest='group_key', action='store_true',
help=u"assume grouper is unique, and key only a single value to it")
parser.add_argument('--clear-cache', action='store_true',
help=u"clear the WSDL cache")
# Parse!
args = parser.parse_args()
# Clear cache if requested
if args.clear_cache:
os.remove(cache.get_path(u"wsdl.xml"))
# Load configuration
config = load_config(args.config or open_config())
nodes, clusters = process_config(config)
# Allow wildcard cluster- and node targeting
if args.clusters == ['-']:
args.clusters = list(clusters.keys())
if args.nodes == ['-']:
args.nodes = list(nodes.keys())
# Allow slices without targeting, defaulting to each cluster
if args.slice and not args.clusters and not args.nodes:
args.clusters = list(clusters.keys())
# Allow non-configured nodes to be specified as '[name:]username@host'
quick_node_matches = [ quick_node_re.match(n) for n in args.nodes ]
quick_node_args = []
for m in [ m for m in quick_node_matches if m ]:
arg = m.group(0)
data = m.group('data')
n = Node(data, name=m.group('name') or m.group('host'))
n.no_verify = True # Don't verify SSL for quick nodes
nodes[arg] = n
quick_node_args.append(arg)
if quick_node_args:
l = NodeList([nodes[arg] for arg in quick_node_args])
for node, (code, result) in six.iteritems(l.service.login()):
if code == 401:
while True:
password = getpass.getpass(u"Password for {node.username}@{node.host}: ".format(node=node))
if not password:
break
node.password = password
code = node.service.login()[0]
if code == 200:
break
elif code == 401:
print(u"Invalid login, try again")
elif code == 0:
print(u"The node has gone away")
break
else:
print(u"An error occurred, code {0}".format(code))
break
# Validate cluster and node choices
invalid_clusters = [cid for cid in args.clusters if not cid in clusters]
if invalid_clusters:
print(u"Unknown clusters: {0}".format(', '.join(invalid_clusters)), file=sys.stderr)
print(u"Available: {0}".format(', '.join(six.iterkeys(clusters))), file=sys.stderr)
sys.exit(1)
invalid_nodes = [nid for nid in args.nodes if not nid in nodes and nid not in quick_node_args]
if invalid_nodes:
print(u"Unknown nodes: {0}".format(', '.join(invalid_nodes)), file=sys.stderr)
print(u"Available: {0}".format(', '.join(six.iterkeys(nodes))), file=sys.stderr)
sys.exit(1)
# Filter nodes to choices
target_nodes = apply_filter(nodes, clusters, args.nodes, args.clusters, args.slice)
# If this is a dry run - stop right here and just print the targets
if args.dry_run:
print(u"This action would have affected:")
for node in target_nodes:
print(u" - {name} ({cluster})".format(name=node.name, cluster=node.cluster.name))
return
# Download WSDL and create client objects
download_wsdl(target_nodes, verify=config.get('verify_ssl', True))
for node in target_nodes:
node.load_wsdl()
# Run the selected module
mod = args._mod
retval = mod.run(target_nodes, args)
# Normalize generator mods into lists (lets us detect emptiness)
if inspect.isgenerator(retval):
retval = list(retval)
# Print something, if there's anything to print
if retval:
if hasattr(retval, 'draw'):
print(retval.draw())
elif isinstance(retval, Role):
print(retval.raw() if args.raw else retval.human())
else:
print(formatters[args.format].run(retval, args))
# Let the module decide the exit code - either by explicitly setting it, or
# by marking the result as partial, in which case a standard exit code is
# returned unless the user has requested partial results to be ignored
if mod.exitcode != 0:
sys.exit(mod.exitcode)
elif mod.partial and not args.ignore_partial:
sys.exit(99)
if __name__ == '__main__':
main()
|
|
"""
Test for the various mlab source functions.
These tests are higher level than the tests testing directly the
MlabSource subclasses. They are meant to capture errors in the formatting
of the input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
import unittest
import numpy as np
from mayavi.tools import sources
################################################################################
# `BaseTestSource`
################################################################################
class BaseTestSource(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def all_close(self, a, b):
""" Similar to numpy's allclose, but works also for a=None.
"""
if a is None or b is None:
self.assertIsNone(a)
self.assertIsNone(b)
else:
            self.assert_(np.allclose(a, b))
def check_positions(self, source, x, y, z):
""" Check that the position vectors of the source do correspond
to the given input positions
"""
self.assert_(np.allclose(source.mlab_source.x, x))
self.assert_(np.allclose(source.mlab_source.y, y))
self.assert_(np.allclose(source.mlab_source.z, z))
def check_vectors(self, source, u, v, w):
""" Check that the vector data corresponds to the given arrays.
"""
self.all_close(source.mlab_source.u, u)
self.all_close(source.mlab_source.v, v)
self.all_close(source.mlab_source.w, w)
def check_scalars(self, source, s):
""" Check that the scalar data corresponds to the given array.
"""
self.all_close(source.mlab_source.scalars, s)
################################################################################
# `TestScalarScatter`
################################################################################
class TestScalarScatter(BaseTestSource):
def test_input_args(self):
""" Check that scalar_scatter can take different input arguments """
# Check for a single number as position vectors.
ss = sources.scalar_scatter(0, 0, 0, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, None)
self.check_vectors(ss, None, None, None)
# Check for a single number as scalar data, and no position
# vectors.
ss = sources.scalar_scatter(0, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, 0)
self.check_vectors(ss, None, None, None)
# Check for a list as position vectors.
ss = sources.scalar_scatter([0, 1], [0, 1], [0, 1], figure=None)
self.check_positions(ss, [0, 1], [0, 1], [0, 1])
self.check_scalars(ss, None)
self.check_vectors(ss, None, None, None)
# Check for a list as scalar data, and no position vectors.
ss = sources.scalar_scatter([0, 1], figure=None)
self.check_scalars(ss, [0, 1])
self.check_vectors(ss, None, None, None)
# Check for a 1D array as position vectors.
a = np.array([0, 1])
ss = sources.scalar_scatter(a, a, a, figure=None)
self.check_positions(ss, a, a, a)
self.check_scalars(ss, None)
self.check_vectors(ss, None, None, None)
# Check for a 1D array as a scalar data, and no position vectors.
ss = sources.scalar_scatter(a, figure=None)
self.check_scalars(ss, a)
self.check_vectors(ss, None, None, None)
# Check for a 2D array as position vectors.
a = np.array([[0, 1], [2, 3]])
ss = sources.scalar_scatter(a, a, a, figure=None)
self.check_positions(ss, a, a, a)
self.check_scalars(ss, None)
self.check_vectors(ss, None, None, None)
# Check for a 2D array as scalar data, and no position vectors.
ss = sources.scalar_scatter(a, figure=None)
self.check_scalars(ss, a)
self.check_vectors(ss, None, None, None)
################################################################################
# `TestVectorScatter`
################################################################################
class TestVectorScatter(BaseTestSource):
def test_input_args(self):
""" Check that vector_scatter can take different input arguments """
# Check for a single number as a position vector.
ss = sources.vector_scatter(0, 0, 0, 0, 0, 0, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, None)
self.check_vectors(ss, 0, 0, 0)
# Check for no position vectors, and single numbers for vector
# data.
ss = sources.vector_scatter(0, 0, 0, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, None)
self.check_vectors(ss, 0, 0, 0)
# Check for a list as a position vector.
ss = sources.vector_scatter([0, 1], [0, 1], [0, 1],
[0, 1], [0, 1], [0, 1], figure=None)
self.check_positions(ss, [0, 1], [0, 1], [0, 1])
self.check_scalars(ss, None)
self.check_vectors(ss, [0, 1], [0, 1], [0, 1])
# Check for a lists as a vector data, and no position vectors
ss = sources.vector_scatter([0, 1], [0, 1], [0, 1], figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, [0, 1], [0, 1], [0, 1])
# Check for a 1D array as a position vector.
a = np.array([0, 1])
ss = sources.vector_scatter(a, a, a, a, a, a, figure=None)
self.check_positions(ss, a, a, a)
self.check_scalars(ss, None)
self.check_vectors(ss, a, a, a)
# Check for a 1D array as vector data, and no position vectors.
ss = sources.vector_scatter(a, a, a, figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, a, a, a)
# Check for a 2D array as a position vector.
a = np.array([[0, 1], [2, 3]])
ss = sources.vector_scatter(a, a, a, a, a, a, figure=None)
self.check_positions(ss, a, a, a)
self.check_scalars(ss, None)
self.check_vectors(ss, a, a, a)
# Check for a 2D array as vector data, and no position vectors.
ss = sources.vector_scatter(a, a, a, figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, a, a, a)
# Check for a 3D array as a position vector.
x, y, z = np.mgrid[0:3, 0:3, 0:3]
ss = sources.vector_scatter(x, y, z, x, y, z, figure=None)
self.check_positions(ss, x, y, z)
self.check_scalars(ss, None)
self.check_vectors(ss, x, y, z)
        # Check for a 3D array as scalar data (via scalar_scatter), with implicit index positions.
x, y, z = np.mgrid[0:3, 0:3, 0:3]
ss = sources.scalar_scatter(z, figure=None)
self.check_scalars(ss, z)
X, Y, Z = np.indices(z.shape)
self.check_positions(ss, X, Y, Z)
################################################################################
# `TestArray2DSource`
################################################################################
class TestArray2DSource(BaseTestSource):
def test_input_args(self):
""" Check that array2d_source can take different input arguments """
# Check for a single number as data and no position arrays.
ss = sources.array2d_source(0, figure=None)
self.check_scalars(ss, 0)
# Check for a list as data, and no position arrays.
ss = sources.array2d_source([0, 1], figure=None)
self.check_scalars(ss, [0, 1])
# Check for a 1D array as data, and no position arrays.
a = np.array([0, 1])
ss = sources.array2d_source(a, figure=None)
self.check_scalars(ss, a)
# Check for a 2D array as data, and no position arrays.
a = np.array([[0, 1], [2, 3]])
ss = sources.array2d_source(a, figure=None)
self.check_scalars(ss, a)
# Check for 2 lists as positions vectors, and a 2D list as data
x = [0, 1]
y = [0, 1]
s = [[0, 1], [2, 3]]
ss = sources.array2d_source(x, y, s, figure=None)
self.check_scalars(ss, s)
# Check for an ogrid as position vectors, and a function for the
# scalars
x, y = np.ogrid[-3:3, -3:3]
f = lambda x, y: x**2 + y**2
ss = sources.array2d_source(x, y, f, figure=None)
self.check_scalars(ss, f(x, y))
# Check for an mgrid as position vectors, and a 2D array for the
# scalars
x, y = np.mgrid[-3:3, -3:3]
s = np.zeros_like(x)
        ss = sources.array2d_source(x, y, s, figure=None)
self.check_scalars(ss, s)
################################################################################
# `TestScalarField`
################################################################################
class TestScalarField(BaseTestSource):
def test_input_args(self):
""" Check that scalar_field can take different input arguments """
# Check for 2D arrays as positions vectors, and a function for
# the data
f = lambda x, y, z: x**2 + y**2
x, y = np.mgrid[-3:3, -3:3]
z = np.zeros_like(x)
ss = sources.scalar_field(x, y, z, f, figure=None)
self.check_positions(ss, x, y, z)
s = f(x, y, z)
self.check_scalars(ss, s)
# Check for a 2D array as data, and no position vectors
s = np.random.random((10, 10))
ss = sources.scalar_field(s, figure=None)
self.check_scalars(ss, s)
# Check for a 3D array as data, and no position vectors
s = np.random.random((10, 10, 10))
ss = sources.scalar_field(s, figure=None)
self.check_scalars(ss, s)
# Check for a 3D array as data, and 3D arrays as position
x, y, z = np.mgrid[-3:3, -3:3, -3:3]
ss = sources.scalar_field(x, y, z, z, figure=None)
self.check_positions(ss, x, y, z)
self.check_scalars(ss, z)
################################################################################
# `TestVectorField`
################################################################################
class TestVectorField(BaseTestSource):
def test_input_args(self):
""" Check that vector_field can take different input arguments """
# Check for 2D arrays as positions vectors, and a function for
# the data
x, y = np.mgrid[-3:3, -3:3]
z = np.zeros_like(x)
def f(x, y, z):
return y, z, x
ss = sources.vector_field(x, y, z, f, figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, y, z, x)
# Check for a 2D array as data, and no position vectors
u = np.random.random((10, 10))
v = np.random.random((10, 10))
w = np.random.random((10, 10))
ss = sources.vector_field(u, v, w, figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, u, v, w)
# Check for a 3D array as data, and no position vectors
u = np.random.random((10, 10, 10))
v = np.random.random((10, 10, 10))
w = np.random.random((10, 10, 10))
ss = sources.vector_field(u, v, w, figure=None)
self.check_scalars(ss, None)
self.check_vectors(ss, u, v, w)
# Check for a 3D array as data, and 3D arrays as position
x, y, z = np.mgrid[-3:3, -3:3, -3:3]
ss = sources.vector_field(x, y, z, y, z, x, figure=None)
self.check_scalars(ss, None)
self.check_positions(ss, x, y, z)
self.check_vectors(ss, y, z, x)
################################################################################
# `TestLineSource`
################################################################################
class TestLineSource(BaseTestSource):
def test_input_args(self):
""" Check that vector_field can take different input arguments """
# Check for numbers as position vectors
ss = sources.line_source(0, 0, 0, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, None)
# Check for lists as position vectors and as data
ss = sources.line_source([0, 1], [0, 1], [0, 1], [2, 3], figure=None)
self.check_positions(ss, [0, 1], [0, 1], [0, 1])
self.check_scalars(ss, [2, 3])
# Check for arrays as position vectors and a function as data
x, y, z = np.random.random((3, 10))
f = lambda x, y, z: x + y + z
ss = sources.line_source(x, y, z, f, figure=None)
self.check_positions(ss, x, y, z)
self.check_scalars(ss, f(x, y, z))
################################################################################
# `TestVerticalVectorsSource`
################################################################################
class TestVerticalVectorsSource(BaseTestSource):
def test_input_args(self):
""" Check that vector_field can take different input arguments """
# Check for numbers as position vectors
ss = sources.vertical_vectors_source(0, 0, 1, figure=None)
self.check_positions(ss, 0, 0, 0)
self.check_scalars(ss, 1)
self.check_vectors(ss, 0, 0, 1)
ss = sources.vertical_vectors_source(0, 0, 1, 1, figure=None)
self.check_positions(ss, 0, 0, 1)
self.check_scalars(ss, 1)
self.check_vectors(ss, 0, 0, 1)
# Check for lists as position vectors and as data
ss = sources.vertical_vectors_source([0, 1], [0, 1], [0, 1], [2, 3],
figure=None)
self.check_positions(ss, [0, 1], [0, 1], [0, 1])
self.check_scalars(ss, [2, 3])
self.check_vectors(ss, [0, 0], [0, 0], [2, 3])
# Check for arrays as position vectors and a function as data
x, y, z = np.random.random((3, 10))
zeros = np.zeros_like(x)
f = lambda x, y, z: x + y + z
ss = sources.vertical_vectors_source(x, y, z, f, figure=None)
self.check_positions(ss, x, y, z)
self.check_scalars(ss, f(x, y, z))
self.check_vectors(ss, zeros, zeros, z)
ss = sources.vertical_vectors_source(x, y, z, figure=None)
self.check_positions(ss, x, y, zeros)
self.check_scalars(ss, z)
self.check_vectors(ss, zeros, zeros, z)
################################################################################
# `TestSourceInfinite`
################################################################################
class TestSourceInfinite(unittest.TestCase):
def test_infinite(self):
""" Check that passing in arrays with infinite values raises
errors """
# Some arrays
x = np.random.random((10, 3, 4))
y = np.random.random((10, 3, 4))
z = np.random.random((10, 3, 4))
u = np.random.random((10, 3, 4))
v = np.random.random((10, 3, 4))
w = np.random.random((10, 3, 4))
s = np.random.random((10, 3, 4))
# Add a few infinite values:
u[2, 2, 1] = np.inf
s[0, 0, 0] = -np.inf
# Check value errors are raised because of the infinite values
self.assertRaises(ValueError,
sources.grid_source, x[0], y[0], z[0], scalars=s[0],
figure=None)
self.assertRaises(ValueError,
sources.vertical_vectors_source, x, y, z, s,
figure=None)
self.assertRaises(ValueError,
sources.array2d_source, x[0], y[0], s[0],
figure=None)
self.assertRaises(ValueError,
sources.scalar_field, x, y, z, s,
figure=None)
self.assertRaises(ValueError,
sources.scalar_scatter, x, y, z, s,
figure=None)
self.assertRaises(ValueError,
sources.vector_scatter, x, y, z, u, v, w,
figure=None)
self.assertRaises(ValueError,
sources.vector_field, x, y, z, u, v, w,
figure=None)
self.assertRaises(ValueError,
sources.line_source, x[0, 0], y[0, 0], z[0, 0], s[0, 0],
figure=None)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
import pickle
import os
import cv2
import numpy as np
import atexit
import copy
import time
import six
import gc
import abc
from abc import abstractmethod
try:
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Input, concatenate
from keras.engine.topology import InputLayer
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from .hyperopt_keras import HyperoptWrapper, getBestParams, DEFAULT_NUM_EVALS, tune
from .google_storage import upload, downloadIfAvailable
from .task import DEBUG
from .preprocess import ImageNormalizer
from .data import SCALES, DATASET_LABEL, DatasetManager
from .util import TempH5pyFile
hp = HyperoptWrapper()
NET_FILE_NAMES = {False: {SCALES[0][0]: '12net.hdf', SCALES[1][0]: '24net.hdf', SCALES[2][0]: '48net.hdf'},
True: {SCALES[0][0]: '12calibnet.hdf', SCALES[1][0]: '24calibnet.hdf', SCALES[2][0]: '48calibnet.hdf'}}
DROPOUT_PARAM_ID = 'dropout'
OPTIMIZER_PARAMS = ['lr', 'momentum', 'decay', 'nesterov']
NORMALIZATION_PARAMS = ['norm', 'flip']
TRAIN_PARAMS = ['batchSize']
OPTIMIZER = SGD
DEFAULT_BATCH_SIZE = 128
PREDICTION_BATCH_SIZE = 256
DEFAULT_NUM_EPOCHS = 300
DEFAULT_Q_SIZE = 10
DEBUG_FILE_PATH = 'debug.hdf'
PARAM_FILE_NAME_FORMAT_STR = '%sparams'
STAGE_ONE_NOT_TRAINED_ERROR = 'You must train the stage one models before moving onto stage two!'
STAGE_TWO_NOT_TRAINED_ERROR = 'You must train the stage two models before moving onto stage three!'
def _convert(data):
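    # Recursively turn bytes into str so params pickled under Python 2 load
    # cleanly under Python 3. Illustrative: _convert({b'lr': 0.1}) -> {'lr': 0.1}.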
if isinstance(data, bytes):
return data.decode('ascii')
elif isinstance(data, (dict, tuple, list, set)):
return type(data)(map(_convert, data.items() if hasattr(data, 'items') else data))
return data
@six.add_metaclass(abc.ABCMeta)
class ObjectClassifier():
DEFAULT_DROPOUT = .3
LOSS = 'binary_crossentropy'
METRICS = ['accuracy']
def __init__(self, stageIdx):
self.imgSize = SCALES[stageIdx]
self.inputShape = (self.imgSize[1], self.imgSize[0], 3)
self.additionalNormalizers = []
self.trainedModel = None
self.bestParams = None
if stageIdx == 2:
self.PARAM_SPACE = copy.deepcopy(self.PARAM_SPACE)
self.PARAM_SPACE.update({'dropout2': hp.uniform(0, .75)})
self.update()
@abstractmethod
def __call__(self):
pass
def getStageIdx(self):
return self.stageIdx
def getParamFilePath(self):
paramFilePath = PARAM_FILE_NAME_FORMAT_STR % (os.path.splitext(self.getWeightsFilePath())[0],)
if not os.path.isfile(paramFilePath): downloadIfAvailable(paramFilePath)
return paramFilePath
def getParamSpace(self):
return self.PARAM_SPACE
def getWeightsFilePath(self):
weightsFilePath = NET_FILE_NAMES[isinstance(self, ObjectCalibrator)][SCALES[self.stageIdx][0]]
if not os.path.isfile(weightsFilePath): downloadIfAvailable(weightsFilePath)
return weightsFilePath
def getNormalizationMethod(self):
return self.bestParams['norm'] if self.wasTuned() else ImageNormalizer.STANDARD_NORMALIZATION
def getNormalizationParams(self):
params = {}
if self.wasTuned():
params = {k: v for k, v in self.bestParams.items() if k in NORMALIZATION_PARAMS}
del params['norm']
return params
def getAdditionalNormalizers(self, datasetManagerParams = None):
if not self.additionalNormalizers:
for i in np.arange(0, self.stageIdx):
self.additionalNormalizers.append(DatasetManager(MODELS[i][0], **datasetManagerParams).getNormalizer())
return self.additionalNormalizers
def getDropouts(self):
dropouts = [self.DEFAULT_DROPOUT] * len([k for k in self.getParamSpace() if k.startswith(DROPOUT_PARAM_ID)])
if self.wasTuned():
dropouts = []
for k, v in self.bestParams.items():
if k.startswith(DROPOUT_PARAM_ID):
idx = int(k.replace(DROPOUT_PARAM_ID, ''))
dropouts.insert(idx, v)
return dropouts
def getOptimizerParams(self):
return {k: v for k, v in self.bestParams.items() if k in OPTIMIZER_PARAMS} if self.wasTuned() else {}
def getBatchSize(self):
return DEFAULT_BATCH_SIZE if not self.wasTuned() else self.bestParams['batchSize']
def getSaveFilePath(self, debug = DEBUG):
return self.getWeightsFilePath() if not debug else DEBUG_FILE_PATH
def wasTuned(self):
return os.path.isfile(self.getParamFilePath())
def update(self):
if self.wasTuned():
with open(self.getParamFilePath(), 'rb') as paramFile:
loadArgs = {} if six.PY2 else {'encoding': 'bytes'}
trials = pickle.load(paramFile, **loadArgs)
trials.__dict__ = _convert(trials.__dict__)
best = _convert(trials.best_trial['misc']['vals'])
self.bestParams = getBestParams(self.getParamSpace(), best)
def compile(self, params = {}, loss = LOSS, metrics = METRICS):
if len(params) == 0 and self.bestParams: params = self.bestParams
self.model.compile(loss = loss, optimizer = OPTIMIZER(**params), metrics = metrics)
def tune(self, datasetManager, labels, metric, verbose = True, numEvals = DEFAULT_NUM_EVALS):
paramSpace = self.getParamSpace()
paramFilePath = self.getParamFilePath()
best, trials = tune(paramSpace, self, datasetManager, labels, metric, verbose = verbose, numEvals = numEvals)
if verbose:
print('Best model parameters found:', getBestParams(paramSpace, best))
with open(paramFilePath, 'wb') as modelParamFile:
pickle.dump(trials, modelParamFile, protocol = 2)
upload(paramFilePath)
self.update()
def getInputGenerator(self, X, y, normalizers, **normalizerParams):
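        # Yields batches shaped for the cascade: for classifiers past stage
        # one, each batch is a list holding the images resized and normalized
        # for every earlier scale plus the current one; calibration nets get
        # a single input at their own scale.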
isCalibrationNet = isinstance(self, ObjectCalibrator)
normalizerParams.update({'labels': y})
generator = normalizers[self.stageIdx if not isCalibrationNet else 0].preprocess(X, **normalizerParams)
normalizerParams.update({'batchSize': None, 'labels': None})
while True:
X, y = next(generator)
X_extended = []
if not isCalibrationNet:
for i in np.arange(0, self.stageIdx+1):
if i != self.stageIdx:
X_extended.append(normalizers[i].preprocess(np.vstack([cv2.resize(img, SCALES[i])[np.newaxis] for img in X]), **normalizerParams))
X_extended.insert(self.stageIdx, X)
yield X_extended, y
def fit(self, X_train, X_test, y_train, y_test, datasetManager, normalizer = None, numEpochs = DEFAULT_NUM_EPOCHS, saveFilePath = None, batchSize = None,
compileParams = None, dropouts = None, verbose = True, debug = DEBUG):
params = [compileParams or self.getOptimizerParams(), dropouts or self.getDropouts(), datasetManager.getParams()]
saveFilePath = saveFilePath or self.getSaveFilePath(debug)
batchSize = batchSize or self.getBatchSize()
callbacks = [ModelCheckpoint(saveFilePath, monitor = 'val_loss', save_best_only = True, verbose = int(verbose))]
model = self(*params)
y_train, y_test = (np_utils.to_categorical(vec, int(np.amax(vec) + 1)) for vec in (y_train, y_test))
normalizers = self.additionalNormalizers + [normalizer or datasetManager.getNormalizer()]
if debug: print(params, saveFilePath, batchSize, X_train.shape, X_test.shape, y_train.shape, y_test.shape)
model.fit_generator(self.getInputGenerator(X_train, y_train, normalizers, shuffle = True, batchSize = batchSize),
len(X_train)//batchSize,
epochs = numEpochs,
verbose = 2 if verbose else 0,
callbacks = callbacks,
validation_data = self.getInputGenerator(X_test, y_test, normalizers, batchSize = batchSize, useDataAugmentation = False, shuffle = False),
validation_steps = len(X_test)//batchSize,
max_queue_size = DEFAULT_Q_SIZE)
del self.trainedModel
self.trainedModel = None
K.clear_session()
gc.collect()
def loadModel(self, weightsFilePath = None):
if self.trainedModel is None:
self.trainedModel = load_model(self.getWeightsFilePath() if weightsFilePath is None else weightsFilePath)
inputLayers = []
for layer in self.trainedModel.layers:
if type(layer) is InputLayer:
inputLayers.append(layer.input)
inputLayers.append(K.learning_phase())
self.predictFunc = K.function(inputLayers, [self.trainedModel.layers[-1].output])
return self.trainedModel
def predict(self, X, normalizer = None, weightsFilePath = None, datasetManager = None):
self.loadModel(weightsFilePath)
useFastPredict = datasetManager is None
makePrediction = lambda X: self.predictFunc(X)[0]
if not useFastPredict:
batchSize = PREDICTION_BATCH_SIZE
normalizers = self.getAdditionalNormalizers(datasetManager.getParams()) + [normalizer]
inputGenerator = self.getInputGenerator(X, None, normalizers, batchSize = batchSize, shuffle = False, useDataAugmentation = False)
for i in np.arange(0, len(X), PREDICTION_BATCH_SIZE):
X_batch, y_batch = next(inputGenerator)
batches = []
for j, inputArray in enumerate(X_batch):
arraySlice = inputArray[:min(PREDICTION_BATCH_SIZE, len(X) - i)]
if j < 2:
batches.insert(0, arraySlice)
else:
batches.append(arraySlice)
batches.append(0)
predictions = makePrediction(batches)
if i == 0:
y = np.zeros((0, predictions.shape[1]))
y = np.vstack((y, predictions))
else:
y = makePrediction(X + [0])
return y
def eval(self, X_test, y_test, normalizer, metric, weightsFilePath = None, datasetManager = None, **metric_kwargs):
y_pred = self.predict(X_test, normalizer, weightsFilePath, datasetManager = datasetManager)
return metric(y_test, np.argmax(y_pred, axis = 1), **metric_kwargs)
@six.add_metaclass(abc.ABCMeta)
class ObjectCalibrator(ObjectClassifier):
LOSS = 'categorical_crossentropy'
def __init__(self, stageIdx):
super(ObjectCalibrator, self).__init__(stageIdx)
@abstractmethod
def __call__(self):
pass
class StageOneClassifier(ObjectClassifier):
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-4, 1),
'batchSize': HP.choice(128),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(ImageNormalizer.FLIP_HORIZONTAL),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 0
super(StageOneClassifier, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectClassifier.DEFAULT_DROPOUT]*2, datasetManagerParams = {}, includeTop = True, compile = True):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(16, (3, 3), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size = (3,3),strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
flattened = Flatten()(firstDropout)
fullyConnectedLayer = Dense(16, activation = 'relu')(flattened)
finalDropout = Dropout(dropouts[1])(fullyConnectedLayer)
if includeTop:
outputLayer = Dense(2, activation = 'softmax')(finalDropout)
self.model = Model(inputs = inputLayer, outputs = outputLayer if includeTop else fullyConnectedLayer)
if compile: self.compile(compileParams)
return self.model
class StageTwoClassifier(ObjectClassifier):
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-4, 1),
'batchSize': HP.choice(512),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(ImageNormalizer.FLIP_HORIZONTAL),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 1
super(StageTwoClassifier, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectClassifier.DEFAULT_DROPOUT]*2, datasetManagerParams = {}, includeTop = True, compile = True):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(64, (5, 5), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size=(3,3), strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
flattened = Flatten()(firstDropout)
fullyConnectedLayer = Dense(128, activation = 'relu')(flattened)
stageOne = StageOneClassifier()
assert os.path.isfile(stageOne.getWeightsFilePath()), STAGE_ONE_NOT_TRAINED_ERROR
stageOneModel = stageOne(datasetManagerParams = datasetManagerParams, includeTop = False, compile = False)
trainedStageOne = stageOne.loadModel()
for i, layer in enumerate(stageOneModel.layers):
layer.set_weights(trainedStageOne.layers[i].get_weights())
layer.trainable = False
if not self.additionalNormalizers:
self.additionalNormalizers.append(DatasetManager(stageOne, **datasetManagerParams).getNormalizer())
mergedFullyConnectedLayer = concatenate([fullyConnectedLayer, stageOneModel.output])
finalDropout = Dropout(dropouts[1])(mergedFullyConnectedLayer)
if includeTop:
outputLayer = Dense(2, activation = 'softmax')(finalDropout)
self.model = Model(inputs = [stageOneModel.input, inputLayer], outputs = outputLayer if includeTop else mergedFullyConnectedLayer)
if compile: self.compile(compileParams)
return self.model
class StageThreeClassifier(ObjectClassifier):
# Temporary patch for a HyperoptWrapper bug; the HyperoptWrapper class needs to be changed and everything retuned to fix it properly
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-4, 1),
'batchSize': HP.choice(512),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(ImageNormalizer.FLIP_HORIZONTAL),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 2
super(StageThreeClassifier, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectClassifier.DEFAULT_DROPOUT]*3, datasetManagerParams = {}):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(64, (5, 5), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size=(3,3), strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
firstBatchNorm = BatchNormalization()(firstDropout)
secondaryConv2D = Conv2D(64, (5, 5), activation = 'relu')(firstBatchNorm)
secondaryBatchNorm = BatchNormalization()(secondaryConv2D)
secondaryMaxPool2D = MaxPooling2D(pool_size=(3,3), strides = 2)(secondaryBatchNorm)
secondDropout = Dropout(dropouts[1])(secondaryMaxPool2D)
flattened = Flatten()(secondDropout)
fullyConnectedLayer = Dense(256, activation = 'relu')(flattened)
stageTwo = StageTwoClassifier()
assert os.path.isfile(stageTwo.getWeightsFilePath()), STAGE_TWO_NOT_TRAINED_ERROR
trainedStageTwo = stageTwo.loadModel()
stageTwoModel = stageTwo(datasetManagerParams = datasetManagerParams, includeTop = False, compile = False)
inputLayers = [inputLayer]
for i, layer in enumerate(stageTwoModel.layers):
layer.set_weights(trainedStageTwo.layers[i].get_weights())
layer.trainable = False
if type(layer) is InputLayer:
inputLayers.insert(0, layer.input)
if not self.additionalNormalizers:
self.additionalNormalizers.extend(stageTwo.getAdditionalNormalizers())
self.additionalNormalizers.append(DatasetManager(stageTwo, **datasetManagerParams).getNormalizer())
mergedFullyConnectedLayer = concatenate([fullyConnectedLayer, stageTwoModel.output])
thirdDropout = Dropout(dropouts[2])(mergedFullyConnectedLayer)
outputLayer = Dense(2, activation = 'softmax')(thirdDropout)
self.model = Model(inputs = inputLayers, outputs = outputLayer)
self.compile(compileParams)
return self.model
class StageOneCalibrator(ObjectCalibrator):
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-4, 1),
'batchSize': HP.choice(512),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(None),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 0
super(StageOneCalibrator, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectCalibrator.DEFAULT_DROPOUT]*2, datasetManagerParams = {}):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(16, (3, 3), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size = (3,3), strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
flattened = Flatten()(firstDropout)
fullyConnectedLayer = Dense(128, activation = 'relu')(flattened)
finalDropout = Dropout(dropouts[1])(fullyConnectedLayer)
outputLayer = Dense(45, activation = 'softmax')(finalDropout)
self.model = Model(inputs = inputLayer, outputs = outputLayer)
self.compile(compileParams)
return self.model
class StageTwoCalibrator(ObjectCalibrator):
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-4, 1),
'batchSize': HP.choice(512),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(None),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 1
super(StageTwoCalibrator, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectCalibrator.DEFAULT_DROPOUT]*2, datasetManagerParams = {}):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(32, (5, 5), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size = (3,3), strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
flattened = Flatten()(firstDropout)
fullyConnectedLayer = Dense(64, activation = 'relu')(flattened)
finalDropout = Dropout(dropouts[1])(fullyConnectedLayer)
outputLayer = Dense(45, activation = 'softmax')(finalDropout)
self.model = Model(inputs = inputLayer, outputs = outputLayer)
self.compile(compileParams)
return self.model
class StageThreeCalibrator(ObjectCalibrator):
#TODO: Fix HyperoptWrapper class
HP = HyperoptWrapper()
PARAM_SPACE = {
'dropout0': HP.uniform(0, .75),
'dropout1': HP.uniform(0, .75),
'lr': HP.loguniform(1e-9, 1),
'batchSize': HP.choice(512),
'norm': HP.choice(ImageNormalizer.STANDARD_NORMALIZATION),
'flip': HP.choice(None),
'momentum': HP.choice(.9),
'decay': HP.choice(1e-4),
'nesterov': HP.choice(True)
}
def __init__(self):
self.stageIdx = 2
super(StageThreeCalibrator, self).__init__(self.stageIdx)
def __call__(self, compileParams = {}, dropouts = [ObjectCalibrator.DEFAULT_DROPOUT]*3, datasetManagerParams = {}):
inputLayer = Input(shape = self.inputShape)
conv2D = Conv2D(64, (5, 5), activation = 'relu')(inputLayer)
maxPool2D = MaxPooling2D(pool_size = (3,3), strides = 2)(conv2D)
firstDropout = Dropout(dropouts[0])(maxPool2D)
firstBatchNorm = BatchNormalization()(firstDropout)
secondaryConv2D = Conv2D(64, (5, 5), activation = 'relu')(firstBatchNorm)
secondaryMaxPool2D = MaxPooling2D(pool_size = (3,3), strides = 2)(secondaryConv2D)
secondBatchNorm = BatchNormalization()(secondaryMaxPool2D)
secondDropout = Dropout(dropouts[1])(secondBatchNorm)
flattened = Flatten()(secondDropout)
fullyConnectedLayer = Dense(256, activation = 'relu')(flattened)
thirdDropout = Dropout(dropouts[2])(fullyConnectedLayer)
outputLayer = Dense(45, activation = 'softmax')(thirdDropout)
self.model = Model(inputs = inputLayer, outputs = outputLayer)
self.compile(compileParams)
return self.model
MODELS = {False: [StageOneClassifier(), StageTwoClassifier(), StageThreeClassifier()], True: [StageOneCalibrator(), StageTwoCalibrator(), StageThreeCalibrator()]}
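# Illustrative usage sketch (not part of the original module). It assumes a
# DatasetManager instance plus train/test arrays compatible with the fit()/
# eval() signatures defined on ObjectClassifier, and a metric callable such as
# sklearn.metrics.accuracy_score; the names below are examples only and nothing
# here is executed at import time.
def _example_train_and_score_stage_one(datasetManager, X_train, X_test, y_train, y_test, metric):
    classifier = MODELS[False][0]  # StageOneClassifier
    classifier.fit(X_train, X_test, y_train, y_test, datasetManager)
    normalizer = datasetManager.getNormalizer()
    return classifier.eval(X_test, y_test, normalizer, metric, datasetManager=datasetManager)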
|
|
#!/usr/bin/python -Wall
# ================================================================
# John Kerl
# kerl.john.r@gmail.com
# 2007-05-31
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
from sackmat_m import *
from sackst_m import *
import random
# ----------------------------------------------------------------
def maindemo():
n = 3
for r in range(0, 5):
A = make_zero_simple_tensor(r, n); print r, n; A.printf()
n = 4
for r in range(0, 5):
A = make_I_simple_tensor(r, n); print r, n; A.printf()
n = 4
for r in range(0, 5):
A = make_ones_simple_tensor(r, n); print r, n; A.printf()
n = 2
A = make_zero_simple_tensor(3, n);
B = make_zero_simple_tensor(3, n);
for i in range(0, n):
for j in range(0, n):
for k in range(0, n):
A[i][j][k] = i+j+k + i*j*k + 1
B[i][j][k] = 2+i
C = A + B
print "A:"; A.printf()
print "B:"; B.printf()
print "A+B:"; C.printf()
A = simple_tensor([[1,2],[3,4]])
B = simple_tensor([5, 6])
C = A * B
print "A:"; A.printf()
print "B:"; B.printf()
print "A*B:"; C.printf()
A = simple_tensor([[1,2],[3,4]])
B = simple_tensor([5, 6])
C = B * A
print "A:"; A.printf()
print "B:"; B.printf()
print "B*A:"; C.printf()
A = simple_tensor([[1,2,3],[4,5,6],[7,8,9]])
B = simple_tensor([-1, -2, -3])
C = A * B
print "A:"; A.printf()
print "B:"; B.printf()
print "A*B:"; C.printf()
A = simple_tensor(2)
B = simple_tensor(3)
print "A:"; A.printf()
print "B:"; B.printf()
C = A * B
print "A*B:"; C.printf()
A = simple_tensor([[1,2,3],[4,5,6],[7,8,9]])
print "A:"; A.printf()
B = A.Alt()
print "Alt(A):"; B.printf()
B = A.Sym()
print "Sym(A):"; B.printf()
A = simple_tensor([[[1,4],[3,2]],[[9,6],[7,5]]])
print "A:"; A.printf()
B = A.Alt()
print "Alt(A):"; B.printf()
B = A.Sym()
print "Sym(A):"; B.printf()
print
X = simple_tensor([[1,2,3],[4,5,6],[7,8,9]])
Y = simple_tensor([1,-1,1])
P = X * Y
S = X & Y
A = X ^ Y
print "X:"; X.printf()
print "Y:"; Y.printf()
print "X*Y:"; P.printf()
print "XY:"; S.printf()
print "X^Y:"; A.printf()
print
X = simple_tensor([[1,2,3],[4,5,6],[7,8,9]])
Y = simple_tensor([[1,2,3],[4,5,6],[7,8,9]])
P = X * Y
S = X & Y
A = X ^ Y
print "X:"; X.printf()
print "Y:"; Y.printf()
print "X*Y:"; P.printf()
print "XY:"; S.printf()
print "X^Y:"; A.printf()
print
X = simple_tensor([[1,0,0],[0,1,0],[0,0,1]])
Y = simple_tensor([1,1,1])
P = X * Y
S = X & Y
A = X ^ Y
print "X:"; X.printf()
print "Y:"; Y.printf()
print "X*Y:"; P.printf()
print "XY:"; S.printf()
print "X^Y:"; A.printf()
print
X = make_I_simple_tensor(2, 4)
Y = make_I_simple_tensor(2, 4)
P = X * Y
S = X & Y
A = X ^ Y
print "X:"; X.printf()
print "Y:"; Y.printf()
print "X*Y:"; P.printf()
print "XY:"; S.printf()
print "X^Y:"; A.printf()
print
n = 3
for i in range(0, n):
ei = simple_tensor(sackmat_m.stdbv(i, n))
for j in range(i+1, n):
ej = simple_tensor(sackmat_m.stdbv(j, n))
eij = ei ^ ej
print "e" + str(i) + " ^ e" + str(j)
eij.printf()
n = 3
for i in range(0, n):
ei = simple_tensor(sackmat_m.stdbv(i, n))
for j in range(i+1, n):
ej = simple_tensor(sackmat_m.stdbv(j, n))
for k in range(j+1, n):
ek = simple_tensor(sackmat_m.stdbv(k, n))
eijk = ei ^ ej ^ ek
print "e" + str(i) + " ^ e" + str(j) + " ^ e" + str(k)
eijk.printf()
n = 4
for i in range(0, n):
ei = simple_tensor(sackmat_m.stdbv(i, n))
for j in range(i+1, n):
ej = simple_tensor(sackmat_m.stdbv(j, n))
eij = ei ^ ej
print "e" + str(i) + " ^ e" + str(j)
eij.printf()
n = 4
for i in range(0, n):
ei = simple_tensor(sackmat_m.stdbv(i, n))
for j in range(i+1, n):
ej = simple_tensor(sackmat_m.stdbv(j, n))
for k in range(j+1, n):
ek = simple_tensor(sackmat_m.stdbv(k, n))
eijk = ei ^ ej ^ ek
print "e" + str(i) + " ^ e" + str(j) + " ^ e" + str(k)
eijk.printf()
n = 4
for i in range(0, n):
ei = simple_tensor(sackmat_m.stdbv(i, n))
for j in range(i+1, n):
ej = simple_tensor(sackmat_m.stdbv(j, n))
for k in range(j+1, n):
ek = simple_tensor(sackmat_m.stdbv(k, n))
for l in range(k+1, n):
el = simple_tensor(sackmat_m.stdbv(l, n))
eijkl = ei ^ ej ^ ek ^ el
print "e" + str(i) + " ^ e" + str(j) + " ^ e" + str(k) + " ^ e" + str(l)
eijkl.printf()
# ----------------------------------------------------------------
#import random
#
#def foobar():
# #A = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
# #A = [[[0, 1], [0, 0]], [[0, 0], [0, 0]]]
# n=5
# A = make_zero_3_tensor(n)
#
# g = random.Random(1)
# for i in range(0, n):
# for j in range(0, n):
# for k in range(0, n):
# #A[i][j][k] = i*j*k+ i*i - j*k + 1
# A[i][j][k] = g.random()
#
#
# print_3_tensor(A)
#
# #Z = make_zero_3_tensor(2)
# #print_3_tensor(Z)
#
# S = mksymm3(A)
# print_3_tensor(S)
#
# S = mkskew3(A)
# print_3_tensor(S)
#
#foobar()
# ----------------------------------------------------------------
def apply_demo():
#T = simple_tensor([[1,0],[0,1]])
#u = [3,4]
#v = [5,6]
#T = simple_tensor([[1,0,0],[0,1,0],[0,0,1]])
#u = [3,4,5]
#v = [5,6,7]
T = simple_tensor([[0,1,1],[-1,0,1],[-1,-1,0]])
u = [3,4,5]
v = [5,6,7]
uv = simple_tensor(u) * simple_tensor(v)
t = T.of([u,v])
T.printf()
uv.printf()
print_row_vector(u)
print
print_row_vector(v)
print
print t
# ----------------------------------------------------------------
def symstuff():
r = 3
n = 3
p = n ** r
A = make_zero_simple_tensor(r, n)
v = 1
g = random.Random(1)
for i in range(0, p):
I = multidx(i, r, n)
#A[I] = v
A[I] = g.random()
v += 1
B = A.Alt()
C = A.Sym()
print "Orig:"; A.printf(); print
print "Alt :"; B.printf(); print
print "Sym :"; C.printf(); print
D = A - B - C
print "OAS :"; D.printf(); print
# ----------------------------------------------------------------
def cobtest():
g = simple_tensor([[1,0],[0,1]])
Q = sackmat([[2,0],[1,1]]).transpose()
#g = simple_tensor([[1,0,0],[0,1,0],[0,0,1]])
#Q = sackmat([[2,1,1],[0,1,2],[0,0,1]]).transpose()
print "Q:"; Q.printf(); print
print "g:"; g.printf(); print
gp = g.cov_cob(Q); gp.printf()
gp = g.ctv_cob(Q); gp.printf()
gp = g.cob(Q,[1,0]); gp.printf()
gp = g.cob(Q,[0,1]); gp.printf()
# ================================================================
#apply_demo()
symstuff()
#cobtest()
|
|
import itertools
import json
import os
from urllib.parse import unquote
from django.apps import apps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import Context, Engine
from django.urls import translate_url
from django.utils.encoding import force_text
from django.utils.formats import get_format
from django.utils.http import is_safe_url
from django.utils.translation import (
LANGUAGE_SESSION_KEY, check_for_language, get_language,
)
from django.utils.translation.trans_real import DjangoTranslation
from django.views.generic import View
LANGUAGE_QUERY_PARAMETER = 'language'
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if ((next or not request.is_ajax()) and
not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure())):
next = request.META.get('HTTP_REFERER')
if next:
next = unquote(next) # HTTP_REFERER may be encoded.
if not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure()):
next = '/'
response = HttpResponseRedirect(next) if next else HttpResponse(status=204)
if request.method == 'POST':
lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
if lang_code and check_for_language(lang_code):
if next:
next_trans = translate_url(next, lang_code)
if next_trans != next:
response = HttpResponseRedirect(next_trans)
if hasattr(request, 'session'):
request.session[LANGUAGE_SESSION_KEY] = lang_code
else:
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME, lang_code,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
)
return response
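# Illustrative usage of set_language (assumption: a project-level urls.py and
# template, not part of this module). The view is normally exposed through
# django.conf.urls.i18n, which names it 'set_language'; a template then POSTs
# the 'language' and 'next' parameters to it:
#
#     # urls.py
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^i18n/', include('django.conf.urls.i18n')),
#     ]
#
#     <!-- template -->
#     <form action="{% url 'set_language' %}" method="post">
#         {% csrf_token %}
#         <input name="next" type="hidden" value="{{ redirect_to }}">
#         <select name="language">...</select>
#         <input type="submit" value="Go">
#     </form>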
def get_formats():
"""
Returns all format strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (int, str)):
formats[k] = force_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [force_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function(globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function(n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
{% endif %}
/* gettext library */
django.catalog = django.catalog || {};
{% if catalog_str %}
var newcatalog = {{ catalog_str }};
for (var key in newcatalog) {
django.catalog[key] = newcatalog[key];
}
{% endif %}
if (!django.jsi18n_initialized) {
django.gettext = function(msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function(singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function(msgid) { return msgid; };
django.pgettext = function(context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function(context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
django.interpolate = function(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function(format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
django.jsi18n_initialized = true;
}
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
def indent(s):
return s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return HttpResponse(template.render(context), 'text/javascript')
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
class JavaScriptCatalog(View):
"""
Return the selected language catalog as a JavaScript library.
Receives the list of packages to check for translations in the `packages`
kwarg either from the extra dictionary passed to the url() function or as a
plus-sign delimited string from the request. Default is 'django.conf'.
You can override the gettext domain for this view, but usually you don't
want to do that as JavaScript messages go to the djangojs domain. This
might be needed if you deliver your JavaScript source from Django templates.
"""
domain = 'djangojs'
packages = None
def get(self, request, *args, **kwargs):
locale = get_language()
domain = kwargs.get('domain', self.domain)
# If packages are not provided, default to all installed packages, as
# DjangoTranslation without localedirs harvests them all.
packages = kwargs.get('packages', '')
packages = packages.split('+') if packages else self.packages
paths = self.get_paths(packages) if packages else None
self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths)
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def get_paths(self, packages):
allowable_packages = dict((app_config.name, app_config) for app_config in apps.get_app_configs())
app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
# paths of requested packages
return [os.path.join(app.path, 'locale') for app in app_configs]
def get_plural(self):
plural = None
if '' in self.translation._catalog:
for line in self.translation._catalog[''].split('\n'):
if line.startswith('Plural-Forms:'):
plural = line.split(':', 1)[1].strip()
if plural is not None:
# This should be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
# n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
return plural
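# Worked example for get_plural (illustrative): a catalog metadata entry
# containing "Plural-Forms: nplurals=2; plural=(n != 1);" yields
# plural == "(n != 1)", which the template above embeds as the body of
# django.pluralidx via "var v={{ plural }};".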
def get_catalog(self):
pdict = {}
maxcnts = {}
catalog = {}
trans_cat = self.translation._catalog
trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {}
for key, value in itertools.chain(iter(trans_cat.items()), iter(trans_fallback_cat.items())):
if key == '' or key in catalog:
continue
if isinstance(key, str):
catalog[key] = value
elif isinstance(key, tuple):
msgid = key[0]
cnt = key[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = value
else:
raise TypeError(key)
for k, v in pdict.items():
catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog
def get_context_data(self, **kwargs):
return {
'catalog': self.get_catalog(),
'formats': get_formats(),
'plural': self.get_plural(),
}
def render_to_response(self, context, **response_kwargs):
def indent(s):
return s.replace('\n', '\n ')
template = Engine().from_string(js_catalog_template)
context['catalog_str'] = indent(
json.dumps(context['catalog'], sort_keys=True, indent=2)
) if context['catalog'] else None
context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2))
return HttpResponse(template.render(Context(context)), 'text/javascript')
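# Illustrative URLconf for JavaScriptCatalog (the pattern, app label, and URL
# name below are examples only). The optional `packages` argument limits the
# catalog to the listed apps; omitting it falls back to all installed apps, as
# described in get() above.
#
#     from django.conf.urls import url
#     from django.views.i18n import JavaScriptCatalog
#
#     urlpatterns = [
#         url(r'^jsi18n/$', JavaScriptCatalog.as_view(packages=['your.app']),
#             name='javascript-catalog'),
#     ]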
class JSONCatalog(JavaScriptCatalog):
"""
Return the selected language catalog as a JSON object.
Receives the same parameters as JavaScriptCatalog and returns a response
with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
def render_to_response(self, context, **response_kwargs):
return JsonResponse(context)
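# Illustrative wiring for JSONCatalog (the URL pattern and name are examples
# only). The JSON body returned to the client then follows the
# {"catalog": ..., "formats": ..., "plural": ...} structure documented above.
#
#     from django.conf.urls import url
#     from django.views.i18n import JSONCatalog
#
#     urlpatterns = [
#         url(r'^jsoni18n/$', JSONCatalog.as_view(), name='json-catalog'),
#     ]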
|
|
from time import sleep
from lettuce import world, step
from nose.tools import assert_true, assert_equals
from questionnaire.features.pages.questionnaires import QuestionnairePage
from questionnaire.models import Question, QuestionOption, Theme, QuestionGroupOrder
from questionnaire.tests.factories.question_group_factory import QuestionGroupFactory
@step(u'And I have both simple and primary questions in my Question Bank')
def and_i_have_both_simple_and_primary_questions_in_my_question_bank(step):
world.theme = Theme.objects.create(name="Grid Questions Theme")
world.grid_question1 = Question.objects.create(text='Primary Option', UID='C00021', answer_type='MultiChoice',
is_primary=True, theme=world.theme)
world.option1 = QuestionOption.objects.create(text='Option A', question=world.grid_question1)
world.option2 = QuestionOption.objects.create(text='Option B', question=world.grid_question1)
world.option3 = QuestionOption.objects.create(text='Option C', question=world.grid_question1)
world.grid_question2 = Question.objects.create(text='First Column Question - Q2',
export_label='First Column Question - Q2', UID='C00022', answer_type='Number',
theme=world.theme)
world.grid_question3 = Question.objects.create(text='Second Column Question - Q3', export_label='Second Column Question - Q3', UID='C00023', answer_type='Number',
theme=world.theme)
world.grid_question4 = Question.objects.create(text='Third Column Question - Q4',
export_label='Third Column Question - Q4', UID='C00024', answer_type='Number',
theme=world.theme)
world.grid_question5 = Question.objects.create(text='Fourth Column Question - Q5',
export_label='Fourth Column Question - Q5', UID='C00025', answer_type='Number',
theme=world.theme)
world.grid_question6 = Question.objects.create(text='Sixth Column Question - Q6',
export_label='Sixth Column Question - Q6', UID='C00026', answer_type='Number',
theme=world.theme)
@step(u'And I am editing that questionnaire')
def and_i_am_editing_that_questionnaire(step):
world.page = QuestionnairePage(world.browser, world.section1)
world.page.visit_url('/questionnaire/entry/%s/section/%s/' % (world.questionnaire.id, world.section1.id))
@step(u'Then I should see an option to add a new grid question to each subsection')
def then_i_should_see_an_option_to_add_a_new_grid_question_to_each_subsection(step):
assert world.page.is_element_present_by_id('id-create-grid-%s' % world.sub_section.id)
@step(u'When I choose to create a new grid question for a particular subsection')
def when_i_choose_to_create_a_new_grid_question_for_a_particular_subsection(step):
world.page.click_by_id('id-create-grid-%s' % world.sub_section.id)
@step(u'Then I should see modal allowing me to select the grid options')
def then_i_should_see_modal_allowing_me_to_select_the_grid_options(step):
world.page.is_text_present('Create Grid in Subsection')
@step(u'When I choose to create a grid with all options shown')
def when_i_choose_to_create_a_grid_with_all_options_shown(step):
world.page.select('type', 'display_all')
@step(u'When I choose to create an add-more type of grid')
def when_i_choose_to_create_an_add_more_type_of_grid(step):
world.page.select('type', 'allow_multiples')
@step(u'When I choose to create a hybrid type of grid')
def when_i_choose_to_create_a_hybrid_type_of_grid(step):
world.page.select('type', 'hybrid')
@step(u'When I select the primary questions and columns for the all options grid')
def when_i_select_the_primary_questions_and_columns(step):
world.page.select('primary_question', world.grid_question1.id)
world.page.select_by_id('column-0', world.grid_question2.id)
sleep(1)
world.browser.find_by_id('td-0').mouse_over()
world.page.click_by_id('add-column-0')
world.browser.find_by_id('td-1').mouse_over()
world.page.click_by_id('remove-column-1')
@step(u'When I select the primary questions and columns for the add-more grid')
def when_i_select_the_primary_questions_and_columns_for_the_add_more_grid(step):
when_i_select_the_primary_questions_and_columns(step)
@step(u'And I save my grid')
def and_i_save_my_grid(step):
world.page.click_by_id('save_grid_button')
@step(u'When I close the modal')
def when_i_close_the_modal(step):
world.page.click_by_id('close-create-grid-modal')
sleep(3)
world.grid = world.grid_question1.group()
@step(u'Then I should see the all-options shown grid created')
def then_i_should_that_grid_created_in_the_subsection_of_my_questionnaire(step):
assert world.page.is_element_present_by_id('delete-grid-%d' % world.grid.id)
world.page.is_text_present(world.grid_question1.text)
for option in world.grid_question1.options.all():
world.page.is_text_present(option.text)
world.page.is_text_present(world.grid_question2.text)
@step(u'Then I should see add-more grid created')
def then_i_should_see_the_grid_with_add_more_created(step):
assert world.page.is_element_present_by_id('delete-grid-%d' % world.grid.id)
world.page.is_text_present(world.grid_question1.text)
world.page.is_text_present("Choose One")
world.page.is_text_present(world.grid_question2.text)
world.page.is_text_present('Add More')
@step(u'Then I should see the hybrid grid created')
def then_i_should_see_the_hybrid_grid_created(step):
assert_true(world.page.is_element_present_by_id('delete-grid-%d' % world.grid.id))
world.page.is_text_present('Add More')
for option in world.grid_question1.options.all():
world.page.select('MultiChoice-0-response', option.id)
for i in range(1, 5):
world.page.is_text_present(eval("world.grid_question%d" % i).text)
@step(u'When I choose to remove the grid')
def when_i_choose_to_remove_the_grid(step):
world.page.click_by_id('delete-grid-%d' % world.grid.id)
@step(u'Then I should see a delete grid confirmation prompt')
def then_i_should_see_a_delete_grid_confirmation_prompt(step):
world.page.is_text_present('Confirm Delete Grid')
world.page.is_text_present('Are you sure you want to delete this grid?')
@step(u'When I choose to continue with the deletion of the grid')
def when_i_choose_to_continue_with_the_deletion_of_the_grid(step):
world.page.click_by_id('confirm-delete-grid-%d' % world.grid.id)
@step(u'Then I should see a message that the grid was successfully removed')
def then_i_should_see_a_message_that_the_grid_was_successfully_removed(step):
world.page.is_text_present('Grid successfully removed from questionnaire')
@step(u'And I should not see the grid in the questionnaire I am editing')
def and_i_should_not_see_the_grid_in_the_questionnaire_i_am_editing(step):
assert_true(world.page.is_element_not_present_by_id('delete-grid-%d' % world.grid.id))
for i in range(1, 4):
world.page.is_text_present(eval("world.grid_question%d" % i).text, status=False)
@step(u'When I choose a theme')
def when_i_select_a_theme(step):
world.page.select('theme', world.theme.id)
@step(u'When I select the hybrid primary question')
def when_i_select_the_primary_question(step):
world.page.select('primary_question', world.grid_question1.id)
@step(u'And I select the non-primary question at row "([^"]*)" column "([^"]*)"')
def and_i_select_the_non_primary_question_at_row_group1_column_group1(step, row, column):
world.hybrid_grid_questions = [[world.grid_question2],
[world.grid_question3, world.grid_question4],
[world.grid_question5]]
world.page.select_by_id('column_%s_%s'%(row, column), world.hybrid_grid_questions[int(row)][int(column)].id)
@step(u'And I add a new element on the right of row "([^"]*)" column "([^"]*)"')
def and_i_add_a_new_element_on_the_right_of_row_group1_column_group2(step, row, column):
row_column = (int(row), int(column))
world.browser.find_by_id('column_%s_%s' % row_column).mouse_over()
world.page.click_by_id('addElement_%s_%s' % row_column)
@step(u'Then I should not see the same element at row "([^"]*)" column "([^"]*)"')
def then_i_should_not_see_the_same_element_at_row_group1_column_group1(step, row, column):
world.page.is_element_not_present_by_id('column_%s_%s'%(row, column))
@step(u'And I add a new row from row "([^"]*)" column "([^"]*)"')
def and_i_add_a_new_row_at_group1(step, row, column):
row_column = (int(row), int(column))
sleep(1)
world.browser.find_by_id('column_%s_%s' % row_column).mouse_over()
world.page.click_by_id('addRow_%s_%s' % row_column)
@step(u'And I delete the element at row "([^"]*)" column "([^"]*)"')
def and_i_delete_the_element_at_row_group1_column_group1(step, row, column):
row_column = (int(row), int(column))
sleep(1)
world.browser.find_by_id('column_%s_%s' % row_column).mouse_over()
world.page.click_by_id('remove_%s_%s' % (row, column))
@step(u'Then I should not see the element at row "([^"]*)" column "([^"]*)"')
def then_i_should_not_see_the_element_at_row_group1_column_group1(step, row, column):
world.page.is_element_not_present_by_id('column_%s_%s' % (row, column))
@step(u'When I have a display all grid')
def when_i_have_a_display_all_grid(step):
world.display_all_group = QuestionGroupFactory(grid=True, display_all=True,
subsection=world.sub_section, order=1)
world.display_all_group.question.add(world.grid_question1, world.grid_question2, world.grid_question3)
QuestionGroupOrder.objects.create(order=1, question=world.grid_question1, question_group=world.display_all_group)
world.display_all_group.orders.create(order=2, question=world.grid_question2)
world.display_all_group.orders.create(order=3, question=world.grid_question3)
@step(u'When I click edit the display all grid')
def when_i_click_edit_the_display_all_hybrid_grid(step):
world.page.click_by_id('edit-grid-%s' % world.display_all_group.id)
@step(u'And I click move first question to the right')
def and_i_click_column_move_group1_question_to_the_left(step):
sleep(1)
world.browser.find_by_id('column-0').mouse_over()
world.page.click_by_id('move-question-%s-right' % world.grid_question2.id)
@step(u'And I choose to move the same question to the left')
def and_i_choose_to_move_the_same_question_further_left(step):
world.browser.find_by_id('column-1').mouse_over()
sleep(0.5)
world.page.click_by_id('move-question-%s-left' % world.grid_question2.id)
sleep(1)
@step(u'And I click update the grid')
def and_i_click_update_the_grid(step):
world.page.click_by_id('update_grid_button')
@step(u'Then I should see that the grid was updated successfully')
def then_i_should_see_that_the_grid_was_updated_successfully(step):
world.page.is_text_present('The grid was updated successfully.')
@step(u'Then I should see it moved to the right')
def then_i_should_see_it_moved_to_the_left(step):
world.page.assert_questions_ordered_in_edit_modal([world.grid_question3, world.grid_question2])
@step(u'Then I should see it moved back')
def then_i_should_see_it_moved_back(step):
world.page.assert_questions_ordered_in_edit_modal([world.grid_question2, world.grid_question3])
@step(u'When I close the edit grid modal')
def when_i_close_the_edit_grid_modal(step):
world.page.click_by_id('close-edit-grid-modal')
sleep(3)
@step(u'Then I should see the grid questions in their new order')
def then_i_should_see_the_grid_questions_in_their_new_order(step):
assert world.page.is_element_present_by_id('delete-grid-%d' % world.display_all_group.id)
world.page.assert_questions_ordered_in_entry([world.grid_question3, world.grid_question2], world.display_all_group)
@step(u'When I have a hybrid grid in that questionnaire')
def when_i_have_a_hybrid_all_grid(step):
world.hybrid_group = QuestionGroupFactory(grid=True, allow_multiples=True, hybrid=True,
subsection=world.sub_section, order=1)
world.hybrid_group.question.add(world.grid_question1, world.grid_question2, world.grid_question6)
world.hybrid_group.orders.create(order=1, question=world.grid_question1)
world.hybrid_group.orders.create(order=2, question=world.grid_question2)
world.hybrid_group.orders.create(order=3, question=world.grid_question6)
@step(u'When I choose to edit the hybrid grid')
def when_i_click_edit_the_hybrid_grid(step):
world.page.click_by_id('edit-grid-%s' % world.hybrid_group.id)
@step(u'Then I should see the hybrid grid with its questions in their new order')
def then_i_should_see_the_hybrid_grid_with_its_questions_in_their_new_order(step):
assert world.page.is_element_present_by_id('delete-grid-%d' % world.hybrid_group.id)
world.page.assert_questions_ordered_in_hybrid_grid_entry([world.grid_question1, world.grid_question2,
world.grid_question3, world.grid_question6],
world.hybrid_group)
@step(u'And I drag the first row to the second row')
def and_i_drag_the_first_row_to_the_second_row(step):
world.page.move_draggable_id_by_this_number_of_steps('sortable-row-0', 0)
sleep(3)
@step(u'Then I should see it moved to the second row')
def then_i_should_see_it_moved_to_the_second_row(step):
sleep(4)
world.page.assert_row_moved_to(position=1)
@step(u'And I choose to drag the same row to the third row')
def and_i_choose_to_drag_the_same_row_to_the_third_row(step):
world.page.move_draggable_id_by_this_number_of_steps('sortable-row-1', 2)
sleep(1)
@step(u'Then I should see it moved to the third row')
def then_i_should_see_it_moved_to_the_third_row(step):
world.page.assert_row_moved_to(position=2)
@step(u'Then I should see the grid rows in their new order')
def then_i_should_see_the_grid_rows_in_their_new_order(step):
grid_options = world.browser.find_by_css('.grid-option')
expected_order_of_options = [world.option1.text, world.option3.text, world.option2.text]
grid_options_text = map(lambda opt: opt.text, grid_options)
assert_equals(expected_order_of_options, grid_options_text)
@step(u'And I drag the first hybrid row to the second hybrid row')
def and_i_drag_the_first_hybrid_row_to_the_second_hybrid_row(step):
world.page.move_draggable_id_by_this_number_of_steps('drag-row-0', 1)
sleep(3)
@step(u'And I drag the same hybrid row to the third hybrid row')
def and_i_drag_the_same_hybrid_row_to_the_third_hybrid_row(step):
world.page.move_draggable_id_by_this_number_of_steps('drag-row-0', 1)
sleep(3)
@step(u'Then I should see the hybrid grid rows in their new order')
def then_i_should_see_the_hybrid_grid_rows_in_their_new_order(step):
question_texts = world.browser.find_by_css('.question-text')
expected_order_of_options = [world.grid_question1.text, world.grid_question6.text, world.grid_question2.text]
grid_question_text = map(lambda opt: opt.text, question_texts)
assert_equals(expected_order_of_options, grid_question_text)
|
|
# -*- coding: utf-8 -*-
from hyper.packages.hyperframe.frame import (
Frame, DataFrame, RstStreamFrame, SettingsFrame,
PushPromiseFrame, PingFrame, WindowUpdateFrame, HeadersFrame,
ContinuationFrame, BlockedFrame, GoAwayFrame,
)
from hyper.packages.hpack.hpack_compat import Encoder, Decoder
from hyper.http20.connection import HTTP20Connection
from hyper.http20.stream import (
Stream, STATE_HALF_CLOSED_LOCAL, STATE_OPEN, MAX_CHUNK, STATE_CLOSED
)
from hyper.http20.response import HTTP20Response, HTTP20Push
from hyper.http20.exceptions import (
HPACKDecodingError, HPACKEncodingError, ProtocolError, ConnectionError,
)
from hyper.http20.window import FlowControlManager
from hyper.http20.util import (
combine_repeated_headers, split_repeated_headers, h2_safe_headers
)
from hyper.common.headers import HTTPHeaderMap
from hyper.compat import zlib_compressobj
from hyper.contrib import HTTP20Adapter
import hyper.http20.errors as errors
import errno
import os
import pytest
import socket
import zlib
from io import BytesIO
def decode_frame(frame_data):
f, length = Frame.parse_frame_header(frame_data[:9])
f.parse_body(memoryview(frame_data[9:9 + length]))
assert 9 + length == len(frame_data)
return f
class TestHyperConnection(object):
def test_connections_accept_hosts_and_ports(self):
c = HTTP20Connection(host='www.google.com', port=8080)
assert c.host == 'www.google.com'
assert c.port == 8080
def test_connections_can_parse_hosts_and_ports(self):
c = HTTP20Connection('www.google.com:8080')
assert c.host == 'www.google.com'
assert c.port == 8080
def test_putrequest_establishes_new_stream(self):
c = HTTP20Connection("www.google.com")
stream_id = c.putrequest('GET', '/')
stream = c.streams[stream_id]
assert len(c.streams) == 1
assert c.recent_stream is stream
def test_putrequest_autosets_headers(self):
c = HTTP20Connection("www.google.com")
c.putrequest('GET', '/')
s = c.recent_stream
assert s.headers == [
(':method', 'GET'),
(':scheme', 'https'),
(':authority', 'www.google.com'),
(':path', '/'),
]
def test_putheader_puts_headers(self):
c = HTTP20Connection("www.google.com")
c.putrequest('GET', '/')
c.putheader('name', 'value')
s = c.recent_stream
assert s.headers == [
(':method', 'GET'),
(':scheme', 'https'),
(':authority', 'www.google.com'),
(':path', '/'),
('name', 'value'),
]
def test_endheaders_sends_data(self):
frames = []
def data_callback(frame):
frames.append(frame)
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
c._send_cb = data_callback
c.putrequest('GET', '/')
c.endheaders()
assert len(frames) == 1
f = frames[0]
assert isinstance(f, HeadersFrame)
def test_we_can_send_data_using_endheaders(self):
frames = []
def data_callback(frame):
frames.append(frame)
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
c._send_cb = data_callback
c.putrequest('GET', '/')
c.endheaders(message_body=b'hello there', final=True)
assert len(frames) == 2
assert isinstance(frames[0], HeadersFrame)
assert frames[0].flags == set(['END_HEADERS'])
assert isinstance(frames[1], DataFrame)
assert frames[1].data == b'hello there'
assert frames[1].flags == set(['END_STREAM'])
def test_that_we_correctly_send_over_the_socket(self):
sock = DummySocket()
c = HTTP20Connection('www.google.com')
c._sock = sock
c.putrequest('GET', '/')
c.endheaders(message_body=b'hello there', final=True)
# Don't bother testing that the serialization was ok, that should be
# fine.
assert len(sock.queue) == 2
# Confirm the window got shrunk.
assert c._out_flow_control_window == 65535 - len(b'hello there')
def test_we_can_read_from_the_socket(self):
sock = DummySocket()
sock.buffer = BytesIO(b'\x00\x00\x08\x00\x01\x00\x00\x00\x01testdata')
c = HTTP20Connection('www.google.com')
c._sock = sock
c.putrequest('GET', '/')
c.endheaders()
c._recv_cb()
s = c.recent_stream
assert s.data == [b'testdata']
def test_we_can_read_fitfully_from_the_socket(self):
sock = DummyFitfullySocket()
sock.buffer = BytesIO(
b'\x00\x00\x18\x00\x01\x00\x00\x00\x01'
b'testdata'
b'+payload'
)
c = HTTP20Connection('www.google.com')
c._sock = sock
c.putrequest('GET', '/')
c.endheaders()
c._recv_cb()
s = c.recent_stream
assert s.data == [b'testdata+payload']
def test_putrequest_sends_data(self):
sock = DummySocket()
c = HTTP20Connection('www.google.com')
c._sock = sock
c.request(
'GET',
'/',
body='hello',
headers={'Content-Type': 'application/json'}
)
# The socket should have received one headers frame and one body frame.
assert len(sock.queue) == 2
assert c._out_flow_control_window == 65535 - len(b'hello')
def test_closed_connections_are_reset(self):
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
encoder = c.encoder
decoder = c.decoder
wm = c.window_manager
c.request('GET', '/')
c.close()
assert c._sock is None
assert not c.streams
assert c.recent_stream is None
assert c.next_stream_id == 1
assert c.encoder is not encoder
assert c.decoder is not decoder
assert c._settings == {
SettingsFrame.INITIAL_WINDOW_SIZE: 65535,
}
assert c._out_flow_control_window == 65535
assert c.window_manager is not wm
def test_connection_doesnt_send_window_update_on_zero_length_data_frame(self):
# Prepare a socket with a data frame in it that has no length.
sock = DummySocket()
sock.buffer = BytesIO(DataFrame(1).serialize())
c = HTTP20Connection('www.google.com')
c._sock = sock
# We open a request here just to allocate a stream, but we throw away
# the frames it sends.
c.request('GET', '/')
sock.queue = []
# Read the frame.
c._recv_cb()
# No frame should have been sent on the connection.
assert len(sock.queue) == 0
def test_streams_are_cleared_from_connections_on_close(self):
# Prepare a socket so we can open a stream.
sock = DummySocket()
c = HTTP20Connection('www.google.com')
c._sock = sock
# Open a request (which creates a stream)
c.request('GET', '/')
# Close the stream.
c.streams[1].close()
# There should be nothing left, but the next stream ID should be
# unchanged.
assert not c.streams
assert c.next_stream_id == 3
def test_connections_increment_send_window_properly(self):
f = WindowUpdateFrame(0)
f.window_increment = 1000
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
# 'Receive' the WINDOWUPDATE frame.
c.receive_frame(f)
assert c._out_flow_control_window == 65535 + 1000
def test_connections_handle_resizing_header_tables_properly(self):
sock = DummySocket()
f = SettingsFrame(0)
f.settings[SettingsFrame.HEADER_TABLE_SIZE] = 1024
c = HTTP20Connection('www.google.com')
c._sock = sock
# 'Receive' the SETTINGS frame.
c.receive_frame(f)
# Confirm that the setting is stored and the header table shrunk.
assert c._settings[SettingsFrame.HEADER_TABLE_SIZE] == 1024
# Confirm we got a SETTINGS ACK.
f2 = decode_frame(sock.queue[0])
assert isinstance(f2, SettingsFrame)
assert f2.stream_id == 0
assert f2.flags == set(['ACK'])
def test_read_headers_out_of_order(self):
# If header blocks aren't decoded in the same order they're received,
# regardless of the stream they belong to, the decoder state will become
# corrupted.
e = Encoder()
h1 = HeadersFrame(1)
h1.data = e.encode({':status': 200, 'content-type': 'foo/bar'})
h1.flags |= set(['END_HEADERS', 'END_STREAM'])
h3 = HeadersFrame(3)
h3.data = e.encode({':status': 200, 'content-type': 'baz/qux'})
h3.flags |= set(['END_HEADERS', 'END_STREAM'])
sock = DummySocket()
sock.buffer = BytesIO(h1.serialize() + h3.serialize())
c = HTTP20Connection('www.google.com')
c._sock = sock
r1 = c.request('GET', '/a')
r3 = c.request('GET', '/b')
assert c.get_response(r3).headers == HTTPHeaderMap([('content-type', 'baz/qux')])
assert c.get_response(r1).headers == HTTPHeaderMap([('content-type', 'foo/bar')])
def test_headers_with_continuation(self):
e = Encoder()
header_data = e.encode(
{':status': 200, 'content-type': 'foo/bar', 'content-length': '0'}
)
h = HeadersFrame(1)
h.data = header_data[0:int(len(header_data)/2)]
c = ContinuationFrame(1)
c.data = header_data[int(len(header_data)/2):]
c.flags |= set(['END_HEADERS', 'END_STREAM'])
sock = DummySocket()
sock.buffer = BytesIO(h.serialize() + c.serialize())
c = HTTP20Connection('www.google.com')
c._sock = sock
r = c.request('GET', '/')
assert set(c.get_response(r).headers.iter_raw()) == set([(b'content-type', b'foo/bar'), (b'content-length', b'0')])
def test_receive_unexpected_frame(self):
# RST_STREAM frames are never defined on connections, so send one of
# those.
c = HTTP20Connection('www.google.com')
f = RstStreamFrame(1)
with pytest.raises(ValueError):
c.receive_frame(f)
def test_send_tolerate_peer_gone(self):
class ErrorSocket(DummySocket):
def send(self, data):
raise socket.error(errno.EPIPE)
c = HTTP20Connection('www.google.com')
c._sock = ErrorSocket()
f = SettingsFrame(0)
with pytest.raises(socket.error):
c._send_cb(f, False)
c._sock = DummySocket()
c._send_cb(f, True) # shouldn't raise an error
def test_window_increments_appropriately(self):
e = Encoder()
h = HeadersFrame(1)
h.data = e.encode({':status': 200, 'content-type': 'foo/bar'})
h.flags = set(['END_HEADERS'])
d = DataFrame(1)
d.data = b'hi there sir'
d2 = DataFrame(1)
d2.data = b'hi there sir again'
d2.flags = set(['END_STREAM'])
sock = DummySocket()
sock.buffer = BytesIO(h.serialize() + d.serialize() + d2.serialize())
c = HTTP20Connection('www.google.com')
c._sock = sock
c.window_manager.window_size = 1000
c.window_manager.initial_window_size = 1000
c.request('GET', '/')
resp = c.get_response()
resp.read()
queue = list(map(decode_frame, map(memoryview, sock.queue)))
assert len(queue) == 3 # one headers frame, two window update frames.
assert isinstance(queue[1], WindowUpdateFrame)
assert queue[1].window_increment == len(b'hi there sir')
assert isinstance(queue[2], WindowUpdateFrame)
assert queue[2].window_increment == len(b'hi there sir again')
def test_ping_with_ack_ignored(self):
c = HTTP20Connection('www.google.com')
f = PingFrame(0)
f.flags = set(['ACK'])
f.opaque_data = b'12345678'
def data_cb(frame, tolerate_peer_gone=False):
assert False, 'should not be called'
c._send_cb = data_cb
c.receive_frame(f)
def test_ping_without_ack_gets_reply(self):
c = HTTP20Connection('www.google.com')
f = PingFrame(0)
f.opaque_data = b'12345678'
frames = []
def data_cb(frame, tolerate_peer_gone=False):
frames.append(frame)
c._send_cb = data_cb
c.receive_frame(f)
assert len(frames) == 1
assert frames[0].type == PingFrame.type
assert frames[0].flags == set(['ACK'])
assert frames[0].opaque_data == b'12345678'
def test_blocked_causes_window_updates(self):
frames = []
def data_cb(frame, *args):
frames.append(frame)
c = HTTP20Connection('www.google.com')
c._send_cb = data_cb
# Change the window size.
c.window_manager.window_size = 60000
# Provide a BLOCKED frame.
f = BlockedFrame(1)
c.receive_frame(f)
assert len(frames) == 1
assert frames[0].type == WindowUpdateFrame.type
assert frames[0].window_increment == 5535
class TestServerPush(object):
def setup_method(self, method):
self.frames = []
self.encoder = Encoder()
self.conn = None
def add_push_frame(self, stream_id, promised_stream_id, headers, end_block=True):
frame = PushPromiseFrame(stream_id)
frame.promised_stream_id = promised_stream_id
frame.data = self.encoder.encode(headers)
if end_block:
frame.flags.add('END_HEADERS')
self.frames.append(frame)
def add_headers_frame(self, stream_id, headers, end_block=True, end_stream=False):
frame = HeadersFrame(stream_id)
frame.data = self.encoder.encode(headers)
if end_block:
frame.flags.add('END_HEADERS')
if end_stream:
frame.flags.add('END_STREAM')
self.frames.append(frame)
def add_data_frame(self, stream_id, data, end_stream=False):
frame = DataFrame(stream_id)
frame.data = data
if end_stream:
frame.flags.add('END_STREAM')
self.frames.append(frame)
def request(self):
self.conn = HTTP20Connection('www.google.com', enable_push=True)
self.conn._sock = DummySocket()
self.conn._sock.buffer = BytesIO(b''.join([frame.serialize() for frame in self.frames]))
self.conn.request('GET', '/')
def assert_response(self):
self.response = self.conn.get_response()
assert self.response.status == 200
assert dict(self.response.headers) == {b'content-type': [b'text/html']}
def assert_pushes(self):
self.pushes = list(self.conn.get_pushes())
assert len(self.pushes) == 1
assert self.pushes[0].method == b'GET'
assert self.pushes[0].scheme == b'https'
assert self.pushes[0].authority == b'www.google.com'
assert self.pushes[0].path == b'/'
expected_headers = {b'accept-encoding': [b'gzip']}
assert dict(self.pushes[0].request_headers) == expected_headers
def assert_push_response(self):
push_response = self.pushes[0].get_response()
assert push_response.status == 200
assert dict(push_response.headers) == {b'content-type': [b'application/javascript']}
assert push_response.read() == b'bar'
def test_promise_before_headers(self):
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.add_data_frame(1, b'foo', end_stream=True)
self.add_headers_frame(2, [(':status', '200'), ('content-type', 'application/javascript')])
self.add_data_frame(2, b'bar', end_stream=True)
self.request()
assert len(list(self.conn.get_pushes())) == 0
self.assert_response()
self.assert_pushes()
assert self.response.read() == b'foo'
self.assert_push_response()
def test_promise_after_headers(self):
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_data_frame(1, b'foo', end_stream=True)
self.add_headers_frame(2, [(':status', '200'), ('content-type', 'application/javascript')])
self.add_data_frame(2, b'bar', end_stream=True)
self.request()
assert len(list(self.conn.get_pushes())) == 0
self.assert_response()
assert len(list(self.conn.get_pushes())) == 0
assert self.response.read() == b'foo'
self.assert_pushes()
self.assert_push_response()
def test_promise_after_data(self):
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.add_data_frame(1, b'fo')
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_data_frame(1, b'o', end_stream=True)
self.add_headers_frame(2, [(':status', '200'), ('content-type', 'application/javascript')])
self.add_data_frame(2, b'bar', end_stream=True)
self.request()
assert len(list(self.conn.get_pushes())) == 0
self.assert_response()
assert len(list(self.conn.get_pushes())) == 0
assert self.response.read() == b'foo'
self.assert_pushes()
self.assert_push_response()
def test_capture_all_promises(self):
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/one'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.add_push_frame(1, 4, [(':method', 'GET'), (':path', '/two'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_data_frame(1, b'foo', end_stream=True)
self.add_headers_frame(4, [(':status', '200'), ('content-type', 'application/javascript')])
self.add_headers_frame(2, [(':status', '200'), ('content-type', 'application/javascript')])
self.add_data_frame(4, b'two', end_stream=True)
self.add_data_frame(2, b'one', end_stream=True)
self.request()
assert len(list(self.conn.get_pushes())) == 0
pushes = list(self.conn.get_pushes(capture_all=True))
assert len(pushes) == 2
assert pushes[0].path == b'/one'
assert pushes[1].path == b'/two'
assert pushes[0].get_response().read() == b'one'
assert pushes[1].get_response().read() == b'two'
self.assert_response()
assert self.response.read() == b'foo'
def test_cancel_push(self):
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.request()
self.conn.get_response()
list(self.conn.get_pushes())[0].cancel()
f = RstStreamFrame(2)
f.error_code = 8
assert self.conn._sock.queue[-1] == f.serialize()
def test_reset_pushed_streams_when_push_disabled(self):
self.add_push_frame(1, 2, [(':method', 'GET'), (':path', '/'), (':authority', 'www.google.com'), (':scheme', 'https'), ('accept-encoding', 'gzip')])
self.add_headers_frame(1, [(':status', '200'), ('content-type', 'text/html')])
self.request()
self.conn._enable_push = False
self.conn.get_response()
f = RstStreamFrame(2)
f.error_code = 7
assert self.conn._sock.queue[-1] == f.serialize()
def test_pushed_requests_ignore_unexpected_headers(self):
headers = HTTPHeaderMap([
(':scheme', 'http'),
(':method', 'get'),
(':authority', 'google.com'),
(':path', '/'),
(':reserved', 'no'),
('no', 'no'),
])
p = HTTP20Push(headers, DummyStream(b''))
assert p.request_headers == HTTPHeaderMap([('no', 'no')])
class TestHyperStream(object):
def test_streams_have_ids(self):
s = Stream(1, None, None, None, None, None, None)
assert s.stream_id == 1
def test_streams_initially_have_no_headers(self):
s = Stream(1, None, None, None, None, None, None)
assert s.headers == []
def test_streams_can_have_headers(self):
s = Stream(1, None, None, None, None, None, None)
s.add_header("name", "value")
assert s.headers == [("name", "value")]
def test_stream_opening_sends_headers(self):
def data_callback(frame):
assert isinstance(frame, HeadersFrame)
assert frame.data == 'testkeyTestVal'
assert frame.flags == set(['END_STREAM', 'END_HEADERS'])
s = Stream(1, data_callback, None, None, NullEncoder, None, None)
s.add_header("TestKey", "TestVal")
s.open(True)
assert s.state == STATE_HALF_CLOSED_LOCAL
def test_file_objects_can_be_sent(self):
def data_callback(frame):
assert isinstance(frame, DataFrame)
assert frame.data == b'Hi there!'
assert frame.flags == set(['END_STREAM'])
s = Stream(1, data_callback, None, None, NullEncoder, None, None)
s.state = STATE_OPEN
s.send_data(BytesIO(b'Hi there!'), True)
assert s.state == STATE_HALF_CLOSED_LOCAL
assert s._out_flow_control_window == 65535 - len(b'Hi there!')
def test_large_file_objects_are_broken_into_chunks(self):
frame_count = [0]
recent_frame = [None]
def data_callback(frame):
assert isinstance(frame, DataFrame)
assert len(frame.data) <= MAX_CHUNK
frame_count[0] += 1
recent_frame[0] = frame
data = b'test' * (MAX_CHUNK + 1)
s = Stream(1, data_callback, None, None, NullEncoder, None, None)
s.state = STATE_OPEN
s.send_data(BytesIO(data), True)
assert s.state == STATE_HALF_CLOSED_LOCAL
assert recent_frame[0].flags == set(['END_STREAM'])
assert frame_count[0] == 5
assert s._out_flow_control_window == 65535 - len(data)
def test_bytestrings_can_be_sent(self):
def data_callback(frame):
assert isinstance(frame, DataFrame)
assert frame.data == b'Hi there!'
assert frame.flags == set(['END_STREAM'])
s = Stream(1, data_callback, None, None, NullEncoder, None, None)
s.state = STATE_OPEN
s.send_data(b'Hi there!', True)
assert s.state == STATE_HALF_CLOSED_LOCAL
assert s._out_flow_control_window == 65535 - len(b'Hi there!')
def test_long_bytestrings_are_split(self):
frame_count = [0]
recent_frame = [None]
def data_callback(frame):
assert isinstance(frame, DataFrame)
assert len(frame.data) <= MAX_CHUNK
frame_count[0] += 1
recent_frame[0] = frame
data = b'test' * (MAX_CHUNK + 1)
s = Stream(1, data_callback, None, None, NullEncoder, None, None)
s.state = STATE_OPEN
s.send_data(data, True)
assert s.state == STATE_HALF_CLOSED_LOCAL
assert recent_frame[0].flags == set(['END_STREAM'])
assert frame_count[0] == 5
assert s._out_flow_control_window == 65535 - len(data)
def test_windowupdate_frames_update_windows(self):
s = Stream(1, None, None, None, None, None, None)
f = WindowUpdateFrame(1)
f.window_increment = 1000
s.receive_frame(f)
assert s._out_flow_control_window == 65535 + 1000
def test_flow_control_manager_update_includes_padding(self):
out_frames = []
in_frames = []
def send_cb(frame):
out_frames.append(frame)
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
start_window = 65535
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(start_window))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide two data frames to read.
f = DataFrame(1)
f.data = b'hi there!'
f.pad_length = 10
f.flags.add('END_STREAM')
in_frames.append(f)
data = s._read()
assert data == b'hi there!'
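# padded DATA frames count fully against flow control: the data bytes, the pad bytes, and the 1-byte Pad Length field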
assert s._in_window_manager.window_size == start_window - f.pad_length - len(data) - 1
def test_blocked_frames_cause_window_updates(self):
out_frames = []
def send_cb(frame, *args):
out_frames.append(frame)
start_window = 65535
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(start_window))
s._data_cb = send_cb
s.state = STATE_HALF_CLOSED_LOCAL
# Change the window size.
s._in_window_manager.window_size = 60000
# Provide a BLOCKED frame.
f = BlockedFrame(1)
s.receive_frame(f)
assert len(out_frames) == 1
assert out_frames[0].type == WindowUpdateFrame.type
assert out_frames[0].window_increment == 5535
def test_stream_reading_works(self):
out_frames = []
in_frames = []
def send_cb(frame, tolerate_peer_gone=False):
out_frames.append(frame)
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(65535))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide a data frame to read.
f = DataFrame(1)
f.data = b'hi there!'
f.flags.add('END_STREAM')
in_frames.append(f)
data = s._read()
assert data == b'hi there!'
assert len(out_frames) == 0
def test_can_read_multiple_frames_from_streams(self):
out_frames = []
in_frames = []
def send_cb(frame, tolerate_peer_gone=False):
out_frames.append(frame)
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(800))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide two data frames to read.
f = DataFrame(1)
f.data = b'hi there!'
in_frames.append(f)
f = DataFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
in_frames.append(f)
data = s._read()
assert data == b'hi there!hi there again!'
assert len(out_frames) == 1
assert isinstance(out_frames[0], WindowUpdateFrame)
assert out_frames[0].window_increment == len(b'hi there!')
def test_partial_reads_from_streams(self):
out_frames = []
in_frames = []
def send_cb(frame, tolerate_peer_gone=False):
out_frames.append(frame)
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(800))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide two data frames to read.
f = DataFrame(1)
f.data = b'hi there!'
in_frames.append(f)
f = DataFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
in_frames.append(f)
# We'll get the entire first frame.
data = s._read(4)
assert data == b'hi there!'
assert len(out_frames) == 1
# Now we'll get the entirety of the second frame.
data = s._read(4)
assert data == b'hi there again!'
assert len(out_frames) == 1
assert s.state == STATE_CLOSED
def test_can_receive_continuation_frame_after_end_stream(self):
s = Stream(1, None, None, None, None, None, FlowControlManager(65535))
f = HeadersFrame(1)
f.data = 'hi there'
f.flags = set(['END_STREAM'])
f2 = ContinuationFrame(1)
f2.data = ' sir'
f2.flags = set(['END_HEADERS'])
s.receive_frame(f)
s.receive_frame(f2)
def test_receive_unexpected_frame(self):
# SETTINGS frames are never defined on streams, so send one of those.
s = Stream(1, None, None, None, None, None, None)
f = SettingsFrame(0)
with pytest.raises(ValueError):
s.receive_frame(f)
def test_can_receive_trailers(self):
headers = [('a', 'b'), ('c', 'd'), (':status', '200')]
trailers = [('e', 'f'), ('g', 'h')]
s = Stream(1, None, None, None, None, FixedDecoder(headers), None)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide the first HEADERS frame.
f = HeadersFrame(1)
f.data = b'hi there!'
f.flags.add('END_HEADERS')
s.receive_frame(f)
assert s.response_headers == HTTPHeaderMap(headers)
# Now, replace the dummy decoder to ensure we get a new header block.
s._decoder = FixedDecoder(trailers)
# Provide the trailers.
f = HeadersFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
f.flags.add('END_HEADERS')
s.receive_frame(f)
# Now, check the trailers.
assert s.response_trailers == HTTPHeaderMap(trailers)
# Confirm we closed the stream.
assert s.state == STATE_CLOSED
def test_cannot_receive_three_header_blocks(self):
first = [('a', 'b'), ('c', 'd'), (':status', '200')]
s = Stream(1, None, None, None, None, FixedDecoder(first), None)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide the first two header frames.
f = HeadersFrame(1)
f.data = b'hi there!'
f.flags.add('END_HEADERS')
s.receive_frame(f)
f = HeadersFrame(1)
f.data = b'hi there again!'
f.flags.add('END_HEADERS')
s.receive_frame(f)
# Provide the third. This one blows up.
f = HeadersFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
f.flags.add('END_HEADERS')
with pytest.raises(ProtocolError):
s.receive_frame(f)
def test_reading_trailers_early_reads_all_data(self):
in_frames = []
headers = [('a', 'b'), ('c', 'd'), (':status', '200')]
trailers = [('e', 'f'), ('g', 'h')]
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
s = Stream(1, None, None, None, None, FixedDecoder(headers), FlowControlManager(65535))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide the first HEADERS frame.
f = HeadersFrame(1)
f.data = b'hi there!'
f.flags.add('END_HEADERS')
in_frames.append(f)
# Provide some data.
f = DataFrame(1)
f.data = b'testdata'
in_frames.append(f)
# Provide the trailers.
f = HeadersFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
f.flags.add('END_HEADERS')
in_frames.append(f)
# Begin by reading the first headers.
assert s.getheaders() == HTTPHeaderMap(headers)
# Now, replace the dummy decoder to ensure we get a new header block.
s._decoder = FixedDecoder(trailers)
# Ask for the trailers. This should also read the data frames.
assert s.gettrailers() == HTTPHeaderMap(trailers)
assert s.data == [b'testdata']
def test_can_read_single_frames_from_streams(self):
out_frames = []
in_frames = []
def send_cb(frame, tolerate_peer_gone=False):
out_frames.append(frame)
def recv_cb(s):
def inner():
s.receive_frame(in_frames.pop(0))
return inner
s = Stream(1, send_cb, None, None, None, None, FlowControlManager(800))
s._recv_cb = recv_cb(s)
s.state = STATE_HALF_CLOSED_LOCAL
# Provide two data frames to read.
f = DataFrame(1)
f.data = b'hi there!'
in_frames.append(f)
f = DataFrame(1)
f.data = b'hi there again!'
f.flags.add('END_STREAM')
in_frames.append(f)
data = s._read_one_frame()
assert data == b'hi there!'
data = s._read_one_frame()
assert data == b'hi there again!'
data = s._read_one_frame()
assert data is None
data = s._read()
assert data == b''
class TestResponse(object):
def test_status_is_stripped_from_headers(self):
headers = HTTPHeaderMap([(':status', '200')])
resp = HTTP20Response(headers, None)
assert resp.status == 200
assert not resp.headers
def test_response_transparently_decrypts_gzip(self):
headers = HTTPHeaderMap([(':status', '200'), ('content-encoding', 'gzip')])
c = zlib_compressobj(wbits=24)
body = c.compress(b'this is test data')
body += c.flush()
resp = HTTP20Response(headers, DummyStream(body))
assert resp.read() == b'this is test data'
def test_response_transparently_decrypts_real_deflate(self):
headers = HTTPHeaderMap([(':status', '200'), ('content-encoding', 'deflate')])
c = zlib_compressobj(wbits=zlib.MAX_WBITS)
body = c.compress(b'this is test data')
body += c.flush()
resp = HTTP20Response(headers, DummyStream(body))
assert resp.read() == b'this is test data'
def test_response_transparently_decrypts_wrong_deflate(self):
headers = HTTPHeaderMap([(':status', '200'), ('content-encoding', 'deflate')])
c = zlib_compressobj(wbits=-zlib.MAX_WBITS)
body = c.compress(b'this is test data')
body += c.flush()
resp = HTTP20Response(headers, DummyStream(body))
assert resp.read() == b'this is test data'
def test_response_calls_stream_close(self):
headers = HTTPHeaderMap([(':status', '200')])
stream = DummyStream('')
resp = HTTP20Response(headers, stream)
resp.close()
assert stream.closed
def test_responses_are_context_managers(self):
headers = HTTPHeaderMap([(':status', '200')])
stream = DummyStream('')
with HTTP20Response(headers, stream) as resp:
pass
assert stream.closed
def test_read_small_chunks(self):
headers = HTTPHeaderMap([(':status', '200')])
stream = DummyStream(b'1234567890')
chunks = [b'12', b'34', b'56', b'78', b'90']
resp = HTTP20Response(headers, stream)
for chunk in chunks:
assert resp.read(2) == chunk
assert resp.read() == b''
def test_read_buffered(self):
headers = HTTPHeaderMap([(':status', '200')])
stream = DummyStream(b'1234567890')
chunks = [b'12', b'34', b'56', b'78', b'90'] * 2
resp = HTTP20Response(headers, stream)
resp._data_buffer = b'1234567890'
for chunk in chunks:
assert resp.read(2) == chunk
assert resp.read() == b''
def test_getheader(self):
headers = HTTPHeaderMap([(':status', '200'), ('content-type', 'application/json')])
stream = DummyStream(b'')
resp = HTTP20Response(headers, stream)
assert resp.headers[b'content-type'] == [b'application/json']
def test_response_ignores_unknown_headers(self):
headers = HTTPHeaderMap([(':status', '200'), (':reserved', 'yes'), ('no', 'no')])
stream = DummyStream(b'')
resp = HTTP20Response(headers, stream)
assert resp.headers == HTTPHeaderMap([('no', 'no')])
def test_fileno_not_implemented(self):
headers = HTTPHeaderMap([(':status', '200')])
resp = HTTP20Response(headers, DummyStream(b''))
with pytest.raises(NotImplementedError):
resp.fileno()
def test_trailers_are_read(self):
headers = HTTPHeaderMap([(':status', '200')])
trailers = HTTPHeaderMap([('a', 'b'), ('c', 'd')])
stream = DummyStream(b'', trailers=trailers)
resp = HTTP20Response(headers, stream)
assert resp.trailers == trailers
assert resp.trailers['a'] == [b'b']
assert resp.trailers['c'] == [b'd']
def test_read_frames(self):
headers = HTTPHeaderMap([(':status', '200')])
stream = DummyStream(None)
chunks = [b'12', b'3456', b'78', b'9']
stream.data_frames = chunks
resp = HTTP20Response(headers, stream)
for recv, expected in zip(resp.read_chunked(), chunks[:]):
assert recv == expected
def test_read_compressed_frames(self):
headers = HTTPHeaderMap([(':status', '200'), ('content-encoding', 'gzip')])
c = zlib_compressobj(wbits=24)
body = c.compress(b'this is test data')
body += c.flush()
stream = DummyStream(None)
chunks = [body[x:x+2] for x in range(0, len(body), 2)]
stream.data_frames = chunks
resp = HTTP20Response(headers, stream)
received = b''
for chunk in resp.read_chunked():
received += chunk
assert received == b'this is test data'
class TestHTTP20Adapter(object):
def test_adapter_reuses_connections(self):
a = HTTP20Adapter()
conn1 = a.get_connection('http2bin.org', 80, 'http')
conn2 = a.get_connection('http2bin.org', 80, 'http')
assert conn1 is conn2
class TestUtilities(object):
def test_combining_repeated_headers(self):
test_headers = [
(b'key1', b'val1'),
(b'key2', b'val2'),
(b'key1', b'val1.1'),
(b'key3', b'val3'),
(b'key2', b'val2.1'),
(b'key1', b'val1.2'),
]
expected = [
(b'key1', b'val1\x00val1.1\x00val1.2'),
(b'key2', b'val2\x00val2.1'),
(b'key3', b'val3'),
]
assert expected == combine_repeated_headers(test_headers)
def test_splitting_repeated_headers(self):
test_headers = [
(b'key1', b'val1\x00val1.1\x00val1.2'),
(b'key2', b'val2\x00val2.1'),
(b'key3', b'val3'),
]
expected = {
b'key1': [b'val1', b'val1.1', b'val1.2'],
b'key2': [b'val2', b'val2.1'],
b'key3': [b'val3'],
}
assert expected == split_repeated_headers(test_headers)
def test_nghttp2_installs_correctly(self):
# This test is a debugging tool: if nghttp2 is being tested by Travis,
# we need to confirm it imports correctly. Hyper will normally hide the
# import failure, so let's discover it here.
# Alternatively, if we are *not* testing with nghttp2, this test should
# confirm that it's not available.
if os.environ.get('NGHTTP2'):
import nghttp2
else:
with pytest.raises(ImportError):
import nghttp2
assert True
def test_stripping_connection_header(self):
headers = [('one', 'two'), ('connection', 'close')]
stripped = [('one', 'two')]
assert h2_safe_headers(headers) == stripped
def test_stripping_related_headers(self):
headers = [
('one', 'two'), ('three', 'four'), ('five', 'six'),
('connection', 'close, three, five')
]
stripped = [('one', 'two')]
assert h2_safe_headers(headers) == stripped
def test_stripping_multiple_connection_headers(self):
headers = [
('one', 'two'), ('three', 'four'), ('five', 'six'),
('connection', 'close'),
('connection', 'three, five')
]
stripped = [('one', 'two')]
assert h2_safe_headers(headers) == stripped
def test_goaway_frame_PROTOCOL_ERROR(self):
f = GoAwayFrame(0)
# Set error code to PROTOCOL_ERROR
f.error_code = 1
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
# 'Receive' the GOAWAY frame.
# Validate that the spec error name and description are used to throw
# the connection exception.
with pytest.raises(ConnectionError) as conn_err:
c.receive_frame(f)
err_msg = str(conn_err)
name, number, description = errors.get_data(1)
assert name in err_msg
assert number in err_msg
assert description in err_msg
def test_goaway_frame_HTTP_1_1_REQUIRED(self):
f = GoAwayFrame(0)
# Set error code to HTTP_1_1_REQUIRED
f.error_code = 13
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
# 'Receive' the GOAWAY frame.
# Validate that the spec error name and description are used to throw
# the connection exception.
with pytest.raises(ConnectionError) as conn_err:
c.receive_frame(f)
err_msg = str(conn_err)
name, number, description = errors.get_data(13)
assert name in err_msg
assert number in err_msg
assert description in err_msg
def test_goaway_frame_NO_ERROR(self):
f = GoAwayFrame(0)
# Set error code to NO_ERROR
f.error_code = 0
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
# 'Receive' the GOAWAY frame.
# Test makes sure no exception is raised; error code 0 means we are
# dealing with a standard and graceful shutdown.
c.receive_frame(f)
def test_goaway_frame_invalid_error_code(self):
f = GoAwayFrame(0)
# Set error code to a non-existent error
f.error_code = 100
f.additional_data = 'data about non existing error code'
c = HTTP20Connection('www.google.com')
c._sock = DummySocket()
# 'Receive' the GOAWAY frame.
# If the error code does not exist in the spec then the additional
# data is used instead.
with pytest.raises(ConnectionError) as conn_err:
c.receive_frame(f)
err_msg = str(conn_err)
with pytest.raises(ValueError):
name, number, description = errors.get_data(100)
assert 'data about non existing error code' in err_msg
assert str(f.error_code) in err_msg
def test_receive_unexpected_stream_id(self):
frames = []
def data_callback(frame):
frames.append(frame)
c = HTTP20Connection('www.google.com')
c._send_cb = data_callback
f = DataFrame(2)
data = memoryview(b"hi there sir")
c._consume_frame_payload(f, data)
# If we receive an unexpected stream id then we cancel the stream
# by sending a reset stream that contains the protocol error code (1)
f = frames[0]
assert len(frames) == 1
assert f.stream_id == 2
assert isinstance(f, RstStreamFrame)
assert f.error_code == 1 # PROTOCOL_ERROR
# Some utility classes for the tests.
class NullEncoder(object):
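# stand-in for the HPACK encoder: emits each header as name immediately followed
# by value, one header per line, so tests can assert on exact frame payloads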
@staticmethod
def encode(headers):
return '\n'.join("%s%s" % (name, val) for name, val in headers)
class FixedDecoder(object):
def __init__(self, result):
self.result = result
def decode(self, headers):
return self.result
class DummySocket(object):
def __init__(self):
self.queue = []
self.buffer = BytesIO()
self.can_read = False
def send(self, data):
self.queue.append(data)
def recv(self, l):
return memoryview(self.buffer.read(l))
def close(self):
pass
class DummyFitfullySocket(DummySocket):
def recv(self, l):
length = l
if l != 9 and l >= 4:
length = int(round(l / 2))
return memoryview(self.buffer.read(length))
class DummyStream(object):
def __init__(self, data, trailers=None):
self.data = data
self.data_frames = []
self.closed = False
self.response_headers = {}
self._remote_closed = False
self.trailers = trailers
if self.trailers is None:
self.trailers = []
def _read(self, *args, **kwargs):
try:
read_len = min(args[0], len(self.data))
except IndexError:
read_len = len(self.data)
d = self.data[:read_len]
self.data = self.data[read_len:]
if not self.data:
self._remote_closed = True
return d
def _read_one_frame(self):
try:
return self.data_frames.pop(0)
except IndexError:
return None
def close(self):
if not self.closed:
self.closed = True
else:
assert False
def gettrailers(self):
return self.trailers
|
|
#
# Coldcard Electrum plugin main code.
#
#
import os, time, io
import traceback
from typing import TYPE_CHECKING, Optional
import struct
from electrum_mona import bip32
from electrum_mona.bip32 import BIP32Node, InvalidMasterKeyVersionBytes
from electrum_mona.i18n import _
from electrum_mona.plugin import Device, hook, runs_in_hwd_thread
from electrum_mona.keystore import Hardware_KeyStore, KeyStoreWithMPK
from electrum_mona.transaction import PartialTransaction
from electrum_mona.wallet import Standard_Wallet, Multisig_Wallet, Abstract_Wallet
from electrum_mona.util import bfh, bh2u, versiontuple, UserFacingException
from electrum_mona.base_wizard import ScriptTypeNotSupported
from electrum_mona.logging import get_logger
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import LibraryFoundButUnusable, only_hook_if_libraries_available
_logger = get_logger(__name__)
try:
import hid
from ckcc.protocol import CCProtocolPacker, CCProtocolUnpacker
from ckcc.protocol import CCProtoError, CCUserRefused, CCBusyError
from ckcc.constants import (MAX_MSG_LEN, MAX_BLK_LEN, MSG_SIGNING_MAX_LENGTH, MAX_TXN_LEN,
AF_CLASSIC, AF_P2SH, AF_P2WPKH, AF_P2WSH, AF_P2WPKH_P2SH, AF_P2WSH_P2SH)
from ckcc.client import ColdcardDevice, COINKITE_VID, CKCC_PID, CKCC_SIMULATOR_PATH
requirements_ok = True
class ElectrumColdcardDevice(ColdcardDevice):
# avoid use of pycoin for MiTM message signature test
def mitm_verify(self, sig, expect_xpub):
# verify a signature (65 bytes) over the session key, using the master bip32 node
# - customized to use specific EC library of Electrum.
pubkey = BIP32Node.from_xkey(expect_xpub).eckey
try:
pubkey.verify_message_hash(sig[1:65], self.session_key)
return True
except:
return False
except ImportError as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'ckcc'):
_logger.exception('error importing coldcard plugin deps')
requirements_ok = False
COINKITE_VID = 0xd13e
CKCC_PID = 0xcc10
CKCC_SIMULATED_PID = CKCC_PID ^ 0x55aa
class CKCCClient(HardwareClientBase):
def __init__(self, plugin, handler, dev_path, *, is_simulator=False):
HardwareClientBase.__init__(self, plugin=plugin)
self.device = plugin.device
self.handler = handler
# if we know what the (xfp, xpub) "should be" then track it here
self._expected_device = None
if is_simulator:
self.dev = ElectrumColdcardDevice(dev_path, encrypt=True)
else:
# open the real HID device
hd = hid.device(path=dev_path)
hd.open_path(dev_path)
self.dev = ElectrumColdcardDevice(dev=hd, encrypt=True)
# NOTE: MiTM test is delayed until we have a hint as to what XPUB we
# should expect. It's also kinda slow.
def __repr__(self):
return '<CKCCClient: xfp=%s label=%r>' % (xfp2str(self.dev.master_fingerprint),
self.label())
@runs_in_hwd_thread
def verify_connection(self, expected_xfp: int, expected_xpub=None):
ex = (expected_xfp, expected_xpub)
if self._expected_device == ex:
# all is as expected
return
if expected_xpub is None:
expected_xpub = self.dev.master_xpub
if ((self._expected_device is not None)
or (self.dev.master_fingerprint != expected_xfp)
or (self.dev.master_xpub != expected_xpub)):
# probably indicating a programming error, not hacking
_logger.info(f"xpubs. reported by device: {self.dev.master_xpub}. "
f"stored in file: {expected_xpub}")
raise RuntimeError("Expecting %s but that's not what's connected?!" %
xfp2str(expected_xfp))
# check signature over session key
# - mitm might have lied about xfp and xpub up to here
# - important that we use value capture at wallet creation time, not some value
# we read over USB today
self.dev.check_mitm(expected_xpub=expected_xpub)
self._expected_device = ex
if not getattr(self, 'ckcc_xpub', None):
self.ckcc_xpub = expected_xpub
_logger.info("Successfully verified against MiTM")
def is_pairable(self):
# can't do anything w/ devices that aren't set up (this code not normally reachable)
return bool(self.dev.master_xpub)
@runs_in_hwd_thread
def close(self):
# close the HID device (so can be reused)
self.dev.close()
self.dev = None
def is_initialized(self):
return bool(self.dev.master_xpub)
def label(self):
# 'label' of this Coldcard. Warning: gets saved into wallet file, which might
# not be encrypted, so better for privacy if based on xpub/fingerprint rather than
# USB serial number.
if self.dev.is_simulator:
lab = 'Coldcard Simulator ' + xfp2str(self.dev.master_fingerprint)
elif not self.dev.master_fingerprint:
# fallback; not expected
lab = 'Coldcard #' + self.dev.serial
else:
lab = 'Coldcard ' + xfp2str(self.dev.master_fingerprint)
return lab
def manipulate_keystore_dict_during_wizard_setup(self, d: dict):
master_xpub = self.dev.master_xpub
if master_xpub is not None:
try:
node = BIP32Node.from_xkey(master_xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(
_('Invalid xpub magic. Make sure your {} device is set to the correct chain.').format(self.device) + ' ' +
_('You might have to unplug and plug it in again.')
) from None
d['ckcc_xpub'] = master_xpub
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
# Do end-to-end ping test
try:
self.ping_check()
return True
except:
return False
@runs_in_hwd_thread
def get_xpub(self, bip32_path, xtype):
assert xtype in ColdcardPlugin.SUPPORTED_XTYPES
_logger.info('Derive xtype = %r' % xtype)
xpub = self.dev.send_recv(CCProtocolPacker.get_xpub(bip32_path), timeout=5000)
# TODO handle timeout?
# change type of xpub to the requested type
try:
node = BIP32Node.from_xkey(xpub)
except InvalidMasterKeyVersionBytes:
raise UserFacingException(_('Invalid xpub magic. Make sure your {} device is set to the correct chain.')
.format(self.device)) from None
if xtype != 'standard':
xpub = node._replace(xtype=xtype).to_xpub()
return xpub
@runs_in_hwd_thread
def ping_check(self):
# check connection is working
assert self.dev.session_key, 'not encrypted?'
req = b'1234 Electrum Plugin 4321' # free up to 59 bytes
try:
echo = self.dev.send_recv(CCProtocolPacker.ping(req))
assert echo == req
except:
raise RuntimeError("Communication trouble with Coldcard")
@runs_in_hwd_thread
def show_address(self, path, addr_fmt):
# prompt user w/ address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_address(path, addr_fmt), timeout=None)
@runs_in_hwd_thread
def show_p2sh_address(self, *args, **kws):
# prompt user w/ p2sh address, also returns it immediately.
return self.dev.send_recv(CCProtocolPacker.show_p2sh_address(*args, **kws), timeout=None)
@runs_in_hwd_thread
def get_version(self):
# gives list of strings
return self.dev.send_recv(CCProtocolPacker.version(), timeout=1000).split('\n')
@runs_in_hwd_thread
def sign_message_start(self, path, msg):
# this starts the UX experience.
self.dev.send_recv(CCProtocolPacker.sign_message(msg, path), timeout=None)
@runs_in_hwd_thread
def sign_message_poll(self):
# poll device... if user has approved, will get tuple: (addr, sig) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_msg(), timeout=None)
@runs_in_hwd_thread
def sign_transaction_start(self, raw_psbt: bytes, *, finalize: bool = False):
# Multiple steps to sign:
# - upload binary
# - start signing UX
# - wait for coldcard to complete process, or have it refused.
# - download resulting txn
assert 20 <= len(raw_psbt) < MAX_TXN_LEN, 'PSBT is too big'
dlen, chk = self.dev.upload_file(raw_psbt)
resp = self.dev.send_recv(CCProtocolPacker.sign_transaction(dlen, chk, finalize=finalize),
timeout=None)
if resp is not None:
raise ValueError(resp)
@runs_in_hwd_thread
def sign_transaction_poll(self):
# poll device... if user has approved, will get tuple: (length, checksum) else None
return self.dev.send_recv(CCProtocolPacker.get_signed_txn(), timeout=None)
@runs_in_hwd_thread
def download_file(self, length, checksum, file_number=1):
# get a file
return self.dev.download_file(length, checksum, file_number=file_number)
class Coldcard_KeyStore(Hardware_KeyStore):
hw_type = 'coldcard'
device = 'Coldcard'
plugin: 'ColdcardPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.ux_busy = False
# we need to know at least the fingerprint of the master xpub to verify against MiTM
# - device reports these value during encryption setup process
# - full xpub value now optional
self.ckcc_xpub = d.get('ckcc_xpub', None)
def dump(self):
# our additions to the stored data about keystore -- only during creation?
d = Hardware_KeyStore.dump(self)
d['ckcc_xpub'] = self.ckcc_xpub
return d
def get_xfp_int(self) -> int:
xfp = self.get_root_fingerprint()
assert xfp is not None
return xfp_int_from_xfp_bytes(bfh(xfp))
def get_client(self):
# called when user tries to do something like view address, sign something.
# - not called during probing/setup
# - will fail if indicated device can't produce the xpub (at derivation) expected
rv = self.plugin.get_client(self)
if rv:
xfp_int = self.get_xfp_int()
rv.verify_connection(xfp_int, self.ckcc_xpub)
return rv
def give_error(self, message, clear_client=False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def wrap_busy(func):
# decorator: function takes over the UX on the device.
def wrapper(self, *args, **kwargs):
try:
self.ux_busy = True
return func(self, *args, **kwargs)
finally:
self.ux_busy = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@wrap_busy
def sign_message(self, sequence, message, password):
# Sign a message on device. Since we have a big screen, of course we
# have to show the message unambiguously there first!
try:
msg = message.encode('ascii', errors='strict')
assert 1 <= len(msg) <= MSG_SIGNING_MAX_LENGTH
except (UnicodeError, AssertionError):
# there are other restrictions on message content,
# but let the device enforce and report those
self.handler.show_error('Only short (%d max) ASCII messages can be signed.'
% MSG_SIGNING_MAX_LENGTH)
return b''
path = self.get_derivation_prefix() + ("/%d/%d" % sequence)
try:
cl = self.get_client()
try:
self.handler.show_message("Signing message (using %s)..." % path)
cl.sign_message_start(path, msg)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = cl.sign_message_poll()
if resp is not None:
break
finally:
self.handler.finished()
assert len(resp) == 2
addr, raw_sig = resp
# already encoded in Bitcoin fashion, binary.
assert 40 < len(raw_sig) <= 65
return raw_sig
except (CCUserRefused, CCBusyError) as exc:
self.handler.show_error(str(exc))
except CCProtoError as exc:
self.logger.exception('Error signing message')
self.handler.show_error('{}\n\n{}'.format(
_('Error signing message') + ':', str(exc)))
except Exception as e:
self.give_error(e, True)
# give empty bytes for error cases; it seems to clear the old signature box
return b''
@wrap_busy
def sign_transaction(self, tx, password):
# Upload PSBT for signing.
# - we can also work offline (without paired device present)
if tx.is_complete():
return
client = self.get_client()
assert client.dev.master_fingerprint == self.get_xfp_int()
raw_psbt = tx.serialize_as_bytes()
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction_start(raw_psbt)
while 1:
# How to kill some time, without locking UI?
time.sleep(0.250)
resp = client.sign_transaction_poll()
if resp is not None:
break
rlen, rsha = resp
# download the resulting txn.
raw_resp = client.download_file(rlen, rsha)
finally:
self.handler.finished()
except (CCUserRefused, CCBusyError) as exc:
self.logger.info(f'Did not sign: {exc}')
self.handler.show_error(str(exc))
return
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
return
tx2 = PartialTransaction.from_raw_psbt(raw_resp)
# apply partial signatures back into txn
tx.combine_with_other_psbt(tx2)
# caller's logic looks at tx now and if it's sufficiently signed,
# will send it if that's the user's intent.
@staticmethod
def _encode_txin_type(txin_type):
# Map from Electrum code names to our code numbers.
return {'standard': AF_CLASSIC, 'p2pkh': AF_CLASSIC,
'p2sh': AF_P2SH,
'p2wpkh-p2sh': AF_P2WPKH_P2SH,
'p2wpkh': AF_P2WPKH,
'p2wsh-p2sh': AF_P2WSH_P2SH,
'p2wsh': AF_P2WSH,
}[txin_type]
@wrap_busy
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}\n\n{}'.format(
_('Error showing address') + ':', str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
@wrap_busy
def show_p2sh_address(self, M, script, xfp_paths, txin_type):
client = self.get_client()
addr_fmt = self._encode_txin_type(txin_type)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_p2sh_address(M, xfp_paths, script, addr_fmt=addr_fmt)
# we could double check address here
finally:
self.handler.finished()
except CCProtoError as exc:
self.logger.exception('Error showing address')
self.handler.show_error('{}.\n{}\n\n{}'.format(
_('Error showing address'),
_('Make sure you have imported the correct wallet description '
'file on the device for this multisig wallet.'),
str(exc)))
except BaseException as exc:
self.logger.exception('')
self.handler.show_error(exc)
class ColdcardPlugin(HW_PluginBase):
keystore_class = Coldcard_KeyStore
minimum_library = (0, 7, 7)
DEVICE_IDS = [
(COINKITE_VID, CKCC_PID),
(COINKITE_VID, CKCC_SIMULATED_PID)
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
self.device_manager().register_enumerate_func(self.detect_simulator)
def get_library_version(self):
import ckcc
try:
version = ckcc.__version__
except AttributeError:
version = 'unknown'
if requirements_ok:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def detect_simulator(self):
# if there is a simulator running on this machine,
# return details about it so it's offered as a pairing choice
fn = CKCC_SIMULATOR_PATH
if os.path.exists(fn):
return [Device(path=fn,
interface_number=-1,
id_=fn,
product_key=(COINKITE_VID, CKCC_SIMULATED_PID),
usage_page=0,
transport_ui_string='simulator')]
return []
@runs_in_hwd_thread
def create_client(self, device, handler):
if handler:
self.handler = handler
# We are given a HID device, or at least some details about it.
# Not sure why we aren't just given a HID library handle, but
# the 'path' is unambiguous, so we'll use that.
try:
rv = CKCCClient(self, handler, device.path,
is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID))
return rv
except Exception as e:
self.logger.exception('late failure connecting to device?')
return None
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
# this seems to be part of the pairing process only, not during normal ops?
# base_wizard:on_hw_derivation
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.ping_check()
xpub = client.get_xpub(derivation, xtype)
return xpub
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['CKCCClient']:
# Acquire a connection to the hardware device (via USB)
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
if client is not None:
client.ping_check()
return client
@staticmethod
def export_ms_wallet(wallet: Multisig_Wallet, fp, name):
# Build the text file Coldcard needs to understand the multisig wallet
# it is participating in. All involved Coldcards can share same file.
assert isinstance(wallet, Multisig_Wallet)
print('# Exported from Electrum', file=fp)
print(f'Name: {name:.20s}', file=fp)
print(f'Policy: {wallet.m} of {wallet.n}', file=fp)
print(f'Format: {wallet.txin_type.upper()}', file=fp)
xpubs = []
for xpub, ks in zip(wallet.get_master_public_keys(), wallet.get_keystores()): # type: str, KeyStoreWithMPK
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
fp_hex = fp_bytes.hex().upper()
der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full)
xpubs.append((fp_hex, xpub, der_prefix_str))
# Before v3.2.1 derivation didn't matter too much to the Coldcard, since it
# could use key path data from PSBT or USB request as needed. However,
# derivation data is now required.
print('', file=fp)
assert len(xpubs) == wallet.n
for xfp, xpub, der_prefix in xpubs:
print(f'Derivation: {der_prefix}', file=fp)
print(f'{xfp}: {xpub}\n', file=fp)
def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore' = None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
# Standard_Wallet => not multisig, must be bip32
if type(wallet) is Standard_Wallet:
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type)
elif type(wallet) is Multisig_Wallet:
assert isinstance(wallet, Multisig_Wallet) # only here for type-hints in IDE
# More involved for P2SH/P2WSH addresses: need M, and all public keys, and their
# derivation paths. Must construct script, and track fingerprints+paths for
# all those keys
pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address)
pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)])
xfp_paths = []
for pubkey_hex in pubkey_hexes:
pubkey = bytes.fromhex(pubkey_hex)
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False)
xfp_int = xfp_int_from_xfp_bytes(fp_bytes)
xfp_paths.append([xfp_int] + list(der_full))
script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes))
keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type)
else:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
def xfp_int_from_xfp_bytes(fp_bytes: bytes) -> int:
return int.from_bytes(fp_bytes, byteorder="little", signed=False)
def xfp2str(xfp: int) -> str:
# Standardized way to show an xpub's fingerprint... it's a 4-byte string
# and not really an integer. Used to show as '0x%08x' but that's wrong endian.
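# e.g. xfp2str(0x0F056943) == '4369050f' (an arbitrary example fingerprint: hex of the little-endian packing)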
return struct.pack('<I', xfp).hex().lower()
# EOF
|
|
# MIT License
#
# Copyright (c) 2020-2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import random
import warnings
from typing import List, Optional, Text, Tuple
import matplotlib.pyplot as plt
import numpy as np
from torchmetrics import AUROC
from typing_extensions import Literal
from pyannote.audio.core.io import Audio, AudioFile
from pyannote.audio.core.task import Problem
from pyannote.audio.utils.random import create_rng_for_worker
from pyannote.core import Annotation, Segment, SlidingWindow, SlidingWindowFeature
class SegmentationTaskMixin:
"""Methods common to most segmentation tasks"""
def setup(self, stage: Optional[str] = None):
# ==================================================================
# PREPARE TRAINING DATA
# ==================================================================
self._train = []
self._train_metadata = dict()
for f in self.protocol.train():
file = dict()
for key, value in f.items():
# keep track of unique labels in self._train_metadata["annotation"]
if key == "annotation":
for label in value.labels():
self._train_metadata.setdefault("annotation", set()).add(label)
# pass "audio" entry as it is
elif key == "audio":
pass
# remove segments shorter than chunks from "annotated" entry
elif key == "annotated":
value = [
segment for segment in value if segment.duration > self.duration
]
file["_annotated_duration"] = sum(
segment.duration for segment in value
)
# keep track of unique text-like entries (incl. "uri" and "database")
# and pass them as they are
elif isinstance(value, Text):
self._train_metadata.setdefault(key, set()).add(value)
# pass score-like entries as they are
elif isinstance(value, SlidingWindowFeature):
pass
else:
msg = (
f"Protocol '{self.protocol.name}' defines a '{key}' entry of type {type(value)} "
f"which we do not know how to handle."
)
warnings.warn(msg)
file[key] = value
self._train.append(file)
self._train_metadata = {
key: sorted(values) for key, values in self._train_metadata.items()
}
# ==================================================================
# PREPARE VALIDATION DATA
# ==================================================================
if not self.has_validation:
return
self._validation = []
for f in self.protocol.development():
for segment in f["annotated"]:
if segment.duration < self.duration:
continue
num_chunks = round(segment.duration // self.duration)
for c in range(num_chunks):
start_time = segment.start + c * self.duration
chunk = Segment(start_time, start_time + self.duration)
self._validation.append((f, chunk))
random.shuffle(self._validation)
def setup_validation_metric(self):
"""Setup default validation metric
Use macro-average of area under the ROC curve
"""
if self.specifications.problem in [
Problem.BINARY_CLASSIFICATION,
Problem.MULTI_LABEL_CLASSIFICATION,
]:
num_classes = 1
else:
num_classes = len(self.specifications.classes)
return AUROC(num_classes, pos_label=1, average="macro", compute_on_step=False)
def prepare_y(self, one_hot_y: np.ndarray) -> np.ndarray:
raise NotImplementedError(
f"{self.__class__.__name__} must implement the `prepare_y` method."
)
@property
def chunk_labels(self) -> Optional[List[Text]]:
"""Ordered list of labels
Override this method to make `prepare_chunk` use a specific
ordered list of labels when extracting frame-wise labels.
See `prepare_chunk` source code for details.
"""
return None
def prepare_chunk(
self,
file: AudioFile,
chunk: Segment,
duration: float = None,
stage: Literal["train", "val"] = "train",
) -> Tuple[np.ndarray, np.ndarray, List[Text]]:
"""Extract audio chunk and corresponding frame-wise labels
Parameters
----------
file : AudioFile
Audio file.
chunk : Segment
Audio chunk.
duration : float, optional
Fix chunk duration to avoid rounding errors. Defaults to self.duration
stage : {"train", "val"}
"train" for training step, "val" for validation step
Returns
-------
sample : dict
Dictionary with the following keys:
X : np.ndarray
Audio chunk as (num_samples, num_channels) array.
y : np.ndarray
Frame-wise labels as (num_frames, num_labels) array.
...
"""
sample = dict()
# ==================================================================
# X = "audio" crop
# ==================================================================
sample["X"], _ = self.model.audio.crop(
file,
chunk,
duration=self.duration if duration is None else duration,
)
# ==================================================================
# y = "annotation" crop (with corresponding "labels")
# ==================================================================
# use model introspection to predict how many frames it will output
num_samples = sample["X"].shape[1]
num_frames, _ = self.model.introspection(num_samples)
# crop "annotation" and keep track of corresponding list of labels if needed
annotation: Annotation = file["annotation"].crop(chunk)
labels = annotation.labels() if self.chunk_labels is None else self.chunk_labels
y = np.zeros((num_frames, len(labels)), dtype=np.int8)
frames = SlidingWindow(
start=chunk.start,
duration=self.duration / num_frames,
step=self.duration / num_frames,
)
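# each output frame spans duration / num_frames seconds; frames.crop(..., return_ranges=True)
# below maps every labelled segment onto a (start_frame, stop_frame) index range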
for label in annotation.labels():
try:
k = labels.index(label)
except ValueError:
warnings.warn(
f"File {file['uri']} contains unexpected label '{label}'."
)
continue
segments = annotation.label_timeline(label)
for start, stop in frames.crop(segments, mode="center", return_ranges=True):
y[start:stop, k] += 1
# handle corner case when the same label is active more than once
sample["y"] = np.minimum(y, 1, out=y)
sample["labels"] = labels
# ==================================================================
# additional metadata
# ==================================================================
for key, value in file.items():
# those keys were already dealt with
if key in ["audio", "annotation", "annotated"]:
pass
# replace text-like entries by their integer index
elif isinstance(value, Text):
try:
sample[key] = self._train_metadata[key].index(value)
except ValueError as e:
if stage == "val":
sample[key] = -1
else:
raise e
# crop score-like entries
elif isinstance(value, SlidingWindowFeature):
sample[key] = value.crop(chunk, fixed=duration, mode="center")
return sample
def train__iter__helper(self, rng: random.Random, **domain_filter):
"""Iterate over training samples with optional domain filtering
Parameters
----------
rng : random.Random
Random number generator
domain_filter : dict, optional
When provided (as {domain_key: domain_value} dict), filter training files so that
only files such as file[domain_key] == domain_value are used for generating chunks.
Yields
------
chunk : dict
Training chunks.
"""
train = self._train
try:
domain_key, domain_value = domain_filter.popitem()
except KeyError:
domain_key = None
if domain_key is not None:
train = [f for f in train if f[domain_key] == domain_value]
while True:
# select one file at random (with probability proportional to its annotated duration)
file, *_ = rng.choices(
train,
weights=[f["_annotated_duration"] for f in train],
k=1,
)
# select one annotated region at random (with probability proportional to its duration)
segment, *_ = rng.choices(
file["annotated"],
weights=[s.duration for s in file["annotated"]],
k=1,
)
# select one chunk at random (with uniform distribution)
start_time = rng.uniform(segment.start, segment.end - self.duration)
chunk = Segment(start_time, start_time + self.duration)
yield self.prepare_chunk(file, chunk, duration=self.duration, stage="train")
def train__iter__(self):
"""Iterate over training samples
Yields
------
dict:
X: (time, channel)
Audio chunks.
y: (frame, )
Frame-level targets. Note that frame < time.
`frame` is inferred automagically from the
example model output.
...
"""
# create worker-specific random number generator
rng = create_rng_for_worker(self.model.current_epoch)
balance = getattr(self, "balance", None)
overlap = getattr(self, "overlap", dict())
overlap_probability = overlap.get("probability", 0.0)
if overlap_probability > 0:
overlap_snr_min = overlap.get("snr_min", 0.0)
overlap_snr_max = overlap.get("snr_max", 0.0)
if balance is None:
chunks = self.train__iter__helper(rng)
else:
chunks_by_domain = {
domain: self.train__iter__helper(rng, **{balance: domain})
for domain in self._train_metadata[balance]
}
while True:
if balance is not None:
domain = rng.choice(self._train_metadata[balance])
chunks = chunks_by_domain[domain]
# generate random chunk
sample = next(chunks)
if rng.random() > overlap_probability:
try:
sample["y"] = self.prepare_y(sample["y"])
except ValueError:
# if a ValueError is raised by prepare_y, skip this sample.
# see pyannote.audio.tasks.segmentation.Segmentation.prepare_y
# to understand why this might happen.
continue
_ = sample.pop("labels")
yield sample
continue
# generate another random chunk
other_sample = next(chunks)
# sum both chunks with random SNR
random_snr = (
overlap_snr_max - overlap_snr_min
) * rng.random() + overlap_snr_min
alpha = np.exp(-np.log(10) * random_snr / 20)
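# alpha == 10 ** (-random_snr / 20): the sampled SNR in dB turned into an amplitude scale for the second chunk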
combined_X = Audio.power_normalize(
sample["X"]
) + alpha * Audio.power_normalize(other_sample["X"])
# combine labels
y, labels = sample["y"], sample.pop("labels")
other_y, other_labels = other_sample["y"], other_sample.pop("labels")
y_mapping = {label: i for i, label in enumerate(labels)}
num_combined_labels = len(y_mapping)
for label in other_labels:
if label not in y_mapping:
y_mapping[label] = num_combined_labels
num_combined_labels += 1
# combined_labels = [
# label
# for label, _ in sorted(y_mapping.items(), key=lambda item: item[1])
# ]
# combine targets
combined_y = np.zeros_like(y, shape=(len(y), num_combined_labels))
for i, label in enumerate(labels):
combined_y[:, y_mapping[label]] += y[:, i]
for i, label in enumerate(other_labels):
combined_y[:, y_mapping[label]] += other_y[:, i]
# handle corner case when the same label is active at the same time in both chunks
combined_y = np.minimum(combined_y, 1, out=combined_y)
try:
combined_y = self.prepare_y(combined_y)
except ValueError:
# if a ValueError is raised by prepare_y, skip this sample.
# see pyannote.audio.tasks.segmentation.Segmentation.prepare_y
# to understand why this might happen.
continue
combined_sample = {
"X": combined_X,
"y": combined_y,
}
for key, value in sample.items():
# those keys were already dealt with
if key in ["X", "y"]:
pass
# text-like entries have been replaced by their integer index in prepare_chunk.
# we (somewhat arbitrarily) combine i and j into i + j x (num_values + 1) to avoid
# any conflict with pure i or pure j samples
elif isinstance(value, int):
combined_sample[key] = sample[key] + other_sample[key] * (
len(self._train_metadata[key]) + 1
)
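# e.g. with num_values == 3, indices i == 2 and j == 1 combine to 2 + 1 * (3 + 1) == 6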
# score-like entries have been chunked into numpy array in prepare_chunk
# we (somewhat arbitrarily) average them using the same alpha as for X
elif isinstance(value, np.ndarray):
combined_sample[key] = (sample[key] + alpha * other_sample[key]) / (
1 + alpha
)
yield combined_sample
def train__len__(self):
# Number of training samples in one epoch
duration = sum(file["_annotated_duration"] for file in self._train)
return max(self.batch_size, math.ceil(duration / self.duration))
def val__getitem__(self, idx):
f, chunk = self._validation[idx]
sample = self.prepare_chunk(f, chunk, duration=self.duration, stage="val")
sample["y"] = self.prepare_y(sample["y"])
_ = sample.pop("labels")
return sample
def val__len__(self):
return len(self._validation)
def validation_postprocess(self, y, y_pred):
return y_pred
def validation_step(self, batch, batch_idx: int):
"""Compute validation area under the ROC curve
Parameters
----------
batch : dict of torch.Tensor
Current batch.
batch_idx: int
Batch index.
"""
X, y = batch["X"], batch["y"]
# X = (batch_size, num_channels, num_samples)
# y = (batch_size, num_frames, num_classes) or (batch_size, num_frames)
y_pred = self.model(X)
_, num_frames, _ = y_pred.shape
# y_pred = (batch_size, num_frames, num_classes)
# postprocess
y_pred = self.validation_postprocess(y, y_pred)
# - remove warm-up frames
# - downsample remaining frames
warm_up_left = round(self.warm_up[0] / self.duration * num_frames)
warm_up_right = round(self.warm_up[1] / self.duration * num_frames)
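# warm_up is expressed in seconds; convert it to a frame count at the model's output resolution before trimming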
preds = y_pred[:, warm_up_left : num_frames - warm_up_right : 10]
target = y[:, warm_up_left : num_frames - warm_up_right : 10]
# torchmetrics tries to be smart about the type of machine learning problem
# pyannote.audio is more explicit so we have to reshape target and preds for
# torchmetrics to be happy... more details can be found here:
# https://torchmetrics.readthedocs.io/en/latest/references/modules.html#input-types
if self.specifications.problem == Problem.BINARY_CLASSIFICATION:
# target: shape (batch_size, num_frames), type binary
# preds: shape (batch_size, num_frames, 1), type float
# torchmetrics expects:
# target: shape (N,), type binary
# preds: shape (N,), type float
self.model.validation_metric(preds.reshape(-1), target.reshape(-1))
elif self.specifications.problem == Problem.MULTI_LABEL_CLASSIFICATION:
# target: shape (batch_size, num_frames, num_classes), type binary
# preds: shape (batch_size, num_frames, num_classes), type float
# torchmetrics expects
# target: shape (N, ), type binary
# preds: shape (N, ), type float
self.model.validation_metric(preds.reshape(-1), target.reshape(-1))
elif self.specifications.problem == Problem.MONO_LABEL_CLASSIFICATION:
# target: shape (batch_size, num_frames, num_classes), type binary
# preds: shape (batch_size, num_frames, num_classes), type float
# torchmetrics expects:
# target: shape (N, ), type int
# preds: shape (N, num_classes), type float
# TODO: implement when pyannote.audio gets its first mono-label segmentation task
raise NotImplementedError()
self.model.log(
f"{self.ACRONYM}@val_auroc",
self.model.validation_metric,
on_step=False,
on_epoch=True,
prog_bar=True,
logger=True,
)
# log first batch visualization every 2^n epochs.
if (
self.model.current_epoch == 0
or math.log2(self.model.current_epoch) % 1 > 0
or batch_idx > 0
):
return
# visualize first 9 validation samples of first batch in Tensorboard
X = X.cpu().numpy()
y = y.float().cpu().numpy()
y_pred = y_pred.cpu().numpy()
# prepare 3 x 3 grid (or smaller if batch size is smaller)
num_samples = min(self.batch_size, 9)
nrows = math.ceil(math.sqrt(num_samples))
ncols = math.ceil(num_samples / nrows)
fig, axes = plt.subplots(
nrows=3 * nrows, ncols=ncols, figsize=(15, 10), squeeze=False
)
# reshape target so that there is one line per class when plotting it
y[y == 0] = np.NaN
if len(y.shape) == 2:
y = y[:, :, np.newaxis]
y *= np.arange(y.shape[2])
# plot each sample
for sample_idx in range(num_samples):
# find where in the grid it should be plotted
row_idx = sample_idx // nrows
col_idx = sample_idx % ncols
# plot waveform
ax_wav = axes[row_idx * 3 + 0, col_idx]
sample_X = np.mean(X[sample_idx], axis=0)
ax_wav.plot(sample_X)
ax_wav.set_xlim(0, len(sample_X))
ax_wav.get_xaxis().set_visible(False)
ax_wav.get_yaxis().set_visible(False)
# plot target
ax_ref = axes[row_idx * 3 + 1, col_idx]
sample_y = y[sample_idx]
ax_ref.plot(sample_y)
ax_ref.set_xlim(0, len(sample_y))
ax_ref.set_ylim(-1, sample_y.shape[1])
ax_ref.get_xaxis().set_visible(False)
ax_ref.get_yaxis().set_visible(False)
# plot prediction
ax_hyp = axes[row_idx * 3 + 2, col_idx]
sample_y_pred = y_pred[sample_idx]
ax_hyp.axvspan(0, warm_up_left, color="k", alpha=0.5, lw=0)
ax_hyp.axvspan(
num_frames - warm_up_right, num_frames, color="k", alpha=0.5, lw=0
)
ax_hyp.plot(sample_y_pred)
ax_hyp.set_ylim(-0.1, 1.1)
ax_hyp.set_xlim(0, len(sample_y))
ax_hyp.get_xaxis().set_visible(False)
plt.tight_layout()
self.model.logger.experiment.add_figure(
f"{self.ACRONYM}@val_samples", fig, self.model.current_epoch
)
plt.close(fig)
@property
def val_monitor(self):
"""Maximize validation area under ROC curve"""
return f"{self.ACRONYM}@val_auroc", "max"
|
|
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
from functools import reduce
prod = lambda sequence,start=1: reduce(lambda x, y: x*y, sequence, start)
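# e.g. prod([2, 3, 4]) == 24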
def skip_op(typecode, op):
if typecode in 'FDG':
if op in (MPI.MAX, MPI.MIN):
return True
return False
def maxvalue(a):
try:
typecode = a.typecode
except AttributeError:
typecode = a.dtype.char
if typecode == 'f':
return 1e30
elif typecode == 'd':
return 1e300
else:
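# integer types: use a deliberately conservative cap (2**28 - 1 for a 4-byte int)
# so the reduction results checked below stay representable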
return 2 ** (a.itemsize * 7) - 1
class BaseTestCCOBuf(object):
COMM = MPI.COMM_NULL
def testBarrier(self):
self.COMM.Barrier()
def testBcast(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
if rank == root:
buf = array(root, typecode, root)
else:
buf = array( -1, typecode, root)
self.COMM.Bcast(buf.as_mpi(), root=root)
for value in buf:
self.assertEqual(value, root)
def testGather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, root+1)
if rank == root:
rbuf = array(-1, typecode, (size,root+1))
else:
rbuf = array([], typecode)
self.COMM.Gather(sbuf.as_mpi(), rbuf.as_mpi(),
root=root)
if rank == root:
for value in rbuf.flat:
self.assertEqual(value, root)
def testScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
rbuf = array(-1, typecode, size)
if rank == root:
sbuf = array(root, typecode, (size, size))
else:
sbuf = array([], typecode)
self.COMM.Scatter(sbuf.as_mpi(), rbuf.as_mpi(),
root=root)
for value in rbuf:
self.assertEqual(value, root)
def testAllgather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, root+1)
rbuf = array( -1, typecode, (size, root+1))
self.COMM.Allgather(sbuf.as_mpi(), rbuf.as_mpi())
for value in rbuf.flat:
self.assertEqual(value, root)
def testAlltoall(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
sbuf = array(root, typecode, (size, root+1))
rbuf = array( -1, typecode, (size, root+1))
self.COMM.Alltoall(sbuf.as_mpi(), rbuf.as_mpi_c(root+1))
for value in rbuf.flat:
self.assertEqual(value, root)
def assertAlmostEqual(self, first, second):
num = complex(second-first)
den = complex(second+first)/2 or 1.0
if (abs(num/den) > 1e-2):
raise self.failureException('%r != %r' % (first, second))
def testReduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
for root in range(size):
sbuf = array(range(size), typecode)
rbuf = array(-1, typecode, size)
self.COMM.Reduce(sbuf.as_mpi(),
rbuf.as_mpi(),
op, root)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if rank != root:
self.assertEqual(value, -1)
continue
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testAllreduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
self.COMM.Allreduce(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testReduceScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
rcnt = list(range(1,size+1))
sbuf = array([rank+1]*sum(rcnt), typecode)
rbuf = array(-1, typecode, rank+1)
self.COMM.Reduce_scatter(sbuf.as_mpi(),
rbuf.as_mpi(),
None, op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
redval = sum(range(size))+size
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size)
elif op == MPI.MIN:
self.assertEqual(value, 1)
rbuf = array(-1, typecode, rank+1)
self.COMM.Reduce_scatter(sbuf.as_mpi(),
rbuf.as_mpi(),
rcnt, op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
redval = sum(range(size))+size
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size)
elif op == MPI.MIN:
self.assertEqual(value, 1)
def testReduceScatterBlock(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
for rcnt in range(1, size+1):
sbuf = array([rank]*rcnt*size, typecode)
rbuf = array(-1, typecode, rcnt)
if op == MPI.PROD:
sbuf = array([rank+1]*rcnt*size, typecode)
self.COMM.Reduce_scatter_block(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
max_val = maxvalue(rbuf)
v_sum = (size*(size-1))/2
v_prod = 1
for i in range(1,size+1): v_prod *= i
v_max = size-1
v_min = 0
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if v_sum <= max_val:
self.assertAlmostEqual(value, v_sum)
elif op == MPI.PROD:
if v_prod <= max_val:
self.assertAlmostEqual(value, v_prod)
elif op == MPI.MAX:
self.assertEqual(value, v_max)
elif op == MPI.MIN:
self.assertEqual(value, v_min)
def testScan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
self.COMM.Scan(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * (rank + 1)) < max_val:
self.assertAlmostEqual(value, i * (rank + 1))
elif op == MPI.PROD:
if (i ** (rank + 1)) < max_val:
self.assertAlmostEqual(value, i ** (rank + 1))
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testExscan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
sbuf = array(range(size), typecode)
rbuf = array(0, typecode, size)
try:
self.COMM.Exscan(sbuf.as_mpi(),
rbuf.as_mpi(),
op)
except NotImplementedError:
self.skipTest('mpi-exscan')
if rank == 1:
for i, value in enumerate(rbuf):
self.assertEqual(value, i)
elif rank > 1:
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if op == MPI.SUM:
if (i * rank) < max_val:
self.assertAlmostEqual(value, i * rank)
elif op == MPI.PROD:
if (i ** rank) < max_val:
self.assertAlmostEqual(value, i ** rank)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testBcastTypeIndexed(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
datatype = array.TypeMap[typecode]
for root in range(size):
#
if rank == root:
buf = array(range(10), typecode).as_raw()
else:
buf = array(-1, typecode, 10).as_raw()
indices = list(range(0, len(buf), 2))
newtype = datatype.Create_indexed_block(1, indices)
newtype.Commit()
newbuf = (buf, 1, newtype)
self.COMM.Bcast(newbuf, root=root)
newtype.Free()
if rank != root:
for i, value in enumerate(buf):
if (i % 2):
self.assertEqual(value, -1)
else:
self.assertEqual(value, i)
#
if rank == root:
buf = array(range(10), typecode).as_raw()
else:
buf = array(-1, typecode, 10).as_raw()
indices = list(range(1, len(buf), 2))
newtype = datatype.Create_indexed_block(1, indices)
newtype.Commit()
newbuf = (buf, 1, newtype)
self.COMM.Bcast(newbuf, root)
newtype.Free()
if rank != root:
for i, value in enumerate(buf):
if not (i % 2):
self.assertEqual(value, -1)
else:
self.assertEqual(value, i)
class BaseTestCCOBufInplace(object):
def testGather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
count = root+3
if rank == root:
sbuf = MPI.IN_PLACE
buf = array(-1, typecode, (size, count))
#buf.flat[(rank*count):((rank+1)*count)] = \
# array(root, typecode, count)
s, e = rank*count, (rank+1)*count
for i in range(s, e): buf.flat[i] = root
rbuf = buf.as_mpi()
else:
buf = array(root, typecode, count)
sbuf = buf.as_mpi()
rbuf = None
self.COMM.Gather(sbuf, rbuf, root=root)
for value in buf.flat:
self.assertEqual(value, root)
if rank == root:
sbuf = None
self.COMM.Gather(sbuf, rbuf, root=root)
for value in buf.flat:
self.assertEqual(value, root)
@unittest.skipMPI('msmpi(==10.0.0)')
def testScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for root in range(size):
for count in range(1, 10):
if rank == root:
buf = array(root, typecode, (size, count))
sbuf = buf.as_mpi()
rbuf = MPI.IN_PLACE
else:
buf = array(-1, typecode, count)
sbuf = None
rbuf = buf.as_mpi()
self.COMM.Scatter(sbuf, rbuf, root=root)
for value in buf.flat:
self.assertEqual(value, root)
if rank == root:
rbuf = None
self.COMM.Scatter(sbuf, rbuf, root=root)
for value in buf.flat:
self.assertEqual(value, root)
def testAllgather(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for count in range(1, 10):
buf = array(-1, typecode, (size, count))
s, e = rank*count, (rank+1)*count
for i in range(s, e): buf.flat[i] = count
self.COMM.Allgather(MPI.IN_PLACE, buf.as_mpi())
for value in buf.flat:
self.assertEqual(value, count)
self.COMM.Allgather(None, buf.as_mpi())
for value in buf.flat:
self.assertEqual(value, count)
def assertAlmostEqual(self, first, second):
num = complex(second-first)
den = complex(second+first)/2 or 1.0
if (abs(num/den) > 1e-2):
raise self.failureException('%r != %r' % (first, second))
def testReduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
for root in range(size):
count = size
if rank == root:
buf = array(range(size), typecode)
sbuf = MPI.IN_PLACE
rbuf = buf.as_mpi()
else:
buf = array(range(size), typecode)
buf2 = array(range(size), typecode)
sbuf = buf.as_mpi()
rbuf = buf2.as_mpi()
self.COMM.Reduce(sbuf, rbuf, op, root)
if rank == root:
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testAllreduce(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
sbuf = MPI.IN_PLACE
rbuf = buf.as_mpi()
self.COMM.Allreduce(sbuf, rbuf, op)
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * size) < max_val:
self.assertAlmostEqual(value, i*size)
elif op == MPI.PROD:
if (i ** size) < max_val:
self.assertAlmostEqual(value, i**size)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testReduceScatterBlock(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
# one of the ranks would fail as of OpenMPI 4.1.1
if unittest.is_mpi_gpu('openmpi', array): continue
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
for rcnt in range(size):
if op == MPI.PROD:
rbuf = array([rank+1]*rcnt*size, typecode)
else:
rbuf = array([rank]*rcnt*size, typecode)
self.COMM.Reduce_scatter_block(MPI.IN_PLACE,
rbuf.as_mpi(),
op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if i >= rcnt:
if op == MPI.PROD:
self.assertEqual(value, rank+1)
else:
self.assertEqual(value, rank)
else:
if op == MPI.SUM:
redval = sum(range(size))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
@unittest.skipMPI('MVAPICH2')
def testReduceScatter(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.MAX, MPI.MIN, MPI.PROD):
if skip_op(typecode, op): continue
rcnt = list(range(1, size+1))
if op == MPI.PROD:
rbuf = array([rank+1]*sum(rcnt), typecode)
else:
rbuf = array([rank]*sum(rcnt), typecode)
self.COMM.Reduce_scatter(MPI.IN_PLACE,
rbuf.as_mpi(),
rcnt, op)
max_val = maxvalue(rbuf)
for i, value in enumerate(rbuf):
if i >= rcnt[rank]:
if op == MPI.PROD:
self.assertEqual(value, rank+1)
else:
self.assertEqual(value, rank)
else:
if op == MPI.SUM:
redval = sum(range(size))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.PROD:
redval = prod(range(1,size+1))
if redval < max_val:
self.assertAlmostEqual(value, redval)
elif op == MPI.MAX:
self.assertEqual(value, size-1)
elif op == MPI.MIN:
self.assertEqual(value, 0)
@unittest.skipMPI('openmpi(<=1.8.4)')
def testScan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
# --
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
self.COMM.Scan(MPI.IN_PLACE,
buf.as_mpi(),
op)
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * (rank + 1)) < max_val:
self.assertAlmostEqual(value, i * (rank + 1))
elif op == MPI.PROD:
if (i ** (rank + 1)) < max_val:
self.assertAlmostEqual(value, i ** (rank + 1))
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
@unittest.skipMPI('msmpi(<=4.2.0)')
@unittest.skipMPI('openmpi(<=1.8.4)')
def testExscan(self):
size = self.COMM.Get_size()
rank = self.COMM.Get_rank()
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
buf = array(range(size), typecode)
try:
self.COMM.Exscan(MPI.IN_PLACE,
buf.as_mpi(),
op)
except NotImplementedError:
self.skipTest('mpi-exscan')
if rank == 1:
for i, value in enumerate(buf):
self.assertEqual(value, i)
elif rank > 1:
max_val = maxvalue(buf)
for i, value in enumerate(buf):
if op == MPI.SUM:
if (i * rank) < max_val:
self.assertAlmostEqual(value, i * rank)
elif op == MPI.PROD:
if (i ** rank) < max_val:
self.assertAlmostEqual(value, i ** rank)
elif op == MPI.MAX:
self.assertEqual(value, i)
elif op == MPI.MIN:
self.assertEqual(value, i)
class TestReduceLocal(unittest.TestCase):
def testReduceLocal(self):
for array, typecode in arrayimpl.subTest(self):
# segfault as of OpenMPI 4.1.1
if unittest.is_mpi_gpu('openmpi', array): continue
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
if skip_op(typecode, op): continue
size = 5
sbuf = array(range(1,size+1), typecode)
rbuf = array(range(0,size+0), typecode)
try:
op.Reduce_local(sbuf.as_mpi(), rbuf.as_mpi())
except NotImplementedError:
self.skipTest('mpi-op-reduce_local')
for i, value in enumerate(rbuf):
self.assertEqual(sbuf[i], i+1)
if op == MPI.SUM:
self.assertAlmostEqual(value, i+(i+1))
elif op == MPI.PROD:
self.assertAlmostEqual(value, i*(i+1))
elif op == MPI.MAX:
self.assertEqual(value, i+1)
elif op == MPI.MIN:
self.assertEqual(value, i)
def testReduceLocalBadCount(self):
for array, typecode in arrayimpl.subTest(self):
for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN):
sbuf = array(range(3), typecode)
rbuf = array(range(3), typecode)
def f(): op.Reduce_local(sbuf.as_mpi_c(2),
rbuf.as_mpi_c(3))
self.assertRaises(ValueError, f)
def f(): op.Reduce_local([sbuf.as_raw(), 1, MPI.INT],
[rbuf.as_raw(), 1, MPI.SHORT])
self.assertRaises(ValueError, f)
class TestCCOBufSelf(BaseTestCCOBuf, unittest.TestCase):
COMM = MPI.COMM_SELF
class TestCCOBufWorld(BaseTestCCOBuf, unittest.TestCase):
COMM = MPI.COMM_WORLD
@unittest.skipMPI('MPICH1')
@unittest.skipMPI('LAM/MPI')
@unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place')
class TestCCOBufInplaceSelf(BaseTestCCOBufInplace, unittest.TestCase):
COMM = MPI.COMM_SELF
@unittest.skipMPI('MPICH1')
@unittest.skipMPI('LAM/MPI')
@unittest.skipIf(MPI.IN_PLACE == MPI.BOTTOM, 'mpi-in-place')
class TestCCOBufInplaceWorld(BaseTestCCOBufInplace, unittest.TestCase):
COMM = MPI.COMM_WORLD
@unittest.skipMPI('IntelMPI', MPI.COMM_WORLD.Get_size() > 1)
def testReduceScatter(self):
super(TestCCOBufInplaceWorld, self).testReduceScatter()
class TestCCOBufSelfDup(TestCCOBufSelf):
def setUp(self):
self.COMM = MPI.COMM_SELF.Dup()
def tearDown(self):
self.COMM.Free()
@unittest.skipMPI('openmpi(<1.4.0)', MPI.Query_thread() > MPI.THREAD_SINGLE)
class TestCCOBufWorldDup(TestCCOBufWorld):
def setUp(self):
self.COMM = MPI.COMM_WORLD.Dup()
def tearDown(self):
self.COMM.Free()
if __name__ == '__main__':
unittest.main()
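
# Note: these are collective tests, so they are normally launched under an MPI process
# manager with several ranks (assuming mpi4py is built against a working MPI), e.g.:
#   mpiexec -n 4 python <this test file>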
|
|
"""
The pyro wire protocol message.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import hashlib
import hmac
import struct
import logging
import sys
from Pyro4 import errors, constants
import Pyro4.constants
__all__ = ["Message"]
log = logging.getLogger("Pyro4.message")
MSG_CONNECT = 1
MSG_CONNECTOK = 2
MSG_CONNECTFAIL = 3
MSG_INVOKE = 4
MSG_RESULT = 5
MSG_PING = 6
FLAGS_EXCEPTION = 1 << 0
FLAGS_COMPRESSED = 1 << 1
FLAGS_ONEWAY = 1 << 2
FLAGS_BATCH = 1 << 3
SERIALIZER_SERPENT = 1
SERIALIZER_JSON = 2
SERIALIZER_MARSHAL = 3
SERIALIZER_PICKLE = 4
class Message(object):
"""
    Pyro wire protocol message.
    Wire messages consist of a fixed size header, an optional set of annotation chunks,
and then the payload data. This class doesn't deal with the payload data:
(de)serialization and handling of that data is done elsewhere.
    Annotation chunks are parsed but not otherwise interpreted, except the 'HMAC' chunk:
    that one is created and validated because it is used as a message digest.
The header format is::
4 id ('PYRO')
2 protocol version
2 message type
2 message flags
2 sequence number
4 data length
2 data serialization format (serializer id)
2 annotations length (total of all chunks, 0 if no annotation chunks present)
2 (reserved)
2 checksum
After the header, zero or more annotation chunks may follow, of the format::
4 id (ASCII)
2 chunk length
x annotation chunk databytes
After that, the actual payload data bytes follow.
    The sequence number is used to check whether response messages correspond to the
    actual request message. This prevents the situation where Pyro would return the
    response data of another remote call (which would otherwise not result in an error!).
    This could happen, for instance, if the socket data stream gets out of sync, perhaps due to
    some form of signal that interrupts I/O.
The header checksum is a simple sum of the header fields to make reasonably sure
that we are dealing with an actual correct PYRO protocol header and not some random
data that happens to start with the 'PYRO' protocol identifier.
An 'HMAC' annotation chunk contains the hmac digest of the message data bytes and
all of the annotation chunk data bytes (except those of the HMAC chunk itself).
"""
__slots__ = ["type", "flags", "seq", "data", "data_size", "serializer_id", "annotations", "annotations_size", "hmac_key"]
header_format = '!4sHHHHiHHHH'
header_size = struct.calcsize(header_format)
checksum_magic = 0x34E9
def __init__(self, msgType, databytes, serializer_id, flags, seq, annotations=None, hmac_key=None):
self.type = msgType
self.flags = flags
self.seq = seq
self.data = databytes
self.data_size = len(self.data)
self.serializer_id = serializer_id
self.annotations = annotations or {}
self.hmac_key = hmac_key
if self.hmac_key:
self.annotations["HMAC"] = self.hmac()
self.annotations_size = sum([6 + len(v) for v in self.annotations.values()])
if 0 < Pyro4.config.MAX_MESSAGE_SIZE < (self.data_size + self.annotations_size):
raise errors.ProtocolError("max message size exceeded (%d where max=%d)" % (self.data_size + self.annotations_size, Pyro4.config.MAX_MESSAGE_SIZE))
def __repr__(self):
return "<%s.%s at %x, type=%d flags=%d seq=%d datasize=%d #ann=%d>" % (self.__module__, self.__class__.__name__, id(self), self.type, self.flags, self.seq, self.data_size, len(self.annotations))
def to_bytes(self):
"""creates a byte stream containing the header followed by annotations (if any) followed by the data"""
return self.__header_bytes() + self.__annotations_bytes() + self.data
def __header_bytes(self):
checksum = (self.type + constants.PROTOCOL_VERSION + self.data_size + self.annotations_size + self.serializer_id + self.flags + self.seq + self.checksum_magic) & 0xffff
return struct.pack(self.header_format, b"PYRO", constants.PROTOCOL_VERSION, self.type, self.flags, self.seq, self.data_size, self.serializer_id, self.annotations_size, 0, checksum)
def __annotations_bytes(self):
if self.annotations:
a = []
for k, v in self.annotations.items():
if len(k) != 4:
raise errors.ProtocolError("annotation key must be of length 4")
if sys.version_info >= (3, 0):
k = k.encode("ASCII")
a.append(struct.pack("!4sH", k, len(v)))
a.append(v)
if sys.platform == "cli":
return "".join(a)
return b"".join(a)
return b""
# Note: this 'chunked' way of sending is not used because it triggers Nagle's algorithm
# on some systems (linux). This causes massive delays, unless you change the socket option
# TCP_NODELAY to disable the algorithm. What also works, is sending all the message bytes
# in one go: connection.send(message.to_bytes())
# def send(self, connection):
# """send the message as bytes over the connection"""
# connection.send(self.__header_bytes())
# if self.annotations:
# connection.send(self.__annotations_bytes())
# connection.send(self.data)
@classmethod
def from_header(cls, headerData):
"""Parses a message header. Does not yet process the annotations chunks and message data."""
if not headerData or len(headerData) != cls.header_size:
raise errors.ProtocolError("header data size mismatch")
tag, ver, msg_type, flags, seq, data_size, serializer_id, annotations_size, _, checksum = struct.unpack(cls.header_format, headerData)
if tag != b"PYRO" or ver != constants.PROTOCOL_VERSION:
raise errors.ProtocolError("invalid data or unsupported protocol version")
if checksum != (msg_type + ver + data_size + annotations_size + flags + serializer_id + seq + cls.checksum_magic) & 0xffff:
raise errors.ProtocolError("header checksum mismatch")
msg = Message(msg_type, b"", serializer_id, flags, seq)
msg.data_size = data_size
msg.annotations_size = annotations_size
return msg
@classmethod
def recv(cls, connection, requiredMsgTypes=None, hmac_key=None):
"""
Receives a pyro message from a given connection.
Accepts the given message types (None=any, or pass a sequence).
Also reads annotation chunks and the actual payload data.
Validates a HMAC chunk if present.
"""
msg = cls.from_header(connection.recv(cls.header_size))
msg.hmac_key = hmac_key
if 0 < Pyro4.config.MAX_MESSAGE_SIZE < (msg.data_size + msg.annotations_size):
errorMsg = "max message size exceeded (%d where max=%d)" % (msg.data_size + msg.annotations_size, Pyro4.config.MAX_MESSAGE_SIZE)
log.error("connection " + str(connection) + ": " + errorMsg)
connection.close() # close the socket because at this point we can't return the correct sequence number for returning an error message
raise errors.ProtocolError(errorMsg)
if requiredMsgTypes and msg.type not in requiredMsgTypes:
err = "invalid msg type %d received" % msg.type
log.error(err)
raise errors.ProtocolError(err)
if msg.annotations_size:
# read annotation chunks
annotations_data = connection.recv(msg.annotations_size)
msg.annotations = {}
i = 0
while i < msg.annotations_size:
anno, length = struct.unpack("!4sH", annotations_data[i:i + 6])
if sys.version_info >= (3, 0):
anno = anno.decode("ASCII")
msg.annotations[anno] = annotations_data[i + 6:i + 6 + length]
i += 6 + length
# read data
msg.data = connection.recv(msg.data_size)
if "HMAC" in msg.annotations and hmac_key:
if msg.annotations["HMAC"] != msg.hmac():
raise errors.SecurityError("message hmac mismatch")
elif ("HMAC" in msg.annotations) != bool(hmac_key):
# Not allowed: message contains hmac but hmac_key is not set, or vice versa.
err = "hmac key config not symmetric"
log.warning(err)
raise errors.SecurityError(err)
return msg
def hmac(self):
"""returns the hmac of the data and the annotation chunk values (except HMAC chunk itself)"""
mac = hmac.new(self.hmac_key, self.data, digestmod=hashlib.sha1)
for k, v in self.annotations.items():
if k != "HMAC":
mac.update(v)
return mac.digest()
@staticmethod
def ping(pyroConnection, hmac_key=None):
"""Convenience method to send a 'ping' message and wait for the 'pong' response"""
ping = Message(MSG_PING, b"ping", 42, 0, 0, hmac_key=hmac_key)
pyroConnection.send(ping.to_bytes())
Message.recv(pyroConnection, [MSG_PING])
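
# Minimal usage sketch (illustrative only): build a message and parse its header back.
# The serializer id (42) and the payload below are arbitrary, as in Message.ping() above.
if __name__ == "__main__":
    msg = Message(MSG_INVOKE, b"hello", 42, 0, 1)
    header = msg.to_bytes()[:Message.header_size]
    parsed = Message.from_header(header)
    assert parsed.type == MSG_INVOKE and parsed.data_size == len(b"hello")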
|
|
#
# Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""SCIM Schemeas"""
import sys
SERVICE_PROVIDER_CONFIGS = {
"schemas": "",
'documentationUrl': 'https://github.com/telefonicaid/fiware-keystone-scim/blob/master/README.md',
'patch': {
'supported': True
},
'information': {
"totalUsers": "",
"totalUserOrganizations": "",
"totalCloudOrganizations": "",
"totalResources": "",
"trialUsers": "",
"basicUsers": "",
"communityUsers": ""
},
'bulk': {
'supported': False,
'maxOperations': 0,
'maxPayloadSize': 0
},
'filter': {
'supported': True,
'maxResults': sys.maxint
},
'changePassword': {
'supported': True
},
'sort': {
'supported': False
},
'etag': {
'supported': False
},
'xmlDataFormat': {
'supported': False
},
'authenticationSchemes': [
{
            'name': 'Keystone Authentication',
'description': 'Authentication using Keystone',
'specUrl': 'http://specs.openstack.org/openstack/keystone-specs',
'documentationUrl': 'http://keystone.openstack.org/',
'type': 'keystonetoken',
'primary': True
}
]
}
SCHEMAS = [
{
"id": "urn:scim:schemas:core:1.0:User",
"name": "User",
"description": "Keystone User",
"schema": "urn:scim:schemas:core:1.0",
"endpoint": "/Users",
"attributes": [
{
"name": "id",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the SCIM resource as "
"defined by the Service Provider. Each representation of "
"the resource MUST include a non-empty id value. This "
"identifier MUST be unique across the Service Provider's "
"entire set of resources. It MUST be a stable, "
"non-reassignable identifier that does not change when the "
"same resource is returned in subsequent requests. The "
"value of the id attribute is always issued by the Service "
"Provider and MUST never be specified by the Service "
"Consumer. REQUIRED.",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "userName",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the User typically used "
"by the user to directly authenticate to the service "
"provider. Each User MUST include a non-empty userName "
"value. This identifier MUST be unique across the Service "
"Consumer's entire set of Users. REQUIRED",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "password",
"type": "string",
"multiValued": False,
"description": "The User's clear text password. This "
"attribute is intended to be used as a means to specify an "
"initial password when creating a new User or to reset an "
"existing User's password.",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": False,
"caseExact": True
},
{
"name": "emails",
"type": "complex",
"multiValued": True,
"multiValuedAttributeChildName": "email",
"description": "E-mail addresses for the user. The value "
"SHOULD be canonicalized by the Service Provider, e.g. "
"bjensen@example.com instead of bjensen@EXAMPLE.COM. "
"Canonical Type values of work, home, and other.",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": False,
"required": False,
"caseExact": True,
"subAttributes": [
{
"name": "value",
"type": "string",
"multiValued": False,
"description": "E-mail addresses for the user. The "
"value SHOULD be canonicalized by the Service "
"Provider, e.g. bjensen@example.com instead of "
"bjensen@EXAMPLE.COM. Canonical Type values of "
"work, home, and other.",
"readOnly": False,
"required": False,
"caseExact": True
}
]
},
{
"name": "active",
"type": "boolean",
"multiValued": False,
"description": "A Boolean value indicating the User's"
"administrative status.",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": False,
"required": False,
"caseExact": False
},
{
"name": "domain_id",
"type": "string",
"multiValued": False,
"description": "User's domain",
"schema": "urn:scim:schemas:extension:keystone:1.0",
"readOnly": False,
"required": True,
"caseExact": True
}
]
},
{
"id": "urn:scim:schemas:core:1.0:Group",
"name": "Group",
"description": "Keystone Group",
"schema": "urn:scim:schemas:core:1.0",
"endpoint": "/Groups",
"attributes": [
{
"name": "id",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the SCIM resource",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "displayName",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the Group",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "domain_id",
"type": "string",
"multiValued": False,
"description": "Role's domain",
"schema": "urn:scim:schemas:extension:keystone:1.0",
"readOnly": False,
"required": True,
"caseExact": True
}
]
},
{
"id": "urn:scim:schemas:extension:keystone:1.0:Role",
"name": "Role",
"description": "Keystone Role SCIM (domain aware)",
"schema": "urn:scim:schemas:extension:keystone:1.0",
"endpoint": "/Roles",
"attributes": [
{
"name": "id",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the SCIM Resource.",
"schema": "urn:scim:schemas:core:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "name",
"type": "string",
"multiValued": False,
"description": "Role name",
"schema": "urn:scim:schemas:extension:keystone:1.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "domain_id",
"type": "string",
"multiValued": False,
"description": "Role's domain",
"schema": "urn:scim:schemas:extension:keystone:1.0",
"readOnly": False,
"required": True,
"caseExact": True
}
]
},
{
"id": "urn:scim:schemas:core:2.0:Organization",
"name": "Organization",
"description": "Keystone Organization",
"schema": "urn:scim:schemas:core:2.0",
"endpoint": "/Organization",
"attributes": [
{
"name": "id",
"type": "string",
"multiValued": False,
"description": "Unique identifier for the SCIM resource",
"schema": "urn:scim:schemas:core:2.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "name",
"type": "string",
"multiValued": False,
"description": "Organization name",
"schema": "urn:scim:schemas:core:2.0",
"readOnly": True,
"required": True,
"caseExact": True
},
{
"name": "description",
"type": "string",
"multiValued": False,
"description": "Organization description",
"schema": "urn:scim:schemas:core:2.0",
"readOnly": False,
"required": True,
"caseExact": True
},
{
"name": "active",
"type": "boolean",
"multiValued": False,
"description": "A Boolean value indicating the User's"
"administrative status.",
"schema": "urn:scim:schemas:core:2.0",
"readOnly": False,
"required": False,
"caseExact": False
},
{
"name": "domain_id",
"type": "string",
"multiValued": False,
"description": "Organization's domain",
"schema": "urn:scim:schemas:extension:keystone:2.0",
"readOnly": False,
"required": True,
"caseExact": True
},
{
"name": "is_default",
"type": "boolean",
"multiValued": False,
"description": "A Boolean value indicating the Organization's"
"default status",
"schema": "urn:scim:schemas:core:2.0",
"readOnly": False,
"required": False,
"caseExact": False
},
]
},
]
|
|
# Copyright (c) 2011-2012 Vit Suchomel and Jan Pomikalek
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Character encoding detection library."""
import os
import sys
import struct
ENCODE_REPLACEMENT_CHARACTER = '\x00'
MODEL_VERSION = '1.3'
def list_models():
"Returns a list of inbuilt models."
models = []
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
for filename in os.listdir(models_dir):
if filename.endswith('.edm'):
models.append(filename.rsplit('.', 1)[0])
return sorted(models)
def get_model_path(model_id):
"""
Returns the full path to the model with given id or None if no model with
the ID exists.
"""
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
filepath = os.path.join(models_dir, model_id + '.edm')
if os.path.isfile(filepath):
return filepath
else:
return None
def scalar_product(vec1, vec2):
"Returns a scalar product of the two vectors."
result = 0
for key in vec1.keys():
if vec2.has_key(key):
result += vec1[key] * vec2[key]
return result
def replace_by_zero(error):
"""
Replaces unknown bytes while encoding/decoding.
The function has to be registered using codecs.register_error.
"""
if isinstance(error, UnicodeEncodeError):
return (unicode(ENCODE_REPLACEMENT_CHARACTER), error.end)
elif isinstance(error, UnicodeDecodeError):
return (u'\ufffd', error.end)
raise error
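
# The handler above is only active once it has been registered with the codecs
# machinery; a minimal sketch (the handler name used here is illustrative):
#   import codecs
#   codecs.register_error('chared_replace_by_zero', replace_by_zero)
#   '\xff\xfe some bytes'.decode('utf-8', 'chared_replace_by_zero')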
class EncodingDetector(object):
VECTOR_TUPLE_LENGTH = 3
def __init__(self, version=MODEL_VERSION, vectors={}, enc_order=()):
self._version = version
self._vectors = vectors
self._encodings_order = enc_order
def get_version(self):
return self._version
def save(self, path):
"""
Saves the model to the specified path.
File format:
        general row: <version><TAB><tuple length><TAB><encodings count>
for each encoding:
info row: <name><TAB><order><TAB><vector length>
vector row: <key><packed value>...
"""
with open(path, 'wb') as fp:
#basic attributes
fp.write('%s\t%d\t%d\n' %
(self._version, self.VECTOR_TUPLE_LENGTH, len(self._vectors)))
#vectors
for enc, vector in self._vectors.iteritems():
#encoding name, encoding order
vect_len = len(vector)
enc_order = self.get_encoding_order(enc)
fp.write('%s\t%d\t%d\n' % (enc, enc_order, vect_len))
#vector keys & values
for k, v in vector.iteritems():
fp.write('%s%s' % (k, struct.pack('=I', v)))
fp.write('\n')
@classmethod
def load(cls, path):
"""
Loads the model from the specified path.
Returns a new instance of EncodingDetector.
"""
version = ''
vectors = {}
enc_order = {}
with open(path, 'rb') as fp:
#basic attributes
version, vect_tuple_length, enc_count = fp.readline().split('\t')
if MODEL_VERSION != version:
sys.stderr.write('WARNING: Potentially incompatible model versions!\n')
sys.stderr.write('\t%s: %s\n\tthis module: %s\n' % (path, version, MODEL_VERSION))
vect_tuple_length = int(vect_tuple_length)
#vectors
for i in range(int(enc_count)):
#encoding name, encoding order
enc, order, vect_len = fp.readline().split('\t')
enc_order[int(order)] = enc
#vector keys & values
vectors[enc] = {}
for j in range(int(vect_len)):
key = fp.read(vect_tuple_length)
vectors[enc][key] = struct.unpack('=I', fp.read(4))[0]
fp.read(1)
return EncodingDetector(version, vectors, enc_order.values())
def vectorize(self, string):
"""
        Transforms the input string into a frequency vector of n-grams of
contained characters.
Omits vector keys containing the encoding replacement character.
"""
str_len = len(string)
if self.VECTOR_TUPLE_LENGTH > str_len:
return {}
vector = {}
for i in range(str_len - self.VECTOR_TUPLE_LENGTH + 1):
key = string[i:i + self.VECTOR_TUPLE_LENGTH]
if ENCODE_REPLACEMENT_CHARACTER not in key:
vector[key] = vector.get(key, 0) + 1
return vector
def train(self, string, encoding):
"Trains the detector. The input must be a string and its encoding."
self._vectors[encoding] = self.vectorize(string)
def set_encodings_order(self, encodings):
"""
Defines the order (importance / frequency of use) of the encodings
the classifier has been trained on. The input must be a list or a
tuple of encodings. The first is the most important and the last is
the least important.
"""
if not isinstance(encodings, (tuple, list)):
raise TypeError
self._encodings_order = tuple(encodings)
def get_encoding_order(self, encoding):
"""
Returns the order of the encoding or sys.maxint if no order is
defined for it.
"""
if encoding in self._encodings_order:
return self._encodings_order.index(encoding)
return sys.maxint
def classify(self, string):
"""
Returns the predicted character encoding(s) for the input string as
a list. The list may contain more than one element if there are
multiple equally likely candidates. In this case, the candidates are
returned in the order of importance (see set_encodings_order). Empty
list may be returned if there are no valid candidates.
"""
input_vector = self.vectorize(string)
classification = []
for clas, vector in self._vectors.iteritems():
score = scalar_product(input_vector, vector)
clas_info = {'clas': clas, 'score': score,
'order': self.get_encoding_order(clas)}
classification.append(clas_info)
if not classification:
return []
#order result classes
# 1.) by vector similarity score (higher score is better)
# 2.) by the encoding order (lower index is better)
classification.sort(lambda x, y:
cmp(y['score'], x['score']) or cmp(x['order'], y['order']))
#return a list of the top classes
# the top classes have the same score and order as the first one
first = classification[0]
result = []
for clas in classification:
if first['score'] == clas['score']:
result.append(clas['clas'])
return result
def reduce_vectors(self):
"""
Remove the common parts of all vectors. Should be called after all
training data has been loaded. Provided the training has been performed
on the same data for all encodings, reducing vectors increases both
efficiency and accuracy of the classification.
"""
#get frequencies of (key, value) pairs
key_value_count = {}
for vect in self._vectors.values():
for key, value in vect.iteritems():
key_value_count[(key, value)] = key_value_count.get(
(key, value), 0) + 1
#remove common parts of vectors (the (key, value) pairs with the
#frequency equal to the number of vectors)
encodings_count = len(self._vectors)
for (key, value), count in key_value_count.iteritems():
if count >= encodings_count:
for vect in self._vectors.values():
if vect.has_key(key):
del vect[key]
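
if __name__ == '__main__':
    # Tiny illustrative round trip (the module uses Python 2 idioms): train one model
    # per encoding on the same text, drop the common n-grams, then classify raw bytes.
    detector = EncodingDetector()
    text = u'\u017dlu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy'
    detector.train(text.encode('utf_8'), 'utf_8')
    detector.train(text.encode('iso8859_2'), 'iso8859_2')
    detector.reduce_vectors()
    print detector.classify(text.encode('utf_8'))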
|
|
import math
import numpy
import statsmodels.api as sm
lowess= sm.nonparametric.lowess
import esutil
from galpy.util import bovy_coords, bovy_plot
from scipy.interpolate import interp1d,UnivariateSpline
import apogee.tools.read as apread
import isodist
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
import apogee.tools.read as apread
from apogee.select import apogeeSelect
from astropy.io import fits
from astropy.table import Table, join
_R0= 8. # kpc
_Z0= 0.025 # kpc
_FEHTAG= 'FE_H'
_AFETAG= 'AVG_ALPHAFE'
_AFELABEL= r'$[\left([\mathrm{O+Mg+Si+S+Ca}]/5\right)/\mathrm{Fe}]$'
catpath = '../catalogues/'
selectFile= '../savs/selfunc-nospdata.sav'
if os.path.exists(selectFile):
with open(selectFile,'rb') as savefile:
apo= pickle.load(savefile)
def get_rgbsample(loggcut = [1.8, 3.0],
teffcut = [0, 10000],
add_ages = False,
agetype='Martig',
apply_corrections=False,
distance_correction=False,
verbose = False):
"""
    Get a clean sample of DR12 APOGEE data with Michael Hayden's distances
---
INPUT:
None
OUTPUT:
Clean rgb sample with added distances
HISTORY:
Started - Mackereth 02/06/16
"""
    #get the allStar catalogue using apogee python (exclude all bad flags etc)
allStar = apread.allStar(rmcommissioning=True,
exclude_star_bad=True,
exclude_star_warn=True,
main=True,
ak=True,
adddist=False)
#cut to a 'sensible' logg range (giants which are not too high on the RGB)
allStar = allStar[(allStar['LOGG'] > loggcut[0])&(allStar['LOGG'] < loggcut[1])&
(allStar['TEFF'] > teffcut[0])&(allStar['TEFF'] < teffcut[1])]
if verbose == True:
print str(len(allStar))+' Stars before Distance catalogue join (after Log(g) cut)'
#load the distance VAC
dists = fits.open(catpath+'DR12_DIST_R-GC.fits')[1].data
#convert to astropy Table
allStar_tab = Table(data=allStar)
dists_tab = Table(data=dists)
#join table
tab = join(allStar_tab, dists_tab, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
data = tab.as_array()
data= esutil.numpy_util.add_fields(data,[('M_J', float),
('M_H', float),
('M_K', float),
('MH50_DIST', float),
('MH50_GALR', float),
('MH50_GALZ', float),
('MH50_GALPHI', float),
('AVG_ALPHAFE', float)])
data['MH50_DIST'] = (10**((data['HAYDEN_DISTMOD_50']+5)/5))/1e3
if distance_correction == True:
data['MH50_DIST'] *= 1.05
XYZ= bovy_coords.lbd_to_XYZ(data['GLON'],
data['GLAT'],
data['MH50_DIST'],
degree=True)
RphiZ= bovy_coords.XYZ_to_galcencyl(XYZ[:,0],
XYZ[:,1],
XYZ[:,2],
Xsun=8.,Zsun=0.025)
data['MH50_GALR']= RphiZ[:,0]
data['MH50_GALPHI']= RphiZ[:,1]
data['MH50_GALZ']= RphiZ[:,2]
data['M_J'] = data['J0']-data['HAYDEN_DISTMOD_50']
data['M_H'] = data['H0']-data['HAYDEN_DISTMOD_50']
data['M_K'] = data['K0']-data['HAYDEN_DISTMOD_50']
data['AVG_ALPHAFE'] = avg_alphafe_dr12(data)
data[_FEHTAG] += -0.1
#remove locations not in the apogee selection function (FIND OUT WHATS UP HERE)
data = data[np.in1d(data['LOCATION_ID'], apo.list_fields())]
# Remove locations outside of the Pan-STARRS dust map
# In the Southern hemisphere
data= data[data['LOCATION_ID'] != 4266] #240,-18
data= data[data['LOCATION_ID'] != 4331] #5.5,-14.2
data= data[data['LOCATION_ID'] != 4381] #5.2,-12.2
data= data[data['LOCATION_ID'] != 4332] #1,-4
data= data[data['LOCATION_ID'] != 4329] #0,-5
data= data[data['LOCATION_ID'] != 4351] #0,-2
data= data[data['LOCATION_ID'] != 4353] #358,0
data= data[data['LOCATION_ID'] != 4385] #358.6,1.4
    # Close to the ecliptic pole where there's no data (is it the ecliptic pole?)
data= data[data['LOCATION_ID'] != 4528] #120,30
data= data[data['LOCATION_ID'] != 4217] #123,22.4
#remove any non-finite magnitudes
data = data[np.isfinite(data['M_H'])]
if verbose == True:
print str(len(data))+' Stars with distance measures (and in good fields...)'
if add_ages == True:
if agetype == 'Martig':
ages = fits.open(catpath+'DR12_martigages_vizier.fits')[1].data
idtag = '2MASS_ID'
if agetype == 'Cannon':
ages = fits.open(catpath+'RGB_Cannon_Ages.fits')[1].data
ages = esutil.numpy_util.add_fields(ages,[('Age', float)])
ages['Age'] = np.exp(ages['ln_age'])
idtag = 'ID'
ages_tab = Table(data=ages)
ages_tab.rename_column(idtag, 'APOGEE_ID')
tab = join( ages_tab,data, keys='APOGEE_ID', uniq_col_name='{col_name}{table_name}', table_names=['','2'])
allStar_full = tab.as_array()
data = allStar_full
if verbose == True:
print str(len(data))+' Stars with ages'
if apply_corrections == True:
#martig1 = np.genfromtxt(catpath+'martig2016_table1.txt', dtype=None, names=True, skip_header=2)
martig1 = fits.open(catpath+'martig_table1.fits')
fit = lowess(np.log10(martig1['Age_out']),np.log10(martig1['Age_in']))
xs = np.linspace(-0.3,1.2,100)
xsinterpolate = interp1d(xs,xs)
fys = fit[:,0]-xsinterpolate(fit[:,1])
interp = UnivariateSpline(fit[:,1], fys)
corr_age = np.log10(data['Age'])+(interp(np.log10(data['Age'])))
corr_age = 10**corr_age
data['Age'] = corr_age
return data
def avg_alphafe_dr12(data):
weight_o= np.ones(len(data))
weight_s= np.ones(len(data))
weight_si= np.ones(len(data))
weight_ca= np.ones(len(data))
weight_mg= np.ones(len(data))
weight_o[data['O_H'] == -9999.0]= 0.
weight_s[data['S_H'] == -9999.0]= 0.
weight_si[data['SI_H'] == -9999.0]= 0.
weight_ca[data['CA_H'] == -9999.0]= 0.
weight_mg[data['MG_H'] == -9999.0]= 0.
return (weight_o*data['O_H']+weight_s*data['S_H']
+weight_si*data['SI_H']+weight_ca*data['CA_H']
+weight_mg*data['MG_H'])/(weight_o+weight_s
+weight_si+weight_ca
+weight_mg)\
-data['FE_H']-0.05
# Define the low-alpha, low-iron sample
def _lowlow_lowfeh(afe):
# The low metallicity edge
return -0.6
def _lowlow_highfeh(afe):
# The high metallicity edge
return -0.25
def _lowlow_lowafe(feh):
# The low alpha edge (-0.15,-0.075) to (-0.5,0)
return (0--0.075)/(-0.5--0.15)*(feh+0.1--0.15)-0.075
def _lowlow_highafe(feh):
# The high alpha edge (-0.15,0.075) to (-0.5,0.15)
return (0.15-0.075)/(-0.5--0.15)*(feh+0.1--0.15)+0.075
def get_lowlowsample():
"""
NAME:
get_lowlowsample
PURPOSE:
get the RGB sample at low alpha, low iron
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _lowlow_lowfeh(0.)
highfeh= _lowlow_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _lowlow_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _lowlow_highafe(data[_FEHTAG]))
return data[indx]
# Define the high-alpha sample
def _highalpha_lowfeh(afe):
# The low metallicity edge
return -0.8
def _highalpha_highfeh(afe):
# The high metallicity edge
return -0.2
def _highalpha_lowafe(feh):
# The low alpha edge (-0.125,0.115) to (-0.6,0.215)
return (0.2-0.1)/(-0.6--0.125)*(feh+0.1--0.125)+0.115
def _highalpha_highafe(feh):
# The high alpha edge (-0.125,0.19) to (-0.6,0.29)
return (0.275-0.175)/(-0.6--0.125)*(feh+0.1--0.125)+0.19
def get_highalphasample():
"""
NAME:
get_highalphasample
PURPOSE:
get the RC sample at high alpha
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-24 - Started - Bovy (IAS)
"""
# Get the full sample first
data= get_rcsample()
# Now cut it
lowfeh= _highalpha_lowfeh(0.)
highfeh= _highalpha_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highalpha_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highalpha_highafe(data[_FEHTAG]))
return data[indx]
# Define the solar sample
def _solar_lowfeh(afe):
# The low metallicity edge
return -0.2
def _solar_highfeh(afe):
# The high metallicity edge
return 0.
def _solar_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _solar_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return (0.1-0.05)/(-0.15-0.1)*(feh+0.1-0.1)+0.05
def get_solarsample():
"""
NAME:
get_solarsample
PURPOSE:
get the RC sample at solar abundances
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _solar_lowfeh(0.)
highfeh= _solar_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _solar_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _solar_highafe(data[_FEHTAG]))
return data[indx]
# Define the high metallicity sample
def _highfeh_lowfeh(afe):
# The low metallicity edge
return 0.05
def _highfeh_highfeh(afe):
# The high metallicity edge
return 0.3
def _highfeh_lowafe(feh):
# The low alpha edge (0.1,-0.075) to (-0.1,-0.075)
return -0.075
def _highfeh_highafe(feh):
# The high alpha edge (-0.15,0.1) to (0.1,0.05)
return 0.05
def get_highfehsample():
"""
NAME:
get_highfehsample
PURPOSE:
get the RC sample at high [Fe/H]
INPUT:
None so far
OUTPUT:
sample
HISTORY:
2015-03-18 - Started - Bovy (IAS)
2016-07-02 - modification - Mackereth (LJMU)
"""
# Get the full sample first
data= get_rgbsample()
# Now cut it
lowfeh= _highfeh_lowfeh(0.)
highfeh= _highfeh_highfeh(0.)
indx= (data[_FEHTAG] > lowfeh)*(data[_FEHTAG] <= highfeh)\
*(data[_AFETAG] > _highfeh_lowafe(data[_FEHTAG]))\
*(data[_AFETAG] <= _highfeh_highafe(data[_FEHTAG]))
return data[indx]
def alphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (0.12/-0.6)*fehs[fehs < 0]+0.03
edge[fehs >= 0] = 0.03
return edge
def highalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.13/0.6)*fehs[fehs < 0]+0.04
edge[fehs >= 0] = 0.04
return edge
def lowalphaedge(fehs):
edge = np.zeros(len(fehs))
edge[fehs < 0] = (-0.10/0.6)*fehs[fehs < 0]+0.01
edge[fehs >= 0] = 0.01
return edge
def get_fehage(agebin = [0.,1.], fehbin = [0.,0.2], afebin = 'low', dr=None, agetype='Martig', apply_corrections=False, distance_correction=False):
data = get_rgbsample(add_ages=True, agetype=agetype, apply_corrections=apply_corrections, distance_correction=distance_correction)
if afebin == 'low':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] < alphaedge(data[_FEHTAG]))
if afebin == 'high':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG]))
if afebin == 'highclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= highalphaedge(data[_FEHTAG]))
if afebin == 'lowclean':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= lowalphaedge(data[_FEHTAG]))
if afebin == 'lownew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] <= alphaedge(data[_FEHTAG])-0.025)
if afebin == 'highnew':
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])\
*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])*(data[_AFETAG] >= alphaedge(data[_FEHTAG])+0.025)
if afebin == None:
indx = (data['Age'] >= agebin[0])*(data['Age'] < agebin[1])*(data[_FEHTAG] >= fehbin[0])*(data[_FEHTAG] < fehbin[1])
return data[indx]
def highalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.225)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)\
+(data[_FEHTAG] >= 0.125)
return lowess(data[_AFETAG][indx],data[_FEHTAG][indx],frac=0.6)
def lowalphalocus():
data= get_rgbsample()
indx= (data[_AFETAG] > (0.2-0.1)/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] < -0.025)\
+(data[_AFETAG] > 0.05/(-0.6--0.125)*(data[_FEHTAG]+0.1--0.125)+0.11)\
*(data[_FEHTAG] >= -0.225)*(data[_FEHTAG] < 0.125)
return lowess(data[_AFETAG][True-indx],data[_FEHTAG][True-indx],frac=0.6)
class MAPs:
"""Class that pixelizes the data sample in [Fe/H] and [a/Fe]"""
def __init__(self,data=None,dfeh=0.1,dafe=0.05,fehmin=-0.75,fehmax=0.35,
afemin=-0.075,afemax=0.275):
"""
NAME:
__init__
PURPOSE:
initialize the MAPs
INPUT:
data= (None) the data sample; if None, whole stat. RC sample
dfeh, dafe= pixel size
fehmin, fehmax, afemin, afemax= minimum and maximum FeH and AFe
OUTPUT:
object with pixelized data
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
if data is None: data= get_rcsample()
self.data= data
self.dx= dfeh
self.dy= dafe
self.xmin= fehmin
self.xmax= fehmax
self.ymin= afemin
self.ymax= afemax
# edges in X and Y
self.xedges= numpy.arange(self.xmin,self.xmax+0.01,self.dx)
self.yedges= numpy.arange(self.ymin,self.ymax+0.01,self.dy)
# X and Y
self.x= data[_FEHTAG]
self.y= data[_AFETAG]
return None
def __call__(self,*args,**kwargs):
"""
NAME:
__call__
PURPOSE:
return the part of the sample in a (feh,afe) pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns data recarray in the bin that feh and afe are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return self.data[(self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])]
def map(self):
"""
NAME:
map
PURPOSE:
yield a map
INPUT:
(none)
OUTPUT:
iterates over the MAPs
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
for ii in range(nx):
for jj in range(ny):
yield self(gx[ii],gy[jj])
def callIndx(self,*args,**kwargs):
"""
NAME:
callIndx
PURPOSE:
return index of the part of the sample in an [Fe/H] and [a/Fe] pixel
INPUT:
[Fe/H]
[a/Fe]
OUTPUT:
returns index into data recarray in the bin that [Fe/H] and [a/Fe] are in
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#Find bin
xbin= int(math.floor((args[0]-self.xmin)/self.dx))
ybin= int(math.floor((args[1]-self.ymin)/self.dy))
#Return data
return (self.x > self.xedges[xbin])\
*(self.x <= self.xedges[xbin+1])\
*(self.y > self.yedges[ybin])\
*(self.y <= self.yedges[ybin+1])
def xindx(self,x):
"""
NAME:
xindx
PURPOSE:
return the index corresponding to a [Fe/H] value
INPUT:
[Fe/H]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((x-self.xmin)/self.dx))
def yindx(self,y):
"""
NAME:
yindx
PURPOSE:
return the index corresponding to a [a/Fe] value
INPUT:
[a/Fe]
OUTPUT:
index
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
return int(math.floor((y-self.ymin)/self.dy))
def plot(self,quant,func=numpy.median,minnstar=20.,submediany=False,
returnz=False,justcalc=False,
**kwargs):
"""
NAME:
plot
PURPOSE:
make a plot of a quantity as a function of X and Y
INPUT:
quant - the quantity (string that returns the quantity, like
'METALS') or a function of the data
func - function of quantity to plot
minnstar= minimum number of stars (20)
submeany= subtract the median y
justcalc= (False) if True, do not plot
bovy_plot.bovy_dens2d kwargs
OUTPUT:
plot to output device
HISTORY:
2015-04-06 - Written - Bovy (IAS)
"""
#First create 2D
nx= int((self.xmax-self.xmin)/self.dx)
ny= int((self.ymax-self.ymin)/self.dy)
gx= numpy.linspace(self.xmin+self.dx/2.,self.xmax-self.dx/2.,nx)
gy= numpy.linspace(self.ymin+self.dy/2.,self.ymax-self.dy/2.,ny)
z2d= numpy.empty((nx,ny))
if isinstance(quant,numpy.ndarray):
z2d= numpy.reshape(quant,(nx,ny))
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins= 0
for ii in range(z2d.shape[0]):
for jj in range(z2d.shape[1]):
tdata= self(gx[ii],gy[jj])
if len(tdata) < minnstar:
z2d[ii,jj]= numpy.nan
else:
nbins+= 1
if hasattr(quant, '__call__'):
z2d[ii,jj]= func(quant(tdata))
else:
z2d[ii,jj]= func(tdata[quant])
if submediany:
z2d[ii,:]-= \
numpy.median(z2d[ii,True-numpy.isnan(z2d[ii,:])])
if justcalc:
if returnz:
return z2d
else:
return None
#Now plot
xrange= kwargs.pop('xrange',[self.xmin,self.xmax])
yrange= kwargs.pop('yrange',[self.ymin,self.ymax])
if not kwargs.has_key('colorbar'):
kwargs['colorbar']= True
if not kwargs.has_key('shrink'):
kwargs['shrink']= 0.78
if not kwargs.has_key('vmin'):
kwargs['vmin']= numpy.nanmin(z2d)
if not kwargs.has_key('vmax'):
kwargs['vmax']= numpy.nanmax(z2d)
xlabel= r'$[\mathrm{Fe/H}]$'
ylabel= _AFELABEL
cmap= kwargs.pop('cmap','coolwarm')
out= bovy_plot.bovy_dens2d(z2d.T,origin='lower',cmap=cmap,
interpolation='nearest',
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange,
**kwargs)
if returnz:
return z2d
else:
return out
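
# Illustrative usage sketch (requires the local APOGEE catalogues and the selection
# function loaded at the top of this module; not runnable standalone):
#   sample = get_rgbsample(add_ages=True, verbose=True)
#   maps = MAPs(data=sample, dfeh=0.1, dafe=0.05)
#   pixel = maps(-0.3, 0.1)          # stars in the [Fe/H]/[a/Fe] pixel containing (-0.3, 0.1)
#   for pixel_data in maps.map():    # iterate over all pixels
#       print len(pixel_data)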
|
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import distributions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
}))
@testing.fix_random()
class TestKLDivergence(unittest.TestCase):
def check_kl(self, dist1, dist2):
kl = chainer.kl_divergence(dist1, dist2).data
if isinstance(kl, cuda.ndarray):
kl = kl.get()
sample = dist1.sample(300000)
mc_kl = dist1.log_prob(sample).data - dist2.log_prob(sample).data
if isinstance(mc_kl, cuda.ndarray):
mc_kl = mc_kl.get()
mc_kl = numpy.nanmean(mc_kl, axis=0)
testing.assert_allclose(kl, mc_kl, atol=1e-2, rtol=1e-2)
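    # Monte Carlo sanity check used above: with x_i ~ dist1,
    #   KL(dist1 || dist2) = E_{x ~ dist1}[log p1(x) - log p2(x)]
    #                      ~ (1/N) * sum_i (log p1(x_i) - log p2(x_i)),
    # which is what the nanmean over the 300000 samples computes.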
def encode_params(self, params, is_gpu=False):
if is_gpu:
params = {k: cuda.to_gpu(v) for k, v in params.items()}
if self.is_variable:
params = {k: chainer.Variable(v) for k, v in params.items()}
return params
def make_bernoulli_dist(self, is_gpu=False):
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
params = self.encode_params({"p": p}, is_gpu)
return distributions.Bernoulli(**params)
def make_beta_dist(self, is_gpu=False):
a = numpy.random.uniform(0.5, 10, self.shape).astype(numpy.float32)
b = numpy.random.uniform(0.5, 10, self.shape).astype(numpy.float32)
params = self.encode_params({"a": a, "b": b}, is_gpu)
return distributions.Beta(**params)
def make_categorical_dist(self, is_gpu=False):
p = numpy.random.normal(size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(p)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
params = self.encode_params({"p": p}, is_gpu)
return distributions.Categorical(**params)
def make_dirichlet_dist(self, is_gpu=False):
alpha = numpy.random.uniform(
0.5, 10, self.shape + (3,)).astype(numpy.float32)
params = self.encode_params({"alpha": alpha}, is_gpu)
return distributions.Dirichlet(**params)
def make_exponential_dist(self, is_gpu=False):
lam = numpy.exp(
numpy.random.uniform(0, 0.5, self.shape)).astype(numpy.float32)
params = self.encode_params({"lam": lam}, is_gpu)
return distributions.Exponential(**params)
def make_gamma_dist(self, is_gpu=False):
k = numpy.random.uniform(1, 5, self.shape).astype(numpy.float32)
theta = numpy.random.uniform(0, 2, self.shape).astype(numpy.float32)
params = self.encode_params({"k": k, "theta": theta}, is_gpu)
return distributions.Gamma(**params)
def make_gumbel_dist(self, is_gpu=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
scale = numpy.exp(
numpy.random.uniform(0, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({"loc": loc, "scale": scale}, is_gpu)
return distributions.Gumbel(**params)
def make_laplace_dist(self, is_gpu=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
scale = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({"loc": loc, "scale": scale}, is_gpu)
return distributions.Laplace(**params)
def make_log_normal_dist(self, is_gpu=False):
mu = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
sigma = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({"mu": mu, "sigma": sigma}, is_gpu)
return distributions.LogNormal(**params)
def make_normal_dist(self, is_gpu=False, use_log_scale=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
if use_log_scale:
log_scale = numpy.random.uniform(
-1, 1, self.shape).astype(numpy.float32)
params = self.encode_params(
{"loc": loc, "log_scale": log_scale}, is_gpu)
else:
scale = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({"loc": loc, "scale": scale}, is_gpu)
return distributions.Normal(**params)
def make_multivariatenormal_dist(self, is_gpu=False):
loc = numpy.random.uniform(
-1, 1, self.shape + (3,)).astype(numpy.float32)
cov = numpy.random.normal(size=(numpy.prod(self.shape),) + (3, 3))
cov = [cov_.dot(cov_.T) for cov_ in cov]
cov = numpy.vstack(cov).reshape(self.shape + (3, 3))
scale_tril = numpy.linalg.cholesky(cov).astype(numpy.float32)
params = self.encode_params(
{"loc": loc, "scale_tril": scale_tril}, is_gpu)
return distributions.MultivariateNormal(**params)
def make_one_hot_categorical_dist(self, is_gpu=False):
p = numpy.random.normal(size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(p)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
params = self.encode_params({"p": p}, is_gpu)
return distributions.OneHotCategorical(**params)
def make_pareto_dist(self, is_gpu=False):
scale = numpy.exp(numpy.random.uniform(
0.5, 1, self.shape)).astype(numpy.float32)
alpha = numpy.exp(numpy.random.uniform(
1, 2, self.shape)).astype(numpy.float32)
params = self.encode_params({"scale": scale, "alpha": alpha}, is_gpu)
return distributions.Pareto(**params)
def make_poisson_dist(self, is_gpu=False):
lam = numpy.random.uniform(5, 10, self.shape).astype(numpy.float32)
params = self.encode_params({"lam": lam}, is_gpu)
return distributions.Poisson(**params)
def make_uniform_dist(self, is_gpu=False, low=None, high=None,
loc=None, scale=None, use_loc_scale=False):
if use_loc_scale:
if loc is None:
loc = numpy.random.uniform(
-3, 0, self.shape).astype(numpy.float32)
if scale is None:
scale = numpy.random.uniform(
1, 5, self.shape).astype(numpy.float32)
params = self.encode_params({"loc": loc, "scale": scale}, is_gpu)
else:
if low is None:
low = numpy.random.uniform(
-3, 0, self.shape).astype(numpy.float32)
if high is None:
high = numpy.random.uniform(
low + 1, low + 6, self.shape).astype(numpy.float32)
params = self.encode_params({"low": low, "high": high}, is_gpu)
return distributions.Uniform(**params)
def test_bernoulli_bernoulli_cpu(self):
dist1 = self.make_bernoulli_dist()
dist2 = self.make_bernoulli_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_bernoulli_bernoulli_gpu(self):
dist1 = self.make_bernoulli_dist(True)
dist2 = self.make_bernoulli_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_beta_beta_cpu(self):
dist1 = self.make_beta_dist()
dist2 = self.make_beta_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_beta_beta_gpu(self):
dist1 = self.make_beta_dist(True)
dist2 = self.make_beta_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('numpy>=1.11')
def test_categorical_categorical_cpu(self):
dist1 = self.make_categorical_dist()
dist2 = self.make_categorical_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_categorical_categorical_gpu(self):
dist1 = self.make_categorical_dist(True)
dist2 = self.make_categorical_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_dirichlet_dirichlet_cpu(self):
dist1 = self.make_dirichlet_dist()
dist2 = self.make_dirichlet_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_dirichlet_dirichlet_gpu(self):
dist1 = self.make_dirichlet_dist(True)
dist2 = self.make_dirichlet_dist(True)
self.check_kl(dist1, dist2)
def test_exponential_exponential_cpu(self):
dist1 = self.make_exponential_dist()
dist2 = self.make_exponential_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_exponential_exponential_gpu(self):
dist1 = self.make_exponential_dist(True)
dist2 = self.make_exponential_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_gamma_gamma_cpu(self):
dist1 = self.make_gamma_dist()
dist2 = self.make_gamma_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_gamma_gamma_gpu(self):
dist1 = self.make_gamma_dist(True)
dist2 = self.make_gamma_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_gumbel_gumbel_cpu(self):
dist1 = self.make_gumbel_dist()
dist2 = self.make_gumbel_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_gumbel_gumbel_gpu(self):
dist1 = self.make_gumbel_dist(True)
dist2 = self.make_gumbel_dist(True)
self.check_kl(dist1, dist2)
def test_laplace_laplace_cpu(self):
dist1 = self.make_laplace_dist()
dist2 = self.make_laplace_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_laplace_laplace_gpu(self):
dist1 = self.make_laplace_dist(True)
dist2 = self.make_laplace_dist(True)
self.check_kl(dist1, dist2)
def test_log_normal_log_normal_cpu(self):
dist1 = self.make_log_normal_dist()
dist2 = self.make_log_normal_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_log_normal_log_normal_gpu(self):
dist1 = self.make_log_normal_dist(True)
dist2 = self.make_log_normal_dist(True)
self.check_kl(dist1, dist2)
def test_normal_normal_cpu(self):
for use_log_scale1 in [True, False]:
for use_log_scale2 in [True, False]:
dist1 = self.make_normal_dist(use_log_scale=use_log_scale1)
dist2 = self.make_normal_dist(use_log_scale=use_log_scale2)
self.check_kl(dist1, dist2)
@attr.gpu
def test_normal_normal_gpu(self):
for use_log_scale1 in [True, False]:
for use_log_scale2 in [True, False]:
dist1 = self.make_normal_dist(
True, use_log_scale=use_log_scale1)
dist2 = self.make_normal_dist(
True, use_log_scale=use_log_scale2)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_multivariatenormal_multivariatenormal_cpu(self):
dist1 = self.make_multivariatenormal_dist()
dist2 = self.make_multivariatenormal_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_multivariatenormal_multivariatenormal_gpu(self):
dist1 = self.make_multivariatenormal_dist(True)
dist2 = self.make_multivariatenormal_dist(True)
self.check_kl(dist1, dist2)
def test_one_hot_categorical_one_hot_categorical_cpu(self):
dist1 = self.make_one_hot_categorical_dist()
dist2 = self.make_one_hot_categorical_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_one_hot_categorical_one_hot_categorical_gpu(self):
dist1 = self.make_one_hot_categorical_dist(True)
dist2 = self.make_one_hot_categorical_dist(True)
self.check_kl(dist1, dist2)
def test_pareto_pareto_cpu(self):
dist1 = self.make_pareto_dist()
dist2 = self.make_pareto_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_pareto_pareto_gpu(self):
dist1 = self.make_pareto_dist(True)
dist2 = self.make_pareto_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_poisson_poisson_cpu(self):
dist1 = self.make_poisson_dist()
dist2 = self.make_poisson_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_poisson_poisson_gpu(self):
dist1 = self.make_poisson_dist(True)
dist2 = self.make_poisson_dist(True)
self.check_kl(dist1, dist2)
def test_uniform_uniform_cpu(self):
for use_loc_scale1 in [True, False]:
for use_loc_scale2 in [True, False]:
dist1 = self.make_uniform_dist(use_loc_scale=use_loc_scale1)
dist2 = self.make_uniform_dist(use_loc_scale=use_loc_scale2)
self.check_kl(dist1, dist2)
@attr.gpu
def test_uniform_uniform_gpu(self):
for use_loc_scale1 in [True, False]:
for use_loc_scale2 in [True, False]:
dist1 = self.make_uniform_dist(
True, use_loc_scale=use_loc_scale1)
dist2 = self.make_uniform_dist(
True, use_loc_scale=use_loc_scale2)
self.check_kl(dist1, dist2)
testing.run_module(__name__, __file__)
|
|
import unittest
from test import test_support, test_genericpath
import posixpath, os
from posixpath import realpath, abspath, dirname, basename
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
ABSTFN = abspath(test_support.TESTFN)
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class PosixPathTest(unittest.TestCase):
def setUp(self):
self.tearDown()
def tearDown(self):
for suffix in ["", "1", "2"]:
test_support.unlink(test_support.TESTFN + suffix)
safe_rmdir(test_support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path), ("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path), ("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path), ("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"), (filename + ext + "/", ""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
def test_islink(self):
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
os.remove(test_support.TESTFN + "1")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(test_support.TESTFN + "2"), True)
finally:
            if not f.closed:
                f.close()
def test_samefile(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "1"
),
True
)
            # If we don't have links, assume that os.stat doesn't return reasonable
            # inode information and thus, that samefile() doesn't work
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
)
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
False
)
finally:
            if not f.closed:
                f.close()
def test_samestat(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "1")
),
True
)
            # If we don't have links, assume that os.stat() doesn't return reasonable
            # inode information and thus, that samestat() doesn't work
if hasattr(os, "symlink"):
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
False
)
finally:
            if not f.closed:
                f.close()
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), basestring)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), basestring)
self.assertIsInstance(posixpath.expanduser("~foo/"), basestring)
with test_support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
if hasattr(os, "symlink"):
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
test_support.unlink(ABSTFN)
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN)
test_support.unlink(ABSTFN+"1")
test_support.unlink(ABSTFN+"2")
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
os.chdir(ABSTFN + "/k")
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
old_path = abspath('.')
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
os.chdir(dirname(ABSTFN))
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
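        # relpath() is computed relative to the current directory, so pin
        # os.getcwd to a fixed path for this test; it is restored in the
        # finally block below.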
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"), "../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
finally:
os.getcwd = real_getcwd
class PosixCommonTest(test_genericpath.CommonTest):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
def test_main():
test_support.run_unittest(PosixPathTest, PosixCommonTest)
if __name__=="__main__":
test_main()
|
|
# xml.etree test. This file contains enough tests to make sure that
# all included components work as they should.
# Large parts are extracted from the upstream test suite.
# IMPORTANT: the same doctests are run from "test_xml_etree_c" in
# order to ensure consistency between the C implementation and the
# Python implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
# Don't re-import "xml.etree.ElementTree" module in the docstring,
# except if the test is specific to the Python implementation.
import sys
import cgi
from test import test_support
from test.test_support import findfile
from xml.etree import ElementTree as ET
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
def sanity():
"""
Import sanity.
>>> from xml.etree import ElementTree
>>> from xml.etree import ElementInclude
>>> from xml.etree import ElementPath
"""
def check_method(method):
if not hasattr(method, '__call__'):
print method, "not callable"
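# serialize() round-trips an element through ElementTree.write() into a
# StringIO buffer so the doctests can compare the exact markup the serializer
# produces (or, with to_string=False, re-parse the rewound buffer).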
def serialize(elem, to_string=True, **options):
import StringIO
file = StringIO.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize(elem):
if elem.tag == ET.Comment:
return "<Comment>"
return elem.tag
def summarize_list(seq):
return [summarize(elem) for elem in seq]
def normalize_crlf(tree):
for elem in tree.iter():
if elem.text:
elem.text = elem.text.replace("\r\n", "\n")
if elem.tail:
elem.tail = elem.tail.replace("\r\n", "\n")
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print "expected one-character string, got %r" % char
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print "expected value string, got %r" % mapping["key"]
def check_element(element):
if not ET.iselement(element):
print "not an element"
if not hasattr(element, "tag"):
print "no tag member"
if not hasattr(element, "attrib"):
print "no attrib member"
if not hasattr(element, "text"):
print "no text member"
if not hasattr(element, "tail"):
print "no tail member"
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
# --------------------------------------------------------------------
# element tree tests
def interface():
r"""
Test element tree interface.
>>> element = ET.Element("tag")
>>> check_element(element)
>>> tree = ET.ElementTree(element)
>>> check_element(tree.getroot())
>>> element = ET.Element("t\xe4g", key="value")
>>> tree = ET.ElementTree(element)
>>> repr(element) # doctest: +ELLIPSIS
"<Element 't\\xe4g' at 0x...>"
>>> element = ET.Element("tag", key="value")
Make sure all standard element methods exist.
>>> check_method(element.append)
>>> check_method(element.extend)
>>> check_method(element.insert)
>>> check_method(element.remove)
>>> check_method(element.getchildren)
>>> check_method(element.find)
>>> check_method(element.iterfind)
>>> check_method(element.findall)
>>> check_method(element.findtext)
>>> check_method(element.clear)
>>> check_method(element.get)
>>> check_method(element.set)
>>> check_method(element.keys)
>>> check_method(element.items)
>>> check_method(element.iter)
>>> check_method(element.itertext)
>>> check_method(element.getiterator)
These methods return an iterable. See bug 6472.
>>> check_method(element.iter("tag").next)
>>> check_method(element.iterfind("tag").next)
>>> check_method(element.iterfind("*").next)
>>> check_method(tree.iter("tag").next)
>>> check_method(tree.iterfind("tag").next)
>>> check_method(tree.iterfind("*").next)
These aliases are provided:
>>> assert ET.XML == ET.fromstring
>>> assert ET.PI == ET.ProcessingInstruction
>>> assert ET.XMLParser == ET.XMLTreeBuilder
"""
def simpleops():
"""
Basic method sanity checks.
>>> elem = ET.XML("<body><tag/></body>")
>>> serialize(elem)
'<body><tag /></body>'
>>> e = ET.Element("tag2")
>>> elem.append(e)
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> serialize(elem)
'<body><tag /></body>'
>>> elem.insert(0, e)
>>> serialize(elem)
'<body><tag2 /><tag /></body>'
>>> elem.remove(e)
>>> elem.extend([e])
>>> serialize(elem)
'<body><tag /><tag2 /></body>'
>>> elem.remove(e)
>>> element = ET.Element("tag", key="value")
>>> serialize(element) # 1
'<tag key="value" />'
>>> subelement = ET.Element("subtag")
>>> element.append(subelement)
>>> serialize(element) # 2
'<tag key="value"><subtag /></tag>'
>>> element.insert(0, subelement)
>>> serialize(element) # 3
'<tag key="value"><subtag /><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 4
'<tag key="value"><subtag /></tag>'
>>> element.remove(subelement)
>>> serialize(element) # 5
'<tag key="value" />'
>>> element.remove(subelement)
Traceback (most recent call last):
ValueError: list.remove(x): x not in list
>>> serialize(element) # 6
'<tag key="value" />'
>>> element[0:0] = [subelement, subelement, subelement]
>>> serialize(element[1])
'<subtag />'
>>> element[1:9] == [element[1], element[2]]
True
>>> element[:9:2] == [element[0], element[2]]
True
>>> del element[1:2]
>>> serialize(element)
'<tag key="value"><subtag /><subtag /></tag>'
"""
def cdata():
"""
Test CDATA handling (etc).
>>> serialize(ET.XML("<tag>hello</tag>"))
'<tag>hello</tag>'
    >>> serialize(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>"))
'<tag>hello</tag>'
>>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>"))
'<tag>hello</tag>'
"""
# Only with Python implementation
def simplefind():
"""
Test find methods using the elementpath fallback.
>>> from xml.etree import ElementTree
>>> CurrentElementPath = ElementTree.ElementPath
>>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
>>> elem = ElementTree.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
Path syntax doesn't work in this case.
>>> elem.find("section/tag")
>>> elem.findtext("section/tag")
>>> summarize_list(elem.findall("section/tag"))
[]
>>> ElementTree.ElementPath = CurrentElementPath
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.find("tag").tag
'tag'
>>> ET.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> elem.find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("./tag").tag
'tag'
>>> ET.ElementTree(elem).find("/tag").tag
'tag'
>>> elem[2] = ET.XML(SAMPLE_SECTION)
>>> elem.find("section/nexttag").tag
'nexttag'
>>> ET.ElementTree(elem).find("section/tag").tag
'tag'
>>> ET.ElementTree(elem).find("tog")
>>> ET.ElementTree(elem).find("tog/foo")
>>> elem.findtext("tag")
'text'
>>> elem.findtext("section/nexttag")
''
>>> elem.findtext("section/nexttag", "default")
''
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ET.ElementTree(elem).findtext("tag")
'text'
>>> ET.ElementTree(elem).findtext("tog/foo")
>>> ET.ElementTree(elem).findtext("tog/foo", "default")
'default'
>>> ET.ElementTree(elem).findtext("./tag")
'text'
>>> ET.ElementTree(elem).findtext("/tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ET.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("."))
['body']
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("tog"))
[]
>>> summarize_list(elem.findall("tog/foo"))
[]
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("section/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("section//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/*"))
['tag', 'nexttag', 'nextsection']
>>> summarize_list(elem.findall("*//*"))
['tag', 'nexttag', 'nextsection', 'tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class]"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class='a']"))
['tag']
>>> summarize_list(elem.findall(".//tag[@class='b']"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@id]"))
['tag']
>>> summarize_list(elem.findall(".//section[tag]"))
['section']
>>> summarize_list(elem.findall(".//section[element]"))
[]
>>> summarize_list(elem.findall("../tag"))
[]
>>> summarize_list(elem.findall("section/../tag"))
['tag', 'tag']
>>> summarize_list(ET.ElementTree(elem).findall("./tag"))
['tag', 'tag']
Following example is invalid in 1.2.
A leading '*' is assumed in 1.3.
>>> elem.findall("section//") == elem.findall("section//*")
True
ET's Path module handles this case incorrectly; this gives
a warning in 1.3, and the behaviour will be modified in 1.4.
>>> summarize_list(ET.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
def file_init():
"""
>>> import StringIO
>>> stringfile = StringIO.StringIO(SAMPLE_XML)
>>> tree = ET.ElementTree(file=stringfile)
>>> tree.find("tag").tag
'tag'
>>> tree.find("section/tag").tag
'tag'
>>> tree = ET.ElementTree(file=SIMPLE_XMLFILE)
>>> tree.find("element").tag
'element'
>>> tree.find("element/../empty-element").tag
'empty-element'
"""
def bad_find():
"""
Check bad or unsupported path expressions.
>>> elem = ET.XML(SAMPLE_XML)
>>> elem.findall("/tag")
Traceback (most recent call last):
SyntaxError: cannot use absolute path on element
"""
def path_cache():
"""
Check that the path cache behaves sanely.
>>> elem = ET.XML(SAMPLE_XML)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> cache_len_10 = len(ET.ElementPath._cache)
>>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) == cache_len_10
True
>>> for i in range(20): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) > cache_len_10
True
>>> for i in range(600): ET.ElementTree(elem).find('./'+str(i))
>>> len(ET.ElementPath._cache) < 500
True
"""
def copy():
"""
Test copy handling (etc).
>>> import copy
>>> e1 = ET.XML("<tag>hello<foo/></tag>")
>>> e2 = copy.copy(e1)
>>> e3 = copy.deepcopy(e1)
>>> e1.find("foo").tag = "bar"
>>> serialize(e1)
'<tag>hello<bar /></tag>'
>>> serialize(e2)
'<tag>hello<bar /></tag>'
>>> serialize(e3)
'<tag>hello<foo /></tag>'
"""
def attrib():
"""
Test attribute handling.
>>> elem = ET.Element("tag")
>>> elem.get("key") # 1.1
>>> elem.get("key", "default") # 1.2
'default'
>>> elem.set("key", "value")
>>> elem.get("key") # 1.3
'value'
>>> elem = ET.Element("tag", key="value")
>>> elem.get("key") # 2.1
'value'
>>> elem.attrib # 2.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 3.1
'value'
>>> elem.attrib # 3.2
{'key': 'value'}
>>> attrib = {"key": "value"}
>>> elem = ET.Element("tag", **attrib)
>>> attrib.clear() # check for aliasing issues
>>> elem.get("key") # 4.1
'value'
>>> elem.attrib # 4.2
{'key': 'value'}
>>> elem = ET.Element("tag", {"key": "other"}, key="value")
>>> elem.get("key") # 5.1
'value'
>>> elem.attrib # 5.2
{'key': 'value'}
>>> elem = ET.Element('test')
>>> elem.text = "aa"
>>> elem.set('testa', 'testval')
>>> elem.set('testb', 'test2')
>>> ET.tostring(elem)
'<test testa="testval" testb="test2">aa</test>'
>>> sorted(elem.keys())
['testa', 'testb']
>>> sorted(elem.items())
[('testa', 'testval'), ('testb', 'test2')]
>>> elem.attrib['testb']
'test2'
>>> elem.attrib['testb'] = 'test1'
>>> elem.attrib['testc'] = 'test2'
>>> ET.tostring(elem)
'<test testa="testval" testb="test1" testc="test2">aa</test>'
"""
def makeelement():
"""
Test makeelement handling.
>>> elem = ET.Element("tag")
>>> attrib = {"key": "value"}
>>> subelem = elem.makeelement("subtag", attrib)
>>> if subelem.attrib is attrib:
... print "attrib aliasing"
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.clear()
>>> serialize(elem)
'<tag />'
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem.extend([subelem, subelem])
>>> serialize(elem)
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>'
>>> elem[:] = [subelem]
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
>>> elem[:] = tuple([subelem])
>>> serialize(elem)
'<tag><subtag key="value" /></tag>'
"""
def parsefile():
"""
Test parsing from file.
>>> tree = ET.parse(SIMPLE_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> tree = ET.parse(SIMPLE_NS_XMLFILE)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<ns0:root xmlns:ns0="namespace">
<ns0:element key="value">text</ns0:element>
<ns0:element>text</ns0:element>tail
<ns0:empty-element />
</ns0:root>
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> parser = ET.XMLParser()
>>> parser.version # doctest: +ELLIPSIS
'Expat ...'
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> parser = ET.XMLTreeBuilder() # 1.2 compatibility
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
>>> target = ET.TreeBuilder()
>>> parser = ET.XMLParser(target=target)
>>> parser.feed(data)
>>> print serialize(parser.close())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
def parseliteral():
"""
>>> element = ET.XML("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> element = ET.fromstring("<html><body>text</body></html>")
>>> ET.ElementTree(element).write(sys.stdout)
<html><body>text</body></html>
>>> sequence = ["<html><body>", "text</bo", "dy></html>"]
>>> element = ET.fromstringlist(sequence)
>>> print ET.tostring(element)
<html><body>text</body></html>
>>> print "".join(ET.tostringlist(element))
<html><body>text</body></html>
>>> ET.tostring(element, "ascii")
"<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>"
>>> _, ids = ET.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def iterparse():
"""
Test iterparse interface.
>>> iterparse = ET.iterparse
>>> context = iterparse(SIMPLE_XMLFILE)
>>> action, elem = next(context)
>>> print action, elem.tag
end element
>>> for action, elem in context:
... print action, elem.tag
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = iterparse(SIMPLE_NS_XMLFILE)
>>> for action, elem in context:
... print action, elem.tag
end {namespace}element
end {namespace}element
end {namespace}empty-element
end {namespace}root
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ()
>>> context = iterparse(SIMPLE_XMLFILE, events=events)
>>> for action, elem in context:
... print action, elem.tag
>>> events = ("start", "end")
>>> context = iterparse(SIMPLE_XMLFILE, events)
>>> for action, elem in context:
... print action, elem.tag
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end", "start-ns", "end-ns")
>>> context = iterparse(SIMPLE_NS_XMLFILE, events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print action, elem.tag
... else:
... print action, elem
start-ns ('', 'namespace')
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
end-ns None
>>> import StringIO
>>> events = ('start-ns', 'end-ns')
>>> context = ET.iterparse(StringIO.StringIO(r"<root xmlns=''/>"), events)
>>> for action, elem in context:
... print action, elem
start-ns ('', '')
end-ns None
>>> events = ("start", "end", "bogus")
>>> with open(SIMPLE_XMLFILE, "rb") as f:
... iterparse(f, events)
Traceback (most recent call last):
ValueError: unknown event 'bogus'
>>> source = StringIO.StringIO(
... "<?xml version='1.0' encoding='iso-8859-1'?>\\n"
    ...     "<body xmlns='http://&#233;ffbot.org/ns'\\n"
... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n")
>>> events = ("start-ns",)
>>> context = iterparse(source, events)
>>> for action, elem in context:
... print action, elem
start-ns ('', u'http://\\xe9ffbot.org/ns')
start-ns (u'cl\\xe9', 'http://effbot.org/ns')
>>> source = StringIO.StringIO("<document />junk")
>>> try:
... for action, elem in iterparse(source):
... print action, elem.tag
... except ET.ParseError, v:
... print v
end document
junk after document element: line 1, column 12
"""
def writefile():
"""
>>> elem = ET.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ET.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
Test tag suppression
>>> elem.tag = None
>>> serialize(elem)
'text<subtag>subtext</subtag>'
>>> elem.insert(0, ET.Comment("comment"))
>>> serialize(elem) # assumes 1.3
'text<!--comment--><subtag>subtext</subtag>'
>>> elem[0] = ET.PI("key", "value")
>>> serialize(elem)
'text<?key value?><subtag>subtext</subtag>'
"""
def custom_builder():
"""
Test parser w. custom builder.
>>> with open(SIMPLE_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> with open(SIMPLE_NS_XMLFILE) as f:
... data = f.read()
>>> class Builder:
... def start(self, tag, attrib):
... print "start", tag
... def end(self, tag):
... print "end", tag
... def data(self, text):
... pass
... def pi(self, target, data):
... print "pi", target, repr(data)
... def comment(self, data):
... print "comment", repr(data)
>>> builder = Builder()
>>> parser = ET.XMLParser(target=builder)
>>> parser.feed(data)
pi pi 'data'
comment ' comment '
start {namespace}root
start {namespace}element
end {namespace}element
start {namespace}element
end {namespace}element
start {namespace}empty-element
end {namespace}empty-element
end {namespace}root
"""
def getchildren():
"""
Test Element.getchildren()
>>> with open(SIMPLE_XMLFILE, "r") as f:
... tree = ET.parse(f)
>>> for elem in tree.getroot().iter():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> for elem in tree.getiterator():
... summarize_list(elem.getchildren())
['element', 'element', 'empty-element']
[]
[]
[]
>>> elem = ET.XML(SAMPLE_XML)
>>> len(elem.getchildren())
3
>>> len(elem[2].getchildren())
1
>>> elem[:] == elem.getchildren()
True
>>> child1 = elem[0]
>>> child2 = elem[2]
>>> del elem[1:2]
>>> len(elem.getchildren())
2
>>> child1 == elem[0]
True
>>> child2 == elem[1]
True
>>> elem[0:2] = [child2, child1]
>>> child2 == elem[0]
True
>>> child1 == elem[1]
True
>>> child1 == elem[0]
False
>>> elem.clear()
>>> elem.getchildren()
[]
"""
def writestring():
"""
>>> elem = ET.XML("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
>>> elem = ET.fromstring("<html><body>text</body></html>")
>>> ET.tostring(elem)
'<html><body>text</body></html>'
"""
def check_encoding(encoding):
"""
>>> check_encoding("ascii")
>>> check_encoding("us-ascii")
>>> check_encoding("iso-8859-1")
>>> check_encoding("iso-8859-15")
>>> check_encoding("cp437")
>>> check_encoding("mac-roman")
>>> check_encoding("gbk")
Traceback (most recent call last):
ValueError: multi-byte encodings are not supported
>>> check_encoding("cp037")
Traceback (most recent call last):
ParseError: unknown encoding: line 1, column 30
"""
ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
def encoding():
r"""
Test encoding issues.
>>> elem = ET.Element("tag")
>>> elem.text = u"abc"
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>abc</tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, encoding="iso-8859-1")
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
    '<tag>&lt;&amp;"\'&gt;</tag>'
    >>> serialize(elem, encoding="utf-8")
    '<tag>&lt;&amp;"\'&gt;</tag>'
    >>> serialize(elem, encoding="us-ascii") # cdata characters
    '<tag>&lt;&amp;"\'&gt;</tag>'
    >>> serialize(elem, encoding="iso-8859-1")
    '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>'
    >>> elem.attrib["key"] = "<&\"\'>"
    >>> elem.text = None
    >>> serialize(elem)
    '<tag key="&lt;&amp;&quot;\'&gt;" />'
    >>> serialize(elem, encoding="utf-8")
    '<tag key="&lt;&amp;&quot;\'&gt;" />'
    >>> serialize(elem, encoding="us-ascii")
    '<tag key="&lt;&amp;&quot;\'&gt;" />'
    >>> serialize(elem, encoding="iso-8859-1")
    '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;\'&gt;" />'
    >>> elem.text = u'\xe5\xf6\xf6<>'
    >>> elem.attrib.clear()
    >>> serialize(elem)
    '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
    >>> serialize(elem, encoding="utf-8")
    '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>'
    >>> serialize(elem, encoding="us-ascii")
    '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
    >>> serialize(elem, encoding="iso-8859-1")
    "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>"
    >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
    >>> elem.text = None
    >>> serialize(elem)
    '<tag key="&#229;&#246;&#246;&lt;&gt;" />'
    >>> serialize(elem, encoding="utf-8")
    '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />'
    >>> serialize(elem, encoding="us-ascii")
    '<tag key="&#229;&#246;&#246;&lt;&gt;" />'
    >>> serialize(elem, encoding="iso-8859-1")
    '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />'
"""
def methods():
r"""
Test serialization methods.
    >>> e = ET.XML("<html><link/><script>1 &lt; 2</script></html>")
    >>> e.tail = "\n"
    >>> serialize(e)
    '<html><link /><script>1 &lt; 2</script></html>\n'
    >>> serialize(e, method=None)
    '<html><link /><script>1 &lt; 2</script></html>\n'
    >>> serialize(e, method="xml")
    '<html><link /><script>1 &lt; 2</script></html>\n'
>>> serialize(e, method="html")
'<html><link><script>1 < 2</script></html>\n'
>>> serialize(e, method="text")
'1 < 2\n'
"""
def iterators():
"""
Test iterators.
>>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
>>> summarize_list(e.iter())
['html', 'body', 'i']
>>> summarize_list(e.find("body").iter())
['body', 'i']
>>> summarize(next(e.iter()))
'html'
>>> "".join(e.itertext())
'this is a paragraph...'
>>> "".join(e.find("body").itertext())
'this is a paragraph.'
>>> next(e.itertext())
'this is a '
Method iterparse should return an iterator. See bug 6472.
>>> sourcefile = serialize(e, to_string=False)
>>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS
('end', <Element 'i' at 0x...>)
>>> tree = ET.ElementTree(None)
>>> tree.iter()
Traceback (most recent call last):
AttributeError: 'NoneType' object has no attribute 'iter'
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) good entities
    >>> e = ET.XML("<document title='&#x8230;'>test</document>")
    >>> serialize(e)
    '<document title="&#33328;">test</document>'
2) bad entities
>>> ET.XML("<document>&entity;</document>")
Traceback (most recent call last):
ParseError: undefined entity: line 1, column 10
>>> ET.XML(ENTITY_XML)
Traceback (most recent call last):
ParseError: undefined entity &entity;: line 5, column 10
3) custom entity
>>> parser = ET.XMLParser()
>>> parser.entity["entity"] = "text"
>>> parser.feed(ENTITY_XML)
>>> root = parser.close()
>>> serialize(root)
'<document>text</document>'
"""
def error(xml):
"""
Test error handling.
>>> issubclass(ET.ParseError, SyntaxError)
True
>>> error("foo").position
(1, 0)
>>> error("<tag>&foo;</tag>").position
(1, 5)
>>> error("foobar<").position
(1, 6)
"""
try:
ET.XML(xml)
except ET.ParseError:
return sys.exc_value
def namespace():
"""
Test namespace issues.
1) xml namespace
>>> elem = ET.XML("<tag xml:lang='en' />")
>>> serialize(elem) # 1.1
'<tag xml:lang="en" />'
2) other "well-known" namespaces
>>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
>>> serialize(elem) # 2.1
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
>>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
>>> serialize(elem) # 2.2
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
>>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
>>> serialize(elem) # 2.3
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'
3) unknown namespaces
>>> elem = ET.XML(SAMPLE_XML_NS)
>>> print serialize(elem)
<ns0:body xmlns:ns0="http://effbot.org/ns">
<ns0:tag>text</ns0:tag>
<ns0:tag />
<ns0:section>
<ns0:tag>subtext</ns0:tag>
</ns0:section>
</ns0:body>
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ET.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("{uri}tag"))
>>> serialize(elem) # 1.2
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> serialize(elem) # 1.3
'<ns0:tag xmlns:ns0="uri" />'
>>> elem = ET.Element(ET.QName("uri", "tag"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
>>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
>>> serialize(elem) # 1.4
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>'
2) decorated attributes
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "value"
>>> serialize(elem) # 2.1
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
>>> elem.clear()
>>> elem.attrib[ET.QName("{uri}key")] = "value"
>>> serialize(elem) # 2.2
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
3) decorated values are not converted by default, but the
QName wrapper can be used for values
>>> elem.clear()
>>> elem.attrib["{uri}key"] = "{uri}value"
>>> serialize(elem) # 3.1
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />'
>>> elem.clear()
>>> elem.attrib["{uri}key"] = ET.QName("{uri}value")
>>> serialize(elem) # 3.2
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />'
>>> elem.clear()
>>> subelem = ET.Element("tag")
>>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
>>> elem.append(subelem)
>>> elem.append(subelem)
>>> serialize(elem) # 3.3
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>'
4) Direct QName tests
>>> str(ET.QName('ns', 'tag'))
'{ns}tag'
>>> str(ET.QName('{ns}tag'))
'{ns}tag'
>>> q1 = ET.QName('ns', 'tag')
>>> q2 = ET.QName('ns', 'tag')
>>> q1 == q2
True
>>> q2 = ET.QName('ns', 'other-tag')
>>> q1 == q2
False
>>> q1 == 'ns:tag'
False
>>> q1 == '{ns}tag'
True
"""
def doctype_public():
"""
Test PUBLIC doctype.
>>> elem = ET.XML('<!DOCTYPE html PUBLIC'
... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
... ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
... '<html>text</html>')
"""
def xpath_tokenizer(p):
"""
Test the XPath tokenizer.
>>> # tests from the xml specification
>>> xpath_tokenizer("*")
['*']
>>> xpath_tokenizer("text()")
['text', '()']
>>> xpath_tokenizer("@name")
['@', 'name']
>>> xpath_tokenizer("@*")
['@', '*']
>>> xpath_tokenizer("para[1]")
['para', '[', '1', ']']
>>> xpath_tokenizer("para[last()]")
['para', '[', 'last', '()', ']']
>>> xpath_tokenizer("*/para")
['*', '/', 'para']
>>> xpath_tokenizer("/doc/chapter[5]/section[2]")
['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
>>> xpath_tokenizer("chapter//para")
['chapter', '//', 'para']
>>> xpath_tokenizer("//para")
['//', 'para']
>>> xpath_tokenizer("//olist/item")
['//', 'olist', '/', 'item']
>>> xpath_tokenizer(".")
['.']
>>> xpath_tokenizer(".//para")
['.', '//', 'para']
>>> xpath_tokenizer("..")
['..']
>>> xpath_tokenizer("../@lang")
['..', '/', '@', 'lang']
>>> xpath_tokenizer("chapter[title]")
['chapter', '[', 'title', ']']
>>> xpath_tokenizer("employee[@secretary and @assistant]")
['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
>>> # additional tests
>>> xpath_tokenizer("{http://spam}egg")
['{http://spam}egg']
>>> xpath_tokenizer("./spam.egg")
['.', '/', 'spam.egg']
>>> xpath_tokenizer(".//{http://spam}egg")
['.', '//', '{http://spam}egg']
"""
from xml.etree import ElementPath
out = []
for op, tag in ElementPath.xpath_tokenizer(p):
out.append(op or tag)
return out
def processinginstruction():
"""
Test ProcessingInstruction directly
>>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
'<?test instruction?>'
>>> ET.tostring(ET.PI('test', 'instruction'))
'<?test instruction?>'
Issue #2746
>>> ET.tostring(ET.PI('test', '<testing&>'))
'<?test <testing&>?>'
>>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1')
"<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>"
"""
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(cgi.escape(SIMPLE_XMLFILE, True))
def xinclude_loader(href, parse="xml", encoding=None):
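    # Loader handed to ElementInclude.include() in the doctests below; it
    # resolves each xi:include href against the in-memory XINCLUDE dictionary
    # instead of touching the filesystem.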
try:
data = XINCLUDE[href]
except KeyError:
raise IOError("resource not found")
if parse == "xml":
from xml.etree.ElementTree import XML
return XML(data)
return data
def xinclude():
r"""
Basic inclusion example (XInclude C.1)
>>> from xml.etree import ElementTree as ET
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("C1.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C1
<document>
<p>120 Mz is adequate for an average home user.</p>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
</document>
Textual inclusion example (XInclude C.2)
>>> document = xinclude_loader("C2.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C2
<document>
<p>This document has been accessed
324387 times.</p>
</document>
Textual inclusion after sibling element (based on modified XInclude C.2)
>>> document = xinclude_loader("C2b.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C2b
<document>
<p>This document has been <em>accessed</em>
324387 times.</p>
</document>
Textual inclusion of XML example (XInclude C.3)
>>> document = xinclude_loader("C3.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print serialize(document) # C3
<document>
<p>The following is the source of the "data.xml" resource:</p>
<example><?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
</example>
</document>
Fallback example (XInclude C.5)
Note! Fallback support is not yet implemented
>>> document = xinclude_loader("C5.xml")
>>> ElementInclude.include(document, xinclude_loader)
Traceback (most recent call last):
IOError: resource not found
>>> # print serialize(document) # C5
"""
def xinclude_default():
"""
>>> from xml.etree import ElementInclude
>>> document = xinclude_loader("default.xml")
>>> ElementInclude.include(document)
>>> print serialize(document) # default
<document>
<p>Example.</p>
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
</document>
"""
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
def xinclude_failures():
r"""
Test failure to locate included XML file.
>>> from xml.etree import ElementInclude
>>> def none_loader(href, parser, encoding=None):
... return None
>>> document = ET.XML(XINCLUDE["C1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'disclaimer.xml' as 'xml'
Test failure to locate included text file.
>>> document = ET.XML(XINCLUDE["C2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: cannot load 'count.txt' as 'text'
Test bad parse type.
>>> document = ET.XML(XINCLUDE_BAD["B1.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE')
Test xi:fallback outside xi:include.
>>> document = ET.XML(XINCLUDE_BAD["B2.xml"])
>>> ElementInclude.include(document, loader=none_loader)
Traceback (most recent call last):
FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback')
"""
# --------------------------------------------------------------------
# reported bugs
def bug_xmltoolkit21():
"""
marshaller gives obscure errors for non-string values
>>> elem = ET.Element(123)
>>> serialize(elem) # tag
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.text = 123
>>> serialize(elem) # text
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.tail = 123
>>> serialize(elem) # tail
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set(123, "123")
>>> serialize(elem) # attribute key
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ET.Element("elem")
>>> elem.set("123", 123)
>>> serialize(elem) # attribute value
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
"""
def bug_xmltoolkit25():
"""
typo in ElementTree.findtext
>>> elem = ET.XML(SAMPLE_XML)
>>> tree = ET.ElementTree(elem)
>>> tree.findtext("tag")
'text'
>>> tree.findtext("section/tag")
'subtext'
"""
def bug_xmltoolkit28():
"""
.//tag causes exceptions
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> summarize_list(tree.findall(".//thead"))
[]
>>> summarize_list(tree.findall(".//tbody"))
['tbody']
"""
def bug_xmltoolkitX1():
"""
dump() doesn't flush the output buffer
>>> tree = ET.XML("<doc><table><tbody/></table></doc>")
>>> ET.dump(tree); sys.stdout.write("tail")
<doc><table><tbody /></table></doc>
tail
"""
def bug_xmltoolkit39():
"""
non-ascii element and attribute names doesn't work
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
    >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='v&#228;lue' />")
>>> tree.attrib
{u'\\xe4ttr': u'v\\xe4lue'}
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
>>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
>>> tree = ET.Element(u"t\u00e4g")
>>> ET.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ET.Element("tag")
>>> tree.set(u"\u00e4ttr", u"v\u00e4lue")
>>> ET.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
"""
def bug_xmltoolkit54():
"""
problems handling internally defined entities
>>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]><doc>&ldots;</doc>")
>>> serialize(e)
'<doc>舰</doc>'
"""
def bug_xmltoolkit55():
"""
make sure we're reporting the first error, not the last
>>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
Traceback (most recent call last):
ParseError: undefined entity &ldots;: line 1, column 36
"""
class ExceptionFile:
def read(self, x):
raise IOError
def xmltoolkit60():
"""
Handle crash in stream source.
>>> tree = ET.parse(ExceptionFile())
Traceback (most recent call last):
IOError
"""
XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named ‘BCT9801BEG’.</paragraph>
</subdoc-abstract>
</patent-application-publication>"""
def xmltoolkit62():
"""
Don't crash when using custom entities.
>>> xmltoolkit62()
u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.'
"""
ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'}
parser = ET.XMLTreeBuilder()
parser.entity.update(ENTITIES)
parser.feed(XMLTOOLKIT62_DOC)
t = parser.close()
return t.find('.//paragraph').text
def xmltoolkit63():
"""
Check reference leak.
>>> xmltoolkit63()
>>> count = sys.getrefcount(None)
>>> for i in range(1000):
... xmltoolkit63()
>>> sys.getrefcount(None) - count
0
"""
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
# --------------------------------------------------------------------
def bug_200708_newline():
r"""
Preserve newlines in attributes.
    >>> e = ET.Element('SomeTag', text="def _f():\n  return 3\n")
    >>> ET.tostring(e)
    '<SomeTag text="def _f():&#10;  return 3&#10;" />'
    >>> ET.XML(ET.tostring(e)).get("text")
    'def _f():\n  return 3\n'
    >>> ET.tostring(ET.XML(ET.tostring(e)))
    '<SomeTag text="def _f():&#10;  return 3&#10;" />'
"""
def bug_200708_close():
"""
Test default builder.
>>> parser = ET.XMLParser() # default
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
Test custom builder.
>>> class EchoTarget:
... def close(self):
... return ET.Element("element") # simulate root
>>> parser = ET.XMLParser(EchoTarget())
>>> parser.feed("<element>some text</element>")
>>> summarize(parser.close())
'element'
"""
def bug_200709_default_namespace():
"""
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> serialize(e, default_namespace="default") # 1
'<elem xmlns="default"><elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "{not-default}elem")
>>> serialize(e, default_namespace="default") # 2
'<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "elem") # unprefixed name
>>> serialize(e, default_namespace="default") # 3
Traceback (most recent call last):
ValueError: cannot use non-qualified names with default_namespace option
"""
def bug_200709_register_namespace():
"""
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />'
>>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
>>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />'
And the Dublin Core namespace is in the default list:
>>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title"))
'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />'
"""
def bug_200709_element_comment():
"""
Not sure if this can be fixed, really (since the serializer needs
ET.Comment, not cET.comment).
>>> a = ET.Element('a')
>>> a.append(ET.Comment('foo'))
>>> a[0].tag == ET.Comment
True
>>> a = ET.Element('a')
>>> a.append(ET.PI('foo'))
>>> a[0].tag == ET.PI
True
"""
def bug_200709_element_insert():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> c = ET.SubElement(a, 'c')
>>> d = ET.Element('d')
>>> a.insert(0, d)
>>> summarize_list(a)
['d', 'b', 'c']
>>> a.insert(-1, d)
>>> summarize_list(a)
['d', 'b', 'd', 'c']
"""
def bug_200709_iter_comment():
"""
>>> a = ET.Element('a')
>>> b = ET.SubElement(a, 'b')
>>> comment_b = ET.Comment("TEST-b")
>>> b.append(comment_b)
>>> summarize_list(a.iter(ET.Comment))
['<Comment>']
"""
def bug_18347():
"""
>>> e = ET.XML('<html><CamelCase>text</CamelCase></html>')
>>> serialize(e)
'<html><CamelCase>text</CamelCase></html>'
>>> serialize(e, method="html")
'<html><CamelCase>text</CamelCase></html>'
"""
# --------------------------------------------------------------------
# reported on bugs.python.org
def bug_1534630():
"""
>>> bob = ET.TreeBuilder()
>>> e = bob.data("data")
>>> e = bob.start("tag", {})
>>> e = bob.end("tag")
>>> e = bob.close()
>>> serialize(e)
'<tag />'
"""
def check_issue6233():
"""
>>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>tãg</body>"
>>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>")
>>> ET.tostring(e, 'ascii')
"<?xml version='1.0' encoding='ascii'?>\\n<body>tãg</body>"
"""
def check_issue3151():
"""
>>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
>>> e.tag
'{${stuff}}localname'
>>> t = ET.ElementTree(e)
>>> ET.tostring(e)
'<ns0:localname xmlns:ns0="${stuff}" />'
"""
def check_issue6565():
"""
>>> elem = ET.XML("<body><tag/></body>")
>>> summarize_list(elem)
['tag']
>>> newelem = ET.XML(SAMPLE_XML)
>>> elem[:] = newelem[:]
>>> summarize_list(elem)
['tag', 'tag', 'section']
"""
def check_html_empty_elems_serialization(self):
# issue 15970
# from http://www.w3.org/TR/html401/index/elements.html
"""
>>> empty_elems = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
... 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
>>> elems = ''.join('<%s />' % elem for elem in empty_elems)
>>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
'<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
>>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
'<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
>>> elems = ''.join('<%s></%s>' % (elem, elem) for elem in empty_elems)
>>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
'<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
>>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
'<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
"""
# --------------------------------------------------------------------
class CleanContext(object):
"""Provide default namespace mapping and path cache."""
checkwarnings = None
def __init__(self, quiet=False):
if sys.flags.optimize >= 2:
# under -OO, doctests cannot be run and therefore not all warnings
# will be emitted
quiet = True
deprecations = (
# Search behaviour is broken if search path starts with "/".
("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'", FutureWarning),
# Element.getchildren() and Element.getiterator() are deprecated.
("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning),
("This method will be removed in future versions. "
"Use .+ instead.", PendingDeprecationWarning),
# XMLParser.doctype() is deprecated.
("This method of XMLParser is deprecated. Define doctype.. "
"method on the TreeBuilder target.", DeprecationWarning))
self.checkwarnings = test_support.check_warnings(*deprecations,
quiet=quiet)
def __enter__(self):
from xml.etree import ElementTree
self._nsmap = ElementTree._namespace_map
self._path_cache = ElementTree.ElementPath._cache
# Copy the default namespace mapping
ElementTree._namespace_map = self._nsmap.copy()
# Copy the path cache (should be empty)
ElementTree.ElementPath._cache = self._path_cache.copy()
self.checkwarnings.__enter__()
def __exit__(self, *args):
from xml.etree import ElementTree
# Restore mapping and path cache
ElementTree._namespace_map = self._nsmap
ElementTree.ElementPath._cache = self._path_cache
self.checkwarnings.__exit__(*args)
def test_main(module_name='xml.etree.ElementTree'):
from test import test_xml_etree
use_py_module = (module_name == 'xml.etree.ElementTree')
# The same doctests are used for both the Python and the C implementations
assert test_xml_etree.ET.__name__ == module_name
# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=not use_py_module):
test_support.run_doctest(test_xml_etree, verbosity=True)
# The module should not be changed by the tests
assert test_xml_etree.ET.__name__ == module_name
if __name__ == '__main__':
test_main()
|
|
import requests, json
import urler
from lxml import etree
from copy import deepcopy
from common import GtR, Paging, MIME_MAP
NSMAP = {"gtr" : "http://gtr.rcuk.ac.uk/api"}
GTR_PREFIX = "gtr"
class GtRNative(GtR):
def __init__(self, base_url, page_size=25, serialisation="json", username=None, password=None):
super(GtRNative, self).__init__(base_url, page_size, serialisation, username, password)
self.factory = GtRDAOFactory()
self.project_base = self.base_url + "/project/"
self.org_base = self.base_url + "/organisation/"
self.person_base = self.base_url + "/person/"
self.publication_base = self.base_url + "/publication/"
## List Retrieval Methods ##
def projects(self, page=None, page_size=None):
page_size = self._constrain_page_size(page_size)
page_size = page_size if page_size is not None else self.page_size
data, paging = self._api(self.project_base, page=page, page_size=page_size)
if data is not None and paging is not None:
return Projects(self, data, paging, self.project_base)
return None
def organisations(self, page=None, page_size=None):
page_size = self._constrain_page_size(page_size)
page_size = page_size if page_size is not None else self.page_size
data, paging = self._api(self.org_base, page=page, page_size=page_size)
if data is not None and paging is not None:
return Organisations(self, data, paging, self.org_base)
return None
def people(self, page=None, page_size=None):
page_size = self._constrain_page_size(page_size)
page_size = page_size if page_size is not None else self.page_size
data, paging = self._api(self.person_base, page=page, page_size=page_size)
if data is not None and paging is not None:
return People(self, data, paging, self.person_base)
return None
def publications(self, page=None, page_size=None):
page_size = self._constrain_page_size(page_size)
page_size = page_size if page_size is not None else self.page_size
data, paging = self._api(self.publication_base, page=page, page_size=page_size)
if data is not None and paging is not None:
return Publications(self, data, paging, self.publication_base)
return None
## Individual retrieval methods ##
def project(self, uuid):
url = self.project_base + uuid
raw, _ = self._api(url)
if raw is not None:
return Project(self, raw)
return None
def organisation(self, uuid, page_size=None):
url = self.org_base + uuid
page_size = self._constrain_page_size(page_size)
page_size = page_size if page_size is not None else self.page_size
raw, paging = self._api(url, page_size=page_size)
if raw is not None and paging is not None:
return Organisation(self, raw, paging)
return None
def person(self, uuid):
url = self.person_base + uuid
raw, _ = self._api(url)
if raw is not None:
return Person(self, raw)
return None
def publication(self, uuid):
url = self.publication_base + uuid
raw, _ = self._api(url)
if raw is not None:
return Publication(self, raw)
return None
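# A minimal usage sketch (illustrative only; the base URL, page size and method
# calls below are placeholders built from the API defined above, and a network
# connection to the GtR service is assumed):
#
#   gtr = GtRNative("http://gtr.rcuk.ac.uk/gtr/api")
#   projects = gtr.projects(page_size=10)    # paged Projects listing
#   first = projects.projects()[0]           # Project objects on this page
#   single = gtr.project(first.id())         # re-fetch one project by UUID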
class GtRDAOFactory(object):
def __init__(self):
self.class_map = {
"application/xml" : {
"projects" : ProjectsXMLDAO,
"organisations" : OrganisationsXMLDAO,
"people" : PeopleXMLDAO,
"publications" : PublicationsXMLDAO,
"project" : ProjectXMLDAO,
"organisation" : OrganisationXMLDAO,
"person" : PersonXMLDAO,
"publication" : PublicationXMLDAO
},
"application/json" : {
"projects" : ProjectsJSONDAO,
"organisations" : OrganisationsJSONDAO,
"people" : PeopleJSONDAO,
"publications" : PublicationsJSONDAO,
"project" : ProjectJSONDAO,
"organisation" : OrganisationJSONDAO,
"person" : PersonJSONDAO,
"publication" : PublicationJSONDAO
}
}
def projects(self, client, data):
return self._load(client, data, "projects")
def project(self, client, data):
return self._load(client, data, "project")
def organisations(self, client, data):
return self._load(client, data, "organisations")
def organisation(self, client, data):
return self._load(client, data, "organisation")
def people(self, client, data):
return self._load(client, data, "people")
def person(self, client, data):
return self._load(client, data, "person")
def publications(self, client, data):
return self._load(client, data, "publications")
def publication(self, client, data):
return self._load(client, data, "publication")
def _load(self, client, data, domain):
klazz = self.class_map.get(client.mimetype, {}).get(domain)
        if klazz is not None:
return klazz(data)
return None
class Native(object):
def __init__(self, client):
self.client = client
self.dao = None
def url(self):
raise NotImplementedError()
def xml(self, pretty_print=True):
if self.dao is None:
return None
if hasattr(self.dao, "xml"):
return self.dao.xml(pretty_print)
xml, _ = self.client._api(self.url(), mimetype="application/xml")
if xml is not None:
return etree.tostring(xml, pretty_print=pretty_print)
return None
def as_dict(self):
if self.dao is None:
return None
if hasattr(self.dao, "as_dict"):
return self.dao.as_dict()
j, _ = self.client._api(self.url(), mimetype="application/json")
return j
def json(self, pretty_print=True):
d = self.as_dict()
if pretty_print:
return json.dumps(d, indent=2)
return json.dumps(d)
class NativeXMLDAO(object):
def __init__(self, raw):
self.raw = raw
## Methods for use by extending classes ##
def _from_xpath(self, xp):
"""
return the text from the first element found by the provided xpath
"""
els = self.raw.xpath(xp, namespaces=NSMAP)
if els is not None and len(els) > 0:
if hasattr(els[0], "text"):
return els[0].text
return str(els[0])
return None
def _get_subs(self, parent_xpath, siblings=()):
"""
get a tuple containing the text from the first sibling xpath inside each parent xpath
"""
tups = []
for org in self.raw.xpath(parent_xpath, namespaces=NSMAP):
sibs = []
for sib in siblings:
els = org.xpath(sib, namespaces=NSMAP)
if els is not None and len(els) > 0:
val = els[0].text
sibs.append(val)
tups.append(tuple(sibs))
return tups
def _do_xpath(self, xp):
"""
just apply the xpath to the raw appropriately
"""
return self.raw.xpath(xp, namespaces=NSMAP)
def _port(self, xp, new_root):
"""
        for each result of the xpath, port (via a deep copy) the result to an element
named by new_root
"""
ports = []
for el in self.raw.xpath(xp, namespaces=NSMAP):
root = self._gtr_element(new_root)
for child in el:
root.append(deepcopy(child))
ports.append(root)
return ports
def _wrap(self, source, wrappers, clone=True):
"""
wrap the provided element (via a deep copy if requested) in an
        element named by wrappers (which may be a hierarchy of elements with their namespaces)
"""
        # first create a list of elements from the hierarchy
hierarchy = wrappers.split("/")
elements = []
for wrapper in hierarchy:
parts = wrapper.split(":")
element = None
if len(parts) == 1:
element = self._element(GTR_PREFIX, parts[0])
elif len(parts) == 2:
element = self._element(parts[0], parts[1])
elements.append(element)
if clone:
source = deepcopy(source)
        # now nest the elements inside each other in reverse order
for i in range(len(elements) - 1, -1, -1):
elements[i].append(source)
source = elements[i]
return source
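    # Illustrative example: wrapping a <gtr:organisation> element `el` with
    # self._wrap(el, "gtr:organisationOverview") yields
    #   <gtr:organisationOverview><gtr:organisation>...</gtr:organisation></gtr:organisationOverview>
    # and a nested wrapper string such as "gtr:projectOverview/gtr:projectComposition"
    # (used by ProjectsXMLDAO below) produces the corresponding two-level hierarchy.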
def _element(self, prefix, name):
return etree.Element("{" + NSMAP.get(prefix) + "}" + name, nsmap=NSMAP)
def _gtr_element(self, name):
"""
create a new element with the GTR prefix and namespace map
"""
return self._element(GTR_PREFIX, name)
def xml(self, pretty_print=True):
return etree.tostring(self.raw, pretty_print=pretty_print)
class NativeJSONDAO(object):
def __init__(self, raw):
self.raw = raw
def as_dict(self):
return self.raw
def json(self, pretty_print=True):
d = self.as_dict()
if pretty_print:
return json.dumps(d, indent=2)
return json.dumps(d)
class NativePaged(Native):
def __init__(self, client, paging):
super(NativePaged, self).__init__(client)
self.paging = paging
def record_count(self):
return self.paging.record_count
def pages(self):
return self.paging.pages
def next_page(self):
if self.paging.next is None or self.paging.next == "":
return False
raw, paging = self.client._api(self.paging.next)
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
def previous_page(self):
if self.paging.previous is None or self.paging.previous == "":
return False
raw, paging = self.client._api(self.paging.previous)
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
def first_page(self):
if self.paging.first is None or self.paging.first == "":
return False
raw, paging = self.client._api(self.paging.first)
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
def last_page(self):
if self.paging.last is None or self.paging.last == "":
return False
raw, paging = self.client._api(self.paging.last)
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
def skip_to_page(self, page):
if self.paging.last is None or self.paging.last == "":
return False
if page > self.paging.last:
return False
if page < 1:
return False
raw, paging = self.client._api(self.url(), page=page)
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
def current_page(self):
return self.paging.current_page()
def current_page_size(self):
return self.paging.current_page_size()
def list_elements(self):
"""
        Subclasses should implement this to return the list of Native objects
        on the current page; it is used to drive the iterator.
"""
raise NotImplementedError("list_elements has not been implemented")
def __iter__(self):
return self.iterator()
def iterator(self, reset_pages=True, stop_at_page_boundary=False):
if reset_pages:
self.first_page()
def f():
while True:
elements = self.list_elements()
for p in elements:
yield p
if stop_at_page_boundary:
break
if not self.next_page():
break
return f()
def __len__(self):
return self.record_count()
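# Paging usage sketch (illustrative; assumes `gtr` is a configured GtRNative
# client as above): the iterator walks page boundaries via next_page(), so
# callers can simply loop over a listing object.
#
#   for project in gtr.projects(page_size=100):
#       do_something(project)    # `do_something` is a placeholder for caller code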
#### List Objects ####
## ------ Projects ------- ##
class Projects(NativePaged):
def __init__(self, client, raw, paging, url, dao=None):
super(Projects, self).__init__(client, paging)
self.dao = dao if dao is not None else client.factory.projects(client, raw)
self._url = url
def url(self):
return self._url
def projects(self):
return self.dao.projects(self.client)
def list_elements(self):
return self.projects()
class ProjectsXMLDAO(NativeXMLDAO):
project_xpath = "/gtr:projects/gtr:project"
project_wrapper = "gtr:projectOverview/gtr:projectComposition"
def __init__(self, raw):
super(ProjectsXMLDAO, self).__init__(raw)
def projects(self, client):
raws = self._do_xpath(self.project_xpath)
return [Project(client, self._wrap(raw, self.project_wrapper)) for raw in raws]
class ProjectsJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(ProjectsJSONDAO, self).__init__(raw)
def projects(self, client):
return [Project(client, {"projectComposition" : {"project" : data}}) for data in self.raw.get('project', [])]
### -------- End Projects -------- ###
### ------- Organisations -------- ###
class Organisations(NativePaged):
def __init__(self, client, raw, paging, url, dao=None):
super(Organisations, self).__init__(client, paging)
self.dao = dao if dao is not None else client.factory.organisations(client, raw)
self._url = url
def url(self):
return self._url
def organisations(self):
return self.dao.organisations(self.client)
def list_elements(self):
return self.organisations()
class OrganisationsXMLDAO(NativeXMLDAO):
organisation_xpath = "/gtr:organisations/gtr:organisation"
organisation_wrapper = "gtr:organisationOverview"
def __init__(self, raw):
super(OrganisationsXMLDAO, self).__init__(raw)
    def organisations(self, client):
        raws = self._do_xpath(self.organisation_xpath)
        return [Organisation(client, self._wrap(raw, self.organisation_wrapper), None) for raw in raws]
class OrganisationsJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(OrganisationsJSONDAO, self).__init__(raw)
def organisations(self, client):
return [Organisation(client, {"organisationOverview" : {"organisation" : data}}, None)
for data in self.raw.get('organisation', [])]
## ---- End Organisations ---- ##
## ----- People ------ ##
class People(NativePaged):
def __init__(self, client, raw, paging, url, dao=None):
super(People, self).__init__(client, paging)
self.dao = dao if dao is not None else client.factory.people(client, raw)
self._url = url
def url(self):
return self._url
def people(self):
return self.dao.people(self.client)
def list_elements(self):
return self.people()
class PeopleXMLDAO(NativeXMLDAO):
person_xpath = "/gtr:people/gtr:person"
person_wrapper = "gtr:personOverview"
def __init__(self, raw):
super(PeopleXMLDAO, self).__init__(raw)
def people(self, client):
raws = self._do_xpath(self.person_xpath)
return [Person(client, None, self._wrap(raw, self.person_wrapper)) for raw in raws]
class PeopleJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(PeopleJSONDAO, self).__init__(raw)
def people(self, client):
return [Person(client, {"person" : data})
for data in self.raw.get("person", [])]
## ----- End People ------ ##
## ------ Publications ------ ##
class Publications(NativePaged):
def __init__(self, client, raw, paging, url, dao=None):
super(Publications, self).__init__(client, paging)
self.dao = dao if dao is not None else client.factory.publications(client, raw)
self._url = url
def url(self):
return self._url
def publications(self):
return self.dao.publications(self.client)
def list_elements(self):
return self.publications()
class PublicationsXMLDAO(NativeXMLDAO):
publication_xpath = "/gtr:publications/gtr:publication"
publication_wrapper = "gtr:publicationOverview"
def __init__(self, raw):
super(PublicationsXMLDAO, self).__init__(raw)
def publications(self, client):
raws = self._do_xpath(self.publication_xpath)
return [Publication(client, self._wrap(raw, self.publication_wrapper)) for raw in raws]
class PublicationsJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(PublicationsJSONDAO, self).__init__(raw)
def publications(self, client):
return [Publication(client, { "publication" : data })
for data in self.raw.get("publication", [])]
## ------- End Publications ------ ##
##### Individual Entity Objects ####
## ------ Project ------- ##
class Project(Native):
def __init__(self, client, raw, dao=None):
super(Project, self).__init__(client)
self.dao = dao if dao is not None else client.factory.project(client, raw)
def url(self): return self.dao.url()
def id(self): return self.dao.id()
def title(self): return self.dao.title()
def start(self): return self.dao.start()
def status(self): return self.dao.status()
def end(self): return self.dao.end()
def abstract(self): return self.dao.abstract()
def value(self): return self.dao.value()
def category(self): return self.dao.category()
def reference(self): return self.dao.reference()
def funder(self): return self.dao.funder(self.client)
def lead(self): return self.dao.lead(self.client)
def orgs(self): return self.dao.orgs(self.client)
def people(self): return self.dao.people(self.client)
def collaborators(self): return self.dao.collaborators(self.client)
def collaboration_outputs(self): pass
def intellectual_property_outputs(self): pass
def policy_influence_outputs(self): pass
def product_outputs(self): pass
def research_material_outputs(self): pass
def publications(self): pass
def fetch(self):
updated_proj = self.client.project(self.id())
if updated_proj is not None:
self.dao.raw = updated_proj.dao.raw
return True
return False
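# Sketch of reading a single project's metadata (illustrative; "<uuid>" is a
# placeholder for a real GtR project identifier and `gtr` a GtRNative client):
#
#   p = gtr.project("<uuid>")
#   p.title(), p.value(), p.category(), p.reference()
#   p.lead()      # lead research Organisation, or None
#   p.people()    # list of Person objects on the project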
class ProjectXMLDAO(NativeXMLDAO):
composition_base = "/gtr:projectOverview/gtr:projectComposition"
project_base = composition_base + "/gtr:project"
url_xpath = project_base + "/@url"
id_xpath = project_base + "/gtr:id"
title_xpath = project_base + "/gtr:title"
start_xpath = project_base + "/gtr:fund/gtr:start"
status_xpath = project_base + "/gtr:status"
end_xpath = project_base + "/gtr:fund/gtr:end"
abstract_xpath = project_base + "/gtr:abstractText"
funder_xpath = project_base + "/gtr:fund/gtr:funder/gtr:name"
value_xpath = project_base + "/gtr:fund/gtr:valuePounds"
category_xpath = project_base + "/gtr:grantCategory"
reference_xpath = project_base + "/gtr:grantReference"
lead_xpath = composition_base + "/gtr:leadResearchOrganisation"
orgs_xpath = composition_base + "/gtr:organisations/gtr:organisation"
person_xpath = composition_base + "/gtr:projectPeople/gtr:projectPerson"
collaborator_xpath = composition_base + "/gtr:collaborations/gtr:collaborator"
organisation_wrapper = "organisationOverview"
person_wrapper = "personOverview"
organisation_element = "organisation"
person_element = "person"
def __init__(self, raw):
super(ProjectXMLDAO, self).__init__(raw)
def url(self):
return self._from_xpath(self.url_xpath)
def id(self):
return self._from_xpath(self.id_xpath)
def title(self):
return self._from_xpath(self.title_xpath)
def start(self):
return self._from_xpath(self.start_xpath)
def status(self):
return self._from_xpath(self.status_xpath)
def end(self):
return self._from_xpath(self.end_xpath)
def abstract(self):
return self._from_xpath(self.abstract_xpath)
    # FIXME: unlike ProjectJSONDAO.funder this returns the funder's name text
    # rather than an Organisation, but it takes `client` so Project.funder() can call it
    def funder(self, client):
        return self._from_xpath(self.funder_xpath)
def value(self):
return self._from_xpath(self.value_xpath)
def category(self):
return self._from_xpath(self.category_xpath)
def reference(self):
return self._from_xpath(self.reference_xpath)
def lead(self, client):
raws = self._port(self.lead_xpath, self.organisation_element)
if len(raws) > 0:
return Organisation(client, self._wrap(raws[0], self.organisation_wrapper), None)
return None
def orgs(self, client):
raws = self._do_xpath(self.orgs_xpath)
return [Organisation(client, self._wrap(raw, self.organisation_wrapper), None) for raw in raws]
def people(self, client):
raws = self._port(self.person_xpath, self.person_element)
return [Person(client, self._wrap(raw, self.person_wrapper)) for raw in raws]
def collaborators(self, client):
raws = self._port(self.collaborator_xpath, self.organisation_element)
return [Organisation(client, self._wrap(raw, self.organisation_wrapper), None) for raw in raws]
class ProjectJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(ProjectJSONDAO, self).__init__(raw)
def _composition(self):
return (self.raw.get("projectComposition", {}))
def _project(self):
return (self.raw.get("projectComposition", {})
.get("project", {}))
def url(self):
return self._project().get("url")
def id(self):
return self._project().get("id")
def title(self):
return self._project().get("title")
def start(self):
return self._project().get("fund", {}).get("start")
def status(self):
return self._project().get("status")
def end(self):
return self._project().get("fund", {}).get("end")
def abstract(self):
return self._project().get("abstractText")
def funder(self, client):
return Organisation(client, {"organisationOverview" : {"organisation" : self._project().get("fund", {}).get("funder", {})}}, None)
def value(self):
return self._project().get("fund", {}).get("valuePounds")
def category(self):
return self._project().get("grantCategory")
def reference(self):
return self._project().get("grantReference")
def lead(self, client):
lro = self._composition().get("leadResearchOrganisation")
if lro is not None:
return Organisation(client, {"organisationOverview" : {"organisation" : lro}}, None)
return None
def orgs(self, client):
return [Organisation(client, {"organisationOverview" : {"organisation" : data}}, None)
for data in self._composition().get("organisation", [])]
def people(self, client):
return [Person(client, {"person" : data })
for data in self._composition().get("projectPerson", [])]
def collaborators(self, client):
return [Organisation(client, {"organisationOverview" : {"organisation" : data}}, None)
for data in self._composition().get("collaborator", [])]
## ------ End Project -------- ##
## -------- Organisation -------- ##
class Organisation(NativePaged):
def __init__(self, client, raw, paging, dao=None):
super(Organisation, self).__init__(client, paging)
self.dao = dao if dao is not None else client.factory.organisation(client, raw)
self.custom_dao = dao is not None
def url(self): return self.dao.url()
def id(self): return self.dao.id()
def name(self): return self.dao.name()
def projects(self): return self.dao.projects(self.client)
    def load_all_projects(self):
        # use with caution: this walks every remaining page of projects for the
        # organisation and accumulates them into the current DAO, so any paging
        # features used afterwards will find the state left at the last page
        while True:
            current_projects = self.projects()
            if not self.next_page():
                break
            self.dao.add_projects(current_projects)
"""
if self.paging.next is None or self.paging.next == "":
return
raw, paging = self.client._api(self.paging.next)
if paging is not None:
self.paging = paging
if raw is not None:
interim_dao = None
if self.custom_dao:
interim_dao = deepcopy(self.dao)
interim_dao.raw = raw
else:
interim_dao = client.factory.organisation(client, raw)
projects = raw.get("organisationOverview", {}).get("project", [])
if raw is not None and paging is not None:
self.dao.raw = raw
self.paging = paging
return True
return False
next.get("organisationOverview", {})
"""
def fetch(self):
updated_org = self.client.organisation(self.id())
if updated_org is not None:
self.dao.raw = updated_org.dao.raw
self.paging = updated_org.paging
return True
return False
class OrganisationXMLDAO(NativeXMLDAO):
overview_base = "/gtr:organisationOverview"
url_xpath = overview_base + "/gtr:organisation/@url"
id_xpath = overview_base + "/gtr:organisation/gtr:id"
name_xpath = overview_base + "/gtr:organisation/gtr:name"
def __init__(self, raw):
super(OrganisationXMLDAO, self).__init__(raw)
def url(self):
return self._from_xpath(self.url_xpath)
def id(self):
return self._from_xpath(self.id_xpath)
def name(self):
return self._from_xpath(self.name_xpath)
class OrganisationJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(OrganisationJSONDAO, self).__init__(raw)
def _overview(self):
return self.raw.get("organisationOverview", {})
def _org(self):
return (self.raw.get("organisationOverview", {})
.get("organisation", {}))
def url(self):
return self._org().get("url")
def id(self):
return self._org().get("id")
def name(self):
return self._org().get("name")
def projects(self, client):
return [Project(client, {"projectOverview" : {"project" : data}})
for data in self._overview().get("project", [])]
def add_projects(self, projects):
project_raw = [p.dao.raw['projectOverview']['project'] for p in projects]
self.raw['organisationOverview']['project'] += project_raw
## ------- End Organisation ---------- ##
## -------- Person -------------- ##
class Person(Native):
def __init__(self, client, raw, dao=None):
super(Person, self).__init__(client)
self.dao = dao if dao is not None else client.factory.person(client, raw)
def url(self): return self.dao.url()
def id(self): return self.dao.id()
def isPI(self):
pr = self.dao.get_project_roles()
pi = self.dao.principal_investigator()
return pi and "PRINCIPAL_INVESTIGATOR" in pr
def isCI(self):
pr = self.dao.get_project_roles()
ci = self.dao.co_investigator()
return ci and "CO_INVESTIGATOR" in pr
def get_project_roles(self):
return self.dao.get_project_roles()
def projects(self): return self.dao.projects(self.client)
def fetch(self):
""" will flesh this item out with full data from the API - this WILL lose relation information from the parent object """
updated_person = self.client.person(self.id())
if updated_person is not None:
self.dao.raw = updated_person.dao.raw
return True
return False
def get_full(self):
""" will return a full person object from the API """
return self.client.person(self.id())
class PersonXMLDAO(NativeXMLDAO):
overview_base = "/gtr:personOverview"
person_base = overview_base + "/gtr:person"
url_xpath = person_base + "/@url"
id_xpath = person_base + "/gtr:id"
projects_xpath = overview_base + "/gtr:projectCompositions/gtr:projectComposition"
project_wrapper = "projectOverview"
def __init__(self, raw):
super(PersonXMLDAO, self).__init__(raw)
def url(self):
return self._from_xpath(self.url_xpath)
def id(self):
return self._from_xpath(self.id_xpath)
def projects(self, client):
raws = self._do_xpath(self.projects_xpath)
return [Project(client, self._wrap(raw, self.project_wrapper)) for raw in raws]
class PersonJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(PersonJSONDAO, self).__init__(raw)
    def _person(self):
        if "personOverview" in self.raw:
            return self.raw.get("personOverview", {}).get("person", {})
        return self.raw.get("person", {})
    def _overview(self):
        # needed by projects() below; only a full personOverview response
        # carries the projectComposition list
        return self.raw.get("personOverview", {})
def url(self):
return self._person().get("url")
def id(self):
return self._person().get("id")
def get_project_roles(self):
return self._person().get("projectRole", [])
def principal_investigator(self):
return self._person().get("principalInvestigator", False)
def co_investigator(self):
return self._person().get("coInvestigator", False)
def projects(self, client):
return [Project(client, {"projectOverview" : {"project" : data}})
for data in self._overview().get("projectComposition", [])]
## --------- End Person ----------- ##
## -------- Publication ----------- ##
class Publication(Native):
def __init__(self, client, raw, dao=None):
super(Publication, self).__init__(client)
self.dao = dao if dao is not None else client.factory.publication(client, raw)
def url(self): return self.dao.url()
def id(self): return self.dao.id()
def title(self): return self.dao.title()
def fetch(self):
updated_pub = self.client.publication(self.id())
if updated_pub is not None:
self.dao.raw = updated_pub.dao.raw
return True
return False
class PublicationXMLDAO(NativeXMLDAO):
overview_base = "/gtr:publicationOverview"
publication_base = overview_base + "/gtr:publication"
url_xpath = publication_base + "/@url"
id_xpath = publication_base + "/gtr:id"
title_xpath = publication_base + "/gtr:title"
def __init__(self, raw):
super(PublicationXMLDAO, self).__init__(raw)
def url(self):
return self._from_xpath(self.url_xpath)
def id(self):
return self._from_xpath(self.id_xpath)
def title(self):
return self._from_xpath(self.title_xpath)
class PublicationJSONDAO(NativeJSONDAO):
def __init__(self, raw):
super(PublicationJSONDAO, self).__init__(raw)
def _publication(self):
return self.raw.get("publication", {})
def url(self):
return self._publication().get("url")
def id(self):
return self._publication().get("id")
def title(self):
return self._publication().get("title")
## ---------- End Publication -------- ##
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OpenAIRE configuration file."""
from __future__ import absolute_import, print_function
from invenio_records_rest.facets import terms_filter
from invenio_records_rest.utils import allow_all
OPENAIRE_FUNDREF_LOCAL_SOURCE = 'data/fundref_registry.rdf.gz'
OPENAIRE_FUNDREF_ENDPOINT = 'http://dx.doi.org/10.13039/fundref_registry'
OPENAIRE_CC_SOURCE = 'data/geonames2countrycodes_iso_3166.txt'
OPENAIRE_OAI_LOCAL_SOURCE = '' # Large file that requires separate download
OPENAIRE_OAIPMH_ENDPOINT = 'http://api.openaire.eu/oai_pmh'
OPENAIRE_OAIPMH_DEFAULT_SET = 'projects'
OPENAIRE_FUNDREF_NAMESPACES = {
'dct': 'http://purl.org/dc/terms/',
'fref': 'http://data.crossref.org/fundingdata/terms',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'skos': 'http://www.w3.org/2004/02/skos/core#',
'skosxl': 'http://www.w3.org/2008/05/skos-xl#',
'svf': 'http://data.crossref.org/fundingdata/xml/schema/grant/grant-1.2/',
'xml': 'http://www.w3.org/XML/1998/namespace',
}
OPENAIRE_OAIPMH_NAMESPACES = {
'dri': 'http://www.driver-repository.eu/namespace/dri',
'oai': 'http://www.openarchives.org/OAI/2.0/',
'oaf': 'http://namespace.openaire.eu/oaf',
}
OPENAIRE_SCHEMAS_HOST = 'inveniosoftware.org'
OPENAIRE_SCHEMAS_ENDPOINT = '/schemas'
OPENAIRE_SCHEMAS_DEFAULT_FUNDER = 'funders/funder-v1.0.0.json'
OPENAIRE_SCHEMAS_DEFAULT_GRANT = 'grants/grant-v1.0.0.json'
OPENAIRE_JSONRESOLVER_GRANTS_HOST = 'inveniosoftware.org'
OPENAIRE_GRANTS_SPECS = [
'ARCProjects',
'ECProjects',
'FCTProjects',
'FWFProjects',
'HRZZProjects',
'MESTDProjects',
'MZOSProjects',
'NHMRCProjects',
'NIHProjects',
'NSFProjects',
'NWOProjects',
'SFIProjects',
'SNSFProjects',
'WTProjects',
]
OPENAIRE_FIXED_FUNDERS = {
'aka_________::AKA': 'http://dx.doi.org/10.13039/501100002341',
'arc_________::ARC': 'http://dx.doi.org/10.13039/501100000923',
'ec__________::EC': 'http://dx.doi.org/10.13039/501100000780',
'fct_________::FCT': 'http://dx.doi.org/10.13039/501100001871',
'fwf_________::FWF': 'http://dx.doi.org/10.13039/501100002428',
'irb_hr______::HRZZ': 'http://dx.doi.org/10.13039/501100004488',
'irb_hr______::MZOS': 'http://dx.doi.org/10.13039/501100006588',
'mestd_______::MESTD': 'http://dx.doi.org/10.13039/501100004564',
'nhmrc_______::NHMRC': 'http://dx.doi.org/10.13039/501100000925',
'nih_________::NIH': 'http://dx.doi.org/10.13039/100000002',
'nsf_________::NSF': 'http://dx.doi.org/10.13039/100000001',
'nwo_________::NWO': 'http://dx.doi.org/10.13039/501100003246',
'rcuk________::RCUK': 'http://dx.doi.org/10.13039/501100000690',
'sfi_________::SFI': 'http://dx.doi.org/10.13039/501100001602',
'snsf________::SNSF': 'http://dx.doi.org/10.13039/501100001711',
'tubitakf____::tubitak': 'http://dx.doi.org/10.13039/501100004410',
'wt__________::WT': 'http://dx.doi.org/10.13039/100004440',
}
OPENAIRE_REST_ENDPOINTS = dict(
frdoi=dict(
pid_type='frdoi',
pid_minter='openaire_funder_minter',
pid_fetcher='openaire_funder_fetcher',
list_route='/funders/',
item_route='/funders/<pidpath(frdoi):pid_value>',
search_index='funders',
search_type=None,
record_serializers={
'application/json': (
'invenio_records_rest.serializers:json_v1_response'),
},
search_serializers={
'application/json': (
'invenio_records_rest.serializers:json_v1_search'),
},
default_media_type='application/json',
suggesters=dict(
text=dict(completion=dict(
field='suggest'
))
),
read_permission_factory_imp=allow_all,
),
grant=dict(
pid_type='grant',
pid_minter='openaire_grant_minter',
pid_fetcher='openaire_grant_fetcher',
list_route='/grants/',
item_route='/grants/<pidpath(grant):pid_value>',
search_index='grants',
search_type=None,
record_serializers={
'application/json': (
'invenio_records_rest.serializers:json_v1_response'),
},
search_serializers={
'application/json': (
'invenio_records_rest.serializers:json_v1_search'),
},
default_media_type='application/json',
suggesters=dict(
text=dict(completion=dict(
field='suggest',
contexts='funder',
))
),
read_permission_factory_imp=allow_all,
),
)
OPENAIRE_REST_SORT_OPTIONS = dict(
funders=dict(
bestmatch=dict(
fields=['-_score'],
title='Best match',
default_order='asc',
order=1,
),
name=dict(
fields=['name'],
title='Name',
default_order='asc',
order=2,
),
),
grants=dict(
bestmatch=dict(
fields=['-_score'],
title='Best match',
default_order='asc',
order=1,
),
startdate=dict(
fields=['startdate'],
title='Start date',
default_order='asc',
order=2,
),
enddate=dict(
fields=['enddate'],
title='End date',
default_order='asc',
order=2,
),
)
)
#: Default sort for records REST API.
OPENAIRE_REST_DEFAULT_SORT = dict(
grants=dict(query='bestmatch', noquery='bestmatch'),
funders=dict(query='bestmatch', noquery='bestmatch'),
)
OPENAIRE_REST_FACETS = dict(
funders=dict(
aggs=dict(
country=dict(
terms=dict(field='country'),
),
type=dict(
terms=dict(field='type'),
),
),
filters=dict(
country=terms_filter('country'),
type=terms_filter('type'),
),
),
grants=dict(
aggs=dict(
funder=dict(
terms=dict(field='funder.acronyms'),
),
),
filters=dict(
funder=terms_filter('funder.acronyms'),
),
)
)
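# Sketch of how the facet configuration above is consumed (assumes the
# invenio-records-rest facets machinery; the request value is illustrative):
# a query string such as ``?type=national`` is routed through the matching
# entry in ``filters``, roughly ``terms_filter('type')(['national'])``, which
# builds an Elasticsearch ``terms`` query on the ``type`` field, while ``aggs``
# defines the aggregations returned alongside the search results.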
|
|
from __future__ import unicode_literals
from itertools import product
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
import param
from ...core import OrderedDict
from ...core.util import (match_spec, unique_iterator, safe_unicode,
basestring, max_range, unicode)
from ...element import Points, Raster, Polygons, HeatMap
from ..util import compute_sizes, get_sideplot_ranges
from .element import ElementPlot, ColorbarPlot, LegendPlot
from .path import PathPlot
from .plot import AdjoinedPlot
class ChartPlot(ElementPlot):
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
class CurvePlot(ChartPlot):
"""
CurvePlot can plot Curve and ViewMaps of Curve, which can be
displayed as a single frame or animation. Axes, titles and legends
are automatically generated from dim_info.
If the dimension is set to cyclic in the dim_info it will rotate
the curve so that minimum y values are at the minimum x value to
make the plots easier to interpret.
"""
autotick = param.Boolean(default=False, doc="""
Whether to let matplotlib automatically compute tick marks
or to allow the user to control tick marks.""")
relative_labels = param.Boolean(default=False, doc="""
If plotted quantity is cyclic and center_cyclic is enabled,
will compute tick labels relative to the center.""")
show_frame = param.Boolean(default=False, doc="""
Disabled by default for clarity.""")
show_grid = param.Boolean(default=True, doc="""
Enable axis grid.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker']
_plot_methods = dict(single='plot')
def get_data(self, element, ranges, style):
xs = element.dimension_values(0)
ys = element.dimension_values(1)
return (xs, ys), style, {}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
artist.set_xdata(xs)
artist.set_ydata(ys)
return axis_kwargs
class ErrorPlot(ChartPlot):
"""
    ErrorPlot plots the ErrorBars Element type, supporting
    both horizontal and vertical error bars via the 'horizontal'
    plot option.
"""
style_opts = ['ecolor', 'elinewidth', 'capsize', 'capthick',
'barsabove', 'lolims', 'uplims', 'xlolims',
'errorevery', 'xuplims', 'alpha', 'linestyle',
'linewidth', 'markeredgecolor', 'markeredgewidth',
'markerfacecolor', 'markersize', 'solid_capstyle',
'solid_joinstyle', 'dashes', 'color']
_plot_methods = dict(single='errorbar')
def init_artists(self, ax, plot_data, plot_kwargs):
_, (bottoms, tops), verts = ax.errorbar(*plot_data, **plot_kwargs)
return {'bottoms': bottoms, 'tops': tops, 'verts': verts[0]}
def get_data(self, element, ranges, style):
style['fmt'] = 'none'
dims = element.dimensions()
xs, ys = (element.dimension_values(i) for i in range(2))
yerr = element.array(dimensions=dims[2:4])
style['yerr'] = yerr.T if len(dims) > 3 else yerr
return (xs, ys), style, {}
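    # Data layout sketch: ErrorBars columns are (x, y, negative error[, positive
    # error]); when only three columns are present the same magnitude is used in
    # both directions (see update_handles below). Illustrative:
    #   hv.ErrorBars([(0, 1, 0.2), (1, 2, 0.4)])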
def update_handles(self, key, axis, element, ranges, style):
bottoms = self.handles['bottoms']
tops = self.handles['tops']
verts = self.handles['verts']
paths = verts.get_paths()
(xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
neg_error = element.dimension_values(2)
pos_error = element.dimension_values(3) if len(element.dimensions()) > 3 else neg_error
if self.invert_axes:
bdata = xs - neg_error
tdata = xs + pos_error
tops.set_xdata(bdata)
tops.set_ydata(ys)
bottoms.set_xdata(tdata)
bottoms.set_ydata(ys)
for i, path in enumerate(paths):
path.vertices = np.array([[bdata[i], ys[i]],
[tdata[i], ys[i]]])
else:
bdata = ys - neg_error
tdata = ys + pos_error
bottoms.set_xdata(xs)
bottoms.set_ydata(bdata)
tops.set_xdata(xs)
tops.set_ydata(tdata)
for i, path in enumerate(paths):
path.vertices = np.array([[xs[i], bdata[i]],
[xs[i], tdata[i]]])
return axis_kwargs
class AreaPlot(ChartPlot):
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
style_opts = ['color', 'facecolor', 'alpha', 'edgecolor', 'linewidth',
'hatch', 'linestyle', 'joinstyle',
'fill', 'capstyle', 'interpolate']
_plot_methods = dict(single='fill_between')
def get_data(self, element, ranges, style):
xs = element.dimension_values(0)
ys = [element.dimension_values(vdim) for vdim in element.vdims]
return tuple([xs]+ys), style, {}
def init_artists(self, ax, plot_data, plot_kwargs):
fill_fn = ax.fill_betweenx if self.invert_axes else ax.fill_between
stack = fill_fn(*plot_data, **plot_kwargs)
return {'artist': stack}
def get_extents(self, element, ranges):
vdims = element.vdims
vdim = vdims[0].name
ranges[vdim] = max_range([ranges[vd.name] for vd in vdims])
return super(AreaPlot, self).get_extents(element, ranges)
class SpreadPlot(AreaPlot):
"""
SpreadPlot plots the Spread Element type.
"""
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
def __init__(self, element, **params):
super(SpreadPlot, self).__init__(element, **params)
self._extents = None
def get_data(self, element, ranges, style):
xs = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
return (xs, mean-neg_error, mean+pos_error), style, {}
class HistogramPlot(ChartPlot):
"""
HistogramPlot can plot DataHistograms and ViewMaps of
DataHistograms, which can be displayed as a single frame or
animation.
"""
show_frame = param.Boolean(default=False, doc="""
Disabled by default for clarity.""")
show_grid = param.Boolean(default=False, doc="""
Whether to overlay a grid on the axis.""")
style_opts = ['alpha', 'color', 'align', 'visible', 'facecolor',
'edgecolor', 'log', 'capsize', 'error_kw', 'hatch']
def __init__(self, histograms, **params):
self.center = False
self.cyclic = False
super(HistogramPlot, self).__init__(histograms, **params)
if self.invert_axes:
self.axis_settings = ['ylabel', 'xlabel', 'yticks']
else:
self.axis_settings = ['xlabel', 'ylabel', 'xticks']
val_dim = self.hmap.last.get_dimension(1)
self.cyclic_range = val_dim.range if val_dim.cyclic else None
def initialize_plot(self, ranges=None):
hist = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
el_ranges = match_spec(hist, ranges)
# Get plot ranges and values
edges, hvals, widths, lims = self._process_hist(hist)
if self.invert_axes:
self.offset_linefn = self.handles['axis'].axvline
self.plotfn = self.handles['axis'].barh
else:
self.offset_linefn = self.handles['axis'].axhline
self.plotfn = self.handles['axis'].bar
# Plot bars and make any adjustments
style = self.style[self.cyclic_index]
legend = hist.label if self.show_legend else ''
bars = self.plotfn(edges, hvals, widths, zorder=self.zorder, label=legend, **style)
self.handles['artist'] = self._update_plot(self.keys[-1], hist, bars, lims, ranges) # Indexing top
ticks = self._compute_ticks(hist, edges, widths, lims)
ax_settings = self._process_axsettings(hist, lims, ticks)
return self._finalize_axis(self.keys[-1], ranges=el_ranges, **ax_settings)
def _process_hist(self, hist):
"""
Get data from histogram, including bin_ranges and values.
"""
self.cyclic = hist.get_dimension(0).cyclic
edges = hist.edges[:-1]
hist_vals = np.array(hist.values)
widths = [hist._width] * len(hist) if getattr(hist, '_width', None) else np.diff(hist.edges)
lims = hist.range(0) + hist.range(1)
return edges, hist_vals, widths, lims
def _compute_ticks(self, element, edges, widths, lims):
"""
Compute the ticks either as cyclic values in degrees or as roughly
evenly spaced bin centers.
"""
if self.xticks is None or not isinstance(self.xticks, int):
return None
if self.cyclic:
x0, x1, _, _ = lims
xvals = np.linspace(x0, x1, self.xticks)
labels = ["%.0f" % np.rad2deg(x) + '\N{DEGREE SIGN}' for x in xvals]
elif self.xticks:
dim = element.get_dimension(0)
inds = np.linspace(0, len(edges), self.xticks, dtype=np.int)
edges = list(edges) + [edges[-1] + widths[-1]]
xvals = [edges[i] for i in inds]
labels = [dim.pprint_value(v) for v in xvals]
return [xvals, labels]
def get_extents(self, element, ranges):
x0, y0, x1, y1 = super(HistogramPlot, self).get_extents(element, ranges)
y0 = np.nanmin([0, y0])
return (x0, y0, x1, y1)
def _process_axsettings(self, hist, lims, ticks):
"""
Get axis settings options including ticks, x- and y-labels
and limits.
"""
axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
return axis_settings
def _update_plot(self, key, hist, bars, lims, ranges):
"""
        Process the bars; can be subclassed to manually adjust bars
        after they have been plotted.
"""
return bars
def _update_artists(self, key, hist, edges, hvals, widths, lims, ranges):
"""
Update all the artists in the histogram. Subclassable to
allow updating of further artists.
"""
plot_vals = zip(self.handles['artist'], edges, hvals, widths)
for bar, edge, height, width in plot_vals:
if self.invert_axes:
bar.set_y(edge)
bar.set_width(height)
bar.set_height(width)
else:
bar.set_x(edge)
bar.set_height(height)
bar.set_width(width)
def update_handles(self, key, axis, element, ranges, style):
# Process values, axes and style
edges, hvals, widths, lims = self._process_hist(element)
ticks = self._compute_ticks(element, edges, widths, lims)
ax_settings = self._process_axsettings(element, lims, ticks)
self._update_artists(key, element, edges, hvals, widths, lims, ranges)
return ax_settings
class SideHistogramPlot(AdjoinedPlot, HistogramPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
offset = param.Number(default=0.2, bounds=(0,1), doc="""
Histogram value offset for a colorbar.""")
show_grid = param.Boolean(default=True, doc="""
Whether to overlay a grid on the axis.""")
def _process_hist(self, hist):
"""
Subclassed to offset histogram by defined amount.
"""
edges, hvals, widths, lims = super(SideHistogramPlot, self)._process_hist(hist)
offset = self.offset * lims[3]
hvals *= 1-self.offset
hvals += offset
lims = lims[0:3] + (lims[3] + offset,)
return edges, hvals, widths, lims
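    # Worked example (illustrative): with the default offset of 0.2 and a top
    # bin value of 100, heights become 0.8 * value + 20, so every bar reaches at
    # least y=20; the strip below the offset line drawn in _update_plot then
    # forms a continuous coloured band acting as the adjoined colorbar.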
def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
super(SideHistogramPlot, self)._update_artists(n, element, edges, hvals, widths, lims, ranges)
self._update_plot(n, element, self.handles['artist'], lims, ranges)
def _update_plot(self, key, element, bars, lims, ranges):
"""
Process the bars and draw the offset line as necessary. If a
color map is set in the style of the 'main' ViewableElement object, color
the bars appropriately, respecting the required normalization
settings.
"""
main = self.adjoined.main
_, y1 = element.range(1)
offset = self.offset * y1
range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
if isinstance(range_item, (Raster, Points, Polygons, HeatMap)):
style = self.lookup_options(range_item, 'style')[self.cyclic_index]
cmap = cm.get_cmap(style.get('cmap'))
main_range = style.get('clims', main_range)
else:
cmap = None
if offset and ('offset_line' not in self.handles):
self.handles['offset_line'] = self.offset_linefn(offset,
linewidth=1.0,
color='k')
elif offset:
self._update_separator(offset)
if cmap is not None:
self._colorize_bars(cmap, bars, element, main_range, dim)
return bars
def get_extents(self, element, ranges):
x0, _, x1, _ = element.extents
_, y1 = element.range(1)
return (x0, 0, x1, y1)
def _colorize_bars(self, cmap, bars, element, main_range, dim):
"""
Use the given cmap to color the bars, applying the correct
color ranges as necessary.
"""
cmap_range = main_range[1] - main_range[0]
lower_bound = main_range[0]
colors = np.array(element.dimension_values(dim))
colors = (colors - lower_bound) / (cmap_range)
for c, bar in zip(colors, bars):
bar.set_facecolor(cmap(c))
bar.set_clip_on(False)
def _update_separator(self, offset):
"""
Compute colorbar offset and update separator line
if map is non-zero.
"""
offset_line = self.handles['offset_line']
if offset == 0:
offset_line.set_visible(False)
else:
offset_line.set_visible(True)
if self.invert_axes:
offset_line.set_xdata(offset)
else:
offset_line.set_ydata(offset)
class PointPlot(ChartPlot, ColorbarPlot):
"""
Note that the 'cmap', 'vmin' and 'vmax' style arguments control
how point magnitudes are rendered to different colors.
"""
color_index = param.ClassSelector(default=3, class_=(basestring, int),
allow_None=True, doc="""
      Index of the dimension from which the color will be drawn.""")
size_index = param.ClassSelector(default=2, class_=(basestring, int),
allow_None=True, doc="""
      Index of the dimension from which the sizes will be drawn.""")
scaling_method = param.ObjectSelector(default="area",
objects=["width", "area"],
doc="""
Determines whether the `scaling_factor` should be applied to
the width or area of each point (default: "area").""")
scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
Scaling factor which is applied to either the width or area
of each point, depending on the value of `scaling_method`.""")
show_grid = param.Boolean(default=True, doc="""
Whether to draw grid lines at the tick positions.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'size', 'visible',
'cmap', 'vmin', 'vmax']
_disabled_opts = ['size']
_plot_methods = dict(single='scatter')
def get_data(self, element, ranges, style):
xs, ys = (element.dimension_values(i) for i in range(2))
self._compute_styles(element, ranges, style)
return (xs, ys), style, {}
def _compute_styles(self, element, ranges, style):
cdim = element.get_dimension(self.color_index)
color = style.pop('color', None)
if cdim:
cs = element.dimension_values(self.color_index)
style['c'] = cs
self._norm_kwargs(element, ranges, style, cdim)
elif color:
style['c'] = color
style['edgecolors'] = style.pop('edgecolors', style.pop('edgecolor', 'none'))
if element.get_dimension(self.size_index):
sizes = element.dimension_values(self.size_index)
ms = style.pop('s') if 's' in style else plt.rcParams['lines.markersize']
style['s'] = compute_sizes(sizes, self.size_fn, self.scaling_factor,
self.scaling_method, ms)
style['edgecolors'] = style.pop('edgecolors', 'none')
def update_handles(self, key, axis, element, ranges, style):
paths = self.handles['artist']
(xs, ys), style, _ = self.get_data(element, ranges, style)
paths.set_offsets(np.column_stack([xs, ys]))
sdim = element.get_dimension(self.size_index)
if sdim:
paths.set_sizes(style['s'])
cdim = element.get_dimension(self.color_index)
if cdim:
paths.set_clim((style['vmin'], style['vmax']))
paths.set_array(style['c'])
if 'norm' in style:
paths.norm = style['norm']
class VectorFieldPlot(ColorbarPlot):
"""
Renders vector fields in sheet coordinates. The vectors are
expressed in polar coordinates and may be displayed according to
angle alone (with some common, arbitrary arrow length) or may be
true polar vectors.
The color or magnitude can be mapped onto any dimension using the
color_index and size_index.
The length of the arrows is controlled by the 'scale' style
option. The scaling of the arrows may also be controlled via the
normalize_lengths and rescale_lengths plot option, which will
normalize the lengths to a maximum of 1 and scale them according
to the minimum distance respectively.
"""
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will be drawn.""")
size_index = param.ClassSelector(default=3, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the sizes will be drawn.""")
arrow_heads = param.Boolean(default=True, doc="""
Whether or not to draw arrow heads. If arrowheads are enabled,
they may be customized with the 'headlength' and
'headaxislength' style options.""")
normalize_lengths = param.Boolean(default=True, doc="""
Whether to normalize vector magnitudes automatically. If False,
it will be assumed that the lengths have already been correctly
normalized.""")
rescale_lengths = param.Boolean(default=True, doc="""
Whether the lengths will be rescaled to take into account the
smallest non-zero distance between two vectors.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'visible', 'cmap',
'scale', 'headlength', 'headaxislength', 'pivot',
'width','headwidth']
_plot_methods = dict(single='quiver')
def __init__(self, *args, **params):
super(VectorFieldPlot, self).__init__(*args, **params)
self._min_dist = self._get_map_info(self.hmap)
def _get_map_info(self, vmap):
"""
Get the minimum sampling distance across all elements in the map.
"""
return np.min([self._get_min_dist(vfield) for vfield in vmap])
def _get_min_dist(self, vfield):
"Get the minimum sampling distance."
xys = vfield.array([0, 1]).view(dtype=np.complex128)
m, n = np.meshgrid(xys, xys)
distances = np.abs(m-n)
np.fill_diagonal(distances, np.inf)
return distances[distances>0].min()
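# Note (comment only): viewing the (x, y) columns as complex numbers lets
# np.abs(m - n) compute all pairwise Euclidean distances at once; the
# diagonal is masked with inf and zero distances are filtered out, so the
# minimum is taken over distinct sample positions only.
# For example, points (0, 0) and (3, 4) give abs(0j - (3+4j)) = 5.0.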
def get_data(self, element, ranges, style):
input_scale = style.pop('scale', 1.0)
xs = element.dimension_values(0) if len(element.data) else []
ys = element.dimension_values(1) if len(element.data) else []
radians = element.dimension_values(2) if len(element.data) else []
angles = list(np.rad2deg(radians))
if self.rescale_lengths:
input_scale = input_scale / self._min_dist
mag_dim = element.get_dimension(self.size_index)
if mag_dim:
magnitudes = element.dimension_values(mag_dim)
_, max_magnitude = ranges[mag_dim.name]
if self.normalize_lengths and max_magnitude != 0:
magnitudes = magnitudes / max_magnitude
else:
magnitudes = np.ones(len(xs))
args = (xs, ys, magnitudes, [0.0] * len(element))
if self.color_index:
colors = element.dimension_values(self.color_index)
args += (colors,)
cdim = element.get_dimension(self.color_index)
self._norm_kwargs(element, ranges, style, cdim)
style['clim'] = (style.pop('vmin'), style.pop('vmax'))
style.pop('color', None)
if 'pivot' not in style: style['pivot'] = 'mid'
if not self.arrow_heads:
style['headaxislength'] = 0
style.update(dict(scale=input_scale, angles=angles,
units='x', scale_units='x'))
return args, style, {}
def update_handles(self, key, axis, element, ranges, style):
args, style, axis_kwargs = self.get_data(element, ranges, style)
# Set magnitudes, angles and colors if supplied.
quiver = self.handles['artist']
quiver.set_offsets(np.column_stack(args[:2]))
quiver.U = args[2]
quiver.angles = style['angles']
if self.color_index:
quiver.set_array(args[-1])
quiver.set_clim(style['clim'])
return axis_kwargs
class BarPlot(LegendPlot):
group_index = param.Integer(default=0, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into groups.""")
category_index = param.Integer(default=1, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into categories.""")
stack_index = param.Integer(default=2, doc="""
Index of the dimension in the supplied Bars
Element, which will be stacked.""")
padding = param.Number(default=0.2, doc="""
Defines the padding between groups.""")
color_by = param.List(default=['category'], doc="""
Defines how the Bar elements are colored. Valid options include
any permutation of 'group', 'category' and 'stack'.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
xticks = param.Integer(0, precedence=-1)
style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
'log', 'facecolor', 'capsize', 'error_kw', 'hatch']
legend_specs = dict(LegendPlot.legend_specs, **{
'top': dict(bbox_to_anchor=(0., 1.02, 1., .102),
ncol=3, loc=3, mode="expand", borderaxespad=0.),
'bottom': dict(ncol=3, mode="expand", loc=2,
bbox_to_anchor=(0., -0.4, 1., .102),
borderaxespad=0.1)})
_dimensions = OrderedDict([('group', 0),
('category',1),
('stack',2)])
def __init__(self, element, **params):
super(BarPlot, self).__init__(element, **params)
self.values, self.bar_dimensions = self._get_values()
def _get_values(self):
"""
Get unique index value for each bar
"""
gi, ci, si = self.group_index, self.category_index, self.stack_index
ndims = self.hmap.last.ndims
dims = self.hmap.last.kdims
dimensions = []
values = {}
for vidx, vtype in zip([gi, ci, si], self._dimensions):
if vidx < ndims:
dim = dims[vidx]
dimensions.append(dim)
vals = self.hmap.dimension_values(dim.name)
else:
dimensions.append(None)
vals = [None]
values[vtype] = list(unique_iterator(vals))
return values, dimensions
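# Illustrative note (comment only; the Bars data below is an assumption):
# for a Bars element with kdims ['Group', 'Category'] and rows
# ('A', 'x', 1), ('A', 'y', 2), ('B', 'x', 3), _get_values() returns
# values = {'group': ['A', 'B'], 'category': ['x', 'y'], 'stack': [None]}
# because stack_index (2) falls outside the element's two key dimensions.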
def _compute_styles(self, element, style_groups):
"""
Computes color and hatch combinations by
any combination of the 'group', 'category'
and 'stack'.
"""
style = self.lookup_options(element, 'style')[0]
sopts = []
for sopt in ['color', 'hatch']:
if sopt in style:
sopts.append(sopt)
style.pop(sopt, None)
color_groups = []
for sg in style_groups:
color_groups.append(self.values[sg])
style_product = list(product(*color_groups))
wrapped_style = self.lookup_options(element, 'style').max_cycles(len(style_product))
color_groups = {k:tuple(wrapped_style[n][sopt] for sopt in sopts)
for n,k in enumerate(style_product)}
return style, color_groups, sopts
def get_extents(self, element, ranges):
ngroups = len(self.values['group'])
vdim = element.vdims[0].name
if self.stack_index in range(element.ndims):
return 0, 0, ngroups, np.NaN
else:
vrange = ranges[vdim]
return 0, np.nanmin([vrange[0], 0]), ngroups, vrange[1]
def initialize_plot(self, ranges=None):
element = self.hmap.last
vdim = element.vdims[0]
axis = self.handles['axis']
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(element, ranges)
self.handles['artist'], self.handles['xticks'], xdims = self._create_bars(axis, element)
return self._finalize_axis(key, ranges=ranges, xticks=self.handles['xticks'],
dimensions=[xdims, vdim])
def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
"""
Apply ticks with appropriate offsets.
"""
yalignments = None
if xticks is not None:
ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0]))
xticks = (list(ticks), list(labels))
super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
if yalignments:
for t, y in zip(axis.get_xticklabels(), yalignments):
t.set_y(y)
def _create_bars(self, axis, element):
# Get style and dimension information
values = self.values
gi, ci, si = self.group_index, self.category_index, self.stack_index
gdim, cdim, sdim = [element.kdims[i] if i < element.ndims else None
for i in (gi, ci, si) ]
indices = dict(zip(self._dimensions, (gi, ci, si)))
style_groups = [sg for sg in self.color_by if indices[sg] < element.ndims]
style_opts, color_groups, sopts = self._compute_styles(element, style_groups)
dims = element.dimensions('key', label=True)
ndims = len(dims)
xdims = [d for d in [cdim, gdim] if d is not None]
# Compute widths
width = (1-(2.*self.padding)) / len(values['category'])
# Initialize variables
xticks = []
val_key = [None] * ndims
style_key = [None] * len(style_groups)
label_key = [None] * len(style_groups)
labels = []
bars = {}
# Iterate over group, category and stack dimension values
# computing xticks and drawing bars and applying styles
for gidx, grp_name in enumerate(values['group']):
if grp_name is not None:
grp = gdim.pprint_value(grp_name)
if 'group' in style_groups:
idx = style_groups.index('group')
label_key[idx] = str(grp)
style_key[idx] = grp_name
val_key[gi] = grp_name
if ci < ndims:
yalign = -0.04
else:
yalign = 0
xticks.append((gidx+0.5, grp, yalign))
for cidx, cat_name in enumerate(values['category']):
xpos = gidx+self.padding+(cidx*width)
if cat_name is not None:
cat = cdim.pprint_value(cat_name)
if 'category' in style_groups:
idx = style_groups.index('category')
label_key[idx] = str(cat)
style_key[idx] = cat_name
val_key[ci] = cat_name
xticks.append((xpos+width/2., cat, 0))
prev = 0
for stk_name in values['stack']:
if stk_name is not None:
if 'stack' in style_groups:
idx = style_groups.index('stack')
stk = sdim.pprint_value(stk_name)
label_key[idx] = str(stk)
style_key[idx] = stk_name
val_key[si] = stk_name
vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
val = float(vals[0]) if len(vals) else np.NaN
label = ', '.join(label_key)
style = dict(style_opts, label='' if label in labels else label,
**dict(zip(sopts, color_groups[tuple(style_key)])))
bar = axis.bar([xpos], [val], width=width, bottom=prev,
**style)
# Update variables
bars[tuple(val_key)] = bar
prev += val if np.isfinite(val) else 0
labels.append(label)
title = [str(element.kdims[indices[cg]])
for cg in self.color_by if indices[cg] < ndims]
if self.show_legend and any(len(l) for l in labels):
leg_spec = self.legend_specs[self.legend_position]
if self.legend_cols: leg_spec['ncol'] = self.legend_cols
axis.legend(title=', '.join(title), **leg_spec)
return bars, xticks, xdims
def update_handles(self, key, axis, element, ranges, style):
dims = element.dimensions('key', label=True)
ndims = len(dims)
ci, gi, si = self.category_index, self.group_index, self.stack_index
val_key = [None] * ndims
for g in self.values['group']:
if g is not None: val_key[gi] = g
for c in self.values['category']:
if c is not None: val_key[ci] = c
prev = 0
for s in self.values['stack']:
if s is not None: val_key[si] = s
bar = self.handles['artist'].get(tuple(val_key))
if bar:
vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
height = float(vals[0]) if len(vals) else np.NaN
bar[0].set_height(height)
bar[0].set_y(prev)
prev += height if np.isfinite(height) else 0
return {'xticks': self.handles['xticks']}
class SpikesPlot(PathPlot, ColorbarPlot):
aspect = param.Parameter(default='square', doc="""
The aspect ratio mode of the plot. Allows setting an
explicit aspect ratio as width/height as well as
'square' and 'equal' options.""")
color_index = param.ClassSelector(default=1, class_=(basestring, int), doc="""
Index of the dimension from which the color will be drawn.""")
spike_length = param.Number(default=0.1, doc="""
The length of each spike if Spikes object is one dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
style_opts = PathPlot.style_opts + ['cmap']
def init_artists(self, ax, plot_args, plot_kwargs):
line_segments = LineCollection(*plot_args, **plot_kwargs)
ax.add_collection(line_segments)
return {'artist': line_segments}
def get_extents(self, element, ranges):
l, b, r, t = super(SpikesPlot, self).get_extents(element, ranges)
ndims = len(element.dimensions(label=True))
max_length = t if ndims > 1 else self.spike_length
return (l, self.position, r, self.position+max_length)
def get_data(self, element, ranges, style):
dimensions = element.dimensions(label=True)
ndims = len(dimensions)
pos = self.position
if ndims > 1:
data = [[(x, pos), (x, pos+y)] for x, y in element.array()]
else:
height = self.spike_length
data = [[(x[0], pos), (x[0], pos+height)] for x in element.array()]
if self.invert_axes:
data = [(line[0][::-1], line[1][::-1]) for line in data]
cdim = element.get_dimension(self.color_index)
if cdim:
style['array'] = element.dimension_values(cdim)
self._norm_kwargs(element, ranges, style, cdim)
style['clim'] = style.pop('vmin'), style.pop('vmax')
return (np.array(data),), style, {}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(data,), kwargs, axis_kwargs = self.get_data(element, ranges, style)
artist.set_paths(data)
artist.set_visible(style.get('visible', True))
if 'array' in kwargs:
artist.set_clim((kwargs['vmin'], kwargs['vmax']))
artist.set_array(kwargs['array'])
if 'norm' in kwargs:
artist.norm = kwargs['norm']
return axis_kwargs
class SideSpikesPlot(AdjoinedPlot, SpikesPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
subplot_size = param.Number(default=0.1, doc="""
The size of the subplots expressed as a fraction of the main plot.""")
spike_length = param.Number(default=1, doc="""
The length of each spike if Spikes object is one dimensional.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare', 'left-bare' and 'right-bare'.""")
class BoxPlot(ChartPlot):
"""
BoxPlot draws box-whisker plots of the Element's value dimension,
grouping the data by any key dimensions and rendering each group via
matplotlib's boxplot method; orientation follows the invert_axes option.
"""
style_opts = ['notch', 'sym', 'whis', 'bootstrap',
'conf_intervals', 'widths', 'showmeans',
'show_caps', 'showfliers', 'boxprops',
'whiskerprops', 'capprops', 'flierprops',
'medianprops', 'meanprops', 'meanline']
_plot_methods = dict(single='boxplot')
def get_extents(self, element, ranges):
return (np.NaN,)*4
def get_data(self, element, ranges, style):
groups = element.groupby(element.kdims)
data, labels = [], []
groups = groups.data.items() if element.kdims else [(element.label, element)]
for key, group in groups:
if element.kdims:
label = ','.join([unicode(safe_unicode(d.pprint_value(v)))
for d, v in zip(element.kdims, key)])
else:
label = key
data.append(group[group.vdims[0]])
labels.append(label)
style['labels'] = labels
style.pop('zorder')
style.pop('label')
style['vert'] = not self.invert_axes
format_kdims = [kd(value_format=None) for kd in element.kdims]
return (data,), style, {'dimensions': [format_kdims,
element.vdims[0]]}
def teardown_handles(self):
for group in self.handles['artist'].values():
for v in group:
v.remove()
class SideBoxPlot(AdjoinedPlot, BoxPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare', 'left-bare' and 'right-bare'.""")
def __init__(self, *args, **kwargs):
super(SideBoxPlot, self).__init__(*args, **kwargs)
if self.adjoined:
self.invert_axes = not self.invert_axes
|
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` script.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
# Stdlib imports
import os
import re
# Functions and classes
class ApiDocWriter(object):
''' Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format'''
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
):
''' Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package
rst_extension : string, optional
Extension for reST files, default '.rst'
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
result in ``.util`` being passed for searching by these
regexps. If None, the default is used. Default is:
['\.tests$']
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
``sphinx.util.console`` results in the string to search of
``.util.console``
If None, the default is used. Default is:
['\.setup$', '\._']
'''
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
'''
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
''' Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
'''
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([],[])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f)
f.close()
return functions, classes
def _parse_lines(self, linesource):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
S : string
Contents of API doc
'''
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
if not len(functions) and not len(classes):
print('WARNING: Empty - %s' % uri)  # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title+'\n'+ self.rst_section_levels[1] * len(chap_title)
+ '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
if '.' in uri:
title = 'Module: :mod:`' + uri_short + '`'
else:
title = ':mod:`' + uri_short + '`'
ad += title + '\n' + self.rst_section_levels[2] * len(title)
if len(classes):
ad += '\nInheritance diagram for ``%s``:\n\n' % uri
ad += '.. inheritance-diagram:: %s \n' % uri
ad += ' :parts: 3\n'
ad += '\n.. automodule:: ' + uri + '\n'
ad += '\n.. currentmodule:: ' + uri + '\n'
multi_class = len(classes) > 1
multi_fx = len(functions) > 1
if multi_class:
ad += '\n' + 'Classes' + '\n' + \
self.rst_section_levels[2] * 7 + '\n'
elif len(classes) and multi_fx:
ad += '\n' + 'Class' + '\n' + \
self.rst_section_levels[2] * 5 + '\n'
for c in classes:
ad += '\n:class:`' + c + '`\n' \
+ self.rst_section_levels[multi_class + 2 ] * \
(len(c)+9) + '\n\n'
ad += '\n.. autoclass:: ' + c + '\n'
# must NOT exclude from index to keep cross-refs working
ad += ' :members:\n' \
' :undoc-members:\n' \
' :show-inheritance:\n' \
' :inherited-members:\n' \
'\n' \
' .. automethod:: __init__\n'
if multi_fx:
ad += '\n' + 'Functions' + '\n' + \
self.rst_section_levels[2] * 9 + '\n\n'
elif len(functions) and multi_class:
ad += '\n' + 'Function' + '\n' + \
self.rst_section_levels[2] * 8 + '\n\n'
for f in functions:
# must NOT exclude from index to keep cross-refs working
ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
''' Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
'''
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"'
% match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
'''
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path,
dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri) and
self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri) and
self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules,outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
outfile = os.path.join(outdir,
m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
Directory name in which to store files
We create automatic filenames for each module
Returns
-------
None
Notes
-----
Sets self.written_modules to list of written modules
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules,outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
Parameters
----------
path : string
Filename to write index to
outdir : string
Directory to which to write generated index file
froot : string, optional
root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
path to which written filenames are relative. This
component of the written file path will be removed from
outdir in the generated index. Default is None, meaning
leave path as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot+self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path,'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath,f))
idx.close()
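# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original script): how the writer is
# typically driven from a small generator script. The package name and
# output paths below are placeholders; sphinx must be importable.
if __name__ == '__main__':
    docwriter = ApiDocWriter('sphinx')
    docwriter.package_skip_patterns.append('\\.tests$')
    docwriter.write_api_docs('api/generated')
    docwriter.write_index('api/generated', froot='gen', relative_to='api')
    print('%d module files written' % len(docwriter.written_modules))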
|
|
"""Models mixins for Social Auth"""
import re
import time
import base64
import uuid
import warnings
from datetime import datetime, timedelta
import six
from openid.association import Association as OpenIdAssociation
from .exceptions import MissingBackend
from .backends.utils import get_backend
NO_ASCII_REGEX = re.compile(r'[^\x00-\x7F]+')
NO_SPECIAL_REGEX = re.compile(r'[^\w.@+_-]+', re.UNICODE)
class UserMixin(object):
user = ''
provider = ''
uid = None
extra_data = None
def get_backend(self, strategy):
return get_backend(strategy.get_backends(), self.provider)
def get_backend_instance(self, strategy):
try:
backend_class = self.get_backend(strategy)
except MissingBackend:
return None
else:
return backend_class(strategy=strategy)
@property
def access_token(self):
"""Return access_token stored in extra_data or None"""
return self.extra_data.get('access_token')
@property
def tokens(self):
warnings.warn('tokens is deprecated, use access_token instead')
return self.access_token
def refresh_token(self, strategy, *args, **kwargs):
token = self.extra_data.get('refresh_token') or \
self.extra_data.get('access_token')
backend = self.get_backend(strategy)
if token and backend and hasattr(backend, 'refresh_token'):
backend = backend(strategy=strategy)
response = backend.refresh_token(token, *args, **kwargs)
extra_data = backend.extra_data(self,
self.uid,
response,
self.extra_data)
if self.set_extra_data(extra_data):
self.save()
def expiration_timedelta(self):
"""Return provider session live seconds. Returns a timedelta ready to
use with session.set_expiry().
If provider returns a timestamp instead of session seconds to live, the
timedelta is inferred from current time (using UTC timezone). None is
returned if there's no value stored or it's invalid.
"""
if self.extra_data and 'expires' in self.extra_data:
try:
expires = int(self.extra_data.get('expires'))
except (ValueError, TypeError):
return None
now = datetime.utcnow()
# Detect if expires is a timestamp
if expires > time.mktime(now.timetuple()):
# expires is a timestamp, return the remaining difference
return datetime.fromtimestamp(expires) - now
else:
# expires is the time to live seconds since creation,
# check against auth_time if present, otherwise return
# the value
auth_time = self.extra_data.get('auth_time')
if auth_time:
reference = datetime.fromtimestamp(auth_time)
return (reference + timedelta(seconds=expires)) - now
else:
return timedelta(seconds=expires)
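# Worked examples (comment only): with extra_data = {'expires': 3600,
# 'auth_time': 1500000000} the value is treated as seconds-to-live and the
# timedelta is measured from auth_time; with extra_data = {'expires':
# 4102444800} (a far-future timestamp) the remaining difference from the
# current UTC time is returned instead.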
def expiration_datetime(self):
# backward compatible alias
return self.expiration_timedelta()
def access_token_expired(self):
expiration = self.expiration_timedelta()
return expiration and expiration.total_seconds() <= 0
def get_access_token(self, strategy):
"""Returns a valid access token."""
if self.access_token_expired():
self.refresh_token(strategy)
return self.access_token
def set_extra_data(self, extra_data=None):
if extra_data and self.extra_data != extra_data:
if self.extra_data and not isinstance(
self.extra_data, six.string_types):
self.extra_data.update(extra_data)
else:
self.extra_data = extra_data
return True
@classmethod
def clean_username(cls, value):
"""Clean username removing any unsupported character"""
value = NO_ASCII_REGEX.sub('', value)
value = NO_SPECIAL_REGEX.sub('', value)
return value
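# Worked example (comment only): clean_username('ümlaut user!') first drops
# the non-ASCII 'ü' via NO_ASCII_REGEX, then NO_SPECIAL_REGEX strips the
# space and '!', leaving 'mlautuser'.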
@classmethod
def changed(cls, user):
"""The given user instance is ready to be saved"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_username(cls, user):
"""Return the username for given user"""
raise NotImplementedError('Implement in subclass')
@classmethod
def user_model(cls):
"""Return the user model"""
raise NotImplementedError('Implement in subclass')
@classmethod
def username_max_length(cls):
"""Return the max length for username"""
raise NotImplementedError('Implement in subclass')
@classmethod
def allowed_to_disconnect(cls, user, backend_name, association_id=None):
"""Return if it's safe to disconnect the social account for the
given user"""
raise NotImplementedError('Implement in subclass')
@classmethod
def disconnect(cls, entry):
"""Disconnect the social account for the given user"""
raise NotImplementedError('Implement in subclass')
@classmethod
def user_exists(cls, *args, **kwargs):
"""
Return True/False if a User instance exists with the given arguments.
Arguments are directly passed to filter() manager method.
"""
raise NotImplementedError('Implement in subclass')
@classmethod
def create_user(cls, *args, **kwargs):
"""Create a user instance"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_user(cls, pk):
"""Return user instance for given id"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_users_by_email(cls, email):
"""Return users instances for given email address"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_social_auth(cls, provider, uid):
"""Return UserSocialAuth for given provider and uid"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_social_auth_for_user(cls, user, provider=None, id=None):
"""Return all the UserSocialAuth instances for given user"""
raise NotImplementedError('Implement in subclass')
@classmethod
def create_social_auth(cls, user, uid, provider):
"""Create a UserSocialAuth instance for given user"""
raise NotImplementedError('Implement in subclass')
class NonceMixin(object):
"""One use numbers"""
server_url = ''
timestamp = 0
salt = ''
@classmethod
def use(cls, server_url, timestamp, salt):
"""Create a Nonce instance"""
raise NotImplementedError('Implement in subclass')
class AssociationMixin(object):
"""OpenId account association"""
server_url = ''
handle = ''
secret = ''
issued = 0
lifetime = 0
assoc_type = ''
@classmethod
def oids(cls, server_url, handle=None):
kwargs = {'server_url': server_url}
if handle is not None:
kwargs['handle'] = handle
return sorted([
(assoc.id, cls.openid_association(assoc))
for assoc in cls.get(**kwargs)
], key=lambda x: x[1].issued, reverse=True)
@classmethod
def openid_association(cls, assoc):
secret = assoc.secret
if not isinstance(secret, six.binary_type):
secret = secret.encode()
return OpenIdAssociation(assoc.handle, base64.decodestring(secret),
assoc.issued, assoc.lifetime,
assoc.assoc_type)
@classmethod
def store(cls, server_url, association):
"""Create an Association instance"""
raise NotImplementedError('Implement in subclass')
@classmethod
def get(cls, *args, **kwargs):
"""Get an Association instance"""
raise NotImplementedError('Implement in subclass')
@classmethod
def remove(cls, ids_to_delete):
"""Remove an Association instance"""
raise NotImplementedError('Implement in subclass')
class CodeMixin(object):
email = ''
code = ''
verified = False
def verify(self):
self.verified = True
self.save()
@classmethod
def generate_code(cls):
return uuid.uuid4().hex
@classmethod
def make_code(cls, email):
code = cls()
code.email = email
code.code = cls.generate_code()
code.verified = False
code.save()
return code
@classmethod
def get_code(cls, code):
raise NotImplementedError('Implement in subclass')
class PartialMixin(object):
token = ''
data = ''
next_step = ''
backend = ''
@property
def args(self):
return self.data.get('args', [])
@args.setter
def args(self, value):
self.data['args'] = value
@property
def kwargs(self):
return self.data.get('kwargs', {})
@kwargs.setter
def kwargs(self, value):
self.data['kwargs'] = value
def extend_kwargs(self, values):
self.data['kwargs'].update(values)
@classmethod
def generate_token(cls):
return uuid.uuid4().hex
@classmethod
def load(cls, token):
raise NotImplementedError('Implement in subclass')
@classmethod
def destroy(cls, token):
raise NotImplementedError('Implement in subclass')
@classmethod
def prepare(cls, backend, next_step, data):
partial = cls()
partial.backend = backend
partial.next_step = next_step
partial.data = data
partial.token = cls.generate_token()
return partial
@classmethod
def store(cls, partial):
partial.save()
return partial
class BaseStorage(object):
user = UserMixin
nonce = NonceMixin
association = AssociationMixin
code = CodeMixin
partial = PartialMixin
@classmethod
def is_integrity_error(cls, exception):
"""Check if given exception flags an integrity error in the DB"""
raise NotImplementedError('Implement in subclass')
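# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal, dict-backed
# storage showing how a concrete backend is expected to flesh out the
# mixins. Only a couple of the NotImplementedError hooks are wired up, and
# the InMemory* names are illustrative rather than part of the library API.
class InMemoryUserSocialAuth(UserMixin):
    _store = {}

    def __init__(self, user, provider, uid, extra_data=None):
        self.user = user
        self.provider = provider
        self.uid = uid
        self.extra_data = extra_data or {}

    def save(self):
        # persist the association keyed by (provider, uid)
        self._store[(self.provider, self.uid)] = self

    @classmethod
    def get_social_auth(cls, provider, uid):
        return cls._store.get((provider, uid))


class InMemoryStorage(BaseStorage):
    user = InMemoryUserSocialAuth

    @classmethod
    def is_integrity_error(cls, exception):
        # the in-memory store cannot raise database integrity errors
        return False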
|
|
# Working with TF commit 24466c2e6d32621cd85f0a78d47df6eed2c5c5a6
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib.seq2seq as seq2seq
from tensorflow.contrib.layers import safe_embedding_lookup_sparse as embedding_lookup_unique
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple, GRUCell
import helpers
class Seq2SeqModel():
"""Seq2Seq model usign blocks from new `tf.contrib.seq2seq`.
Requires TF 1.0.0-alpha"""
PAD = 0
EOS = 1
def __init__(self, encoder_cell, decoder_cell, vocab_size, embedding_size,
bidirectional=True,
attention=False,
debug=False):
self.debug = debug
self.bidirectional = bidirectional
self.attention = attention
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.encoder_cell = encoder_cell
self.decoder_cell = decoder_cell
self._make_graph()
@property
def decoder_hidden_units(self):
# @TODO: is this correct for LSTMStateTuple?
return self.decoder_cell.output_size
def _make_graph(self):
if self.debug:
self._init_debug_inputs()
else:
self._init_placeholders()
self._init_decoder_train_connectors()
self._init_embeddings()
if self.bidirectional:
self._init_bidirectional_encoder()
else:
self._init_simple_encoder()
self._init_decoder()
self._init_optimizer()
def _init_debug_inputs(self):
""" Everything is time-major """
x = [[5, 6, 7],
[7, 6, 0],
[0, 7, 0]]
xl = [2, 3, 1]
self.encoder_inputs = tf.constant(x, dtype=tf.int32, name='encoder_inputs')
self.encoder_inputs_length = tf.constant(xl, dtype=tf.int32, name='encoder_inputs_length')
self.decoder_targets = tf.constant(x, dtype=tf.int32, name='decoder_targets')
self.decoder_targets_length = tf.constant(xl, dtype=tf.int32, name='decoder_targets_length')
def _init_placeholders(self):
""" Everything is time-major """
self.encoder_inputs = tf.placeholder(
shape=(None, None),
dtype=tf.int32,
name='encoder_inputs',
)
self.encoder_inputs_length = tf.placeholder(
shape=(None,),
dtype=tf.int32,
name='encoder_inputs_length',
)
# required for training, not required for testing
self.decoder_targets = tf.placeholder(
shape=(None, None),
dtype=tf.int32,
name='decoder_targets'
)
self.decoder_targets_length = tf.placeholder(
shape=(None,),
dtype=tf.int32,
name='decoder_targets_length',
)
def _init_decoder_train_connectors(self):
"""
During training, `decoder_targets` are used to build both the decoder
inputs and the targets compared against the decoder logits. This means
that their shapes should be compatible.
Here we do a bit of plumbing to set this up.
"""
with tf.name_scope('DecoderTrainFeeds'):
sequence_size, batch_size = tf.unstack(tf.shape(self.decoder_targets))
EOS_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.EOS
PAD_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.PAD
self.decoder_train_inputs = tf.concat([EOS_SLICE, self.decoder_targets], axis=0)
self.decoder_train_length = self.decoder_targets_length + 1
decoder_train_targets = tf.concat([self.decoder_targets, PAD_SLICE], axis=0)
decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
decoder_train_targets_eos_mask = tf.one_hot(self.decoder_train_length - 1,
decoder_train_targets_seq_len,
on_value=self.EOS, off_value=self.PAD,
dtype=tf.int32)
decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask, [1, 0])
# hacky way using one_hot to put EOS symbol at the end of target sequence
decoder_train_targets = tf.add(decoder_train_targets,
decoder_train_targets_eos_mask)
self.decoder_train_targets = decoder_train_targets
self.loss_weights = tf.ones([
batch_size,
tf.reduce_max(self.decoder_train_length)
], dtype=tf.float32, name="loss_weights")
def _init_embeddings(self):
with tf.variable_scope("embedding") as scope:
# Uniform(-sqrt(3), sqrt(3)) has variance=1.
sqrt3 = math.sqrt(3)
initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)
self.embedding_matrix = tf.get_variable(
name="embedding_matrix",
shape=[self.vocab_size, self.embedding_size],
initializer=initializer,
dtype=tf.float32)
self.encoder_inputs_embedded = tf.nn.embedding_lookup(
self.embedding_matrix, self.encoder_inputs)
self.decoder_train_inputs_embedded = tf.nn.embedding_lookup(
self.embedding_matrix, self.decoder_train_inputs)
def _init_simple_encoder(self):
with tf.variable_scope("Encoder") as scope:
(self.encoder_outputs, self.encoder_state) = (
tf.nn.dynamic_rnn(cell=self.encoder_cell,
inputs=self.encoder_inputs_embedded,
sequence_length=self.encoder_inputs_length,
time_major=True,
dtype=tf.float32)
)
def _init_bidirectional_encoder(self):
with tf.variable_scope("BidirectionalEncoder") as scope:
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_state,
encoder_bw_state)) = (
tf.nn.bidirectional_dynamic_rnn(cell_fw=self.encoder_cell,
cell_bw=self.encoder_cell,
inputs=self.encoder_inputs_embedded,
sequence_length=self.encoder_inputs_length,
time_major=True,
dtype=tf.float32)
)
self.encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
if isinstance(encoder_fw_state, LSTMStateTuple):
encoder_state_c = tf.concat(
(encoder_fw_state.c, encoder_bw_state.c), 1, name='bidirectional_concat_c')
encoder_state_h = tf.concat(
(encoder_fw_state.h, encoder_bw_state.h), 1, name='bidirectional_concat_h')
self.encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
elif isinstance(encoder_fw_state, tf.Tensor):
self.encoder_state = tf.concat((encoder_fw_state, encoder_bw_state), 1, name='bidirectional_concat')
def _init_decoder(self):
with tf.variable_scope("Decoder") as scope:
def output_fn(outputs):
return tf.contrib.layers.linear(outputs, self.vocab_size, scope=scope)
if not self.attention:
decoder_fn_train = seq2seq.simple_decoder_fn_train(encoder_state=self.encoder_state)
decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
output_fn=output_fn,
encoder_state=self.encoder_state,
embeddings=self.embedding_matrix,
start_of_sequence_id=self.EOS,
end_of_sequence_id=self.EOS,
maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
num_decoder_symbols=self.vocab_size,
)
else:
# attention_states: size [batch_size, max_time, num_units]
attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])
(attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn) = seq2seq.prepare_attention(
attention_states=attention_states,
attention_option="bahdanau",
num_units=self.decoder_hidden_units,
)
decoder_fn_train = seq2seq.attention_decoder_fn_train(
encoder_state=self.encoder_state,
attention_keys=attention_keys,
attention_values=attention_values,
attention_score_fn=attention_score_fn,
attention_construct_fn=attention_construct_fn,
name='attention_decoder'
)
decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
output_fn=output_fn,
encoder_state=self.encoder_state,
attention_keys=attention_keys,
attention_values=attention_values,
attention_score_fn=attention_score_fn,
attention_construct_fn=attention_construct_fn,
embeddings=self.embedding_matrix,
start_of_sequence_id=self.EOS,
end_of_sequence_id=self.EOS,
maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
num_decoder_symbols=self.vocab_size,
)
(self.decoder_outputs_train,
self.decoder_state_train,
self.decoder_context_state_train) = (
seq2seq.dynamic_rnn_decoder(
cell=self.decoder_cell,
decoder_fn=decoder_fn_train,
inputs=self.decoder_train_inputs_embedded,
sequence_length=self.decoder_train_length,
time_major=True,
scope=scope,
)
)
self.decoder_logits_train = output_fn(self.decoder_outputs_train)
self.decoder_prediction_train = tf.argmax(self.decoder_logits_train, axis=-1, name='decoder_prediction_train')
scope.reuse_variables()
(self.decoder_logits_inference,
self.decoder_state_inference,
self.decoder_context_state_inference) = (
seq2seq.dynamic_rnn_decoder(
cell=self.decoder_cell,
decoder_fn=decoder_fn_inference,
time_major=True,
scope=scope,
)
)
self.decoder_prediction_inference = tf.argmax(self.decoder_logits_inference, axis=-1, name='decoder_prediction_inference')
def _init_optimizer(self):
logits = tf.transpose(self.decoder_logits_train, [1, 0, 2])
targets = tf.transpose(self.decoder_train_targets, [1, 0])
self.loss = seq2seq.sequence_loss(logits=logits, targets=targets,
weights=self.loss_weights)
self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
def make_train_inputs(self, input_seq, target_seq):
inputs_, inputs_length_ = helpers.batch(input_seq)
targets_, targets_length_ = helpers.batch(target_seq)
return {
self.encoder_inputs: inputs_,
self.encoder_inputs_length: inputs_length_,
self.decoder_targets: targets_,
self.decoder_targets_length: targets_length_,
}
def make_inference_inputs(self, input_seq):
inputs_, inputs_length_ = helpers.batch(input_seq)
return {
self.encoder_inputs: inputs_,
self.encoder_inputs_length: inputs_length_,
}
def make_seq2seq_model(**kwargs):
args = dict(encoder_cell=LSTMCell(10),
decoder_cell=LSTMCell(20),
vocab_size=10,
embedding_size=10,
attention=True,
bidirectional=True,
debug=False)
args.update(kwargs)
return Seq2SeqModel(**args)
def train_on_copy_task(session, model,
length_from=3, length_to=8,
vocab_lower=2, vocab_upper=10,
batch_size=100,
max_batches=5000,
batches_in_epoch=1000,
verbose=True):
batches = helpers.random_sequences(length_from=length_from, length_to=length_to,
vocab_lower=vocab_lower, vocab_upper=vocab_upper,
batch_size=batch_size)
loss_track = []
try:
for batch in range(max_batches+1):
batch_data = next(batches)
fd = model.make_train_inputs(batch_data, batch_data)
_, l = session.run([model.train_op, model.loss], fd)
loss_track.append(l)
if verbose:
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
print(' minibatch loss: {}'.format(session.run(model.loss, fd)))
for i, (e_in, dt_pred) in enumerate(zip(
fd[model.encoder_inputs].T,
session.run(model.decoder_prediction_train, fd).T
)):
print(' sample {}:'.format(i + 1))
print(' enc input > {}'.format(e_in))
print(' dec train predicted > {}'.format(dt_pred))
if i >= 2:
break
print()
except KeyboardInterrupt:
print('training interrupted')
return loss_track
if __name__ == '__main__':
import sys
if 'fw-debug' in sys.argv:
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(debug=True)
session.run(tf.global_variables_initializer())
session.run(model.decoder_prediction_train)
session.run(model.decoder_prediction_train)
elif 'fw-inf' in sys.argv:
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model()
session.run(tf.global_variables_initializer())
fd = model.make_inference_inputs([[5, 4, 6, 7], [6, 6]])
inf_out = session.run(model.decoder_prediction_inference, fd)
print(inf_out)
elif 'train' in sys.argv:
tracks = {}
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(attention=True)
session.run(tf.global_variables_initializer())
loss_track_attention = train_on_copy_task(session, model)
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(attention=False)
session.run(tf.global_variables_initializer())
loss_track_no_attention = train_on_copy_task(session, model)
import matplotlib.pyplot as plt
plt.plot(loss_track_attention, label='attention')
plt.plot(loss_track_no_attention, label='no attention')
plt.legend()
batch_size = 100  # matches the default batch_size of train_on_copy_task
print('loss {:.4f} after {} examples (batch_size={})'.format(loss_track_attention[-1], len(loss_track_attention)*batch_size, batch_size))
else:
tf.reset_default_graph()
session = tf.InteractiveSession()
model = make_seq2seq_model(debug=False)
session.run(tf.global_variables_initializer())
fd = model.make_inference_inputs([[5, 4, 6, 7], [6, 6]])
inf_out = session.run(model.decoder_prediction_inference, fd)
|
|
# Copyright 2016 ASLP@NPU. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: npuichigo@gmail.com (zhangyuchao)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import argparse
import numpy as np
import os
import sys
import time
import tensorflow as tf
from io_funcs.tf_datasets import SequenceDataset
from models.tf_model import TfModel
from utils.utils import pp, show_all_variables, write_binary_file, ProgressBar
# Basic model parameters as external flags.
FLAGS = None
def restore_from_ckpt(sess, saver):
ckpt = tf.train.get_checkpoint_state(os.path.join(FLAGS.save_dir, "nnet"))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
return True
else:
tf.logging.fatal("checkpoint not found")
return False
def train_one_epoch(sess, summary_writer, merged, global_step,
train_step, train_loss, train_num_batches):
if FLAGS.show:
bar = ProgressBar('Training', max=train_num_batches)
tr_loss = num_batches = 0
while True:
if FLAGS.show: bar.next()
try:
if num_batches % 50 == 49:
_, loss, summary, step = sess.run([train_step, train_loss,
merged, global_step])
summary_writer.add_summary(summary, step)
else:
_, loss = sess.run([train_step, train_loss])
tr_loss += loss
num_batches += 1
except tf.errors.OutOfRangeError:
break
if FLAGS.show: bar.finish()
tr_loss /= float(num_batches)
return tr_loss
def eval_one_epoch(sess, valid_loss, valid_num_batches):
if FLAGS.show:
bar = ProgressBar('Validation', max=valid_num_batches)
val_loss = num_batches = 0
while True:
if FLAGS.show: bar.next()
try:
loss = sess.run(valid_loss)
val_loss += loss
num_batches += 1
except tf.errors.OutOfRangeError:
break
if FLAGS.show: bar.finish()
val_loss /= float(num_batches)
return val_loss
def train():
"""Run the training of the acoustic or duration model."""
dataset_train = SequenceDataset(
subset="train",
config_dir=FLAGS.config_dir,
data_dir=FLAGS.data_dir,
batch_size=FLAGS.batch_size,
input_size=FLAGS.input_dim,
output_size=FLAGS.output_dim,
num_threads=FLAGS.num_threads,
use_bucket=True,
infer=False,
name="dataset_train")()
dataset_valid = SequenceDataset(
subset="valid",
config_dir=FLAGS.config_dir,
data_dir=FLAGS.data_dir,
batch_size=FLAGS.batch_size,
input_size=FLAGS.input_dim,
output_size=FLAGS.output_dim,
num_threads=FLAGS.num_threads,
use_bucket=True,
infer=False,
name="dataset_valid")()
model = TfModel(
rnn_cell=FLAGS.rnn_cell,
dnn_depth=FLAGS.dnn_depth,
dnn_num_hidden=FLAGS.dnn_num_hidden,
rnn_depth=FLAGS.rnn_depth,
rnn_num_hidden=FLAGS.rnn_num_hidden,
output_size=FLAGS.output_dim,
bidirectional=FLAGS.bidirectional,
rnn_output=FLAGS.rnn_output,
cnn_output=FLAGS.cnn_output,
look_ahead=FLAGS.look_ahead,
mdn_output=FLAGS.mdn_output,
mix_num=FLAGS.mix_num,
name="tf_model")
# Build a reinitializable iterator for both dataset_train and dataset_valid.
iterator = tf.data.Iterator.from_structure(
dataset_train.batched_dataset.output_types,
dataset_train.batched_dataset.output_shapes)
(input_sequence, input_sequence_length,
target_sequence, target_sequence_length) = iterator.get_next()
training_init_op = iterator.make_initializer(dataset_train.batched_dataset)
validation_init_op = iterator.make_initializer(dataset_valid.batched_dataset)
# Build the model and get the loss.
output_sequence_logits, train_final_state = model(
input_sequence, input_sequence_length)
loss = model.loss(
output_sequence_logits, target_sequence, target_sequence_length)
tf.summary.scalar("loss", loss)
learning_rate = tf.get_variable(
"learning_rate",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(FLAGS.learning_rate),
trainable=False)
reduce_learning_rate = learning_rate.assign(
learning_rate * FLAGS.reduce_learning_rate_multiplier)
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])
# Set up optimizer with global norm clipping.
trainable_variables = tf.trainable_variables()
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, _ = tf.clip_by_global_norm(
tf.gradients(loss, trainable_variables),
FLAGS.max_grad_norm)
train_step = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=global_step)
show_all_variables()
merged_all = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep=FLAGS.max_epochs)
# Train
config = tf.ConfigProto()
# Prevent exhausting all the GPU memory up front
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# Run init
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(
os.path.join(FLAGS.save_dir, "nnet"), sess.graph)
if FLAGS.resume_training:
restore_from_ckpt(sess, saver)
# add a blank line for log readability
print()
sys.stdout.flush()
sess.run(validation_init_op)
loss_prev = eval_one_epoch(sess, loss, dataset_valid.num_batches)
tf.logging.info("CROSSVAL PRERUN AVG.LOSS %.4f\n" % loss_prev)
for epoch in range(FLAGS.max_epochs):
# Train one epoch
time_start = time.time()
sess.run(training_init_op)
tr_loss = train_one_epoch(sess, summary_writer, merged_all, global_step,
train_step, loss, dataset_train.num_batches)
time_end = time.time()
used_time = time_end - time_start
# Validate one epoch
sess.run(validation_init_op)
val_loss = eval_one_epoch(sess, loss, dataset_valid.num_batches)
# Determine checkpoint path
FLAGS.learning_rate = sess.run(learning_rate)
cptk_name = 'nnet_epoch%d_lrate%g_tr%.4f_cv%.4f' % (
epoch + 1, FLAGS.learning_rate, tr_loss, val_loss)
checkpoint_path = os.path.join(FLAGS.save_dir, "nnet", cptk_name)
# accept or reject new parameters
if val_loss < loss_prev:
saver.save(sess, checkpoint_path)
# logging training loss along with validation loss
tf.logging.info(
"EPOCH %d: TRAIN AVG.LOSS %.4f, (lrate%g) "
"CROSSVAL AVG.LOSS %.4f, TIME USED %.2f, %s" % (
epoch + 1, tr_loss, FLAGS.learning_rate, val_loss,
used_time, "nnet accepted"))
loss_prev = val_loss
else:
tf.logging.info(
"EPOCH %d: TRAIN AVG.LOSS %.4f, (lrate%g) "
"CROSSVAL AVG.LOSS %.4f, TIME USED %.2f, %s" % (
epoch + 1, tr_loss, FLAGS.learning_rate, val_loss,
used_time, "nnet rejected"))
restore_from_ckpt(sess, saver)
# Reducing learning rate.
sess.run(reduce_learning_rate)
# add a blank line for log readability
print()
sys.stdout.flush()
def decode():
"""Run the decoding of the acoustic or duration model."""
with tf.device('/cpu:0'):
dataset_test = SequenceDataset(
subset="test",
config_dir=FLAGS.config_dir,
data_dir=FLAGS.data_dir,
batch_size=1,
input_size=FLAGS.input_dim,
output_size=FLAGS.output_dim,
infer=True,
name="dataset_test")()
model = TfModel(
rnn_cell=FLAGS.rnn_cell,
dnn_depth=FLAGS.dnn_depth,
dnn_num_hidden=FLAGS.dnn_num_hidden,
rnn_depth=FLAGS.rnn_depth,
rnn_num_hidden=FLAGS.rnn_num_hidden,
output_size=FLAGS.output_dim,
bidirectional=FLAGS.bidirectional,
rnn_output=FLAGS.rnn_output,
cnn_output=FLAGS.cnn_output,
look_ahead=FLAGS.look_ahead,
mdn_output=FLAGS.mdn_output,
mix_num=FLAGS.mix_num,
name="tf_model")
# Build the testing model and get test output sequence.
test_iterator = dataset_test.batched_dataset.make_one_shot_iterator()
input_sequence, input_sequence_length = test_iterator.get_next()
test_output_sequence_logits, test_final_state = model(
input_sequence, input_sequence_length)
show_all_variables()
saver = tf.train.Saver()
# Decode.
with tf.Session() as sess:
# Run init
sess.run(tf.global_variables_initializer())
if not restore_from_ckpt(sess, saver): sys.exit(-1)
# Read cmvn to do reverse mean variance normalization
cmvn = np.load(os.path.join(FLAGS.data_dir, "train_cmvn.npz"))
num_batches = 0
used_time_sum = frames_sum = 0.0
while True:
try:
time_start = time.time()
logits = sess.run(test_output_sequence_logits)
time_end = time.time()
used_time = time_end - time_start
used_time_sum += used_time
frame_num = logits.shape[1]
frames_sum += frame_num
# Squeeze batch dimension.
logits = logits.squeeze(axis=0)
if FLAGS.mdn_output:
out_pi = logits[:, : FLAGS.mix_num]
out_mu = logits[:, FLAGS.mix_num : (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim)]
out_sigma = logits[:, (FLAGS.mix_num + FLAGS.mix_num * FLAGS.output_dim) :]
max_index_pi = out_pi.argmax(axis=1)
result_mu = []
for i in range(out_mu.shape[0]):
beg_index = max_index_pi[i] * FLAGS.output_dim
end_index = (max_index_pi[i] + 1) * FLAGS.output_dim
result_mu.append(out_mu[i, beg_index:end_index])
logits = np.vstack(result_mu)
sequence = logits * cmvn["stddev_labels"] + cmvn["mean_labels"]
out_dir_name = os.path.join(FLAGS.save_dir, "test", "cmp")
out_file_name = os.path.basename(
dataset_test.tfrecords_lst[num_batches]).split('.')[0] + ".cmp"
out_path = os.path.join(out_dir_name, out_file_name)
write_binary_file(sequence, out_path, with_dim=False)
#np.savetxt(out_path, sequence, fmt="%f")
tf.logging.info(
"writing inferred cmp to %s (%d frames in %.4f seconds)" % (
out_path, frame_num, used_time))
num_batches += 1
except tf.errors.OutOfRangeError:
break
tf.logging.info("Done decoding -- epoch limit reached (%d "
"frames per second)" % int(frames_sum / used_time_sum))
def main(_):
"""Training or decoding according to FLAGS."""
if not FLAGS.decode:
train()
else:
decode()
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Argument needs to be a '
'boolean, got {}'.format(s))
return {'true': True, 'false': False}[s.lower()]
parser = argparse.ArgumentParser()
parser.add_argument(
'--decode',
default=False,
help="Flag indicating decoding or training.",
action="store_true"
)
parser.add_argument(
'--resume_training',
default=False,
help="Flag indicating whether to resume training from cptk.",
action="store_true"
)
parser.add_argument(
'--input_dim',
type=int,
default=145,
help='The dimension of inputs.'
)
parser.add_argument(
'--output_dim',
type=int,
default=75,
help='The dimension of outputs.'
)
parser.add_argument(
'--rnn_cell',
type=str,
default='fused_lstm',
help='RNN cell type, e.g. rnn, gru, lstm or fused_lstm.'
)
parser.add_argument(
'--bidirectional',
type=_str_to_bool,
default=False,
help='Whether to use bidirectional layers.'
)
parser.add_argument(
'--dnn_depth',
type=int,
default=2,
help='Number of layers of dnn model.'
)
parser.add_argument(
'--rnn_depth',
type=int,
default=3,
help='Number of layers of rnn model.'
)
parser.add_argument(
'--dnn_num_hidden',
type=int,
default=128,
help='Number of hidden units to use.'
)
parser.add_argument(
'--rnn_num_hidden',
type=int,
default=64,
help='Number of hidden units to use.'
)
parser.add_argument(
'--max_grad_norm',
type=float,
default=5.0,
help='The max gradient normalization.'
)
parser.add_argument(
'--rnn_output',
type=_str_to_bool,
default=False,
help='Whether to use rnn as the output layer.'
)
parser.add_argument(
'--cnn_output',
type=_str_to_bool,
default=False,
help='Whether to use cnn as the output layer.'
)
parser.add_argument(
'--look_ahead',
type=int,
default=5,
help='Number of steps to look ahead in cnn output layer.',
)
parser.add_argument(
'--mdn_output',
type=_str_to_bool,
default=False,
help='Whether to use mdn as the output layer.'
)
parser.add_argument(
'--mix_num',
type=int,
default=1,
help='Number of gaussian mixes in mdn output layer.',
)
parser.add_argument(
'--batch_size',
type=int,
default=32,
help='Mini-batch size.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='Initial learning rate.'
)
parser.add_argument(
'--max_epochs',
type=int,
default=30,
help='Maximum total number of epochs to run the trainer for.',
)
parser.add_argument(
'--reduce_learning_rate_multiplier',
type=float,
default=0.5,
help='Factor for reducing learning rate.'
)
parser.add_argument(
'--num_threads',
type=int,
default=8,
help='Number of threads used to read the tfrecords files.'
)
parser.add_argument(
'--save_dir',
type=str,
default='exp/acoustic/',
help='Directory to put the training result.'
)
parser.add_argument(
'--data_dir',
type=str,
default='data/',
help='Directory of train, val and test data.'
)
parser.add_argument(
'--config_dir',
type=str,
default='config/',
help='Directory to load train, val and test lists.'
)
parser.add_argument(
'--show',
type=_str_to_bool,
default=True,
help='Whether to use progress bar.'
)
FLAGS, unparsed = parser.parse_known_args()
pp.pprint(FLAGS.__dict__)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
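# ---------------------------------------------------------------------------
# Example usage (editor's sketch; the script name and paths are illustrative,
# not taken from the original source). Training with the defaults defined
# above, then decoding:
#
#   python run_tf_model.py --config_dir=config/ --data_dir=data/ \
#       --save_dir=exp/acoustic/ --batch_size=32 --max_epochs=30
#
#   python run_tf_model.py --decode --config_dir=config/ --data_dir=data/ \
#       --save_dir=exp/acoustic/
#
# Add --resume_training to continue from the checkpoint previously saved under
# <save_dir>/nnet rather than starting from randomly initialised parameters.
# ---------------------------------------------------------------------------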
|
|
""" Echidna's parameter module.
Contains :class:`Parameter` and all classes that inherit from it.
"""
import numpy
import abc
import logging
import warnings
from echidna.core import scale, shift, smear
class Parameter(object):
""" The base class for creating parameter classes.
Args:
type_name (string): The type of the parameter.
name (str): The name of this parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
Attributes:
_type (string): The type of the parameter.
_name (str): The name of this parameter
_low (float): The lower limit to float the parameter from
_high (float): The higher limit to float the parameter from
_bins (int): The number of steps between low and high values
_min (float): Minimum bound for values - (-inf) by default
_max (float): Maximum bound for values - (inf) by default
"""
def __init__(self, type_name, name, low, high, bins):
""" Initialise config class
"""
self._type = type_name
self._name = name
self._low = float(low)
self._high = float(high)
self._bins = int(bins)
self._min = -numpy.inf
self._max = numpy.inf
def get_bins(self):
""" Get the number of bins.
Returns:
int: Number of bins for this parameter.
"""
return self._bins
def get_bin(self, value):
""" Get bin for value of parameter.
Args:
value (float): Value of parameter.
Returns:
int: Bin index.
"""
return int((value - self._low) / (self._high - self._low) * self._bins)
def get_high(self):
""" Get the high value of the parameter
Returns:
float: The high value of the parameter.
"""
return self._high
def get_low(self):
""" Get the low value of the parameter.
Returns:
float: The low value the parameter.
"""
return self._low
def get_name(self):
""" Get the name of the parameter.
Returns:
str: The name of the parameter.
"""
return self._name
def get_type(self):
""" Get the type of the parameter.
Returns:
string: The type of the parameter.
"""
return self._type
def get_width(self):
"""Get the width of the binning for the parameter
Returns:
float: Bin width.
"""
return (self._high - self._low) / float(self._bins)
def to_dict(self, basic=False):
""" Represent the properties of the parameter in a dictionary.
.. note:: The attributes :attr:`_name`, :attr:`_type` are never
included in the dictionary. This is because it is expected
that the dictionary returned here will usually be used as
part of a larger dictionary where type and/or parameter_name
are keys.
Returns:
dict: Representation of the parameter in the form of a
dictionary.
"""
parameter_dict = {}
parameter_dict["low"] = self._low
parameter_dict["high"] = self._high
parameter_dict["bins"] = self._bins
return parameter_dict
class FitParameter(Parameter):
"""Simple data container that holds information for a fit parameter
(i.e. a systematic to float).
.. warning:: The sigma value can be explicitly set as None. This
is so that you can disable a penalty term for a floating parameter.
If a parameter is being floated, but sigma is None, then no
penalty term will be added for the parameter.
.. note:: The :class:`FitParameter` class offers three different
scales for constructing the array of values for the parameter.
These are:
* **linear**: A standard linear scale is the default option. This
creates an array of equally spaced values, starting at
:obj:`low` and ending at :obj:`high` (*inclusive*). The array
will contain :obj:`bins` values.
* **logscale**: This creates an array of values that are equally
spaced in log-space, but increase exponentially in linear-space,
starting at :obj:`low` and ending at :obj:`high` (*inclusive*).
The array will contain :obj:`bins` values.
* **logscale_deviation**: This creates an array of values -
centred around the prior - whose absolute deviations from the
prior are equally spaced in log-space, but increase
exponentially in linear-space. The values start at :obj:`low`
and end at :obj:`high` (*inclusive*). The array will contain
:obj:`bins` values.
Args:
name (str): The name of this parameter
prior (float): The prior of the parameter
sigma (float): The sigma of the parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
dimension (string, optional): The spectral dimension to which the
fit parameter applies.
values (:class:`numpy.array`, optional): Array of parameter
values to test in fit.
best_fit (float, optional): Best-fit value calculated by fit.
penalty_term (float, optional): Penalty term value at best fit.
logscale (bool, optional): Flag to create a logscale array of
values, rather than a linear array.
base (float, optional): Base to use when creating a logscale
array. Default is base-e.
logscale_deviation (bool, optional): Flag to create a logscale deviation
array of values rather than a linear or logscale array.
Attributes:
_prior (float): The prior of the parameter
_sigma (float): The sigma of the parameter
_dimension (string): The spectral dimension to which the fit
parameter applies.
_values (:class:`numpy.array`): Array of parameter values to
test in fit.
_best_fit (float): Best-fit value calculated by fit.
_penalty_term (float): Penalty term value at best fit.
_logscale (bool): Flag to create a logscale array of values,
rather than a linear array.
_base (float): Base to use when creating a logscale array.
Default is base-e
_logscale_deviation (bool): Flag to create a logscale deviation
array of values rather than a linear or logscale array.
_bin_boundaries (:class:`numpy.array`): Array of bin boundaries
corresponding to :attr:`_values`.
"""
def __init__(self, name, prior, sigma, low, high, bins, dimension=None,
values=None, current_value=None, penalty_term=None,
best_fit=None, logscale=None, base=numpy.e,
logscale_deviation=None):
"""Initialise FitParameter class
"""
super(FitParameter, self).__init__("fit", name, low, high, bins)
self._logger = logging.getLogger("FitParameter")
self._prior = float(prior)
if sigma is None:
self._logger.warning(
"Setting sigma explicitly as None for %s - "
"No penalty term will be added for this parameter!" % name)
self._sigma = sigma
self._dimension = dimension
self._values = values
self._current_value = current_value
self._best_fit = best_fit
self._penalty_term = penalty_term
self._logscale = None
self._base = None
self._logscale_deviation = None
self._bin_boundaries = None
if logscale:
self._logger.info("Setting logscale %s for parameter %s" %
(logscale, name))
logging.getLogger("extra").info(" --> with base: %.4g" % base)
if logscale_deviation is not None:
self._logger.warning("Recieved logscale_deviation flag that "
"will not have any effect")
self._logscale = logscale
self._base = base
elif logscale_deviation:
self._logger.info("Setting logscale_deviation %s for parameter %s"
% (logscale_deviation, name))
self._logscale_deviation = logscale_deviation
@abc.abstractmethod
def apply_to(self, spectrum):
""" Applies current value of fit parameter to spectrum.
Args:
spectrum (:class:`Spectra`): Spectrum to which current value
of parameter should be applied.
Returns:
(:class:`Spectra`): Modified spectrum.
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
pass
def check_values(self):
""" Check that no values/bin_boundaries fall outside the min
and max bounds for the parameter type. Check that the prior is
in the values.
Warnings:
UserWarning: If any values fall outside the boundaries for
the parameter type - defined by :attr:`_min` and
:attr:`_max`.
UserWarning: If :attr:`_prior` is not present in the
:attr:`_values` array.
"""
values = self.get_values()
# Check for bad values - fall outside of allowed range for parameter
bad_indices = numpy.where((values < self._min) |
(values > self._max))[0]
if len(bad_indices) != 0:
# Bad values present
warnings.warn(
"%d values fall outside bounds (%.4g, %.4g), for parameter %s"
% (len(bad_indices), self._min, self._max, self._name))
bad_values = numpy.take(values, bad_indices)
self._logger.debug("The bad values are:")
logging.getLogger("extra").debug("\n\n%s\n" % str(bad_values))
# Check prior contained in values
if not numpy.any(numpy.around(values / self._prior, 12) ==
numpy.around(1., 12)):
warnings.warn("Prior not in values array "
"for parameter %s" % self._name)
logging.getLogger("extra").warning(
"\n\nUsers are strongly advised to include the value of the "
"in all FitParameter value arrays as failure to do this "
"could result in unusual Fit results.\n\n This can be "
"achieved with an odd number of bins and low and high values "
"symmetric about prior. Some scale types - logscale_deviation "
"build the array of values around the prior, so it is "
"included by definition.\n")
log_text = "Values: %s\n" % str(values)
log_text += "Prior: %.4g\n" % self._prior
logging.getLogger("extra").debug("\n\n%s" % log_text)
def get_best_fit(self):
"""
Returns:
float: Best fit value of parameter - stored in
:attr:`_best_fit`.
Raises:
ValueError: If the value of :attr:`_best_fit` has not yet
been set.
"""
if self._best_fit is None:
raise ValueError("Best fit value for parameter" +
self._name + " has not been set")
return self._best_fit
def get_bin(self, value):
""" Get bin for value of parameter.
Args:
value (float): Value of parameter.
Returns:
int: Bin index.
"""
try:
return numpy.where(numpy.isclose(self.get_values(), value))[0][0]
except IndexError:
return int((value - self._low) / (self._high - self._low) *
self._bins)
def get_bin_boundaries(self):
""" ***PENDING DEPRECATION***
Returns an array of bin boundaries, based on the :attr:`_low`,
:attr:`_high` and :attr:`_bins` parameters, and any flags
(:attr:`_logscale` or :attr:`_logscale_deviation`) that have
been applied.
Returns:
(:class:`numpy.array`): Array of bin boundaries for the
parameter values stored in :attr:`_values`.
Warnings:
PendingDeprecationWarning: This method will be deprecated
soon. Bin boundaries shouldn't be required here as we are
referring to points on a grid, not bins.
"""
warnings.warn(PendingDeprecationWarning(
"Bin boundaries shouldn't be required here "
"as we are referring to points on a grid, not bins"))
if self._bin_boundaries is None: # Generate array of bin boundaries
if self._logscale:
if self._low <= 0.: # set low = -log(high)
low = -numpy.log(self._high)
logging.warning("Correcting fit parameter value <= 0.0")
logging.debug(" --> changed to %.4g (previously %.4g)" %
(numpy.exp(low), self._low))
else:
low = numpy.log(self._low)
high = numpy.log(self._high)
width = (high - low) / int(self._bins)
self._bin_boundaries = numpy.logspace(
low - 0.5*width, high + 0.5*width,
num=self._bins+1, base=numpy.e)
elif self._logscale_deviation:
delta = self._high - self._prior
width = numpy.log(delta + 1.) / int(self._bins / 2)
deltas = numpy.linspace(
0.5 * width, numpy.log(delta + 1.) + 0.5*width,
num=int((self._bins + 1) / 2))
pos = self._prior + numpy.exp(deltas) - 1.
neg = self._prior - numpy.exp(deltas[::-1]) + 1.
self._bin_boundaries = numpy.append(neg, pos)
else:
width = self.get_width()
self._bin_boundaries = numpy.linspace(self._low + 0.5*width,
self._high + 0.5*width,
self._bins + 1)
return self._bin_boundaries
def get_current_value(self):
"""
Returns:
float: Current value of fit parameter - stored in
:attr:`_current_value`
"""
if self._current_value is None:
raise ValueError("Current value not yet set " +
"for parameter " + self._name)
return self._current_value
def get_dimension(self):
"""
Returns:
string: Dimension to which fit parameter is applied.
"""
return self._dimension
def get_penalty_term(self):
""" Gets the value of the penalty term at the best fit.
Returns:
float: Penalty term value of parameter at best fit - stored in
:attr:`_penalty_term`.
Raises:
ValueError: If the value of :attr:`_penalty_term` has not yet
been set.
"""
if self._penalty_term is None:
raise ValueError("Penalty term value for parameter" +
self._name + " has not been set")
return self._penalty_term
def get_prior(self):
"""
Returns:
float: Prior value of fit parameter - stored in
:attr:`_prior`
"""
return self._prior
def get_sigma(self):
"""
Returns:
float: Sigma of fit parameter - stored in :attr:`_sigma`
"""
return self._sigma
def get_values(self):
""" Returns an array of values, based on the :attr:`_low`,
:attr:`_high` and :attr:`_bins` parameters, and any flags
(:attr:`_logscale` or :attr:`_logscale_deviation`) that have
been applied.
.. warning:: Calling this method with the
:attr:`logscale_deviation` flag enabled, may alter the value
of :attr:`_low`, as this scale must be symmetric about the
prior.
Returns:
(:class:`numpy.array`): Array of parameter values to test in
fit. Stored in :attr:`_values`.
"""
if self._values is None: # Generate array of values
if self._logscale:
# Create an array that is equally spaced in log-space
self._logger.info("Creating logscale array of values "
"for parameter %s" % self._name)
if self._low <= 0.: # set low = -log(high)
low = -numpy.log(self._high)
logging.warning("Correcting fit parameter value <= 0.0")
logging.debug(" --> changed to %.4g (previously %.4g)" %
(numpy.exp(low), self._low))
else:
low = numpy.log(self._low)
high = numpy.log(self._high)
self._values = numpy.logspace(low, high, num=self._bins,
base=numpy.e)
elif self._logscale_deviation:
# Create an array where absolute deviations from the prior
# increase linearly in logspace. The array is therefore
# approximately symmetrical about the prior, but the positive
# and negative deviations are treated separately, so can have
# an extended range on either side.
self._logger.info("Creating logscale_deviation array of "
"values for parameter %s" % self._name)
# Calculate maximum deviation above and below prior
delta_low = numpy.absolute(self._low - self._prior)
delta_high = numpy.absolute(self._high - self._prior)
# Calculate bins above and below, distributing evenly
bins_low = int(numpy.rint(
(delta_low) / (delta_low + delta_high) * (self._bins + 1)))
bins_high = int(numpy.rint(
(delta_high) / (delta_low + delta_high) * (self._bins + 1)))
# Calculate arrays of deviation, linear in logspace
deltas_low = numpy.linspace(
0., numpy.log(delta_low + 1.), bins_low)
deltas_high = numpy.linspace(
0., numpy.log(delta_high + 1.), bins_high)
# Create positive and negative arrays of values
# [::-1] reverses array
# Prior is included in low
low = self._prior - numpy.exp(deltas_low[::-1]) + 1.
# Prior not included in high
high = self._prior + numpy.exp(deltas_high[1:]) - 1.
self._values = numpy.append(low, high)
else: # Create a normal linear array
self._logger.info("Creating linear array of values "
"for parameter %s" % self._name)
self._values = numpy.linspace(
self._low, self._high, self._bins)
return self._values
def get_value_at(self, index):
""" Access the parameter value at a given index in the array.
Args:
index (int): Index of parameter value requested.
Returns:
float: Parameter value at the given index.
"""
return self.get_values()[index]
def get_value_index(self, value):
""" Get the index corresponding to a given parameter value.
Args:
value (float): Parameter value for which to get corresponding
index.
Returns:
int: Index of corresponding to the given parameter value.
.. warning:: If there are multiple occurences of ``value`` in
the array of parameter values, only the index of the first
occurence will be returned.
"""
indices = numpy.where(self.get_values() == value)[0]
if len(indices) == 0:
raise ValueError("No value %.2g found in parameter values " +
"for parameter %s." % (value, self._name))
return int(indices[0])
def set_best_fit(self, best_fit):
""" Set value for :attr:`_best_fit`.
Args:
best_fit (float): Best fit value for parameter
"""
self._best_fit = best_fit
def set_current_value(self, value):
""" Set value for :attr:`_current_value`.
Args:
value (float): Current value of fit parameter
"""
self._current_value = value
def set_par(self, **kwargs):
"""Set a fitting parameter's values after initialisation.
Args:
kwargs (dict): keyword arguments
.. note::
Keyword arguments include:
* prior (float): Value to set the prior to of the parameter
* sigma (float): Value to set the sigma to of the parameter
* low (float): Value to set the lower limit to of the parameter
* high (float): Value to set the higher limit to of the parameter
* bins (int): Value to set the number of bins between low and
high of the parameter
* logscale (bool): Flag to create a logscale array of
values, rather than a linear array.
* base (float): Base to use when creating a logscale array.
Raises:
TypeError: Unknown variable type passed as a kwarg.
"""
for kw in kwargs:
self._logger.warning("Updating value for %s (%.4g --> %.4g)" %
(kw, self.__dict__["_"+kw], kwargs[kw]))
if kw == "prior":
self._prior = float(kwargs[kw])
elif kw == "sigma":
if kwargs[kw] is None:
self._logger.warning("Setting sigma explicitly as None - "
"No penalty term will be applied")
self._sigma = kwargs[kw]
elif kw == "low":
self._low = float(kwargs[kw])
elif kw == "high":
self._high = float(kwargs[kw])
elif kw == "bins":
self._bins = int(kwargs[kw])
elif kw == "logscale":
self._logscale = bool(kwargs[kw])
elif kw == "base":
self._base = float(kwargs[kw])
elif kw == "logscale_deviation":
self._logscale_deviation = bool(kwargs[kw])
elif kw == "dimension":
self._dimension = str(kwargs[kw])
else:
raise TypeError("Unhandled parameter name / type %s" % kw)
self._logger.warning("Setting _values and _bin_boundaries to None")
self._values = None
self._bin_boundaries = None
def set_penalty_term(self, penalty_term):
""" Set value for :attr:`_penalty_term`.
Args:
penalty_term (float): Value for penalty term of parameter at
best fit.
"""
self._penalty_term = penalty_term
def to_dict(self, basic=False):
""" Represent the properties of the parameter in a dictionary.
Args:
basic (bool, optional): If True, only the basic properties:
prior, sigma, low, high and bins are included.
.. note:: The attributes :attr:`_name`, :attr:`_dimension`,
:attr:`_values` and :attr:`_logger` are never included in
the dictionary. For the first two this is because it is
expected that the dictionary returned here will usually be
used as part of a larger dictionary where dimension and
parameter_name are keys. The :attr:`values` attribute is not
included because this is a large numpy array. The logger is
not included as this is for internal use only.
Returns:
dict: Representation of the parameter in the form of a
dictionary.
"""
parameter_dict = {}
# Add basic attributes
parameter_dict["prior"] = self._prior
parameter_dict["sigma"] = self._sigma
parameter_dict["low"] = self._low
parameter_dict["high"] = self._high
parameter_dict["bins"] = self._bins
parameter_dict["logscale"] = self._logscale
parameter_dict["base"] = self._base
parameter_dict["logscale_deviation"] = self._logscale_deviation
if basic:
return parameter_dict
# Add non-basic attributes
parameter_dict["current_value"] = self._current_value
parameter_dict["best_fit"] = self._best_fit
parameter_dict["penalty_term"] = self._best_fit
return parameter_dict
class RateParameter(FitParameter):
""" Data container that holds information for a rate parameter that
is included in the fit.
Args:
name (str): The name of this parameter
prior (float): The prior of the parameter
sigma (float): The sigma of the parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
logscale (bool, optional): Flag to create a logscale array of
values, rather than a linear array.
base (float, optional): Base to use when creating a logscale array.
kwargs (dict): Other keyword arguments to pass to
:class:`FitParameter`
Attributes:
_logscale (bool): Flag to create a logscale array of values,
rather than a linear array.
_base (float): Base to use when creating a logscale array.
"""
def __init__(self, name, prior, sigma, low, high,
bins, logscale=None, base=numpy.e,
logscale_deviation=None, **kwargs):
super(RateParameter, self).__init__(
name, prior, sigma, low, high, bins, logscale=logscale,
base=base, logscale_deviation=logscale_deviation, **kwargs)
self._min = 0. # For rates
def apply_to(self, spectrum):
""" Scales spectrum to current value of rate parameter.
Args:
spectrum (:class:`Spectra`): Spectrum which should be scaled
to current rate value.
Returns:
(:class:`Spectra`): Scaled spectrum.
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of rate parameter %s "
"has not been set" % self._name)
spectrum.scale(self._current_value)
return spectrum
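# Editor's sketch (not part of the original module): a minimal illustration of
# the three value scales described in the FitParameter docstring, using
# RateParameter because it is a concrete subclass. All numbers are
# illustrative only.
def _example_value_scales():
    kwargs = dict(prior=1.0, sigma=0.1, low=0.5, high=2.0, bins=5)
    linear = RateParameter("rate_linear", **kwargs).get_values()
    logscale = RateParameter("rate_logscale", logscale=True, **kwargs).get_values()
    log_dev = RateParameter("rate_logdev",
                            logscale_deviation=True, **kwargs).get_values()
    # linear: equally spaced values; logscale: equally spaced in log-space;
    # logscale_deviation: values built symmetrically around the prior.
    return linear, logscale, log_dev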
class ResolutionParameter(FitParameter):
""" Data container that holds information for a resulution parameter
that is included in the fit.
Args:
name (str): The name of this parameter
prior (float): The prior of the parameter
sigma (float): The sigma of the parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
dimension (string): The spectral dimension to which the
resolution parameter applies.
kwargs (dict): Other keyword arguments to pass to
:class:`FitParameter`
"""
def __init__(self, name, prior, sigma, low,
high, bins, dimension, **kwargs):
super(ResolutionParameter, self).__init__(
name, prior, sigma, low, high, bins, dimension, **kwargs)
def apply_to(self, spectrum):
""" Smears spectrum to current value of resolution.
Args:
spectrum (:class:`Spectra`): Spectrum which should be smeared.
Returns:
(:class:`Spectra`): Smeared spectrum.
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of rate parameter %s "
"has not been set" % self._name)
cur_value = self._current_value
if cur_value > 10.:
smearer = smear.EnergySmearLY()
else:
smearer = smear.EnergySmearRes()
smearer.set_resolution(self._current_value)
spectrum = smearer.weighted_smear(spectrum, self._dimension)
return spectrum
def get_pre_convolved(self, directory, filename, added_dim=False):
""" Constructs the filename and directory from which a pre_convolved
spectrum can be loaded from.
.. note:: Before any calls to this function, the directory
and filename is should be of the form::
../dimension/syst/file_XXyy.hdf5
where dimension is the dimension you are applying systematics to e.g.
`energy_mc` and syst is the type of systematic e.g. `smear`.
XX and yy represent the syst value e.g. 200 and syst denoted type
e.g. ly for 200 NHits/MeV light yield.
For multiple dimensions and systematics then it is of the form::
../dimension/syst1/syst2/dimension/syst3/file_AAbb_CCdd_EEff.hdf5
where the order of directories and filename is the order in which
the systematics have been applied.
Args:
directory (string): Current or base directory containing
pre-convolved :class:`Spectra` object
filename (string): Current or base name of :class:`Spectra`
object
added_dim (bool, optional): If a dimension has just been added to the
directory then this flag is True.
Returns:
string: Directory containing pre-convolved :class:`Spectra`,
appended with name of this :class:`FitParameter`
string: Name of pre-convolved :class:`Spectra`, appended with
current value of this :class:`FitParameter`
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of fit parameter %s "
"has not been set" % self._name)
if directory[-1] != '/':
directory += '/'
if not added_dim:
dir_list = directory.split('/')
# Find last occurrence of dimension and search from here
idx = dir_list[::-1].index(self._dimension)
if 'smear' not in dir_list[-idx:]:
directory += 'smear/'
else:
directory += 'smear/'
if self._current_value > 10.:
ext = 'ly'
else:
ext = 'rs'
value_string = str(self._current_value)
# Strip trailing zeros in filename
value_string = value_string.rstrip('0').rstrip('.')
filename_list = filename.split('_')
temp_fname = ''
subbed = False
for x in filename_list:
if x[-2:] == ext:
x = value_string + ext
subbed = True
elif x[-7:] == ext + ".hdf5":
x = value_string + ext + ".hdf5"
subbed = True
if x[-5:] == ".hdf5":
temp_fname += x
else:
temp_fname += x + "_"
if not subbed:
temp_fname = temp_fname[:-5] + "_" + value_string + ext + ".hdf5"
return directory, temp_fname
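# Editor's sketch (not part of the original module): how get_pre_convolved
# rewrites a directory/filename pair. The light-yield value, dimension and
# paths below are purely illustrative.
def _example_pre_convolved_path():
    res = ResolutionParameter("light_yield", prior=200., sigma=10.,
                              low=150., high=250., bins=11,
                              dimension="energy_mc")
    res.set_current_value(200.)
    # Expected to return ("../energy_mc/smear/", "file_200ly.hdf5")
    return res.get_pre_convolved("../energy_mc/", "file_XXly.hdf5")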
class ScaleParameter(FitParameter):
""" Data container that holds information for a scale parameter
that is included in the fit.
Args:
name (str): The name of this parameter
prior (float): The prior of the parameter
sigma (float): The sigma of the parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
dimension (string): The spectral dimension to which the scale
parameter applies.
kwargs (dict): Other keyword arguments to pass to
:class:`FitParameter`
"""
def __init__(self, name, prior, sigma, low,
high, bins, dimension, **kwargs):
super(ScaleParameter, self).__init__(
name, prior, sigma, low, high, bins, dimension, **kwargs)
def apply_to(self, spectrum):
""" Convolves spectrum with current value of scale parameter.
Args:
spectrum (:class:`Spectra`): Spectrum to be convolved.
Returns:
(:class:`Spectra`): Convolved spectrum.
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of scale parameter %s "
"has not been set" % self._name)
scaler = scale.Scale()
scaler.set_scale_factor(self._current_value)
return scaler.scale(spectrum, self._dimension)
def get_pre_convolved(self, directory, filename, added_dim=False):
""" Constructs the filename and directory from which a pre_convolved
spectrum can be loaded from.
.. note:: Before any calls to this function, the directory
and filename is should be of the form::
../dimension/syst/file_XXyy.hdf5
where dimension is the dimension you are applying systematics to e.g.
`energy_mc` and syst is the type of systematic e.g. `smear`.
XX and yy represent the syst value e.g. 200 and syst denoted type
e.g. ly for 200 NHits/MeV light yield.
For multiple dimensions and systematics then it is of the form::
../dimension/syst1/syst2/dimension/syst3/file_AAbb_CCdd_EEff.hdf5
where the order of directories and filename is the order in which
the systematics have been applied.
Args:
directory (string): Current or base directory containing
pre-convolved :class:`Spectra` object
filename (string): Current or base name of :class:`Spectra`
object
added_dim (bool, optional): If a dimension has just been added to the
directory then this flag is True.
Returns:
string: Directory containing pre-convolved :class:`Spectra`,
appended with name of this :class:`FitParameter`
string: Name of pre-convolved :class:`Spectra`, appended with
current value of this :class:`FitParameter`
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of fit parameter %s "
"has not been set" % self._name)
if directory[-1] != '/':
directory += '/'
if not added_dim:
dir_list = directory.split('/')
# Find last occurrence of dimension and search from here
idx = dir_list[::-1].index(self._dimension)
if 'scale' not in dir_list[-idx:]:
directory += 'scale/'
else:
directory += 'scale/'
value_string = str(self._current_value)
# Strip trailing zeros in filename
value_string = value_string.rstrip('0').rstrip('.')
filename_list = filename.split('_')
temp_fname = ''
subbed = False
for x in filename_list:
if x[-2:] == "sc":
x = value_string + "sc"
subbed = True
elif x[-7:] == "sc.hdf5":
x = value_string + "sc.hdf5"
subbed = True
if x[-5:] == ".hdf5":
temp_fname += x
else:
temp_fname += x + "_"
if not subbed:
temp_fname = temp_fname[:-5] + "_" + value_string + "sc.hdf5"
return directory, temp_fname
class ShiftParameter(FitParameter):
""" Data container that holds information for a shift parameter
that is included in the fit.
Args:
name (str): The name of this parameter
prior (float): The prior of the parameter
sigma (float): The sigma of the parameter
low (float): The lower limit to float the parameter from
high (float): The higher limit to float the parameter from
bins (int): The number of steps between low and high values
dimension (string): The spectral dimension to which the shift
parameter applies.
kwargs (dict): Other keyword arguments to pass to
:class:`FitParameter`
"""
def __init__(self, name, prior, sigma, low,
high, bins, dimension, **kwargs):
super(ShiftParameter, self).__init__(
name, prior, sigma, low, high, bins, dimension, **kwargs)
def apply_to(self, spectrum):
""" Convolves spectrum with current value of shift parameter.
Args:
spectrum (:class:`Spectra`): Spectrum to be convolved.
Returns:
(:class:`Spectra`): Convolved spectrum.
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of shift parameter %s "
"has not been set" % self._name)
shifter = shift.Shift()
shifter.set_shift(self._current_value)
return shifter.shift(spectrum, self._dimension)
def get_pre_convolved(self, directory, filename, added_dim=False):
""" Constructs the filename and directory from which a pre_convolved
spectrum can be loaded from.
.. note:: Before any calls to this function, the directory
and filename is should be of the form::
../dimension/syst/file_XXyy.hdf5
where dimension is the dimension you are applying systematics to e.g.
`energy_mc` and syst is the type of systematic e.g. `smear`.
XX and yy represent the syst value e.g. 200 and syst denoted type
e.g. ly for 200 NHits/MeV light yield.
For multiple dimensions and systematics then it is of the form::
../dimension/syst1/syst2/dimension/syst3/file_AAbb_CCdd_EEff.hdf5
where the order of directories and filename is the order in which
the systematics have been applied.
Args:
directory (string): Current or base directory containing
pre-convolved :class:`Spectra` object
filename (string): Current or base name of :class:`Spectra`
object
added_dim (bool, optional): If a dimension has just been added to the
directory then this flag is True.
Returns:
string: Directory containing pre-convolved :class:`Spectra`,
appended with name of this :class:`FitParameter`
string: Name of pre-convolved :class:`Spectra`, appended with
current value of this :class:`FitParameter`
Raises:
ValueError: If :attr:`_current_value` is not set.
"""
if self._current_value is None:
raise ValueError("Current value of fit parameter %s "
"has not been set" % self._name)
if directory[-1] != '/':
directory += '/'
if not added_dim:
dir_list = directory.split('/')
# Find last occurrence of dimension and search from here
idx = dir_list[::-1].index(self._dimension)
if 'shift' not in dir_list[-idx:]:
directory += 'shift/'
else:
directory += 'shift/'
value_string = str(self._current_value)
# Strip trailing zeros in filename
value_string = value_string.rstrip('0').rstrip('.')
filename_list = filename.split('_')
temp_fname = ''
subbed = False
for x in filename_list:
if x[-2:] == "sh":
x = value_string + "sh"
subbed = True
elif x[-7:] == "sh.hdf5":
x = value_string + "sh.hdf5"
subbed = True
if x[-5:] == ".hdf5":
temp_fname += x
else:
temp_fname += x + "_"
if not subbed:
temp_fname = temp_fname[:-5] + "_" + value_string + "sh.hdf5"
return directory, temp_fname
class SpectraParameter(Parameter):
"""Simple data container that holds information for a Spectra parameter
(i.e. axis of the spectrum).
Args:
name (str): The name of this parameter
low (float): The lower limit of this parameter
high (float): The upper limit of this parameter
bins (int): The number of bins for this parameter
"""
def __init__(self, name, low, high, bins):
"""Initialise SpectraParameter class
"""
super(SpectraParameter, self).__init__("spectra", name, low, high,
bins)
def get_bin(self, x):
""" Gets the bin index which contains value x.
Args:
x (float): Value you wish to find the bin index for.
Raises:
ValueError: If x is less than parameter lower bounds
ValueError: If x is more than parameter upper bounds
Returns:
int: Bin index
"""
if x < self._low:
raise ValueError("%s is below parameter lower bound %s"
% (x, self._low))
if x > self._high:
raise ValueError("%s is above parameter upper bound %s"
% (x, self._high))
return int((x - self._low) / self.get_width())
def get_bin_boundaries(self):
""" Returns the bin boundaries for the parameter
Returns:
:class:`numpy.ndarray`: Bin boundaries for the parameter.
"""
return numpy.linspace(self._low, self._high, self._bins+1)
def get_bin_centre(self, bin):
""" Calculates the central value of a given bin
Args:
bin (int): Bin number.
Raises:
TypeError: If bin is not int
ValueError: If bin is less than zero
ValueError: If bin is greater than the number of bins - 1
Returns:
float: value of bin centre
"""
if type(bin) != int and type(bin) != numpy.int64:
raise TypeError("Must pass an integer value")
if bin < 0:
raise ValueError("Bin number (%s) must be zero or positive" % bin)
if bin > self._bins - 1:
raise ValueError("Bin number (%s) is out of range. Max = %s"
% (bin, self._bins))
return self._low + (bin + 0.5)*self.get_width()
def get_bin_centres(self):
""" Returns the bin centres of the parameter
Returns:
:class:`numpy.ndarray`: Bin centres of parameter.
"""
return numpy.arange(self._low+self.get_width()*0.5,
self._high,
self.get_width())
def get_unit(self):
"""Get the default unit for a given parameter
Raises:
Exception: Unknown parameter.
Returns:
string: Unit of the parameter
"""
if self._name.split('_')[0] == "energy":
return "MeV"
if self._name.split('_')[0] == "radial":
return "mm"
def round(self, x):
""" Round the value to nearest bin edge
Args:
x (float): Value to round.
Returns:
float: The value of the closest bin edge to x
"""
return round(x/self.get_width())*self.get_width()
def set_par(self, **kwargs):
"""Set a limit / binning parameter after initialisation.
Args:
kwargs (dict): keyword arguments
.. note::
Keyword arguments include:
* low (float): Value to set the lower limit to of the parameter
* high (float): Value to set the higher limit to of the parameter
* bins (int): Value to set the number of bins of the parameter
Raises:
TypeError: Unknown variable type passed as a kwarg.
"""
for kw in kwargs:
if kw == "low":
self._low = float(kwargs[kw])
elif kw == "high":
self._high = float(kwargs[kw])
elif kw == "bins":
self._bins = int(kwargs[kw])
else:
raise TypeError("Unhandled parameter name / type %s" % kw)
|
|
"""Support for Xiaomi Philips Lights."""
import asyncio
import datetime
from datetime import timedelta
from functools import partial
import logging
from math import ceil
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_COLOR_TEMP,
ATTR_ENTITY_ID,
DOMAIN,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
Light,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color, dt
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Philips Light"
DATA_KEY = "light.xiaomi_miio"
CONF_MODEL = "model"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MODEL): vol.In(
[
"philips.light.sread1",
"philips.light.ceiling",
"philips.light.zyceiling",
"philips.light.moonlight",
"philips.light.bulb",
"philips.light.candle",
"philips.light.candle2",
"philips.light.mono1",
"philips.light.downlight",
]
),
}
)
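# Editor's sketch: an illustrative configuration.yaml entry matching the schema
# above (host and token are placeholders, not real credentials):
#
#   light:
#     - platform: xiaomi_miio
#       host: 192.168.1.42
#       token: "0123456789abcdef0123456789abcdef"
#       name: Desk Lamp
#       model: philips.light.sread1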
# The light does not accept cct values < 1
CCT_MIN = 1
CCT_MAX = 100
DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4
DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1
SUCCESS = ["ok"]
ATTR_MODEL = "model"
ATTR_SCENE = "scene"
ATTR_DELAYED_TURN_OFF = "delayed_turn_off"
ATTR_TIME_PERIOD = "time_period"
ATTR_NIGHT_LIGHT_MODE = "night_light_mode"
ATTR_AUTOMATIC_COLOR_TEMPERATURE = "automatic_color_temperature"
ATTR_REMINDER = "reminder"
ATTR_EYECARE_MODE = "eyecare_mode"
# Moonlight
ATTR_SLEEP_ASSISTANT = "sleep_assistant"
ATTR_SLEEP_OFF_TIME = "sleep_off_time"
ATTR_TOTAL_ASSISTANT_SLEEP_TIME = "total_assistant_sleep_time"
ATTR_BRAND_SLEEP = "brand_sleep"
ATTR_BRAND = "brand"
SERVICE_SET_SCENE = "xiaomi_miio_set_scene"
SERVICE_SET_DELAYED_TURN_OFF = "xiaomi_miio_set_delayed_turn_off"
SERVICE_REMINDER_ON = "xiaomi_miio_reminder_on"
SERVICE_REMINDER_OFF = "xiaomi_miio_reminder_off"
SERVICE_NIGHT_LIGHT_MODE_ON = "xiaomi_miio_night_light_mode_on"
SERVICE_NIGHT_LIGHT_MODE_OFF = "xiaomi_miio_night_light_mode_off"
SERVICE_EYECARE_MODE_ON = "xiaomi_miio_eyecare_mode_on"
SERVICE_EYECARE_MODE_OFF = "xiaomi_miio_eyecare_mode_off"
XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_SCENE): vol.All(vol.Coerce(int), vol.Clamp(min=1, max=6))}
)
SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_TIME_PERIOD): vol.All(cv.time_period, cv.positive_timedelta)}
)
SERVICE_TO_METHOD = {
SERVICE_SET_DELAYED_TURN_OFF: {
"method": "async_set_delayed_turn_off",
"schema": SERVICE_SCHEMA_SET_DELAYED_TURN_OFF,
},
SERVICE_SET_SCENE: {
"method": "async_set_scene",
"schema": SERVICE_SCHEMA_SET_SCENE,
},
SERVICE_REMINDER_ON: {"method": "async_reminder_on"},
SERVICE_REMINDER_OFF: {"method": "async_reminder_off"},
SERVICE_NIGHT_LIGHT_MODE_ON: {"method": "async_night_light_mode_on"},
SERVICE_NIGHT_LIGHT_MODE_OFF: {"method": "async_night_light_mode_off"},
SERVICE_EYECARE_MODE_ON: {"method": "async_eyecare_mode_on"},
SERVICE_EYECARE_MODE_OFF: {"method": "async_eyecare_mode_off"},
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the light from config."""
from miio import Device, DeviceException
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
model = config.get(CONF_MODEL)
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
devices = []
unique_id = None
if model is None:
try:
miio_device = Device(host, token)
device_info = miio_device.info()
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException:
raise PlatformNotReady
if model == "philips.light.sread1":
from miio import PhilipsEyecare
light = PhilipsEyecare(host, token)
primary_device = XiaomiPhilipsEyecareLamp(name, light, model, unique_id)
devices.append(primary_device)
hass.data[DATA_KEY][host] = primary_device
secondary_device = XiaomiPhilipsEyecareLampAmbientLight(
name, light, model, unique_id
)
devices.append(secondary_device)
# The ambient light doesn't expose additional services.
# A hass.data[DATA_KEY] entry isn't needed.
elif model in ["philips.light.ceiling", "philips.light.zyceiling"]:
from miio import Ceil
light = Ceil(host, token)
device = XiaomiPhilipsCeilingLamp(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model == "philips.light.moonlight":
from miio import PhilipsMoonlight
light = PhilipsMoonlight(host, token)
device = XiaomiPhilipsMoonlightLamp(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model in [
"philips.light.bulb",
"philips.light.candle",
"philips.light.candle2",
"philips.light.downlight",
]:
from miio import PhilipsBulb
light = PhilipsBulb(host, token)
device = XiaomiPhilipsBulb(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
elif model == "philips.light.mono1":
from miio import PhilipsBulb
light = PhilipsBulb(host, token)
device = XiaomiPhilipsGenericLight(name, light, model, unique_id)
devices.append(device)
hass.data[DATA_KEY][host] = device
else:
_LOGGER.error(
"Unsupported device found! Please create an issue at "
"https://github.com/syssi/philipslight/issues "
"and provide the following data: %s",
model,
)
return False
async_add_entities(devices, update_before_add=True)
async def async_service_handler(service):
"""Map services to methods on Xiaomi Philips Lights."""
method = SERVICE_TO_METHOD.get(service.service)
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_devices = [
dev
for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids
]
else:
target_devices = hass.data[DATA_KEY].values()
update_tasks = []
for target_device in target_devices:
if not hasattr(target_device, method["method"]):
continue
await getattr(target_device, method["method"])(**params)
update_tasks.append(target_device.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
for xiaomi_miio_service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[xiaomi_miio_service].get(
"schema", XIAOMI_MIIO_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema
)
class XiaomiPhilipsAbstractLight(Light):
"""Representation of a Abstract Xiaomi Philips Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
self._name = name
self._light = light
self._model = model
self._unique_id = unique_id
self._brightness = None
self._available = False
self._state = None
self._state_attrs = {ATTR_MODEL: self._model}
@property
def should_poll(self):
"""Poll the light."""
return True
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a light command handling error messages."""
from miio import DeviceException
try:
result = await self.hass.async_add_executor_job(
partial(func, *args, **kwargs)
)
_LOGGER.debug("Response received from light: %s", result)
return result == SUCCESS
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
self._available = False
return False
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command("Turning the light off failed.", self._light.off)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight):
"""Representation of a Generic Xiaomi Philips Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update({ATTR_SCENE: None, ATTR_DELAYED_TURN_OFF: None})
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
async def async_set_scene(self, scene: int = 1):
"""Set the fixed scene."""
await self._try_command(
"Setting a fixed scene failed.", self._light.set_scene, scene
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._light.delay_off,
time_period.total_seconds(),
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(microsecond=0) + timedelta(seconds=countdown)
if previous is None:
return new
lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
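# Editor's sketch (not part of the original integration): how the delayed
# turn-off timestamp is debounced. With a 60 s countdown, a freshly polled
# timestamp only replaces the stored one when it drifts by more than
# DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS.
def _example_delayed_turn_off_debounce():
    now = dt.utcnow()
    first = XiaomiPhilipsGenericLight.delayed_turn_off_timestamp(60, now, None)
    # Polling again two seconds later keeps the original timestamp.
    second = XiaomiPhilipsGenericLight.delayed_turn_off_timestamp(
        60, now + timedelta(seconds=2), first)
    assert second == first
    return first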
class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Bulb."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._color_temp = None
@property
def color_temp(self):
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 333
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: " "%s bri, %s cct",
self._light.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: " "%s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._light.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{ATTR_SCENE: state.scene, ATTR_DELAYED_TURN_OFF: delayed_turn_off}
)
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
"""Map a value from left span to right span."""
left_span = left_max - left_min
right_span = right_max - right_min
value_scaled = float(value - left_min) / float(left_span)
return int(right_min + (value_scaled * right_span))
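# Editor's sketch (not part of the original integration): a worked example of
# the mired <-> percent-CCT mapping performed by translate(). 175 and 333
# mireds are this bulb's min/max as defined above; CCT_MIN/CCT_MAX span
# 1..100 %.
def _example_translate_cct():
    warmest = XiaomiPhilipsBulb.translate(333, 333, 175, CCT_MIN, CCT_MAX)  # -> 1
    coldest = XiaomiPhilipsBulb.translate(175, 333, 175, CCT_MIN, CCT_MAX)  # -> 100
    return warmest, coldest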
class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Ceiling Lamp."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update(
{ATTR_NIGHT_LIGHT_MODE: None, ATTR_AUTOMATIC_COLOR_TEMPERATURE: None}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 370
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_AUTOMATIC_COLOR_TEMPERATURE: state.automatic_color_temperature,
}
)
class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Eyecare Lamp 2."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._state_attrs.update(
{ATTR_REMINDER: None, ATTR_NIGHT_LIGHT_MODE: None, ATTR_EYECARE_MODE: None}
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
delayed_turn_off = self.delayed_turn_off_timestamp(
state.delay_off_countdown,
dt.utcnow(),
self._state_attrs[ATTR_DELAYED_TURN_OFF],
)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_DELAYED_TURN_OFF: delayed_turn_off,
ATTR_REMINDER: state.reminder,
ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
ATTR_EYECARE_MODE: state.eyecare,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off."""
await self._try_command(
"Setting the turn off delay failed.",
self._light.delay_off,
round(time_period.total_seconds() / 60),
)
async def async_reminder_on(self):
"""Enable the eye fatigue notification."""
await self._try_command(
"Turning on the reminder failed.", self._light.reminder_on
)
async def async_reminder_off(self):
"""Disable the eye fatigue notification."""
await self._try_command(
"Turning off the reminder failed.", self._light.reminder_off
)
async def async_night_light_mode_on(self):
"""Turn the smart night light mode on."""
await self._try_command(
"Turning on the smart night light mode failed.",
self._light.smart_night_light_on,
)
async def async_night_light_mode_off(self):
"""Turn the smart night light mode off."""
await self._try_command(
"Turning off the smart night light mode failed.",
self._light.smart_night_light_off,
)
async def async_eyecare_mode_on(self):
"""Turn the eyecare mode on."""
await self._try_command(
"Turning on the eyecare mode failed.", self._light.eyecare_on
)
async def async_eyecare_mode_off(self):
"""Turn the eyecare mode off."""
await self._try_command(
"Turning off the eyecare mode failed.", self._light.eyecare_off
)
@staticmethod
def delayed_turn_off_timestamp(
countdown: int, current: datetime, previous: datetime
):
"""Update the turn off timestamp only if necessary."""
if countdown is not None and countdown > 0:
new = current.replace(second=0, microsecond=0) + timedelta(
minutes=countdown
)
if previous is None:
return new
lower = timedelta(minutes=-DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
upper = timedelta(minutes=DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
diff = previous - new
if lower < diff < upper:
return previous
return new
return None
class XiaomiPhilipsEyecareLampAmbientLight(XiaomiPhilipsAbstractLight):
"""Representation of a Xiaomi Philips Eyecare Lamp Ambient Light."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
name = f"{name} Ambient Light"
if unique_id is not None:
unique_id = "{}-{}".format(unique_id, "ambient")
super().__init__(name, light, model, unique_id)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug(
"Setting brightness of the ambient light: %s %s%%",
brightness,
percent_brightness,
)
result = await self._try_command(
"Setting brightness of the ambient failed: %s",
self._light.set_ambient_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command(
"Turning the ambient light on failed.", self._light.ambient_on
)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._try_command(
"Turning the ambient light off failed.", self._light.ambient_off
)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.ambient
self._brightness = ceil((255 / 100.0) * state.ambient_brightness)
class XiaomiPhilipsMoonlightLamp(XiaomiPhilipsBulb):
"""Representation of a Xiaomi Philips Zhirui Bedside Lamp."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._hs_color = None
self._state_attrs.pop(ATTR_DELAYED_TURN_OFF)
self._state_attrs.update(
{
ATTR_SLEEP_ASSISTANT: None,
ATTR_SLEEP_OFF_TIME: None,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: None,
ATTR_BRAND_SLEEP: None,
ATTR_BRAND: None,
}
)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 588
@property
def hs_color(self) -> tuple:
"""Return the hs color value."""
return self._hs_color
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds, self.min_mireds, CCT_MIN, CCT_MAX
)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
rgb = color.color_hs_to_RGB(*hs_color)
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
_LOGGER.debug(
"Setting brightness and color: " "%s %s%%, %s",
brightness,
percent_brightness,
rgb,
)
result = await self._try_command(
"Setting brightness and color failed: " "%s bri, %s color",
self._light.set_brightness_and_rgb,
percent_brightness,
rgb,
)
if result:
self._hs_color = hs_color
self._brightness = brightness
elif ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness,
percent_brightness,
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting brightness and color temperature failed: " "%s bri, %s cct",
self._light.set_brightness_and_color_temperature,
percent_brightness,
percent_color_temp,
)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_HS_COLOR in kwargs:
_LOGGER.debug("Setting color: %s", rgb)
result = await self._try_command(
"Setting color failed: %s", self._light.set_rgb, rgb
)
if result:
self._hs_color = hs_color
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: " "%s mireds, %s%% cct",
color_temp,
percent_color_temp,
)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._light.set_color_temperature,
percent_color_temp,
)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug("Setting brightness: %s %s%%", brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness,
percent_brightness,
)
if result:
self._brightness = brightness
else:
await self._try_command("Turning the light on failed.", self._light.on)
async def async_update(self):
"""Fetch state from the device."""
from miio import DeviceException
try:
state = await self.hass.async_add_executor_job(self._light.status)
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
return
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.is_on
self._brightness = ceil((255 / 100.0) * state.brightness)
self._color_temp = self.translate(
state.color_temperature, CCT_MIN, CCT_MAX, self.max_mireds, self.min_mireds
)
self._hs_color = color.color_RGB_to_hs(*state.rgb)
self._state_attrs.update(
{
ATTR_SCENE: state.scene,
ATTR_SLEEP_ASSISTANT: state.sleep_assistant,
ATTR_SLEEP_OFF_TIME: state.sleep_off_time,
ATTR_TOTAL_ASSISTANT_SLEEP_TIME: state.total_assistant_sleep_time,
ATTR_BRAND_SLEEP: state.brand_sleep,
ATTR_BRAND: state.brand,
}
)
async def async_set_delayed_turn_off(self, time_period: timedelta):
"""Set delayed turn off. Unsupported."""
return
|
|
#!/usr/bin/env python
#
# Copyright (c) 2012 Eran Sandler (eran@sandler.co.il), http://eran.sandler.co.il, http://forecastcloudy.net
# Copyright (C) 2012-2013 Dustin Kirkland <kirkland@byobu.co>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import urllib2
import argparse
try:
import simplejson as json
except ImportError:
import json
EC2_REGIONS = [
"us-east-1",
"us-west-1",
"us-west-2",
"eu-west-1",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-1",
"sa-east-1"
]
EC2_INSTANCE_TYPES = [
"t1.micro",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"c1.medium",
"c1.xlarge",
"cc1.4xlarge",
"cc2.8xlarge",
"cg1.4xlarge",
"cr1.8xlarge",
"m3.xlarge",
"m3.2xlarge",
"hi1.4xlarge",
"hs1.8xlarge"
]
EC2_OS_TYPES = [
"linux",
"mswin"
]
JSON_NAME_TO_EC2_REGIONS_API = {
"us-east" : "us-east-1",
"us-east-1" : "us-east-1",
"us-west" : "us-west-1",
"us-west-1" : "us-west-1",
"us-west-2" : "us-west-2",
"eu-ireland" : "eu-west-1",
"eu-west-1" : "eu-west-1",
"apac-sin" : "ap-southeast-1",
"ap-southeast-1" : "ap-southeast-1",
"ap-southeast-2" : "ap-southeast-2",
"apac-syd" : "ap-southeast-2",
"apac-tokyo" : "ap-northeast-1",
"ap-northeast-1" : "ap-northeast-1",
"sa-east-1" : "sa-east-1"
}
EC2_REGIONS_API_TO_JSON_NAME = {
"us-east-1" : "us-east",
"us-west-1" : "us-west",
"us-west-2" : "us-west-2",
"eu-west-1" : "eu-ireland",
"ap-southeast-1" : "apac-sin",
"ap-southeast-2" : "apac-syd",
"ap-northeast-1" : "apac-tokyo",
"sa-east-1" : "sa-east-1"
}
INSTANCES_ON_DEMAND_URL = "http://aws.amazon.com/ec2/pricing/pricing-on-demand-instances.json"
INSTANCES_RESERVED_LIGHT_UTILIZATION_LINUX_URL = "http://aws.amazon.com/ec2/pricing/ri-light-linux.json"
INSTANCES_RESERVED_LIGHT_UTILIZATION_WINDOWS_URL = "http://aws.amazon.com/ec2/pricing/ri-light-mswin.json"
INSTANCES_RESERVED_MEDIUM_UTILIZATION_LINUX_URL = "http://aws.amazon.com/ec2/pricing/ri-medium-linux.json"
INSTANCES_RESERVED_MEDIUM_UTILIZATION_WINDOWS_URL = "http://aws.amazon.com/ec2/pricing/ri-medium-mswin.json"
INSTANCES_RESERVED_HEAVY_UTILIZATION_LINUX_URL = "http://aws.amazon.com/ec2/pricing/ri-heavy-linux.json"
INSTANCES_RESERVED_HEAVY_UTILIZATION_WINDOWS_URL = "http://aws.amazon.com/ec2/pricing/ri-heavy-mswin.json"
INSTANCES_RESERVED_OS_TYPE_BY_URL = {
INSTANCES_RESERVED_LIGHT_UTILIZATION_LINUX_URL : "linux",
INSTANCES_RESERVED_LIGHT_UTILIZATION_WINDOWS_URL : "mswin",
    INSTANCES_RESERVED_MEDIUM_UTILIZATION_LINUX_URL : "linux",
INSTANCES_RESERVED_MEDIUM_UTILIZATION_WINDOWS_URL : "mswin",
INSTANCES_RESERVED_HEAVY_UTILIZATION_LINUX_URL : "linux",
INSTANCES_RESERVED_HEAVY_UTILIZATION_WINDOWS_URL : "mswin"
}
INSTANCES_RESERVED_UTILIZATION_TYPE_BY_URL = {
INSTANCES_RESERVED_LIGHT_UTILIZATION_LINUX_URL : "light",
INSTANCES_RESERVED_LIGHT_UTILIZATION_WINDOWS_URL : "light",
    INSTANCES_RESERVED_MEDIUM_UTILIZATION_LINUX_URL : "medium",
INSTANCES_RESERVED_MEDIUM_UTILIZATION_WINDOWS_URL : "medium",
INSTANCES_RESERVED_HEAVY_UTILIZATION_LINUX_URL : "heavy",
INSTANCES_RESERVED_HEAVY_UTILIZATION_WINDOWS_URL : "heavy"
}
DEFAULT_CURRENCY = "USD"
INSTANCE_TYPE_MAPPING = {
"stdODI" : "m1",
"uODI" : "t1",
"hiMemODI" : "m2",
"hiCPUODI" : "c1",
"clusterComputeI" : "cc1",
"clusterGPUI" : "cg1",
"hiIoODI" : "hi1",
"secgenstdODI" : "m3",
"hiStoreODI": "hs1",
"clusterHiMemODI": "cr1",
# Reserved Instance Types
"stdResI" : "m1",
"uResI" : "t1",
"hiMemResI" : "m2",
"hiCPUResI" : "c1",
"clusterCompResI" : "cc1",
"clusterGPUResI" : "cg1",
"hiIoResI" : "hi1",
"secgenstdResI" : "m3",
"hiStoreResI": "hs1",
"clusterHiMemResI": "cr1"
}
INSTANCE_SIZE_MAPPING = {
"u" : "micro",
"sm" : "small",
"med" : "medium",
"lg" : "large",
"xl" : "xlarge",
"xxl" : "2xlarge",
"xxxxl" : "4xlarge",
"xxxxxxxxl" : "8xlarge"
}
def _load_data(url):
f = urllib2.urlopen(url)
return json.loads(f.read())
def get_ec2_reserved_instances_prices(filter_region=None, filter_instance_type=None, filter_os_type=None):
""" Get EC2 reserved instances prices. Results can be filtered by region """
get_specific_region = (filter_region is not None)
if get_specific_region:
filter_region = EC2_REGIONS_API_TO_JSON_NAME[filter_region]
get_specific_instance_type = (filter_instance_type is not None)
get_specific_os_type = (filter_os_type is not None)
currency = DEFAULT_CURRENCY
urls = [
INSTANCES_RESERVED_LIGHT_UTILIZATION_LINUX_URL,
INSTANCES_RESERVED_LIGHT_UTILIZATION_WINDOWS_URL,
        INSTANCES_RESERVED_MEDIUM_UTILIZATION_LINUX_URL,
INSTANCES_RESERVED_MEDIUM_UTILIZATION_WINDOWS_URL,
INSTANCES_RESERVED_HEAVY_UTILIZATION_LINUX_URL,
INSTANCES_RESERVED_HEAVY_UTILIZATION_WINDOWS_URL
]
result_regions = []
result_regions_index = {}
result = {
"config" : {
"currency" : currency,
},
"regions" : result_regions
}
for u in urls:
os_type = INSTANCES_RESERVED_OS_TYPE_BY_URL[u]
utilization_type = INSTANCES_RESERVED_UTILIZATION_TYPE_BY_URL[u]
data = _load_data(u)
if "config" in data and data["config"] and "regions" in data["config"] and data["config"]["regions"]:
for r in data["config"]["regions"]:
if "region" in r and r["region"]:
if get_specific_region and filter_region != r["region"]:
continue
region_name = JSON_NAME_TO_EC2_REGIONS_API[r["region"]]
if region_name in result_regions_index:
instance_types = result_regions_index[region_name]["instanceTypes"]
else:
instance_types = []
result_regions.append({
"region" : region_name,
"instanceTypes" : instance_types
})
result_regions_index[region_name] = result_regions[-1]
if "instanceTypes" in r:
for it in r["instanceTypes"]:
instance_type = INSTANCE_TYPE_MAPPING[it["type"]]
if "sizes" in it:
for s in it["sizes"]:
instance_size = INSTANCE_SIZE_MAPPING[s["size"]]
prices = {
"1year" : {
"hourly" : None,
"upfront" : None
},
"3year" : {
"hourly" : None,
"upfront" : None
}
}
_type = "%s.%s" % (instance_type, instance_size)
if _type == "cc1.8xlarge":
# Fix conflict where cc1 and cc2 share the same type
_type = "cc2.8xlarge"
if get_specific_instance_type and _type != filter_instance_type:
continue
if get_specific_os_type and os_type != filter_os_type:
continue
instance_types.append({
"type" : _type,
"os" : os_type,
"utilization" : utilization_type,
"prices" : prices
})
for price_data in s["valueColumns"]:
price = None
try:
price = float(price_data["prices"][currency])
except ValueError:
price = None
if price_data["name"] == "yrTerm1":
prices["1year"]["upfront"] = price
elif price_data["name"] == "yrTerm1Hourly":
prices["1year"]["hourly"] = price
elif price_data["name"] == "yrTerm3":
prices["3year"]["upfront"] = price
elif price_data["name"] == "yrTerm3Hourly":
prices["3year"]["hourly"] = price
return result
def get_ec2_ondemand_instances_prices(filter_region=None, filter_instance_type=None, filter_os_type=None):
""" Get EC2 on-demand instances prices. Results can be filtered by region """
get_specific_region = (filter_region is not None)
if get_specific_region:
filter_region = EC2_REGIONS_API_TO_JSON_NAME[filter_region]
get_specific_instance_type = (filter_instance_type is not None)
get_specific_os_type = (filter_os_type is not None)
currency = DEFAULT_CURRENCY
result_regions = []
result = {
"config" : {
"currency" : currency,
"unit" : "perhr"
},
"regions" : result_regions
}
data = _load_data(INSTANCES_ON_DEMAND_URL)
if "config" in data and data["config"] and "regions" in data["config"] and data["config"]["regions"]:
for r in data["config"]["regions"]:
if "region" in r and r["region"]:
if get_specific_region and filter_region != r["region"]:
continue
region_name = JSON_NAME_TO_EC2_REGIONS_API[r["region"]]
instance_types = []
if "instanceTypes" in r:
for it in r["instanceTypes"]:
instance_type = INSTANCE_TYPE_MAPPING[it["type"]]
if "sizes" in it:
for s in it["sizes"]:
instance_size = INSTANCE_SIZE_MAPPING[s["size"]]
for price_data in s["valueColumns"]:
price = None
try:
price = float(price_data["prices"][currency])
except ValueError:
price = None
_type = "%s.%s" % (instance_type, instance_size)
if _type == "cc1.8xlarge":
# Fix conflict where cc1 and cc2 share the same type
_type = "cc2.8xlarge"
if get_specific_instance_type and _type != filter_instance_type:
continue
if get_specific_os_type and price_data["name"] != filter_os_type:
continue
instance_types.append({
"type" : _type,
"os" : price_data["name"],
"price" : price
})
result_regions.append({
"region" : region_name,
"instanceTypes" : instance_types
})
return result
return None
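# Usage sketch (illustrative only; the legacy aws.amazon.com pricing JSON
# endpoints referenced above may no longer be served):
#
#   prices = get_ec2_ondemand_instances_prices(filter_region="us-east-1",
#                                               filter_instance_type="m1.small",
#                                               filter_os_type="linux")
#   for region in prices["regions"]:
#       for it in region["instanceTypes"]:
#           print "%s,%s,%s,%s" % (region["region"], it["type"],
#                                  it["os"], it["price"])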
if __name__ == "__main__":
def none_as_string(v):
if not v:
return ""
else:
return v
try:
import argparse
except ImportError:
print "ERROR: You are running Python < 2.7. Please use pip to install argparse: pip install argparse"
parser = argparse.ArgumentParser(add_help=True, description="Print out the current prices of EC2 instances")
parser.add_argument("--type", "-t", help="Show ondemand or reserved instances", choices=["ondemand", "reserved"], required=True)
parser.add_argument("--filter-region", "-fr", help="Filter results to a specific region", choices=EC2_REGIONS, default=None)
parser.add_argument("--filter-type", "-ft", help="Filter results to a specific instance type", choices=EC2_INSTANCE_TYPES, default=None)
parser.add_argument("--filter-os-type", "-fo", help="Filter results to a specific os type", choices=EC2_OS_TYPES, default=None)
parser.add_argument("--format", "-f", choices=["json", "table", "csv"], help="Output format", default="table")
args = parser.parse_args()
if args.format == "table":
try:
from prettytable import PrettyTable
except ImportError:
print "ERROR: Please install 'prettytable' using pip: pip install prettytable"
data = None
if args.type == "ondemand":
data = get_ec2_ondemand_instances_prices(args.filter_region, args.filter_type, args.filter_os_type)
elif args.type == "reserved":
data = get_ec2_reserved_instances_prices(args.filter_region, args.filter_type, args.filter_os_type)
if args.format == "json":
print json.dumps(data)
elif args.format == "table":
x = PrettyTable()
if args.type == "ondemand":
try:
x.set_field_names(["region", "type", "os", "price"])
except AttributeError:
x.field_names = ["region", "type", "os", "price"]
try:
x.aligns[-1] = "l"
except AttributeError:
x.align["price"] = "l"
for r in data["regions"]:
region_name = r["region"]
for it in r["instanceTypes"]:
x.add_row([region_name, it["type"], it["os"], none_as_string(it["price"])])
elif args.type == "reserved":
try:
x.set_field_names(["region", "type", "os", "utilization", "term", "price", "upfront"])
except AttributeError:
x.field_names = ["region", "type", "os", "utilization", "term", "price", "upfront"]
try:
x.aligns[-1] = "l"
x.aligns[-2] = "l"
except AttributeError:
x.align["price"] = "l"
x.align["upfront"] = "l"
for r in data["regions"]:
region_name = r["region"]
for it in r["instanceTypes"]:
for term in it["prices"]:
x.add_row([region_name, it["type"], it["os"], it["utilization"], term, none_as_string(it["prices"][term]["hourly"]), none_as_string(it["prices"][term]["upfront"])])
print x
elif args.format == "csv":
if args.type == "ondemand":
print "region,type,os,price"
for r in data["regions"]:
region_name = r["region"]
for it in r["instanceTypes"]:
print "%s,%s,%s,%s" % (region_name, it["type"], it["os"], none_as_string(it["price"]))
elif args.type == "reserved":
print "region,type,os,utilization,term,price,upfront"
for r in data["regions"]:
region_name = r["region"]
for it in r["instanceTypes"]:
for term in it["prices"]:
print "%s,%s,%s,%s,%s,%s,%s" % (region_name, it["type"], it["os"], it["utilization"], term, none_as_string(it["prices"][term]["hourly"]), none_as_string(it["prices"][term]["upfront"]))
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for programmatically generating markup streams from Python code using
a very simple syntax. The main entry point to this module is the `tag` object
(which is actually an instance of the ``ElementFactory`` class). You should
rarely (if ever) need to directly import and use any of the other classes in
this module.
Elements can be created using the `tag` object using attribute access. For
example:
>>> doc = tag.p('Some text and ', tag.a('a link', href='http://example.org/'), '.')
>>> doc
<Element "p">
This produces an `Element` instance which can be further modified to add child
nodes and attributes. This is done by "calling" the element: positional
arguments are added as child nodes (alternatively, the `Element.append` method
can be used for that purpose), whereas keyword arguments are added as
attributes:
>>> doc(tag.br)
<Element "p">
>>> print doc
<p>Some text and <a href="http://example.org/">a link</a>.<br/></p>
If an attribute name collides with a Python keyword, simply append an underscore
to the name:
>>> doc(class_='intro')
<Element "p">
>>> print doc
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
As shown above, an `Element` can easily be directly rendered to XML text by
printing it or using the Python ``str()`` function. This is basically a
shortcut for converting the `Element` to a stream and serializing that
stream:
>>> stream = doc.generate()
>>> stream #doctest: +ELLIPSIS
<genshi.core.Stream object at ...>
>>> print stream
<p class="intro">Some text and <a href="http://example.org/">a link</a>.<br/></p>
The `tag` object also allows creating "fragments", which are basically lists
of nodes (elements or text) that don't have a parent element. This can be useful
for creating snippets of markup that are attached to a parent element later (for
example in a template). Fragments are created by calling the `tag` object, which
returns an object of type `Fragment`:
>>> fragment = tag('Hello, ', tag.em('world'), '!')
>>> fragment
<Fragment>
>>> print fragment
Hello, <em>world</em>!
"""
try:
set
except NameError:
from sets import Set as set
from pyxer.template.genshi.core import Attrs, Markup, Namespace, QName, Stream, \
START, END, TEXT
__all__ = ['Fragment', 'Element', 'ElementFactory', 'tag']
__docformat__ = 'restructuredtext en'
class Fragment(object):
"""Represents a markup fragment, which is basically just a list of element
or text nodes.
"""
__slots__ = ['children']
def __init__(self):
"""Create a new fragment."""
self.children = []
def __add__(self, other):
return Fragment()(self, other)
def __call__(self, *args):
"""Append any positional arguments as child nodes.
:see: `append`
"""
map(self.append, args)
return self
def __iter__(self):
return self._generate()
def __repr__(self):
return '<%s>' % self.__class__.__name__
def __str__(self):
return str(self.generate())
def __unicode__(self):
return unicode(self.generate())
def __html__(self):
return Markup(self.generate())
def append(self, node):
"""Append an element or string as child node.
:param node: the node to append; can be an `Element`, `Fragment`, or a
`Stream`, or a Python string or number
"""
if isinstance(node, (Stream, Element, basestring, int, float, long)):
# For objects of a known/primitive type, we avoid the check for
# whether it is iterable for better performance
self.children.append(node)
elif isinstance(node, Fragment):
self.children.extend(node.children)
elif node is not None:
try:
map(self.append, iter(node))
except TypeError:
self.children.append(node)
def _generate(self):
for child in self.children:
if isinstance(child, Fragment):
for event in child._generate():
yield event
elif isinstance(child, Stream):
for event in child:
yield event
else:
if not isinstance(child, basestring):
child = unicode(child)
yield TEXT, child, (None, -1, -1)
def generate(self):
"""Return a markup event stream for the fragment.
:rtype: `Stream`
"""
return Stream(self._generate())
def _kwargs_to_attrs(kwargs):
attrs = []
names = set()
for name, value in kwargs.items():
name = name.rstrip('_').replace('_', '-')
if value is not None and name not in names:
attrs.append((QName(name), unicode(value)))
names.add(name)
return Attrs(attrs)
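# For example, _kwargs_to_attrs({'class_': 'intro', 'data_id': 7}) yields the
# attributes class="intro" and data-id="7": trailing underscores are stripped,
# remaining underscores become hyphens, and `None` values are dropped.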
class Element(Fragment):
"""Simple XML output generator based on the builder pattern.
Construct XML elements by passing the tag name to the constructor:
>>> print Element('strong')
<strong/>
Attributes can be specified using keyword arguments. The values of the
arguments will be converted to strings and any special XML characters
escaped:
>>> print Element('textarea', rows=10, cols=60)
<textarea rows="10" cols="60"/>
>>> print Element('span', title='1 < 2')
<span title="1 < 2"/>
>>> print Element('span', title='"baz"')
<span title=""baz""/>
The " character is escaped using a numerical entity.
The order in which attributes are rendered is undefined.
If an attribute value evaluates to `None`, that attribute is not included
in the output:
>>> print Element('a', name=None)
<a/>
Attribute names that conflict with Python keywords can be specified by
appending an underscore:
>>> print Element('div', class_='warning')
<div class="warning"/>
Nested elements can be added to an element using item access notation.
The call notation can also be used for this and for adding attributes
using keyword arguments, as one would do in the constructor.
>>> print Element('ul')(Element('li'), Element('li'))
<ul><li/><li/></ul>
>>> print Element('a')('Label')
<a>Label</a>
>>> print Element('a')('Label', href="target")
<a href="target">Label</a>
Text nodes can be nested in an element by adding strings instead of
elements. Any special characters in the strings are escaped automatically:
>>> print Element('em')('Hello world')
<em>Hello world</em>
>>> print Element('em')(42)
<em>42</em>
>>> print Element('em')('1 < 2')
    <em>1 &lt; 2</em>
This technique also allows mixed content:
>>> print Element('p')('Hello ', Element('b')('world'))
<p>Hello <b>world</b></p>
Quotes are not escaped inside text nodes:
>>> print Element('p')('"Hello"')
<p>"Hello"</p>
Elements can also be combined with other elements or strings using the
addition operator, which results in a `Fragment` object that contains the
operands:
>>> print Element('br') + 'some text' + Element('br')
<br/>some text<br/>
Elements with a namespace can be generated using the `Namespace` and/or
`QName` classes:
>>> from genshi.core import Namespace
>>> xhtml = Namespace('http://www.w3.org/1999/xhtml')
>>> print Element(xhtml.html, lang='en')
<html xmlns="http://www.w3.org/1999/xhtml" lang="en"/>
"""
__slots__ = ['tag', 'attrib']
def __init__(self, tag_, **attrib):
Fragment.__init__(self)
self.tag = QName(tag_)
self.attrib = _kwargs_to_attrs(attrib)
def __call__(self, *args, **kwargs):
"""Append any positional arguments as child nodes, and keyword arguments
as attributes.
:return: the element itself so that calls can be chained
:rtype: `Element`
:see: `Fragment.append`
"""
self.attrib |= _kwargs_to_attrs(kwargs)
Fragment.__call__(self, *args)
return self
def __repr__(self):
return '<%s "%s">' % (self.__class__.__name__, self.tag)
def _generate(self):
yield START, (self.tag, self.attrib), (None, -1, -1)
for kind, data, pos in Fragment._generate(self):
yield kind, data, pos
yield END, self.tag, (None, -1, -1)
def generate(self):
"""Return a markup event stream for the fragment.
:rtype: `Stream`
"""
return Stream(self._generate())
class ElementFactory(object):
"""Factory for `Element` objects.
A new element is created simply by accessing a correspondingly named
attribute of the factory object:
>>> factory = ElementFactory()
>>> print factory.foo
<foo/>
>>> print factory.foo(id=2)
<foo id="2"/>
Markup fragments (lists of nodes without a parent element) can be created
by calling the factory:
>>> print factory('Hello, ', factory.em('world'), '!')
Hello, <em>world</em>!
A factory can also be bound to a specific namespace:
>>> factory = ElementFactory('http://www.w3.org/1999/xhtml')
>>> print factory.html(lang="en")
<html xmlns="http://www.w3.org/1999/xhtml" lang="en"/>
The namespace for a specific element can be altered on an existing factory
by specifying the new namespace using item access:
>>> factory = ElementFactory()
>>> print factory.html(factory['http://www.w3.org/2000/svg'].g(id=3))
<html><g xmlns="http://www.w3.org/2000/svg" id="3"/></html>
    Usually, the `ElementFactory` class is not used directly. Rather, the
`tag` instance should be used to create elements.
"""
def __init__(self, namespace=None):
"""Create the factory, optionally bound to the given namespace.
:param namespace: the namespace URI for any created elements, or `None`
for no namespace
"""
if namespace and not isinstance(namespace, Namespace):
namespace = Namespace(namespace)
self.namespace = namespace
def __call__(self, *args):
"""Create a fragment that has the given positional arguments as child
nodes.
:return: the created `Fragment`
:rtype: `Fragment`
"""
return Fragment()(*args)
def __getitem__(self, namespace):
"""Return a new factory that is bound to the specified namespace.
:param namespace: the namespace URI or `Namespace` object
:return: an `ElementFactory` that produces elements bound to the given
namespace
:rtype: `ElementFactory`
"""
return ElementFactory(namespace)
def __getattr__(self, name):
"""Create an `Element` with the given name.
:param name: the tag name of the element to create
:return: an `Element` with the specified name
:rtype: `Element`
"""
return Element(self.namespace and self.namespace[name] or name)
tag = ElementFactory()
"""Global `ElementFactory` bound to the default namespace.
:type: `ElementFactory`
"""
|
|
import mock
from django.test import TestCase
from django.contrib.auth.models import User
from django.http import HttpRequest
from mediaviewer.models.usersettings import UserSettings
from mediaviewer.models.request import Request
from mediaviewer.views.requests import (addrequests,
ajaxvote,
ajaxdone,
ajaxgiveup,
requests,
)
class TestRequests(TestCase):
def setUp(self):
self.filter_patcher = mock.patch(
'mediaviewer.views.requests.Request.objects.filter')
self.mock_filter = self.filter_patcher.start()
self.addCleanup(self.filter_patcher.stop)
self.setSiteWideContext_patcher = mock.patch(
'mediaviewer.views.requests.setSiteWideContext')
self.mock_setSiteWideContext = self.setSiteWideContext_patcher.start()
self.addCleanup(self.setSiteWideContext_patcher.stop)
self.render_patcher = mock.patch(
'mediaviewer.views.requests.render')
self.mock_render = self.render_patcher.start()
self.addCleanup(self.render_patcher.stop)
self.change_password_patcher = mock.patch(
'mediaviewer.views.password_reset.change_password')
self.mock_change_password = self.change_password_patcher.start()
self.addCleanup(self.change_password_patcher.stop)
self.user = mock.MagicMock(User)
self.settings = mock.MagicMock()
self.settings.force_password_change = False
self.user.settings.return_value = self.settings
self.request = mock.MagicMock(HttpRequest)
self.request.user = self.user
self.test_requestObj = mock.MagicMock(Request)
self.mock_filter.return_value = [self.test_requestObj]
def test_valid(self):
expected_context = {
'items': [self.test_requestObj],
'user': self.user,
'active_page': 'requests',
'title': 'Requests',
}
expected = self.mock_render.return_value
actual = requests(self.request)
self.assertEqual(expected, actual)
self.mock_filter.assert_called_once_with(done=False)
self.mock_setSiteWideContext.assert_called_once_with(
expected_context,
self.request,
includeMessages=True)
self.mock_render.assert_called_once_with(
self.request,
'mediaviewer/request.html',
expected_context)
def test_force_password_change(self):
self.settings.force_password_change = True
expected = self.mock_change_password.return_value
actual = requests(self.request)
self.assertEqual(expected, actual)
self.mock_change_password.assert_called_once_with(self.request)
class TestAddRequests(TestCase):
def setUp(self):
self.HttpResponseRedirect_patcher = mock.patch(
'mediaviewer.views.requests.HttpResponseRedirect')
self.mock_httpResponseRedirect = (
self.HttpResponseRedirect_patcher.start())
self.addCleanup(self.HttpResponseRedirect_patcher.stop)
self.reverse_patcher = mock.patch('mediaviewer.views.requests.reverse')
self.mock_reverse = self.reverse_patcher.start()
self.addCleanup(self.reverse_patcher.stop)
self.request_new_patcher = mock.patch(
'mediaviewer.views.requests.Request.new')
self.mock_request_new = self.request_new_patcher.start()
self.addCleanup(self.request_new_patcher.stop)
self.request_vote_new_patcher = mock.patch(
'mediaviewer.views.requests.RequestVote.new')
self.mock_request_vote_new = self.request_vote_new_patcher.start()
self.addCleanup(self.request_vote_new_patcher.stop)
self.change_password_patcher = mock.patch(
'mediaviewer.views.password_reset.change_password')
self.mock_change_password = self.change_password_patcher.start()
self.addCleanup(self.change_password_patcher.stop)
self.test_user = User.objects.create_superuser('test_user',
'test@user.com',
'password')
self.settings = mock.MagicMock()
self.settings.force_password_change = False
self.test_user.settings = lambda: self.settings
self.request = mock.MagicMock()
self.request.POST = {'newrequest': 'new request'}
self.request.user = self.test_user
def test_valid(self):
expected = self.mock_httpResponseRedirect.return_value
actual = addrequests(self.request)
self.assertEqual(expected, actual)
self.mock_reverse.assert_called_once_with('mediaviewer:requests')
self.mock_httpResponseRedirect.assert_called_once_with(
self.mock_reverse.return_value)
self.mock_request_new.assert_called_once_with('new request',
self.test_user)
self.mock_request_vote_new.assert_called_once_with(
self.mock_request_new.return_value,
self.test_user)
def test_force_password_change(self):
self.settings.force_password_change = True
expected = self.mock_change_password.return_value
actual = addrequests(self.request)
self.assertEqual(expected, actual)
self.mock_change_password.assert_called_once_with(self.request)
class TestAjaxVote(TestCase):
def setUp(self):
self.HttpResponse_patcher = mock.patch(
'mediaviewer.views.requests.HttpResponse')
self.mock_httpResponse = self.HttpResponse_patcher.start()
self.addCleanup(self.HttpResponse_patcher.stop)
self.dumps_patcher = mock.patch(
'mediaviewer.views.requests.json.dumps')
self.mock_dumps = self.dumps_patcher.start()
self.addCleanup(self.dumps_patcher.stop)
self.get_object_patcher = mock.patch(
'mediaviewer.views.requests.get_object_or_404')
self.mock_get_object = self.get_object_patcher.start()
self.addCleanup(self.get_object_patcher.stop)
self.new_patcher = mock.patch(
'mediaviewer.views.requests.RequestVote.new')
self.mock_new = self.new_patcher.start()
self.addCleanup(self.new_patcher.stop)
self.test_user = User.objects.create_superuser('test_user',
'test@user.com',
'password')
self.request = mock.MagicMock()
self.request.POST = {'requestid': 123}
self.request.user = self.test_user
self.mock_requestObj = mock.MagicMock()
self.mock_get_object.return_value = self.mock_requestObj
def test_request_exists(self):
expected = self.mock_httpResponse.return_value
actual = ajaxvote(self.request)
self.assertEqual(expected, actual)
self.mock_new.assert_called_once_with(self.mock_requestObj,
self.test_user)
self.mock_requestObj.numberOfVotes.assert_called_once_with()
self.mock_dumps.assert_called_once_with({
'numberOfVotes': self.mock_requestObj.numberOfVotes.return_value,
'requestid': 123})
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
def test_user_not_authenticated(self):
self.test_user = mock.MagicMock()
self.test_user.is_authenticated.return_value = False
self.request.user = self.test_user
self.assertRaisesMessage(
Exception,
'User not authenticated. Refresh and try again.',
ajaxvote,
self.request)
# TODO: Add tests for user messages
class TestAjaxDone(TestCase):
def setUp(self):
self.HttpResponse_patcher = mock.patch(
'mediaviewer.views.requests.HttpResponse')
self.mock_httpResponse = self.HttpResponse_patcher.start()
self.addCleanup(self.HttpResponse_patcher.stop)
self.dumps_patcher = mock.patch(
'mediaviewer.views.requests.json.dumps')
self.mock_dumps = self.dumps_patcher.start()
self.addCleanup(self.dumps_patcher.stop)
self.get_object_patcher = mock.patch(
'mediaviewer.views.requests.get_object_or_404')
self.mock_get_object = self.get_object_patcher.start()
self.addCleanup(self.get_object_patcher.stop)
self.user = mock.create_autospec(User)
self.user.username = 'test_logged_in_user'
self.user.is_authenticated.return_value = True
self.user.is_staff = True
self.settings = mock.create_autospec(UserSettings)
self.settings.force_password_change = False
self.user.settings.return_value = self.settings
self.request = mock.MagicMock()
self.request.POST = {'requestid': 123}
self.request.user = self.user
def test_user_not_authenticated(self):
self.user.is_authenticated.return_value = False
expected_response = {
'errmsg': 'User not authenticated. Refresh and try again.'}
expected = self.mock_httpResponse.return_value
actual = ajaxdone(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
def test_user_not_staff(self):
self.user.is_staff = False
expected_response = {
'errmsg': 'User is not a staffer'}
expected = self.mock_httpResponse.return_value
actual = ajaxdone(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
def test_valid(self):
expected_response = {
'errmsg': '',
'message': 'Marked done!',
'requestid': 123}
expected = self.mock_httpResponse.return_value
actual = ajaxdone(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
class TestGiveUp(TestCase):
def setUp(self):
self.HttpResponse_patcher = mock.patch(
'mediaviewer.views.requests.HttpResponse')
self.mock_httpResponse = self.HttpResponse_patcher.start()
self.addCleanup(self.HttpResponse_patcher.stop)
self.dumps_patcher = mock.patch(
'mediaviewer.views.requests.json.dumps')
self.mock_dumps = self.dumps_patcher.start()
self.addCleanup(self.dumps_patcher.stop)
self.get_object_patcher = mock.patch(
'mediaviewer.views.requests.get_object_or_404')
self.mock_get_object = self.get_object_patcher.start()
self.addCleanup(self.get_object_patcher.stop)
self.user = mock.create_autospec(User)
self.user.username = 'test_logged_in_user'
self.user.is_authenticated.return_value = True
self.user.is_staff = True
self.settings = mock.create_autospec(UserSettings)
self.settings.force_password_change = False
self.user.settings.return_value = self.settings
self.request = mock.MagicMock()
self.request.POST = {'requestid': 123}
self.request.user = self.user
def test_user_not_authenticated(self):
self.user.is_authenticated.return_value = False
expected_response = {
'errmsg': 'User not authenticated. Refresh and try again.'}
expected = self.mock_httpResponse.return_value
actual = ajaxgiveup(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
def test_user_not_staff(self):
self.user.is_staff = False
expected_response = {
'errmsg': 'User is not a staffer'}
expected = self.mock_httpResponse.return_value
actual = ajaxgiveup(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
def test_valid(self):
expected_response = {
'errmsg': '',
'message': 'Give up!',
'requestid': 123}
expected = self.mock_httpResponse.return_value
actual = ajaxgiveup(self.request)
self.assertEqual(expected, actual)
self.mock_dumps.assert_called_once_with(expected_response)
self.mock_httpResponse.assert_called_once_with(
self.mock_dumps.return_value,
content_type='application/javascript')
|
|
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import time
import mock
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 42
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS)
LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
VM_DEVICE_OWNER = "compute:None"
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort:
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding:
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(base.BaseTestCase):
USE_VETH_INTERCONNECTION = False
VETH_MTU = None
def setUp(self):
super(TunnelTest, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tun_br_map'
self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_INT_OFPORT = 33333
self.MAP_TUN_PHY_OFPORT = 44444
self.inta = mock.Mock()
self.intb = mock.Mock()
self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(),
self.TUN_BRIDGE: mock.Mock(),
self.MAP_TUN_BRIDGE: mock.Mock(),
}
self.ovs_int_ofports = {
'patch-tun': self.TUN_OFPORT,
'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
}
self.mock_bridge = mock.patch.object(ovs_lib, 'OVSBridge').start()
self.mock_bridge.side_effect = (lambda br_name, root_helper:
self.ovs_bridges[br_name])
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
self.mock_int_bridge.add_patch_port.side_effect = (
lambda tap, peer: self.ovs_int_ofports[tap])
self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_map_tun_bridge.add_patch_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.device_exists = mock.patch.object(ip_lib, 'device_exists').start()
self.device_exists.return_value = True
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.get_bridges = mock.patch.object(ovs_lib, 'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE]
self.execute = mock.patch('neutron.agent.linux.utils.execute').start()
self._define_expected_calls()
def _define_expected_calls(self):
self.mock_bridge_expected = [
mock.call(self.INT_BRIDGE, 'sudo'),
mock.call(self.MAP_TUN_BRIDGE, 'sudo'),
mock.call(self.TUN_BRIDGE, 'sudo'),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.delete_port('patch-tun'),
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.add_flow(priority=0, table=constants.CANARY_TABLE,
actions='drop'),
]
self.mock_map_tun_bridge_expected = [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_INT_OFPORT,
actions='drop'),
mock.call.set_db_attribute(
'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_map_tun_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_PHY_OFPORT,
actions='drop'),
mock.call.set_db_attribute(
'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE),
]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_tun_bridge_expected += [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN,
in_port=self.INT_OFPORT),
mock.call.add_flow(priority=0, actions="drop"),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
dl_dst=UCAST_MAC,
actions="resubmit(,%s)" %
constants.UCAST_TO_TUN),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
dl_dst=BCAST_MAC,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
]
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.mock_tun_bridge_expected.append(
mock.call.add_flow(
table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop"))
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
self.mock_tun_bridge_expected += [
mock.call.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.INT_OFPORT)),
mock.call.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
mock.call.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
]
self.device_exists_expected = []
self.ipdevice_expected = []
self.ipwrapper_expected = [mock.call('sudo')]
self.get_bridges_expected = [mock.call('sudo'), mock.call('sudo')]
self.inta_expected = []
self.intb_expected = []
self.execute_expected = []
def _build_agent(self, **kwargs):
kwargs.setdefault('integ_br', self.INT_BRIDGE)
kwargs.setdefault('tun_br', self.TUN_BRIDGE)
kwargs.setdefault('local_ip', '10.0.0.1')
kwargs.setdefault('bridge_mappings', self.NET_MAPPING)
kwargs.setdefault('root_helper', 'sudo')
kwargs.setdefault('polling_interval', 2)
kwargs.setdefault('tunnel_types', ['gre'])
kwargs.setdefault('veth_mtu', self.VETH_MTU)
kwargs.setdefault('use_veth_interconnection',
self.USE_VETH_INTERCONNECTION)
return ovs_neutron_agent.OVSNeutronAgent(**kwargs)
def _verify_mock_call(self, mock_obj, expected):
mock_obj.assert_has_calls(expected)
self.assertEqual(len(mock_obj.mock_calls), len(expected))
def _verify_mock_calls(self):
self._verify_mock_call(self.mock_bridge, self.mock_bridge_expected)
self._verify_mock_call(self.mock_int_bridge,
self.mock_int_bridge_expected)
self._verify_mock_call(self.mock_map_tun_bridge,
self.mock_map_tun_bridge_expected)
self._verify_mock_call(self.mock_tun_bridge,
self.mock_tun_bridge_expected)
self._verify_mock_call(self.device_exists, self.device_exists_expected)
self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
self._verify_mock_call(self.inta, self.inta_expected)
self._verify_mock_call(self.intb, self.intb_expected)
self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
agent = self._build_agent()
self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host)
self._verify_mock_calls()
    # TODO(ethuleau): Initially, the local ARP responder is dependent on the
    # ML2 l2 population mechanism driver.
# The next two tests use l2_pop flag to test ARP responder
def test_construct_with_arp_responder(self):
self._build_agent(l2_population=True, arp_responder=True)
self.mock_tun_bridge_expected.insert(
5, mock.call.add_flow(table=constants.PATCH_LV_TO_TUN,
priority=1,
proto="arp",
dl_dst="ff:ff:ff:ff:ff:ff",
actions="resubmit(,%s)" %
constants.ARP_RESPONDER)
)
self.mock_tun_bridge_expected.insert(
12, mock.call.add_flow(table=constants.ARP_RESPONDER,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN)
)
self._verify_mock_calls()
def test_construct_without_arp_responder(self):
self._build_agent(l2_population=False, arp_responder=True)
self._verify_mock_calls()
def test_construct_vxlan(self):
self._build_agent(tunnel_types=['vxlan'])
self._verify_mock_calls()
def test_provision_local_vlan(self):
ofports = ','.join(TUN_OFPORTS[p_const.TYPE_GRE].values())
self.mock_tun_bridge_expected += [
mock.call.mod_flow(table=constants.FLOOD_TO_TUN,
dl_vlan=LV_ID,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(LS_ID, ofports)),
mock.call.add_flow(table=constants.TUN_TABLE['gre'],
priority=1,
tun_id=LS_ID,
actions="mod_vlan_vid:%s,resubmit(,%s)" %
(LV_ID, constants.LEARN_FROM_TUN)),
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.tun_br_ofports = TUN_OFPORTS
a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat(self):
action_string = 'strip_vlan,normal'
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_PHY_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LV_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=65535, actions=action_string))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan(self):
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_PHY_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=LV_ID, actions=action_string))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
self._verify_mock_calls()
def test_reclaim_local_vlan(self):
self.mock_tun_bridge_expected += [
mock.call.delete_flows(
table=constants.TUN_TABLE['gre'], tun_id=LS_ID),
mock.call.delete_flows(dl_vlan=LVM.vlan)
]
a = self._build_agent()
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_PHY_OFPORT, dl_vlan=LVM_FLAT.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=65535, in_port=self.INT_OFPORT))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_FLAT
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_FLAT.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_PHY_OFPORT, dl_vlan=LVM_VLAN.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=LV_ID, in_port=self.INT_OFPORT))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_VLAN
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_VLAN.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_port_bound(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'),
mock.call.set_db_attribute('Port', VIF_PORT.port_name,
'tag', str(LVM.vlan)),
mock.call.delete_flows(in_port=VIF_PORT.ofport)
]
a = self._build_agent()
a.local_vlan_map[NET_UUID] = LVM
a.local_dvr_map = {}
a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
FIXED_IPS, VM_DEVICE_OWNER, False)
self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = self._build_agent()
a.local_vlan_map[NET_UUID] = LVM
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
def test_port_dead(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'),
mock.call.set_db_attribute(
'Port', VIF_PORT.port_name,
'tag', ovs_neutron_agent.DEAD_VLAN_TAG),
mock.call.add_flow(priority=2, in_port=VIF_PORT.ofport,
actions='drop')
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.local_vlan_map[NET_UUID] = LVM
a.port_dead(VIF_PORT)
self._verify_mock_calls()
def test_tunnel_update(self):
tunnel_port = '9999'
self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
self.mock_tun_bridge_expected += [
mock.call.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1',
'gre', 4789, True),
mock.call.add_flow(priority=1, in_port=tunnel_port,
actions='resubmit(,3)')
]
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.10.1',
tunnel_type=p_const.TYPE_GRE)
self._verify_mock_calls()
def test_tunnel_update_self(self):
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.0.1')
self._verify_mock_calls()
def test_daemon_loop(self):
reply2 = {'current': set(['tap0']),
'added': set(['tap2']),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set(['tap0'])}
self.mock_int_bridge_expected += [
mock.call.dump_flows_for_table(constants.CANARY_TABLE),
mock.call.dump_flows_for_table(constants.CANARY_TABLE)
]
with contextlib.nested(
mock.patch.object(log.ContextAdapter, 'exception'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'scan_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'process_network_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'tunnel_sync'),
mock.patch.object(time, 'sleep')
) as (log_exception, scan_ports, process_network_ports,
ts, time_sleep):
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
scan_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
q_agent = self._build_agent()
            # Hack to test the loop:
            # we start the method and expect it to raise after the 2nd pass.
            # If something goes wrong, assert_has_calls below will catch it.
try:
q_agent.daemon_loop()
except Exception:
pass
# FIXME(salv-orlando): There should not be assertions on log messages
log_exception.assert_called_once_with(
"Error while processing VIF ports")
scan_ports.assert_has_calls([
mock.call(set(), set()),
mock.call(set(['tap0']), set())
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set(['tap2'])}, False),
mock.call({'current': set(['tap2']),
'removed': set(['tap0']),
'added': set([])}, False)
])
self._verify_mock_calls()
class TunnelTestUseVethInterco(TunnelTest):
USE_VETH_INTERCONNECTION = True
def _define_expected_calls(self):
self.mock_bridge_expected = [
mock.call(self.INT_BRIDGE, 'sudo'),
mock.call(self.MAP_TUN_BRIDGE, 'sudo'),
mock.call(self.TUN_BRIDGE, 'sudo'),
]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.delete_port('patch-tun'),
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.add_flow(table=constants.CANARY_TABLE, priority=0,
actions="drop")
]
self.mock_map_tun_bridge_expected = [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.intb),
]
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.inta)
]
self.mock_int_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_INT_OFPORT,
actions='drop')
]
self.mock_map_tun_bridge_expected += [
mock.call.add_flow(priority=2,
in_port=self.MAP_TUN_PHY_OFPORT,
actions='drop')
]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_tun_bridge_expected += [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1,
in_port=self.INT_OFPORT,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN),
mock.call.add_flow(priority=0, actions='drop'),
mock.call.add_flow(priority=0,
table=constants.PATCH_LV_TO_TUN,
dl_dst=UCAST_MAC,
actions="resubmit(,%s)" %
constants.UCAST_TO_TUN),
mock.call.add_flow(priority=0,
table=constants.PATCH_LV_TO_TUN,
dl_dst=BCAST_MAC,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
]
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.mock_tun_bridge_expected.append(
mock.call.add_flow(
table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop"))
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
self.mock_tun_bridge_expected += [
mock.call.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.INT_OFPORT)),
mock.call.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
mock.call.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
]
self.device_exists_expected = [
mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'),
]
self.ipdevice_expected = [
mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'),
mock.call().link.delete()
]
self.ipwrapper_expected = [
mock.call('sudo'),
mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE,
'phy-%s' % self.MAP_TUN_BRIDGE)
]
self.get_bridges_expected = [mock.call('sudo'), mock.call('sudo')]
self.inta_expected = [mock.call.link.set_up()]
self.intb_expected = [mock.call.link.set_up()]
self.execute_expected = [mock.call(['udevadm', 'settle',
'--timeout=10'])]
class TunnelTestWithMTU(TunnelTestUseVethInterco):
VETH_MTU = 1500
def _define_expected_calls(self):
super(TunnelTestWithMTU, self)._define_expected_calls()
self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
|
|
from __future__ import absolute_import
from __future__ import print_function
import math
import re
from decimal import Decimal
from .adjacency_graphs import adjacency_graphs
from six.moves import range
# on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1.
# this calculates the average over all keys.
def calc_average_degree(graph):
average = 0
for key, neighbors in graph.items():
average += len([n for n in neighbors if n])
    average /= float(len(graph))
return average
BRUTEFORCE_CARDINALITY = 10
MIN_GUESSES_BEFORE_GROWING_SEQUENCE = 10000
MIN_SUBMATCH_GUESSES_SINGLE_CHAR = 10
MIN_SUBMATCH_GUESSES_MULTI_CHAR = 50
REFERENCE_YEAR = 2000
def binom(n, k):
"""
Returns binomial coefficient (n choose k).
"""
# http://blog.plover.com/math/choose.html
if k > n:
return 0
if k == 0:
return 1
result = 1
for denom in range(1, k + 1):
result *= n
result /= denom
n -= 1
return result
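# Illustrative note (not in the original): binom computes n-choose-k
# incrementally, dividing after each multiplication so every intermediate
# value stays an integer, e.g. binom(5, 2) == (5 / 1) * (4 / 2) == 10.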
def log10(n):
"""
Returns logarithm of n in base 10.
"""
return math.log(float(n), 10)
def log2(n):
"""
Returns logarithm of n in base 2.
"""
return math.log(n, 2)
def factorial(n):
"""
Return factorial of n
"""
if n < 2:
return 1
f = 1
    for i in range(2, n + 1):
f *= i
return f
def insert_val_to_arr(array, index, value, default=None):
if (len(array) - 1) > index:
array[index] = value
else:
for i in range(len(array), index+1):
array.append(default)
array[index] = value
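# Illustrative note (not in the original): insert_val_to_arr pads the list
# with `default` up to the requested index when it is too short, e.g.
# insert_val_to_arr([1], 3, 'x') turns [1] into [1, None, None, 'x'].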
def most_guessable_match_sequence(password, matches, _exclude_additive=False):
optimal_product = [[] for _ in range(len(password)+1)]
backpointers = [[] for _ in range(len(password)+1)]
max_l = 0
optimal_l = None
def make_bruteforce_match(i, j):
return {
"pattern": "bruteforce",
"token": password[i:j+1],
"i": i,
"j": j
}
def score(guess_product, sequence_length):
result = math.factorial(sequence_length) * guess_product
if not _exclude_additive:
result += math.pow(MIN_GUESSES_BEFORE_GROWING_SEQUENCE, sequence_length - 1)
return result
for k in range(len(password)):
backpointers[k] = []
optimal_product[k] = []
optimal_score = Decimal("Infinity")
for prev_l in range(max_l + 1):
# for each new k, starting scenario to try to beat: bruteforce matches
# involving the lowest-possible l. three cases:
#
# 1. all-bruteforce match (for length-1 sequences.)
# 2. extending a previous bruteforce match
# (possible when optimal[k-1][l] ends in bf.)
# 3. starting a new single-char bruteforce match
# (possible when optimal[k-1][l] exists but does not end in bf.)
#
# otherwise: there is no bruteforce starting scenario that might be better
# than already-discovered lower-l sequences.
consider_bruteforce = True
bf_j = k
try:
if prev_l == 0:
bf_i = 0
new_l = 1
elif "pattern" in backpointers[k-1][prev_l] and \
backpointers[k-1][prev_l]["pattern"] == "bruteforce":
bf_i = backpointers[k-1][prev_l]["i"]
new_l = prev_l
elif backpointers[k-1][prev_l] is not None:
bf_i = k
new_l = prev_l + 1
else:
# bf_i = 0
# new_l = None
consider_bruteforce = False
            except (IndexError, TypeError):
# bf_i = 0
# new_l = None
consider_bruteforce = False
if consider_bruteforce:
bf_match = make_bruteforce_match(bf_i, bf_j)
                prev_j = k - len(bf_match["token"]) # end of preceding match
candidate_product = estimate_guesses(bf_match, password)
if new_l > 1:
candidate_product *= optimal_product[prev_j][new_l - 1]
candidate_score = score(candidate_product, new_l)
if candidate_score < optimal_score:
optimal_score = candidate_score
# optimal_product[k][new_l] = candidate_product
insert_val_to_arr(optimal_product[k], new_l, candidate_product)
optimal_l = new_l
max_l = max(max_l, new_l)
# backpointers[k][new_l] = bf_match
insert_val_to_arr(backpointers[k], new_l, bf_match)
# now try beating those bruteforce starting scenarios.
# for each match m ending at k, see if forming a (prev_l + 1) sequence
# ending at m is better than the current optimum.
for match in matches:
if match["j"] != k:
continue
i, j = (match["i"], match["j"])
if prev_l == 0:
# if forming a len-1 sequence [match], match.i must fully cover [0..k]
if i != 0:
continue
else:
# it's only possible to form a new potentially-optimal sequence ending at
# match when there's an optimal length-prev_l sequence ending at match.i-1.
try:
if not optimal_product[i-1][prev_l]:
continue
                except IndexError:
continue
candidate_product = estimate_guesses(match, password)
if prev_l > 0:
candidate_product *= optimal_product[i-1][prev_l]
candidate_score = score(candidate_product, prev_l + 1)
if candidate_score < optimal_score:
optimal_score = candidate_score
insert_val_to_arr(optimal_product[k], prev_l + 1, candidate_product)
# optimal_product[k][prev_l+1] = candidate_product
optimal_l = prev_l + 1
max_l = max(max_l, prev_l+1)
insert_val_to_arr(backpointers[k], prev_l + 1, match)
# backpointers[k][prev_l+1] = match
# walk backwards and decode the optimal sequence
match_sequence = []
l = optimal_l
k = len(password) - 1
while k >= 0:
match = backpointers[k][l]
match_sequence.append(match)
k = match["i"] - 1
l -= 1
match_sequence.reverse()
# final result object
return {
"password": password,
"guesses": optimal_score,
"guesses_log10": log10(optimal_score),
"sequence": match_sequence
}
# ------------------------------------------------------------------------------
# guess estimation -- one function per match pattern ---------------------------
# ------------------------------------------------------------------------------
def estimate_guesses(match, password):
if "guesses" in match and match["guesses"]:
return match["guesses"] # a match's guess estimate doesn't change. cache it.
min_guesses = 1
if len(match["token"]) < len(password):
if len(match["token"]) == 1:
min_guesses = MIN_SUBMATCH_GUESSES_SINGLE_CHAR
else:
min_guesses = MIN_SUBMATCH_GUESSES_MULTI_CHAR
estimation_functions = {
"bruteforce": bruteforce_guesses,
"dictionary": dictionary_guesses,
"spatial": spatial_guesses,
"repeat": repeat_guesses,
"sequence": sequence_guesses,
"regex": regex_guesses,
"date": date_guesses
}
guesses = estimation_functions[match["pattern"]](match)
match["guesses"] = max(guesses, min_guesses)
match["guesses_log10"] = log10(match["guesses"])
return match["guesses"]
def bruteforce_guesses(match):
guesses = math.pow(BRUTEFORCE_CARDINALITY, len(match["token"]))
# small detail: make bruteforce matches at minimum one guess bigger than smallest allowed
    # submatch guesses, such that non-bruteforce submatches over the same [i..j] take precedence.
if len(match["token"]) == 1:
min_guesses = MIN_SUBMATCH_GUESSES_SINGLE_CHAR + 1
else:
min_guesses = MIN_SUBMATCH_GUESSES_MULTI_CHAR + 1
return max(guesses, min_guesses)
def repeat_guesses(match):
return match["base_guesses"] * match["repeat_count"]
def sequence_guesses(match):
first_chr = match["token"][0]
# lower guesses for obvious starting points
if first_chr in ["a", "A", "z", "Z", "0", "1", "9"]:
base_guesses = 4
else:
if first_chr.isdigit():
base_guesses = 10 # digits
else:
# could give a higher base for uppercase,
# assigning 26 to both upper and lower sequences is more conservative.
base_guesses = 26
if not match["ascending"]:
# need to try a descending sequence in addition to every ascending sequence ->
# 2x guesses
base_guesses *= 2
return base_guesses * len(match["token"])
MIN_YEAR_SPACE = 20
def regex_guesses(match):
char_class_bases = {
"alpha_lower": 26,
"alpha_upper": 26,
"alpha": 52,
"alphanumeric": 62,
"digits": 10,
"symbols": 33
}
if "regex_name" in match and match["regex_name"] in char_class_bases:
return math.pow(char_class_bases[match["regex_name"]], len(match["token"]))
elif "regex_name" in match and match["regex_name"] == "recent_year":
# conservative estimate of year space: num years from REFERENCE_YEAR.
# if year is close to REFERENCE_YEAR, estimate a year space of MIN_YEAR_SPACE.
        year_space = abs(int(match["regex_match"].group(0)) - REFERENCE_YEAR)
year_space = max(year_space, MIN_YEAR_SPACE)
return year_space
def date_guesses(match):
    # base guesses: (year distance from REFERENCE_YEAR) * num_days * num_months
year_space = max(math.fabs(match["year"] - REFERENCE_YEAR), MIN_YEAR_SPACE)
guesses = year_space * 31 * 12
# double for four-digit years
if "has_full_year" in match and match["has_full_year"]:
guesses *= 2
# add factor of 4 for separator selection (one of ~4 choices)
if "separator" in match and match["separator"]:
guesses *= 4
return guesses
KEYBOARD_AVERAGE_DEGREE = calc_average_degree(adjacency_graphs["qwerty"])
# slightly different for keypad/mac keypad, but close enough
KEYPAD_AVERAGE_DEGREE = calc_average_degree(adjacency_graphs["keypad"])
KEYBOARD_STARTING_POSITIONS = len(adjacency_graphs["qwerty"])
KEYPAD_STARTING_POSITIONS = len(adjacency_graphs["keypad"])
def spatial_guesses(match):
if "graph" in match and match["graph"] in ['qwerty', 'dvorak']:
s = KEYBOARD_STARTING_POSITIONS
d = KEYBOARD_AVERAGE_DEGREE
else:
s = KEYPAD_STARTING_POSITIONS
d = KEYPAD_AVERAGE_DEGREE
guesses = 0
L = len(match["token"])
t = match["turns"]
# estimate the number of possible patterns w/ length L or less with t turns or less.
for i in range(2, L + 1):
possible_turns = min(t, i - 1)
for j in range(1, possible_turns + 1):
guesses += binom(i - 1, j - 1) * s * math.pow(d, j)
# add extra guesses for shifted keys. (% instead of 5, A instead of a.)
# math is similar to extra guesses of l33t substitutions in dictionary matches.
if "shifted_count" in match and match["shifted_count"]:
S = match["shifted_count"]
U = len(match["token"]) - match["shifted_count"] # unshifted count
if S == 0 or U == 0:
guesses *= 2
else:
shifted_variations = 0
for i in range(1, min(S, U) + 1):
shifted_variations += binom(S + U, i)
guesses *= shifted_variations
return guesses
def dictionary_guesses(match):
match["base_guesses"] = match["rank"] # keep these as properties for display purposes
match["uppercase_variations"] = uppercase_variations(match)
match["l33t_variations"] = l33t_variations(match)
reversed_variations = 2 if "reversed" in match and match["reversed"] else 1
return match["base_guesses"] * match["uppercase_variations"] * match["l33t_variations"] * reversed_variations
START_UPPER = r"^[A-Z][^A-Z]+$"
END_UPPER = r"^[^A-Z]+[A-Z]$"
ALL_UPPER = r"^[^a-z]+$"
ALL_LOWER = r"^[^A-Z]+$"
NO_LETTER = r"^$"
def uppercase_variations(match):
word = match["token"]
if re.search(ALL_LOWER, word) or re.search(NO_LETTER, word):
return 1
# a capitalized word is the most common capitalization scheme,
# so it only doubles the search space (uncapitalized + capitalized).
# allcaps and end-capitalized are common enough too, underestimate as 2x factor to be safe.
for regex in [START_UPPER, END_UPPER, ALL_UPPER]:
if re.search(regex, word):
return 2
# otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters
    # with U uppercase letters or less. Or, if there's more uppercase than lower (e.g. PASSwORD),
# the number of ways to lowercase U+L letters with L lowercase letters or less.
U = len([c for c in word if re.match(u"[A-Z]", c)])
L = len([c for c in word if re.match(u"[a-z]", c)])
variations = 0
for i in range(1, min(U, L) + 1):
variations += binom(U + L, i)
return variations
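# Worked example (illustrative, not in the original): for "PaSsword",
# U = 2 and L = 6, so variations = binom(8, 1) + binom(8, 2) = 8 + 28 = 36.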
def l33t_variations(match):
if "l33t" not in match or not match["l33t"]:
return 1
variations = 1
for subbed, unsubbed in match["sub"].items():
# lower-case match.token before calculating: capitalization shouldn't affect l33t calc.
c_list = match["token"].lower()
num_subbed = len([c for c in c_list if c == subbed])
num_unsubbed = len([c for c in c_list if c == unsubbed])
if num_subbed == 0 or num_unsubbed == 0:
# for this sub, password is either fully subbed (444) or fully unsubbed (aaa)
# treat that as doubling the space (attacker needs to try fully subbed chars in addition to
# unsubbed.)
variations *= 2
else:
# this case is similar to capitalization:
# with aa44a, U = 3, S = 2, attacker needs to try unsubbed + one sub + two subs
p = min(num_unsubbed, num_subbed)
possibilities = 0
for i in range(1, p+1):
possibilities += binom(num_unsubbed + num_subbed, i)
variations *= possibilities
return variations
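if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); the match dict
    # below is hypothetical. Because of the relative import of
    # adjacency_graphs above, run this with ``python -m <package>.<module>``
    # rather than as a plain script.
    _demo_match = {"pattern": "bruteforce", "token": "password", "i": 0, "j": 7}
    print("guesses for a single bruteforce match:",
          estimate_guesses(_demo_match, "password"))
    _result = most_guessable_match_sequence("password", [_demo_match])
    print("log10 guesses for 'password':", _result["guesses_log10"])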
|
|
#===============================================================================
# Copyright (c) 2018, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.tests.model_tests nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import unittest
import numpy as np
from paramz.core import HierarchyError
from paramz import transformations
from paramz.parameterized import Parameterized
from paramz.param import Param, ParamConcatenation
from paramz.model import Model
from unittest.case import SkipTest
from paramz.tests.parameterized_tests import P, M
class ModelTest(unittest.TestCase):
def setUp(self):
self.testmodel = M('testmodel')
self.testmodel.kern = P('rbf')
self.testmodel.likelihood = P('Gaussian_noise', variance=Param('variance', np.random.uniform(0.1, 0.5), transformations.Logexp()))
self.testmodel.link_parameter(self.testmodel.kern)
self.testmodel.link_parameter(self.testmodel.likelihood)
variance=Param('variance', np.random.uniform(0.1, 0.5), transformations.Logexp())
lengthscale=Param('lengthscale', np.random.uniform(.1, 1, 1), transformations.Logexp())
self.testmodel.kern.variance = variance
self.testmodel.kern.lengthscale = lengthscale
self.testmodel.kern.link_parameter(lengthscale)
self.testmodel.kern.link_parameter(variance)
self.testmodel.trigger_update()
#=============================================================================
# GP_regression. | Value | Constraint | Prior | Tied to
# rbf.variance | 1.0 | +ve | |
# rbf.lengthscale | 1.0 | +ve | |
# Gaussian_noise.variance | 1.0 | +ve | |
#=============================================================================
def test_pydot(self):
try:
import pydot
G = self.testmodel.build_pydot()
testmodel_node_labels = set(['testmodel',
'lengthscale',
'variance',
'Cacher(heres_johnny)\n limit=5\n \\#cached=1',
'rbf',
'Cacher(heres_johnny)\n limit=5\n \\#cached=1',
'Gaussian_noise',
'variance'])
testmodel_edges = set([tuple(e) for e in [['variance', 'Gaussian_noise'],
['Gaussian_noise', 'Cacher(heres_johnny)\n limit=5\n \\#cached=1'],
['rbf', 'rbf'],
['Gaussian_noise', 'variance'],
['testmodel', 'Gaussian_noise'],
['lengthscale', 'rbf'],
['rbf', 'lengthscale'],
['rbf', 'testmodel'],
['variance', 'rbf'],
['testmodel', 'rbf'],
['testmodel', 'testmodel'],
['Gaussian_noise', 'testmodel'],
['Gaussian_noise', 'Gaussian_noise'],
['rbf', 'variance'],
['rbf', 'Cacher(heres_johnny)\n limit=5\n \\#cached=1']]])
self.assertSetEqual(set([n.get_label() for n in G.get_nodes()]), testmodel_node_labels)
edges = set()
for e in G.get_edges():
points = e.obj_dict['points']
edges.add(tuple(G.get_node(p)[0].get_label() for p in points))
self.assertSetEqual(edges, testmodel_edges)
except ImportError:
raise SkipTest("pydot not available")
def test_optimize_preferred(self):
self.testmodel.update_toggle()
self.testmodel.optimize(messages=True, xtol=0, ftol=0, gtol=1e-6, bfgs_factor=1)
self.testmodel.optimize(messages=False)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_scg(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.optimize('scg', messages=1, max_f_eval=10, max_iters=100)
self.testmodel.optimize('scg', messages=0, xtol=0, ftol=0, gtol=1e-6, max_iters=2)
self.testmodel.optimize('scg', messages=0, xtol=0, ftol=20, gtol=0, max_iters=2)
self.testmodel.optimize('scg', messages=0, xtol=20, ftol=0, gtol=0, max_iters=2)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-1)
def test_optimize_tnc(self):
from paramz.optimization.optimization import opt_tnc
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.optimize_restarts(1, messages=1, optimizer=opt_tnc(), verbose=False)
self.testmodel.optimize('tnc', messages=1, xtol=0, ftol=0, gtol=1e-6)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
# self.assertDictEqual(self.testmodel.optimization_runs[-1].__getstate__(), {})
def test_optimize_rprop(self):
try:
import climin
except ImportError:
raise SkipTest("climin not installed, skipping test")
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.optimize('rprop', messages=1)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_ada(self):
try:
import climin
except ImportError:
raise SkipTest("climin not installed, skipping test")
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.trigger_update()
self.testmodel.optimize('adadelta', messages=1, step_rate=1, momentum=1)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_adam(self):
try:
import climin
except ImportError:
raise SkipTest("climin not installed, skipping test")
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.trigger_update()
self.testmodel.optimize('adam', messages=1, step_rate=1., momentum=1.)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_org_bfgs(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with np.errstate(divide='ignore'):
self.testmodel.optimize_restarts(1, messages=0, optimizer='org-bfgs', xtol=0, ftol=0, gtol=1e-6)
self.testmodel.optimize(messages=1, optimizer='org-bfgs')
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_fix(self):
self.testmodel.fix()
self.assertTrue(self.testmodel.checkgrad())
self.assertTrue(self.testmodel.checkgrad(1))
self.testmodel.optimize(messages=1)
def test_optimize_cgd(self):
self.assertRaises(KeyError, self.testmodel.optimize, 'cgd', messages=1)
def test_optimize_simplex(self):
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.testmodel.optimize('simplex', messages=1, xtol=0, ftol=0, gtol=1e-6)
self.testmodel.optimize('simplex', start=self.testmodel.optimizer_array, messages=0)
np.testing.assert_array_less(self.testmodel.gradient, np.ones(self.testmodel.size)*1e-2)
def test_optimize_error(self):
class M(Model):
def __init__(self, name, **kwargs):
super(M, self).__init__(name=name)
for k, val in kwargs.items():
self.__setattr__(k, val)
self.link_parameter(self.__getattribute__(k))
self._allowed_failures = 1
def objective_function(self):
                raise ValueError('Some error occurred')
            def log_likelihood(self):
                raise ValueError('Some error occurred')
def parameters_changed(self):
#self._obj = (self.param_array**2).sum()
self.gradient[:] = 2*self.param_array
testmodel = M("test", var=Param('test', np.random.normal(0,1,(20))))
testmodel.optimize_restarts(2, messages=0, optimizer='org-bfgs', xtol=0, ftol=0, gtol=1e-6, robust=True)
self.assertRaises(ValueError, testmodel.optimize_restarts, 1, messages=0, optimizer='org-bfgs', xtol=0, ftol=0, gtol=1e-6, robust=False)
def test_optimize_restarts(self):
m = self.testmodel.copy()
m.optimize_restarts(2, messages=0, xtol=0, ftol=0, gtol=1e-6, robust=False)
np.testing.assert_array_less(m.gradient, np.ones(self.testmodel.size)*1e-2)
        self.assertEqual(len(m.optimization_runs), 2)
def test_optimize_restarts_parallel(self):
m = self.testmodel.copy()
m.optimize_restarts(2, messages=0, xtol=0, ftol=0, gtol=1e-6, robust=False, parallel=True)
np.testing.assert_array_less(m.gradient, np.ones(self.testmodel.size) * 1e-2)
        self.assertEqual(len(m.optimization_runs), 2)
def test_raveled_index(self):
self.assertListEqual(self.testmodel._raveled_index_for(self.testmodel['.*variance']).tolist(), [1, 2])
self.assertListEqual(self.testmodel.kern.lengthscale._raveled_index_for(None).tolist(), [0])
def test_constraints_testmodel(self):
self.testmodel['.*rbf'].constrain_negative()
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [0,1])
self.testmodel['.*lengthscale'].constrain_bounded(0,1)
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [1])
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0, 1)].tolist(), [0])
self.testmodel[''].unconstrain_negative()
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [])
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0, 1)].tolist(), [0])
self.testmodel['.*lengthscale'].unconstrain_bounded(0,1)
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0, 1)].tolist(), [])
def test_constraints_set_direct(self):
self.testmodel['.*rbf'].constrain_negative()
self.testmodel['.*lengthscale'].constrain_bounded(0,1)
self.testmodel['.*variance'].fix()
self.assertListEqual(self.testmodel.constraints[transformations.__fixed__].tolist(), [1,2])
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0,1)].tolist(), [0])
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [1])
cache_constraints = self.testmodel.constraints.copy()
self.testmodel.unconstrain()
self.testmodel.likelihood.fix()
self.assertListEqual(self.testmodel._fixes_.tolist(), [transformations.UNFIXED, transformations.UNFIXED, transformations.FIXED])
self.assertListEqual(self.testmodel.constraints[transformations.__fixed__].tolist(), [2])
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0,1)].tolist(), [])
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [])
self.testmodel.constraints = cache_constraints
self.assertListEqual(self.testmodel.constraints[transformations.__fixed__].tolist(), [1,2])
self.assertListEqual(self.testmodel.constraints[transformations.Logistic(0,1)].tolist(), [0])
self.assertListEqual(self.testmodel.constraints[transformations.NegativeLogexp()].tolist(), [1])
self.assertListEqual(self.testmodel._fixes_.tolist(), [transformations.UNFIXED, transformations.FIXED, transformations.FIXED])
self.assertIs(self.testmodel.constraints, self.testmodel.likelihood.constraints._param_index_ops)
self.assertIs(self.testmodel.constraints, self.testmodel.kern.constraints._param_index_ops)
#self.assertSequenceEqual(cache_str, str(self.testmodel), None, str)
def test_updates(self):
val = float(self.testmodel.objective_function())
self.testmodel.update_toggle()
self.testmodel.kern.randomize(np.random.normal, loc=1, scale=.2)
self.testmodel.likelihood.randomize()
self.assertEqual(val, self.testmodel.objective_function())
self.testmodel.update_model(True)
self.assertNotEqual(val, self.testmodel.objective_function())
def test_set_gradients(self):
self.testmodel.gradient = 10.
np.testing.assert_array_equal(self.testmodel.gradient, 10.)
self.testmodel.kern.lengthscale.gradient = 15
np.testing.assert_array_equal(self.testmodel.gradient, [15., 10., 10.])
def test_fixing_optimize(self):
self.testmodel.kern.lengthscale.fix()
val = float(self.testmodel.kern.lengthscale)
self.testmodel.randomize()
self.assertEqual(val, self.testmodel.kern.lengthscale)
self.testmodel.optimize(max_iters=2)
def test_regular_expression_misc(self):
self.assertTrue(self.testmodel[''].checkgrad())
self.testmodel['.*rbf'][:] = 10
self.testmodel[''][2] = 11
np.testing.assert_array_equal(self.testmodel.param_array, [10,10,11])
np.testing.assert_((self.testmodel[''][:2] == [10,10]).all())
self.testmodel.kern.lengthscale.fix()
val = float(self.testmodel.kern.lengthscale)
self.testmodel.randomize()
self.assertEqual(val, self.testmodel.kern.lengthscale)
variances = self.testmodel['.*var'].values()
self.testmodel['.*var'].fix()
self.testmodel.randomize()
np.testing.assert_equal(variances, self.testmodel['.*var'].values())
self.testmodel[''] = 1.0
self.maxDiff = None
self.testmodel[''].unconstrain()
self.assertSequenceEqual(self.testmodel[''].__str__(VT100=False), " index | testmodel.rbf.lengthscale | constraints\n [0] | 1.00000000 | \n ----- | testmodel.rbf.variance | -----------\n [0] | 1.00000000 | \n ----- | testmodel.Gaussian_noise.variance | -----------\n [0] | 1.00000000 | ")
def test_fix_unfix(self):
default_constraints = dict(self.testmodel.constraints.items())
self.testmodel['.*lengthscale'].fix()
fixed = self.testmodel.constraints[transformations.__fixed__]
self.assertListEqual(fixed.tolist(), [0])
unfixed = self.testmodel.kern.lengthscale.unfix()
self.testmodel['.*lengthscale'].constrain_positive()
self.assertListEqual(unfixed.tolist(), [0])
fixed = self.testmodel['.*rbf'].fix()
fixed = self.testmodel.constraints[transformations.__fixed__]
self.assertListEqual(fixed.tolist(), [0,1])
unfixed = self.testmodel.kern.unfix()
self.assertListEqual(unfixed.tolist(), [0,1])
fixed = self.testmodel.constraints[transformations.__fixed__]
self.testmodel['.*rbf'].unfix()
np.testing.assert_array_equal(fixed, self.testmodel.constraints[transformations.__fixed__])
#print default_constraints
test_constraints = dict(self.testmodel.constraints.items())
for k in default_constraints:
np.testing.assert_array_equal(default_constraints[k], test_constraints[k])
def test_fix_unfix_constraints(self):
self.testmodel.constrain_bounded(0,1)
self.testmodel['.*variance'].constrain(transformations.Logexp())
self.testmodel['.*Gauss'].constrain_bounded(0.3, 0.7)
before_constraints = dict(self.testmodel.constraints.items())
self.testmodel.fix()
test_constraints = dict(self.testmodel.constraints.items())
for k in before_constraints:
np.testing.assert_array_equal(before_constraints[k], test_constraints[k])
np.testing.assert_array_equal(test_constraints[transformations.__fixed__], [0,1,2])
# Assert fixing works and does not randomize the - say - lengthscale:
val = float(self.testmodel.kern.lengthscale)
self.testmodel.randomize()
self.assertEqual(val, self.testmodel.kern.lengthscale)
self.testmodel.unfix()
test_constraints = dict(self.testmodel.constraints.items())
for k in before_constraints:
np.testing.assert_array_equal(before_constraints[k], test_constraints[k])
def test_fix_constrain(self):
# save the constraints as they where:
before_constraints = dict(self.testmodel.constraints.items())
# fix
self.testmodel.fix()
test_constraints = dict(self.testmodel.constraints.items())
# make sure fixes are in place:
np.testing.assert_array_equal(test_constraints[transformations.__fixed__], [0,1,2])
# make sure, the constraints still exist
for k in before_constraints:
np.testing.assert_array_equal(before_constraints[k], test_constraints[k])
# override fix and previous constraint:
self.testmodel.likelihood.constrain_bounded(0,1)
# lik not fixed anymore
np.testing.assert_array_equal(self.testmodel.constraints[transformations.__fixed__], [0,1])
# previous constraints still in place:
np.testing.assert_array_equal(self.testmodel.constraints[transformations.Logexp()], [0,1])
# lik bounded
np.testing.assert_array_equal(self.testmodel.constraints[transformations.Logistic(0,1)], [2])
def test_caching_offswitch(self):
self.assertEqual(len(self.testmodel.kern.cache), 1)
[self.assertEqual(len(c.cached_outputs), 1) for c in self.testmodel.kern.cache.values()]
self.testmodel.disable_caching()
self.testmodel.trigger_update()
[self.assertFalse(c.cacher_enabled) for c in self.testmodel.kern.cache.values()]
self.assertFalse(self.testmodel.kern.cache.caching_enabled)
self.assertFalse(self.testmodel.likelihood.cache.caching_enabled)
self.assertTrue(self.testmodel.checkgrad())
self.assertEqual(len(self.testmodel.kern.cache), 1)
[self.assertEqual(len(c.cached_outputs), 0) for c in self.testmodel.kern.cache.values()]
self.testmodel.enable_caching()
self.testmodel.trigger_update()
self.assertEqual(len(self.testmodel.kern.cache), 1)
[self.assertEqual(len(c.cached_outputs), 1) for c in self.testmodel.kern.cache.values()]
def test_checkgrad(self):
self.assertTrue(self.testmodel.checkgrad(1))
self.assertTrue(self.testmodel.checkgrad())
self.assertTrue(self.testmodel.rbf.variance.checkgrad(1))
self.assertTrue(self.testmodel.rbf.variance.checkgrad())
self.assertTrue(self.testmodel._checkgrad(verbose=1))
self.assertTrue(self.testmodel._checkgrad(verbose=0))
def test_printing(self):
print(self.testmodel.hierarchy_name(False))
self.assertEqual(self.testmodel.num_params, 2)
self.assertEqual(self.testmodel.kern.lengthscale.num_params, 0)
def test_hierarchy_error(self):
self.assertRaises(HierarchyError, self.testmodel.link_parameter, self.testmodel.parameters[0])
p2 = P('Gaussian_noise', variance=Param('variance', np.random.uniform(0.1, 0.5), transformations.Logexp()))
self.testmodel.link_parameter(p2.variance)
self.assertTrue(self.testmodel.checkgrad())
self.assertRaises(HierarchyError, self.testmodel.unlink_parameter, p2)
self.assertRaises(HierarchyError, self.testmodel.unlink_parameter, 'not a parameter')
def test_set_get(self):
self.testmodel.likelihood.variance = 10
self.assertIsInstance(self.testmodel.likelihood.variance, Param)
np.testing.assert_array_equal(self.testmodel.likelihood[:], [10])
def test_get_by_name(self):
self.testmodel.likelihood.variance = 10
self.assertIsInstance(self.testmodel.likelihood.variance, Param)
np.testing.assert_array_equal(self.testmodel.likelihood[:], [10])
def test_likelihood_replicate(self):
m = self.testmodel
m2 = self.testmodel.copy(memo={})
np.testing.assert_array_equal(self.testmodel[:], m2[:])
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[''].values()
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.randomize()
m2[''] = m['']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m.Gaussian_noise.randomize()
m2[:] = m[:]
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
m['.*var'] = 2
m2['.*var'] = m['.*var']
np.testing.assert_almost_equal(m.log_likelihood(), m2.log_likelihood())
np.testing.assert_array_equal(self.testmodel[''].values(), m2[''].values())
np.testing.assert_array_equal(self.testmodel[:], m2[''].values())
np.testing.assert_array_equal(self.testmodel[''].values(), m2[:])
np.testing.assert_array_equal(self.testmodel['.*variance'].values(), m2['.*variance'].values())
np.testing.assert_array_equal(self.testmodel['.*len'].values, m2['.*len'].values)
np.testing.assert_array_equal(self.testmodel['.*rbf'].values(), m2['.*rbf'].values())
def test_set_empty(self):
pars = self.testmodel[:].copy()
self.testmodel.rbf[:] = None
np.testing.assert_array_equal(self.testmodel[:], pars)
def test_set_error(self):
self.assertRaises(ValueError, self.testmodel.__setitem__, slice(None), 'test')
def test_empty_parameterized(self):
#print(ParamConcatenation([self.testmodel.rbf, self.testmodel.likelihood.variance]))
self.testmodel.name = 'anothername'
self.testmodel.link_parameter(Parameterized('empty'))
hmm = Parameterized('test')
self.testmodel.kern.test = hmm
self.testmodel.kern.link_parameter(hmm)
self.testmodel.kern.test.link_parameter(Param('test1',1))
self.assertIsInstance(self.testmodel['.*test1$'], Param)
self.assertIsInstance(self.testmodel['.*test$'], Parameterized)
self.assertIsInstance(self.testmodel['.*empty'], Parameterized)
self.assertIsInstance(self.testmodel['.*test'], ParamConcatenation)
self.assertIsInstance(self.testmodel['.*rbf$'], Parameterized)
self.assertIs(self.testmodel['rbf.variance'], self.testmodel.rbf.variance)
self.assertIs(self.testmodel['rbf$'], self.testmodel.rbf)
def test_likelihood_set(self):
m = self.testmodel
m2 = self.testmodel.copy()
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern.lengthscale
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2['.*lengthscale'] = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
m.kern.lengthscale.randomize()
m2.kern.lengthscale = m.kern['.*lengthscale']
np.testing.assert_equal(m.log_likelihood(), m2.log_likelihood())
np.testing.assert_array_equal(self.testmodel[''].values(), m2[''].values())
np.testing.assert_array_equal(self.testmodel[:], m2[''].values())
np.testing.assert_array_equal(self.testmodel[''].values(), m2[:])
np.testing.assert_array_equal(self.testmodel['.*variance'].values(), m2['.*variance'].values())
np.testing.assert_array_equal(self.testmodel['.*len'], m2['.*len'])
np.testing.assert_array_equal(self.testmodel['.*rbf'][0], m2['.*rbf'][0])
np.testing.assert_array_equal(self.testmodel['.*rbf'][1], m2['.*rbf'][1])
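if __name__ == "__main__":
    # Conventional unittest entry point (added as a convenience sketch); the
    # suite can equally be run with ``python -m unittest`` or pytest.
    unittest.main()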
|
|
import types
import weakref
from .lock import allocate_lock
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
def get_c_name(self, replace_with='', context='a C file'):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
replace_with = replace_with.strip()
if replace_with:
if replace_with.startswith('*') and '&[' in result:
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
result = result.replace('&', replace_with)
if '$' in result:
from .ffiplatform import VerificationError
raise VerificationError(
"cannot generate '%s' in %s: unknown type name"
% (self._get_c_name(), context))
return result
def _get_c_name(self):
return self.c_name_with_marker.replace('&', '')
def has_c_name(self):
return '$' not in self._get_c_name()
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
except KeyError:
BType = self.build_backend_type(ffi, finishlist)
BType2 = ffi._cached_btypes.setdefault(self, BType)
assert BType2 is BType
return BType
def __repr__(self):
return '<%s>' % (self._get_c_name(),)
def _get_items(self):
return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._get_items() == other._get_items())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
_attrs_ = ()
def __init__(self):
self.c_name_with_marker = 'void&'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_void_type')
void_type = VoidType()
class PrimitiveType(BaseType):
_attrs_ = ('name',)
ALL_PRIMITIVE_TYPES = {
'char': 'c',
'short': 'i',
'int': 'i',
'long': 'i',
'long long': 'i',
'signed char': 'i',
'unsigned char': 'i',
'unsigned short': 'i',
'unsigned int': 'i',
'unsigned long': 'i',
'unsigned long long': 'i',
'float': 'f',
'double': 'f',
'long double': 'f',
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
'uint16_t': 'i',
'int32_t': 'i',
'uint32_t': 'i',
'int64_t': 'i',
'uint64_t': 'i',
'intptr_t': 'i',
'uintptr_t': 'i',
'ptrdiff_t': 'i',
'size_t': 'i',
'ssize_t': 'i',
}
def __init__(self, name):
assert name in self.ALL_PRIMITIVE_TYPES
self.name = name
self.c_name_with_marker = name + '&'
def is_char_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
def is_integer_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
def is_float_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_primitive_type', self.name)
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis')
def __init__(self, args, result, ellipsis):
self.args = args
self.result = result
self.ellipsis = ellipsis
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
# Corresponds to a C type like 'int(int)', which is the C type of
# a function, but not a pointer-to-function. The backend has no
# notion of such a type; it's used temporarily by parsing.
_base_pattern = '(&)(%s)'
is_raw_function = True
def build_backend_type(self, ffi, finishlist):
from . import api
raise api.CDefError("cannot render the type %r: it is a function "
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
return FunctionPtrType(self.args, self.result, self.ellipsis)
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
return global_cache(self, ffi, 'new_function_type',
tuple(args), result, self.ellipsis)
class PointerType(BaseType):
_attrs_ = ('totype',)
_base_pattern = " *&"
_base_pattern_array = "(*&)"
def __init__(self, totype):
self.totype = totype
if totype.is_array_type:
extra = self._base_pattern_array
else:
extra = self._base_pattern
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
voidp_type = PointerType(void_type)
class ConstPointerType(PointerType):
_base_pattern = " const *&"
_base_pattern_array = "(const *&)"
const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
def __init__(self, totype, name):
PointerType.__init__(self, totype)
self.name = name
self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
_attrs_ = ('item', 'length')
is_array_type = True
def __init__(self, item, length):
self.item = item
self.length = length
#
if length is None:
brackets = '&[]'
elif length == '...':
brackets = '&[/*...*/]'
else:
brackets = '&[%d]' % length
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
def build_backend_type(self, ffi, finishlist):
if self.length == '...':
from . import api
raise api.CDefError("cannot render the type %r: unknown length" %
(self,))
self.item.get_cached_btype(ffi, finishlist) # force the item BType
BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
_attrs_ = ('name',)
forcename = None
def build_c_name_with_marker(self):
name = self.forcename or '%s %s' % (self.kind, self.name)
self.c_name_with_marker = name + '&'
def force_the_name(self, forcename):
self.forcename = forcename
self.build_c_name_with_marker()
def get_official_name(self):
assert self.c_name_with_marker.endswith('&')
return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = False
partial = False
packed = False
def __init__(self, name, fldnames, fldtypes, fldbitsize):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.build_c_name_with_marker()
def enumfields(self):
for name, type, bitsize in zip(self.fldnames, self.fldtypes,
self.fldbitsize):
if name == '' and isinstance(type, StructOrUnion):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
for name, type, bitsize in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
if self.fldtypes is None:
return # not completing it: it's an opaque struct
#
self.completed = 1
#
if self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
sflags = 0
if self.packed:
sflags = 8 # SF_PACKED
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, sflags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length == '...':
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
from .ffiplatform import VerificationError
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
#
return global_cache(self, ffi, 'new_%s_type' % self.kind,
self.get_official_name(), key=self)
class StructType(StructOrUnion):
kind = 'struct'
class UnionType(StructOrUnion):
kind = 'union'
class EnumType(StructOrUnionOrEnum):
kind = 'enum'
partial = False
partial_resolved = False
def __init__(self, name, enumerators, enumvalues, baseinttype=None):
self.name = name
self.enumerators = enumerators
self.enumvalues = enumvalues
self.baseinttype = baseinttype
self.build_c_name_with_marker()
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
name = self.get_official_name()
self.forcename = '$' + name.replace(' ', '_')
def check_not_partial(self):
if self.partial and not self.partial_resolved:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
base_btype = self.build_baseinttype(ffi, finishlist)
return global_cache(self, ffi, 'new_enum_type',
self.get_official_name(),
self.enumerators, self.enumvalues,
base_btype, key=self)
def build_baseinttype(self, ffi, finishlist):
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
smallest_value = 0
largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
candidate2 = PrimitiveType("long")
else:
sign = 0
candidate1 = PrimitiveType("unsigned int")
candidate2 = PrimitiveType("unsigned long")
btype1 = candidate1.get_cached_btype(ffi, finishlist)
btype2 = candidate2.get_cached_btype(ffi, finishlist)
size1 = ffi.sizeof(btype1)
size2 = ffi.sizeof(btype2)
if (smallest_value >= ((-1) << (8*size1-1)) and
largest_value < (1 << (8*size1-sign))):
return btype1
if (smallest_value >= ((-1) << (8*size2-1)) and
largest_value < (1 << (8*size2-sign))):
return btype2
        from . import api
        raise api.CDefError("%s values don't all fit into either 'long' "
                            "or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
if structname is None:
structname = '$%s' % name
tp = StructType(structname, None, None, None)
tp.force_the_name(name)
return tp
def unknown_ptr_type(name, structname=None):
if structname is None:
structname = '*$%s' % name
tp = StructType(structname, None, None, None)
return NamedPointerType(tp, name)
global_lock = allocate_lock()
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
return ffi._backend.__typecache[key]
except KeyError:
pass
except AttributeError:
# initialize the __typecache attribute, either at the module level
# if ffi._backend is a module, or at the class level if ffi._backend
# is some instance.
if isinstance(ffi._backend, types.ModuleType):
ffi._backend.__typecache = weakref.WeakValueDictionary()
else:
type(ffi._backend).__typecache = weakref.WeakValueDictionary()
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
cache = ffi._backend.__typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
cache[key] = res
return res
else:
return res1
def pointer_cache(ffi, BType):
return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
if e.args and type(e.args[0]) is str:
e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
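if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the '&' marker in
    # c_name_with_marker is the slot where a variable name goes when rendering
    # a C declaration. Because of the relative import of .lock above, run this
    # with ``python -m <package>.<module>``.
    print(PointerType(PrimitiveType("int")).get_c_name("p"))        # int * p
    print(ArrayType(PrimitiveType("char"), 16).get_c_name("buf"))   # char buf[16]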
|
|
"""Support for Modbus switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF, CONF_COMMAND_ON, CONF_NAME, CONF_SLAVE, STATE_ON)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.restore_state import RestoreEntity
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COIL = 'coil'
CONF_COILS = 'coils'
CONF_REGISTER = 'register'
CONF_REGISTER_TYPE = 'register_type'
CONF_REGISTERS = 'registers'
CONF_STATE_OFF = 'state_off'
CONF_STATE_ON = 'state_on'
CONF_VERIFY_REGISTER = 'verify_register'
CONF_VERIFY_STATE = 'verify_state'
REGISTER_TYPE_HOLDING = 'holding'
REGISTER_TYPE_INPUT = 'input'
REGISTERS_SCHEMA = vol.Schema({
vol.Required(CONF_COMMAND_OFF): cv.positive_int,
vol.Required(CONF_COMMAND_ON): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING):
vol.In([REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]),
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STATE_OFF): cv.positive_int,
vol.Optional(CONF_STATE_ON): cv.positive_int,
vol.Optional(CONF_VERIFY_REGISTER): cv.positive_int,
vol.Optional(CONF_VERIFY_STATE, default=True): cv.boolean,
})
COILS_SCHEMA = vol.Schema({
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
})
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_COILS, CONF_REGISTERS),
PLATFORM_SCHEMA.extend({
vol.Optional(CONF_COILS): [COILS_SCHEMA],
vol.Optional(CONF_REGISTERS): [REGISTERS_SCHEMA],
}))
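# Illustrative configuration sketch (hypothetical entity names and addresses,
# not taken from the original source):
#
# switch:
#   - platform: modbus
#     coils:
#       - name: fan
#         slave: 1
#         coil: 13
#     registers:
#       - name: heater
#         slave: 1
#         register: 42
#         command_on: 1
#         command_off: 0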
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Read configuration and create Modbus devices."""
switches = []
if CONF_COILS in config:
for coil in config.get(CONF_COILS):
hub_name = coil.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusCoilSwitch(
hub, coil.get(CONF_NAME), coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
if CONF_REGISTERS in config:
for register in config.get(CONF_REGISTERS):
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusRegisterSwitch(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_COMMAND_ON),
register.get(CONF_COMMAND_OFF),
register.get(CONF_VERIFY_STATE),
register.get(CONF_VERIFY_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_STATE_ON),
register.get(CONF_STATE_OFF)))
add_entities(switches)
class ModbusCoilSwitch(ToggleEntity, RestoreEntity):
"""Representation of a Modbus coil switch."""
def __init__(self, hub, name, slave, coil):
"""Initialize the coil switch."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._is_on = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._is_on = state.state == STATE_ON
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_coil(self._slave, self._coil, True)
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_coil(self._slave, self._coil, False)
def update(self):
"""Update the state of the switch."""
result = self._hub.read_coils(self._slave, self._coil, 1)
try:
self._is_on = bool(result.bits[0])
except AttributeError:
_LOGGER.error(
'No response from hub %s, slave %s, coil %s',
self._hub.name, self._slave, self._coil)
class ModbusRegisterSwitch(ModbusCoilSwitch):
"""Representation of a Modbus register switch."""
# pylint: disable=super-init-not-called
def __init__(self, hub, name, slave, register, command_on, command_off,
verify_state, verify_register, register_type, state_on,
state_off):
"""Initialize the register switch."""
self._hub = hub
self._name = name
self._slave = slave
self._register = register
self._command_on = command_on
self._command_off = command_off
self._verify_state = verify_state
self._verify_register = (
verify_register if verify_register else self._register)
self._register_type = register_type
if state_on is not None:
self._state_on = state_on
else:
self._state_on = self._command_on
if state_off is not None:
self._state_off = state_off
else:
self._state_off = self._command_off
self._is_on = None
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_register(self._slave, self._register, self._command_on)
if not self._verify_state:
self._is_on = True
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_register(
self._slave, self._register, self._command_off)
if not self._verify_state:
self._is_on = False
def update(self):
"""Update the state of the switch."""
if not self._verify_state:
return
value = 0
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, 1)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, 1)
try:
value = int(result.registers[0])
except AttributeError:
_LOGGER.error(
"No response from hub %s, slave %s, register %s",
self._hub.name, self._slave, self._verify_register)
if value == self._state_on:
self._is_on = True
elif value == self._state_off:
self._is_on = False
else:
_LOGGER.error(
"Unexpected response from hub %s, slave %s "
"register %s, got 0x%2x",
self._hub.name, self._slave, self._verify_register, value)
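# Illustrative sketch (hypothetical values) of the config dict handed to the
# setup code above after schema validation; the keys mirror the CONF_* constants
# imported at the top of this platform, so adjust if the actual schema differs:
#
# config = {
#     'coils': [
#         {'name': 'fan', 'hub': 'hub1', 'slave': 1, 'coil': 0},
#     ],
#     'registers': [
#         {'name': 'valve', 'hub': 'hub1', 'slave': 1, 'register': 11,
#          'command_on': 1, 'command_off': 0, 'verify_state': True},
#     ],
# }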
|
|
# -*- coding: utf-8 -*-
"""
flask_security.core
~~~~~~~~~~~~~~~~~~~
Flask-Security core module
:copyright: (c) 2012 by Matt Wright.
:copyright: (c) 2017 by CERN.
:copyright: (c) 2017 by ETH Zurich, Swiss Data Science Center.
:license: MIT, see LICENSE for more details.
"""
from datetime import datetime
import pkg_resources
from flask import current_app, render_template
from flask_babelex import Domain
from flask_login import UserMixin as BaseUserMixin
from flask_login import AnonymousUserMixin, LoginManager, current_user
from flask_principal import Identity, Principal, RoleNeed, UserNeed, \
identity_loaded
from itsdangerous import URLSafeTimedSerializer
from passlib.context import CryptContext
from werkzeug.datastructures import ImmutableList
from werkzeug.local import LocalProxy
from .forms import ChangePasswordForm, ConfirmRegisterForm, \
ForgotPasswordForm, LoginForm, PasswordlessLoginForm, RegisterForm, \
ResetPasswordForm, SendConfirmationForm
from .utils import config_value as cv
from .utils import _, get_config, hash_data, localize_callback, string_types, \
url_for_security, verify_hash, send_mail
from .views import create_blueprint
# Convenient references
_security = LocalProxy(lambda: current_app.extensions['security'])
#: Default Flask-Security configuration
_default_config = {
'BLUEPRINT_NAME': 'security',
'CLI_ROLES_NAME': 'roles',
'CLI_USERS_NAME': 'users',
'URL_PREFIX': None,
'SUBDOMAIN': None,
'FLASH_MESSAGES': True,
'I18N_DOMAIN': 'flask_security',
'PASSWORD_HASH': 'bcrypt',
'PASSWORD_SALT': None,
'PASSWORD_SINGLE_HASH': {
'django_argon2',
'django_bcrypt_sha256',
'django_pbkdf2_sha256',
'django_pbkdf2_sha1',
'django_bcrypt',
'django_salted_md5',
'django_salted_sha1',
'django_des_crypt',
'plaintext',
},
'LOGIN_URL': '/login',
'LOGOUT_URL': '/logout',
'REGISTER_URL': '/register',
'RESET_URL': '/reset',
'CHANGE_URL': '/change',
'CONFIRM_URL': '/confirm',
'POST_LOGIN_VIEW': '/',
'POST_LOGOUT_VIEW': '/',
'CONFIRM_ERROR_VIEW': None,
'POST_REGISTER_VIEW': None,
'POST_CONFIRM_VIEW': None,
'POST_RESET_VIEW': None,
'POST_CHANGE_VIEW': None,
'UNAUTHORIZED_VIEW': lambda: None,
'FORGOT_PASSWORD_TEMPLATE': 'security/forgot_password.html',
'LOGIN_USER_TEMPLATE': 'security/login_user.html',
'REGISTER_USER_TEMPLATE': 'security/register_user.html',
'RESET_PASSWORD_TEMPLATE': 'security/reset_password.html',
'CHANGE_PASSWORD_TEMPLATE': 'security/change_password.html',
'SEND_CONFIRMATION_TEMPLATE': 'security/send_confirmation.html',
'SEND_LOGIN_TEMPLATE': 'security/send_login.html',
'CONFIRMABLE': False,
'REGISTERABLE': False,
'RECOVERABLE': False,
'TRACKABLE': False,
'PASSWORDLESS': False,
'CHANGEABLE': False,
'SEND_REGISTER_EMAIL': True,
'SEND_PASSWORD_CHANGE_EMAIL': True,
'SEND_PASSWORD_RESET_EMAIL': True,
'SEND_PASSWORD_RESET_NOTICE_EMAIL': True,
'LOGIN_WITHIN': '1 days',
'CONFIRM_EMAIL_WITHIN': '5 days',
'RESET_PASSWORD_WITHIN': '5 days',
'LOGIN_WITHOUT_CONFIRMATION': False,
'EMAIL_SENDER': LocalProxy(lambda: current_app.config.get(
'MAIL_DEFAULT_SENDER', 'no-reply@localhost'
)),
'TOKEN_AUTHENTICATION_KEY': 'auth_token',
'TOKEN_AUTHENTICATION_HEADER': 'Authentication-Token',
'TOKEN_MAX_AGE': None,
'CONFIRM_SALT': 'confirm-salt',
'RESET_SALT': 'reset-salt',
'LOGIN_SALT': 'login-salt',
'CHANGE_SALT': 'change-salt',
'REMEMBER_SALT': 'remember-salt',
'DEFAULT_REMEMBER_ME': False,
'DEFAULT_HTTP_AUTH_REALM': _('Login Required'),
'EMAIL_SUBJECT_REGISTER': _('Welcome'),
'EMAIL_SUBJECT_CONFIRM': _('Please confirm your email'),
'EMAIL_SUBJECT_PASSWORDLESS': _('Login instructions'),
'EMAIL_SUBJECT_PASSWORD_NOTICE': _('Your password has been reset'),
'EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE': _(
'Your password has been changed'),
'EMAIL_SUBJECT_PASSWORD_RESET': _('Password reset instructions'),
'EMAIL_PLAINTEXT': True,
'EMAIL_HTML': True,
'USER_IDENTITY_ATTRIBUTES': ['email'],
'PASSWORD_SCHEMES': [
'bcrypt',
'des_crypt',
'pbkdf2_sha256',
'pbkdf2_sha512',
'sha256_crypt',
'sha512_crypt',
# And always last one...
'plaintext'
],
'DEPRECATED_PASSWORD_SCHEMES': ['auto'],
'HASHING_SCHEMES': [
'sha256_crypt',
'hex_md5',
],
'DEPRECATED_HASHING_SCHEMES': ['hex_md5'],
'DATETIME_FACTORY': datetime.utcnow,
}
#: Default Flask-Security messages
_default_messages = {
'UNAUTHORIZED': (
_('You do not have permission to view this resource.'), 'error'),
'CONFIRM_REGISTRATION': (
_('Thank you. Confirmation instructions '
'have been sent to %(email)s.'),
'success'),
'EMAIL_CONFIRMED': (
_('Thank you. Your email has been confirmed.'), 'success'),
'ALREADY_CONFIRMED': (
_('Your email has already been confirmed.'), 'info'),
'INVALID_CONFIRMATION_TOKEN': (
_('Invalid confirmation token.'), 'error'),
'EMAIL_ALREADY_ASSOCIATED': (
_('%(email)s is already associated with an account.'), 'error'),
'PASSWORD_MISMATCH': (
_('Password does not match'), 'error'),
'RETYPE_PASSWORD_MISMATCH': (
_('Passwords do not match'), 'error'),
'INVALID_REDIRECT': (
_('Redirections outside the domain are forbidden'), 'error'),
'PASSWORD_RESET_REQUEST': (
_('Instructions to reset your password have been sent to %(email)s.'),
'info'),
'PASSWORD_RESET_EXPIRED': (
_('You did not reset your password within %(within)s. '
'New instructions have been sent to %(email)s.'), 'error'),
'INVALID_RESET_PASSWORD_TOKEN': (
_('Invalid reset password token.'), 'error'),
'CONFIRMATION_REQUIRED': (
_('Email requires confirmation.'), 'error'),
'CONFIRMATION_REQUEST': (
_('Confirmation instructions have been sent to %(email)s.'), 'info'),
'CONFIRMATION_EXPIRED': (
_('You did not confirm your email within %(within)s. '
'New instructions to confirm your email have been sent '
'to %(email)s.'), 'error'),
'LOGIN_EXPIRED': (
_('You did not login within %(within)s. New instructions to login '
'have been sent to %(email)s.'), 'error'),
'LOGIN_EMAIL_SENT': (
_('Instructions to login have been sent to %(email)s.'), 'success'),
'INVALID_LOGIN_TOKEN': (
_('Invalid login token.'), 'error'),
'DISABLED_ACCOUNT': (
_('Account is disabled.'), 'error'),
'EMAIL_NOT_PROVIDED': (
_('Email not provided'), 'error'),
'INVALID_EMAIL_ADDRESS': (
_('Invalid email address'), 'error'),
'PASSWORD_NOT_PROVIDED': (
_('Password not provided'), 'error'),
'PASSWORD_NOT_SET': (
_('No password is set for this user'), 'error'),
'PASSWORD_INVALID_LENGTH': (
_('Password must be at least 6 characters'), 'error'),
'USER_DOES_NOT_EXIST': (
_('Specified user does not exist'), 'error'),
'INVALID_PASSWORD': (
_('Invalid password'), 'error'),
'PASSWORDLESS_LOGIN_SUCCESSFUL': (
_('You have successfully logged in.'), 'success'),
'FORGOT_PASSWORD': (
_('Forgot password?'), 'info'),
'PASSWORD_RESET': (
_('You successfully reset your password and you have been logged in '
'automatically.'), 'success'),
'PASSWORD_IS_THE_SAME': (
_('Your new password must be different than your previous password.'),
'error'),
'PASSWORD_CHANGE': (
_('You successfully changed your password.'), 'success'),
'LOGIN': (
_('Please log in to access this page.'), 'info'),
'REFRESH': (
_('Please reauthenticate to access this page.'), 'info'),
}
_default_forms = {
'login_form': LoginForm,
'confirm_register_form': ConfirmRegisterForm,
'register_form': RegisterForm,
'forgot_password_form': ForgotPasswordForm,
'reset_password_form': ResetPasswordForm,
'change_password_form': ChangePasswordForm,
'send_confirmation_form': SendConfirmationForm,
'passwordless_login_form': PasswordlessLoginForm,
}
def _user_loader(user_id):
return _security.datastore.find_user(id=user_id)
def _request_loader(request):
header_key = _security.token_authentication_header
args_key = _security.token_authentication_key
header_token = request.headers.get(header_key, None)
token = request.args.get(args_key, header_token)
if request.is_json:
data = request.get_json(silent=True) or {}
if isinstance(data, dict):
token = data.get(args_key, token)
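    # At this point the token has been resolved with the precedence:
    # JSON body field, then query-string argument, then request header.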
try:
data = _security.remember_token_serializer.loads(
token, max_age=_security.token_max_age)
user = _security.datastore.find_user(id=data[0])
if user and verify_hash(data[1], user.password):
return user
    except Exception:
pass
return _security.login_manager.anonymous_user()
def _identity_loader():
if not isinstance(current_user._get_current_object(), AnonymousUserMixin):
identity = Identity(current_user.id)
return identity
def _on_identity_loaded(sender, identity):
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
for role in getattr(current_user, 'roles', []):
identity.provides.add(RoleNeed(role.name))
identity.user = current_user
def _get_login_manager(app, anonymous_user):
lm = LoginManager()
lm.anonymous_user = anonymous_user or AnonymousUser
lm.localize_callback = localize_callback
lm.login_view = '%s.login' % cv('BLUEPRINT_NAME', app=app)
lm.user_loader(_user_loader)
lm.request_loader(_request_loader)
if cv('FLASH_MESSAGES', app=app):
lm.login_message, lm.login_message_category = cv('MSG_LOGIN', app=app)
lm.needs_refresh_message, lm.needs_refresh_message_category = cv(
'MSG_REFRESH', app=app)
else:
lm.login_message = None
lm.needs_refresh_message = None
lm.init_app(app)
return lm
def _get_principal(app):
p = Principal(app, use_sessions=False)
p.identity_loader(_identity_loader)
return p
def _get_pwd_context(app):
pw_hash = cv('PASSWORD_HASH', app=app)
schemes = cv('PASSWORD_SCHEMES', app=app)
deprecated = cv('DEPRECATED_PASSWORD_SCHEMES', app=app)
if pw_hash not in schemes:
allowed = (', '.join(schemes[:-1]) + ' and ' + schemes[-1])
raise ValueError(
"Invalid password hashing scheme %r. Allowed values are %s" %
(pw_hash, allowed))
return CryptContext(
schemes=schemes,
default=pw_hash,
deprecated=deprecated)
def _get_i18n_domain(app):
return Domain(
pkg_resources.resource_filename('flask_security', 'translations'),
domain=cv('I18N_DOMAIN', app=app)
)
def _get_hashing_context(app):
schemes = cv('HASHING_SCHEMES', app=app)
deprecated = cv('DEPRECATED_HASHING_SCHEMES', app=app)
return CryptContext(
schemes=schemes,
deprecated=deprecated)
def _get_serializer(app, name):
secret_key = app.config.get('SECRET_KEY')
salt = app.config.get('SECURITY_%s_SALT' % name.upper())
return URLSafeTimedSerializer(secret_key=secret_key, salt=salt)
def _get_state(app, datastore, anonymous_user=None, **kwargs):
for key, value in get_config(app).items():
kwargs[key.lower()] = value
kwargs.update(dict(
app=app,
datastore=datastore,
principal=_get_principal(app),
pwd_context=_get_pwd_context(app),
hashing_context=_get_hashing_context(app),
i18n_domain=_get_i18n_domain(app),
remember_token_serializer=_get_serializer(app, 'remember'),
login_serializer=_get_serializer(app, 'login'),
reset_serializer=_get_serializer(app, 'reset'),
confirm_serializer=_get_serializer(app, 'confirm'),
_context_processors={},
_send_mail_task=None,
_unauthorized_callback=None
))
if 'login_manager' not in kwargs:
kwargs['login_manager'] = _get_login_manager(
app, anonymous_user)
for key, value in _default_forms.items():
if key not in kwargs or not kwargs[key]:
kwargs[key] = value
return _SecurityState(**kwargs)
def _context_processor():
return dict(url_for_security=url_for_security, security=_security)
class RoleMixin(object):
"""Mixin for `Role` model definitions"""
def __eq__(self, other):
return (self.name == other or
self.name == getattr(other, 'name', None))
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
class UserMixin(BaseUserMixin):
"""Mixin for `User` model definitions"""
@property
def is_active(self):
"""Returns `True` if the user is active."""
return self.active
def get_auth_token(self):
"""Returns the user's authentication token."""
data = [str(self.id), hash_data(self.password)]
return _security.remember_token_serializer.dumps(data)
def has_role(self, role):
"""Returns `True` if the user identifies with the specified role.
:param role: A role name or `Role` instance"""
if isinstance(role, string_types):
            return role in (r.name for r in self.roles)
else:
return role in self.roles
def get_security_payload(self):
"""Serialize user object as response payload."""
return {'id': str(self.id)}
class AnonymousUser(AnonymousUserMixin):
"""AnonymousUser definition"""
def __init__(self):
self.roles = ImmutableList()
def has_role(self, *args):
"""Returns `False`"""
return False
class _SecurityState(object):
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key.lower(), value)
def _add_ctx_processor(self, endpoint, fn):
group = self._context_processors.setdefault(endpoint, [])
        if fn not in group:
            group.append(fn)
def _run_ctx_processor(self, endpoint):
rv = {}
for g in [None, endpoint]:
for fn in self._context_processors.setdefault(g, []):
rv.update(fn())
return rv
def context_processor(self, fn):
self._add_ctx_processor(None, fn)
def forgot_password_context_processor(self, fn):
self._add_ctx_processor('forgot_password', fn)
def login_context_processor(self, fn):
self._add_ctx_processor('login', fn)
def register_context_processor(self, fn):
self._add_ctx_processor('register', fn)
def reset_password_context_processor(self, fn):
self._add_ctx_processor('reset_password', fn)
def change_password_context_processor(self, fn):
self._add_ctx_processor('change_password', fn)
def send_confirmation_context_processor(self, fn):
self._add_ctx_processor('send_confirmation', fn)
def send_login_context_processor(self, fn):
self._add_ctx_processor('send_login', fn)
def mail_context_processor(self, fn):
self._add_ctx_processor('mail', fn)
def send_mail_task(self, fn):
self._send_mail_task = fn
def unauthorized_handler(self, fn):
self._unauthorized_callback = fn
class Security(object):
"""The :class:`Security` class initializes the Flask-Security extension.
:param app: The application.
:param datastore: An instance of a user datastore.
    :param register_blueprint: whether to register the Security blueprint.
:param login_form: set form for the login view
:param register_form: set form for the register view
:param confirm_register_form: set form for the confirm register view
:param forgot_password_form: set form for the forgot password view
:param reset_password_form: set form for the reset password view
:param change_password_form: set form for the change password view
:param send_confirmation_form: set form for the send confirmation view
:param passwordless_login_form: set form for the passwordless login view
:param anonymous_user: class to use for anonymous user
"""
def __init__(self, app=None, datastore=None, register_blueprint=True,
**kwargs):
self.app = app
self._datastore = datastore
self._register_blueprint = register_blueprint
self._kwargs = kwargs
self._state = None # set by init_app
if app is not None and datastore is not None:
self._state = self.init_app(
app,
datastore,
register_blueprint=register_blueprint,
**kwargs)
def init_app(self, app, datastore=None, register_blueprint=None, **kwargs):
"""Initializes the Flask-Security extension for the specified
application and datastore implementation.
:param app: The application.
:param datastore: An instance of a user datastore.
        :param register_blueprint: whether to register the Security blueprint.
"""
self.app = app
if datastore is None:
datastore = self._datastore
if register_blueprint is None:
register_blueprint = self._register_blueprint
for key, value in self._kwargs.items():
kwargs.setdefault(key, value)
for key, value in _default_config.items():
app.config.setdefault('SECURITY_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('SECURITY_MSG_' + key, value)
identity_loaded.connect_via(app)(_on_identity_loaded)
self._state = state = _get_state(app, datastore, **kwargs)
if register_blueprint:
app.register_blueprint(create_blueprint(state, __name__))
app.context_processor(_context_processor)
@app.before_first_request
def _register_i18n():
if '_' not in app.jinja_env.globals:
app.jinja_env.globals['_'] = state.i18n_domain.gettext
state.render_template = self.render_template
state.send_mail = self.send_mail
app.extensions['security'] = state
if hasattr(app, 'cli'):
from .cli import users, roles
if state.cli_users_name:
app.cli.add_command(users, state.cli_users_name)
if state.cli_roles_name:
app.cli.add_command(roles, state.cli_roles_name)
return state
def render_template(self, *args, **kwargs):
return render_template(*args, **kwargs)
def send_mail(self, *args, **kwargs):
return send_mail(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._state, name, None)
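# A minimal, illustrative usage sketch for the extension defined above (kept as
# comments so importing this module stays side-effect free). It assumes a
# SQLAlchemy datastore and hypothetical User/Role models that mix in
# UserMixin/RoleMixin:
#
#     from flask import Flask
#     from flask_sqlalchemy import SQLAlchemy
#     from flask_security import Security, SQLAlchemyUserDatastore
#
#     app = Flask(__name__)
#     app.config['SECRET_KEY'] = 'change-me'
#     db = SQLAlchemy(app)
#     # ... define the Role and User models here ...
#     user_datastore = SQLAlchemyUserDatastore(db, User, Role)
#     security = Security(app, user_datastore)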
|
|
"""
Test Elasticsearch persistence.
"""
from datetime import timedelta
from unittest.mock import patch
from hamcrest import (
all_of,
assert_that,
calling,
contains,
equal_to,
has_entry,
has_key,
has_property,
is_,
none,
raises,
)
from microcosm.api import create_object_graph
from nose.plugins.attrib import attr
from microcosm_elasticsearch.assertions import assert_that_eventually, assert_that_not_eventually
from microcosm_elasticsearch.errors import ElasticsearchConflictError, ElasticsearchNotFoundError
from microcosm_elasticsearch.tests.fixtures import Person, Planet, SelectorAttribute
class TestStore:
def setup(self):
self.graph = create_object_graph("example", testing=True)
self.store = self.graph.person_store
self.overloaded_store = self.graph.person_overloaded_store
self.graph.elasticsearch_index_registry.createall(force=True)
self.kevin = Person(
first="Kevin",
last="Durant",
origin_planet=Planet.EARTH,
)
self.steph = Person(
first="Steph",
last="Curry",
origin_planet=Planet.MARS,
)
def test_retrieve_not_found(self):
assert_that(
calling(self.store.retrieve).with_args(self.store.new_object_id()),
raises(ElasticsearchNotFoundError),
)
def test_create(self):
self.store.create(self.kevin)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", none()),
has_property("last", "Durant"),
has_property("origin_planet", Planet.EARTH),
),
)
def test_create_duplicate(self):
self.store.create(self.kevin)
assert_that(
calling(self.store.create).with_args(self.kevin),
raises(ElasticsearchConflictError),
)
def test_count(self):
with self.store.flushing():
self.store.create(self.kevin)
self.store.create(self.steph)
assert_that(self.store.count(), is_(equal_to(2)))
@attr("slow")
def test_count_slow(self):
self.store.create(self.kevin)
self.store.create(self.steph)
assert_that_eventually(
self.store.count,
is_(equal_to(2)),
tries=5,
sleep_seconds=1.0,
)
def test_delete_not_found(self):
assert_that(
calling(self.store.delete).with_args(self.store.new_object_id()),
raises(ElasticsearchNotFoundError),
)
def test_delete(self):
self.store.create(self.kevin)
assert_that(self.store.delete(self.kevin.id), is_(equal_to(True)))
assert_that(
calling(self.store.retrieve).with_args(self.kevin.id),
raises(ElasticsearchNotFoundError),
)
def test_search(self):
with self.store.flushing():
self.store.create(self.kevin)
assert_that(
self.store.search(),
contains(
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("last", "Durant"),
),
),
)
def test_search_with_count(self):
with self.store.flushing():
self.store.create(self.kevin)
items, count = self.store.search_with_count()
assert_that(
items,
contains(
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("last", "Durant"),
),
),
)
assert_that(count, is_(equal_to(1)))
@attr("slow")
def test_search_slow(self):
self.store.create(self.kevin)
assert_that_eventually(
self.store.search,
contains(
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("last", "Durant"),
),
),
tries=5,
sleep_seconds=1.0,
)
def test_search_order_reverse_chronological(self):
with self.store.flushing():
self.store.create(self.kevin)
with patch.object(self.store, "new_timestamp") as mocked:
            # ensure the created_at timestamps differ by at least one second
mocked.return_value = self.kevin.created_at + timedelta(seconds=1).seconds * 1000
self.store.create(self.steph)
assert_that(
self.store.search(),
contains(
has_property("id", self.steph.id),
has_property("id", self.kevin.id),
),
)
def test_search_paging(self):
with self.store.flushing():
self.store.create(self.kevin)
with patch.object(self.store, "new_timestamp") as mocked:
            # ensure the created_at timestamps differ by at least one second
mocked.return_value = self.kevin.created_at + timedelta(seconds=1).seconds * 1000
self.store.create(self.steph)
assert_that(
self.store.search(offset=1, limit=1),
contains(
has_property("id", self.kevin.id),
),
)
assert_that(
self.store.search(offset=0, limit=1),
contains(
has_property("id", self.steph.id),
),
)
def test_search_filter(self):
with self.store.flushing():
self.store.create(self.kevin)
assert_that(
self.store.search(q=self.kevin.first),
contains(
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("last", "Durant"),
),
),
)
def test_search_filter_out(self):
with self.store.flushing():
self.store.create(self.kevin)
assert_that(
self.store.search(q=self.steph.first),
contains(),
)
@attr("slow")
def test_search_filter_out_slow(self):
self.store.create(self.kevin)
assert_that_not_eventually(
calling(self.store.search).with_args(q=self.steph.first),
contains(
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("last", "Durant"),
),
),
)
def test_update_not_found(self):
assert_that(
calling(self.store.update).with_args(self.store.new_object_id(), self.kevin),
raises(ElasticsearchNotFoundError),
)
def test_update(self):
self.store.create(self.kevin)
self.kevin.middle = "MVP"
self.store.update(self.kevin.id, self.kevin)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", "MVP"),
has_property("last", "Durant"),
),
)
def test_replace_not_found(self):
self.kevin.middle = "MVP"
self.store.replace(self.kevin.id, self.kevin)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", "MVP"),
has_property("last", "Durant"),
),
)
def test_replace(self):
self.store.create(self.kevin)
self.kevin.middle = "MVP"
self.store.replace(self.kevin.id, self.kevin)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", "MVP"),
has_property("last", "Durant"),
),
)
def test_bulk(self):
self.store.bulk(
actions=[
("index", self.kevin)
],
batch_size=1,
)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", none()),
has_property("last", "Durant"),
),
)
def test_bulk_with_report(self):
results = self.store.bulk(
actions=[
("index", self.kevin),
("delete", self.steph),
],
batch_size=2,
)
assert_that(
self.store.retrieve(self.kevin.id),
all_of(
has_property("id", self.kevin.id),
has_property("first", "Kevin"),
has_property("middle", none()),
has_property("last", "Durant"),
),
)
result = results[0]
# Updated items
assert_that(result[0], is_(equal_to(1)))
# Report on failed to delete items
assert_that(result[1], contains(
has_key('delete'),
))
assert_that(result[1][0]['delete'], has_entry('result', 'not_found'))
class TestOverloadedStore(TestStore):
def setup(self):
super().setup()
self.person_in_one = Person(
first="One",
last="Person",
origin_planet=Planet.MARS,
)
self.person_in_two = Person(
first="Two",
last="Person",
origin_planet=Planet.MARS,
)
with self.overloaded_store.flushing(selector_attribute=SelectorAttribute.ONE):
self.overloaded_store.create(
self.person_in_one,
selector_attribute=SelectorAttribute.ONE
)
with self.overloaded_store.flushing(selector_attribute=SelectorAttribute.TWO):
self.overloaded_store.create(
self.person_in_two,
selector_attribute=SelectorAttribute.TWO
)
def test_search_with_count(self):
result, count = self.overloaded_store.search_with_count(selector_attribute=SelectorAttribute.ONE)
assert_that(result[0], has_property("id", self.person_in_one.id))
assert_that(count, is_(equal_to(1)))
result, count = self.overloaded_store.search_with_count(selector_attribute=SelectorAttribute.TWO)
assert_that(result[0], has_property("id", self.person_in_two.id))
assert_that(count, is_(equal_to(1)))
def test_search(self):
assert_that(
self.overloaded_store.search(selector_attribute=SelectorAttribute.ONE),
contains(
all_of(
has_property("id", self.person_in_one.id),
has_property("first", "One"),
),
),
)
assert_that(
self.overloaded_store.search(selector_attribute=SelectorAttribute.TWO),
contains(
all_of(
has_property("id", self.person_in_two.id),
has_property("first", "Two"),
),
),
)
|
|
# Copyright 2017, 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines various constants used throughout the project
"""
import mxnet as mx
import numpy as np
BOS_SYMBOL = "<s>"
EOS_SYMBOL = "</s>"
UNK_SYMBOL = "<unk>"
PAD_SYMBOL = "<pad>"
PAD_ID = 0
TOKEN_SEPARATOR = " "
VOCAB_SYMBOLS = [PAD_SYMBOL, UNK_SYMBOL, BOS_SYMBOL, EOS_SYMBOL]
# reserve extra space for the EOS or BOS symbol that is added to both source and target
SPACE_FOR_XOS = 1
ARG_SEPARATOR = ":"
ENCODER_PREFIX = "encoder_"
DECODER_PREFIX = "decoder_"
EMBEDDING_PREFIX = "embed_"
ATTENTION_PREFIX = "att_"
COVERAGE_PREFIX = "cov_"
BIDIRECTIONALRNN_PREFIX = ENCODER_PREFIX + "birnn_"
STACKEDRNN_PREFIX = ENCODER_PREFIX + "rnn_"
FORWARD_PREFIX = "forward_"
REVERSE_PREFIX = "reverse_"
TRANSFORMER_ENCODER_PREFIX = ENCODER_PREFIX + "transformer_"
CNN_ENCODER_PREFIX = ENCODER_PREFIX + "cnn_"
CHAR_SEQ_ENCODER_PREFIX = ENCODER_PREFIX + "char_"
DEFAULT_OUTPUT_LAYER_PREFIX = "target_output_"
# embedding prefixes
SOURCE_EMBEDDING_PREFIX = "source_" + EMBEDDING_PREFIX
SOURCE_POSITIONAL_EMBEDDING_PREFIX = "source_pos_" + EMBEDDING_PREFIX
TARGET_EMBEDDING_PREFIX = "target_" + EMBEDDING_PREFIX
TARGET_POSITIONAL_EMBEDDING_PREFIX = "target_pos_" + EMBEDDING_PREFIX
SHARED_EMBEDDING_PREFIX = "source_target_" + EMBEDDING_PREFIX
# encoder names (arguments)
RNN_NAME = "rnn"
RNN_WITH_CONV_EMBED_NAME = "rnn-with-conv-embed"
TRANSFORMER_TYPE = "transformer"
CONVOLUTION_TYPE = "cnn"
TRANSFORMER_WITH_CONV_EMBED_TYPE = "transformer-with-conv-embed"
IMAGE_PRETRAIN_TYPE = "image-pretrain-cnn"
# available encoders
ENCODERS = [RNN_NAME, RNN_WITH_CONV_EMBED_NAME, TRANSFORMER_TYPE, TRANSFORMER_WITH_CONV_EMBED_TYPE, CONVOLUTION_TYPE, IMAGE_PRETRAIN_TYPE]
# available decoders
DECODERS = [RNN_NAME, TRANSFORMER_TYPE, CONVOLUTION_TYPE]
# rnn types
LSTM_TYPE = 'lstm'
LNLSTM_TYPE = 'lnlstm'
LNGLSTM_TYPE = 'lnglstm'
GRU_TYPE = 'gru'
LNGRU_TYPE = 'lngru'
LNGGRU_TYPE = 'lnggru'
CELL_TYPES = [LSTM_TYPE, LNLSTM_TYPE, LNGLSTM_TYPE, GRU_TYPE, LNGRU_TYPE, LNGGRU_TYPE]
# positional embeddings
NO_POSITIONAL_EMBEDDING = "none"
FIXED_POSITIONAL_EMBEDDING = "fixed"
LEARNED_POSITIONAL_EMBEDDING = "learned"
POSITIONAL_EMBEDDING_TYPES = [NO_POSITIONAL_EMBEDDING, FIXED_POSITIONAL_EMBEDDING, LEARNED_POSITIONAL_EMBEDDING]
DEFAULT_INIT_PATTERN = ".*"
# init types
INIT_XAVIER = 'xavier'
INIT_UNIFORM = 'uniform'
INIT_TYPES = [INIT_XAVIER, INIT_UNIFORM]
INIT_XAVIER_FACTOR_TYPE_IN = "in"
INIT_XAVIER_FACTOR_TYPE_OUT = "out"
INIT_XAVIER_FACTOR_TYPE_AVG = "avg"
INIT_XAVIER_FACTOR_TYPES = [INIT_XAVIER_FACTOR_TYPE_IN, INIT_XAVIER_FACTOR_TYPE_OUT, INIT_XAVIER_FACTOR_TYPE_AVG]
RAND_TYPE_UNIFORM = 'uniform'
RAND_TYPE_GAUSSIAN = 'gaussian'
# Embedding init types
EMBED_INIT_PATTERN = '(%s|%s|%s)weight' % (SOURCE_EMBEDDING_PREFIX, TARGET_EMBEDDING_PREFIX, SHARED_EMBEDDING_PREFIX)
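# With the prefixes defined above this expands to
# '(source_embed_|target_embed_|source_target_embed_)weight'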
EMBED_INIT_DEFAULT = 'default'
EMBED_INIT_NORMAL = 'normal'
EMBED_INIT_TYPES = [EMBED_INIT_DEFAULT, EMBED_INIT_NORMAL]
# RNN init types
RNN_INIT_PATTERN = ".*h2h.*"
RNN_INIT_ORTHOGONAL = 'orthogonal'
RNN_INIT_ORTHOGONAL_STACKED = 'orthogonal_stacked'
# use the default initializer used also for all other weights
RNN_INIT_DEFAULT = 'default'
# RNN decoder state init types
RNN_DEC_INIT_ZERO = "zero"
RNN_DEC_INIT_LAST = "last"
RNN_DEC_INIT_AVG = "avg"
RNN_DEC_INIT_CHOICES = [RNN_DEC_INIT_ZERO, RNN_DEC_INIT_LAST, RNN_DEC_INIT_AVG]
# attention types
ATT_BILINEAR = 'bilinear'
ATT_DOT = 'dot'
ATT_MH_DOT = 'mhdot'
ATT_FIXED = 'fixed'
ATT_LOC = 'location'
ATT_MLP = 'mlp'
ATT_COV = "coverage"
ATT_TYPES = [ATT_BILINEAR, ATT_DOT, ATT_MH_DOT, ATT_FIXED, ATT_LOC, ATT_MLP, ATT_COV]
# weight tying components
WEIGHT_TYING_SRC = 'src'
WEIGHT_TYING_TRG = 'trg'
WEIGHT_TYING_SOFTMAX = 'softmax'
# weight tying types (combinations of above components):
WEIGHT_TYING_TRG_SOFTMAX = 'trg_softmax'
WEIGHT_TYING_SRC_TRG = 'src_trg'
WEIGHT_TYING_SRC_TRG_SOFTMAX = 'src_trg_softmax'
# default decoder prefixes
RNN_DECODER_PREFIX = DECODER_PREFIX + "rnn_"
TRANSFORMER_DECODER_PREFIX = DECODER_PREFIX + "transformer_"
CNN_DECODER_PREFIX = DECODER_PREFIX + "cnn_"
# Activation types
# Gaussian Error Linear Unit (https://arxiv.org/pdf/1606.08415.pdf)
GELU = "gelu"
# Gated Linear Unit (https://arxiv.org/pdf/1705.03122.pdf)
GLU = "glu"
RELU = "relu"
SIGMOID = "sigmoid"
SOFT_RELU = "softrelu"
# Swish-1/SiLU (https://arxiv.org/pdf/1710.05941.pdf, https://arxiv.org/pdf/1702.03118.pdf)
SWISH1 = "swish1"
TANH = "tanh"
TRANSFORMER_ACTIVATION_TYPES = [GELU, RELU, SWISH1]
CNN_ACTIVATION_TYPES = [GLU, RELU, SIGMOID, SOFT_RELU, TANH]
# Convolutional block pad types:
CNN_PAD_LEFT = "left"
CNN_PAD_CENTERED = "centered"
# default I/O variable names
SOURCE_NAME = "source"
SOURCE_LENGTH_NAME = "source_length"
TARGET_NAME = "target"
TARGET_LABEL_NAME = "target_label"
LEXICON_NAME = "lexicon"
SOURCE_ENCODED_NAME = "encoded_source"
TARGET_PREVIOUS_NAME = "prev_target_word_id"
HIDDEN_PREVIOUS_NAME = "prev_hidden"
SOURCE_DYNAMIC_PREVIOUS_NAME = "prev_dynamic_source"
LOGIT_INPUTS_NAME = "logit_inputs"
LOGITS_NAME = "logits"
SOFTMAX_NAME = "softmax"
SOFTMAX_OUTPUT_NAME = SOFTMAX_NAME + "_output"
MEASURE_SPEED_EVERY = 50 # measure speed and metrics every X batches
# Monitor constants
STAT_FUNC_DEFAULT = "mx_default" # default MXNet monitor stat func: mx.nd.norm(x)/mx.nd.sqrt(x.size)
STAT_FUNC_MAX = 'max'
STAT_FUNC_MIN = 'min'
STAT_FUNC_MEAN = 'mean'
MONITOR_STAT_FUNCS = {STAT_FUNC_DEFAULT: None,
STAT_FUNC_MAX: lambda x: mx.nd.max(x),
STAT_FUNC_MEAN: lambda x: mx.nd.mean(x)}
# Inference constants
DEFAULT_BEAM_SIZE = 5
CHUNK_SIZE_NO_BATCHING = 1
CHUNK_SIZE_PER_BATCH_SEGMENT = 500
BEAM_SEARCH_STOP_FIRST = 'first'
BEAM_SEARCH_STOP_ALL = 'all'
# Inference Input JSON constants
JSON_TEXT_KEY = "text"
JSON_FACTORS_KEY = "factors"
JSON_CONSTRAINTS_KEY = "constraints"
JSON_ENCODING = "utf-8"
# Lexical constraints
BANK_ADJUSTMENT = 'even'
VERSION_NAME = "version"
CONFIG_NAME = "config"
LOG_NAME = "log"
JSON_SUFFIX = ".json"
VOCAB_SRC_PREFIX = "vocab.src"
VOCAB_SRC_NAME = VOCAB_SRC_PREFIX + ".%d" + JSON_SUFFIX
VOCAB_TRG_PREFIX = "vocab.trg"
VOCAB_TRG_NAME = VOCAB_TRG_PREFIX + ".%d" + JSON_SUFFIX
VOCAB_ENCODING = "utf-8"
PARAMS_PREFIX = "params."
PARAMS_NAME = PARAMS_PREFIX + "%05d"
PARAMS_BEST_NAME = "params.best"
DECODE_OUT_NAME = "decode.output.%05d"
DECODE_IN_NAME = "decode.source.%d"
DECODE_REF_NAME = "decode.target"
SYMBOL_NAME = "symbol" + JSON_SUFFIX
METRICS_NAME = "metrics"
TENSORBOARD_NAME = "tensorboard"
# training resumption constants
TRAINING_STATE_DIRNAME = "training_state"
TRAINING_STATE_TEMP_DIRNAME = "tmp.training_state"
TRAINING_STATE_TEMP_DELETENAME = "delete.training_state"
OPT_STATES_LAST = "mx_optimizer_last.pkl"
OPT_STATES_BEST = "mx_optimizer_best.pkl"
OPT_STATES_INITIAL = "mx_optimizer_initial.pkl"
BUCKET_ITER_STATE_NAME = "bucket.pkl"
RNG_STATE_NAME = "rng.pkl"
TRAINING_STATE_NAME = "training.pkl"
SCHEDULER_STATE_NAME = "scheduler.pkl"
TRAINING_STATE_PARAMS_NAME = "params"
ARGS_STATE_NAME = "args.yaml"
# Arguments that may differ and still resume training
ARGS_MAY_DIFFER = ["overwrite_output", "use-tensorboard", "quiet",
"align_plot_prefix", "sure_align_threshold",
"keep_last_params"]
# Other argument constants
TRAINING_ARG_SOURCE = "--source"
TRAINING_ARG_TARGET = "--target"
TRAINING_ARG_PREPARED_DATA = "--prepared-data"
VOCAB_ARG_SHARED_VOCAB = "--shared-vocab"
INFERENCE_ARG_INPUT_LONG = "--input"
INFERENCE_ARG_INPUT_SHORT = "-i"
INFERENCE_ARG_OUTPUT_LONG = "--output"
INFERENCE_ARG_OUTPUT_SHORT = "-o"
INFERENCE_ARG_INPUT_FACTORS_LONG = "--input-factors"
INFERENCE_ARG_INPUT_FACTORS_SHORT = "-if"
TRAIN_ARGS_MONITOR_BLEU = "--decode-and-evaluate"
TRAIN_ARGS_CHECKPOINT_FREQUENCY = "--checkpoint-frequency"
# Used to delimit factors on STDIN for inference
DEFAULT_FACTOR_DELIMITER = '|'
# data layout strings
BATCH_MAJOR_IMAGE = "NCHW"
BATCH_MAJOR = "NTC"
TIME_MAJOR = "TNC"
BATCH_TYPE_SENTENCE = "sentence"
BATCH_TYPE_WORD = "word"
KVSTORE_DEVICE = "device"
KVSTORE_LOCAL = "local"
KVSTORE_SYNC = "dist_sync"
KVSTORE_DIST_DEVICE_SYNC = "dist_device_sync"
KVSTORE_DIST_ASYNC = "dist_async"
KVSTORE_NCCL = 'nccl'
KVSTORE_TYPES = [KVSTORE_DEVICE, KVSTORE_LOCAL, KVSTORE_SYNC,
KVSTORE_DIST_DEVICE_SYNC, KVSTORE_DIST_ASYNC,
KVSTORE_NCCL]
# Training constants
OPTIMIZER_ADAM = "adam"
OPTIMIZER_EVE = "eve"
OPTIMIZER_NADAM = "nadam"
OPTIMIZER_RMSPROP = "rmsprop"
OPTIMIZER_SGD = "sgd"
OPTIMIZER_NAG = "nag"
OPTIMIZER_ADAGRAD = "adagrad"
OPTIMIZER_ADADELTA = "adadelta"
OPTIMIZERS = [OPTIMIZER_ADAM, OPTIMIZER_EVE, OPTIMIZER_NADAM, OPTIMIZER_RMSPROP, OPTIMIZER_SGD, OPTIMIZER_NAG,
OPTIMIZER_ADAGRAD, OPTIMIZER_ADADELTA]
LR_SCHEDULER_FIXED_RATE_INV_SQRT_T = "fixed-rate-inv-sqrt-t"
LR_SCHEDULER_FIXED_RATE_INV_T = "fixed-rate-inv-t"
LR_SCHEDULER_FIXED_STEP = "fixed-step"
LR_SCHEDULER_PLATEAU_REDUCE = "plateau-reduce"
LR_SCHEDULERS = [LR_SCHEDULER_FIXED_RATE_INV_SQRT_T,
LR_SCHEDULER_FIXED_RATE_INV_T,
LR_SCHEDULER_FIXED_STEP,
LR_SCHEDULER_PLATEAU_REDUCE]
LR_DECAY_OPT_STATES_RESET_OFF = 'off'
LR_DECAY_OPT_STATES_RESET_INITIAL = 'initial'
LR_DECAY_OPT_STATES_RESET_BEST = 'best'
LR_DECAY_OPT_STATES_RESET_CHOICES = [LR_DECAY_OPT_STATES_RESET_OFF,
LR_DECAY_OPT_STATES_RESET_INITIAL,
LR_DECAY_OPT_STATES_RESET_BEST]
GRADIENT_CLIPPING_TYPE_ABS = 'abs'
GRADIENT_CLIPPING_TYPE_NORM = 'norm'
GRADIENT_CLIPPING_TYPE_NONE = 'none'
GRADIENT_CLIPPING_TYPES = [GRADIENT_CLIPPING_TYPE_ABS, GRADIENT_CLIPPING_TYPE_NORM, GRADIENT_CLIPPING_TYPE_NONE]
GRADIENT_COMPRESSION_NONE = None
GRADIENT_COMPRESSION_2BIT = "2bit"
GRADIENT_COMPRESSION_TYPES = [GRADIENT_CLIPPING_TYPE_NONE, GRADIENT_COMPRESSION_2BIT]
# output handler
OUTPUT_HANDLER_TRANSLATION = "translation"
OUTPUT_HANDLER_TRANSLATION_WITH_SCORE = "translation_with_score"
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENTS = "translation_with_alignments"
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENT_MATRIX = "translation_with_alignment_matrix"
OUTPUT_HANDLER_BENCHMARK = "benchmark"
OUTPUT_HANDLER_ALIGN_PLOT = "align_plot"
OUTPUT_HANDLER_ALIGN_TEXT = "align_text"
OUTPUT_HANDLER_BEAM_STORE = "beam_store"
OUTPUT_HANDLERS = [OUTPUT_HANDLER_TRANSLATION,
OUTPUT_HANDLER_TRANSLATION_WITH_SCORE,
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENTS,
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENT_MATRIX,
OUTPUT_HANDLER_BENCHMARK,
OUTPUT_HANDLER_ALIGN_PLOT,
OUTPUT_HANDLER_ALIGN_TEXT,
OUTPUT_HANDLER_BEAM_STORE]
# metrics
ACCURACY = 'accuracy'
PERPLEXITY = 'perplexity'
BLEU = 'bleu'
CHRF = 'chrf'
BLEU_VAL = BLEU + "-val"
CHRF_VAL = CHRF + "-val"
AVG_TIME = "avg-sec-per-sent-val"
DECODING_TIME = "decode-walltime-val"
METRICS = [PERPLEXITY, ACCURACY, BLEU]
METRIC_MAXIMIZE = {ACCURACY: True, BLEU: True, PERPLEXITY: False}
METRIC_WORST = {ACCURACY: 0.0, BLEU: 0.0, PERPLEXITY: np.inf}
# loss
CROSS_ENTROPY = 'cross-entropy'
LOSS_NORM_BATCH = 'batch'
LOSS_NORM_VALID = "valid"
TARGET_MAX_LENGTH_FACTOR = 2
DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH = 2
DTYPE_FP16 = 'float16'
DTYPE_FP32 = 'float32'
LARGE_POSITIVE_VALUE = 99999999.
LARGE_NEGATIVE_VALUE = -LARGE_POSITIVE_VALUE
LARGE_VALUES = {
# Something at the middle of 32768<x<65519. Will be rounded to a multiple of 32.
# https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_integer_values
DTYPE_FP16: 49152.0,
# Will be rounded to 1.0e8.
# https://en.wikipedia.org/wiki/Single-precision_floating-point_format#Precision_limits_on_integer_values.
DTYPE_FP32: LARGE_POSITIVE_VALUE
}
LHUC_NAME = "lhuc"
# lhuc application points
LHUC_ENCODER = "encoder"
LHUC_DECODER = "decoder"
LHUC_STATE_INIT = "state_init"
LHUC_ALL = "all"
LHUC_CHOICES = [LHUC_ENCODER, LHUC_DECODER, LHUC_STATE_INIT, LHUC_ALL]
# data sharding
SHARD_NAME = "shard.%05d"
SHARD_SOURCE = SHARD_NAME + ".source"
SHARD_TARGET = SHARD_NAME + ".target"
DATA_INFO = "data.info"
DATA_CONFIG = "data.config"
PREPARED_DATA_VERSION_FILE = "data.version"
PREPARED_DATA_VERSION = 2
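# Illustrative expansions of the filename pattern constants above:
#   VOCAB_SRC_NAME % 0 -> "vocab.src.0.json"
#   PARAMS_NAME % 5    -> "params.00005"
#   SHARD_SOURCE % 3   -> "shard.00003.source"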
|
|
# coding: utf-8
from __future__ import print_function #prepare for python3
import os,sys,shutil
import time as clocktime
import numpy as np
from scipy.stats import powerlaw
import matplotlib
matplotlib.use('Agg')
#matplotlib.use("Cairo")
import matplotlib.pyplot as plt
from amuse.community.rebound.interface import Rebound
from plotting import plot_interaction, plot_system
from amuse.units.units import named
from amuse.ext.orbital_elements import orbital_elements_for_rel_posvel_arrays
from botsrots import BotsRots
try:
from amuse.units import units
MEarth = units.MEarth
REarth = units.REarth
except ImportError:
from usagi.units import units
MEarth = units.MEarth
REarth = units.REarth
rhoEarth = units.MEarth / (4./3. * np.pi * units.REarth**3)
MMoon = named('Lunar mass', 'M_Moon', 0.0123 * units.MEarth)#7.35e22 * units.kg
aR = named('Roche limit radius for lunar density', 'a_R', 2.9 * units.REarth)
from amuse.lab import *
def get_roche_limit_radius(
rho,
):
# Kokubo 2000 eq 2
a_R = 2.456 * (rho.value_in(rhoEarth))**(-1./3.) | units.REarth
return a_R
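# Illustrative check (not executed): for a body of roughly lunar bulk density
# (~0.61 rhoEarth), Kokubo (2000) eq. 2 gives
#     a_R = 2.456 * 0.61**(-1./3.) | units.REarth  ~  2.9 REarth,
# consistent with the aR constant defined above.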
def particle_radius(
m,
rho,
):
# Kokubo 2000 eq 1
radius = (
(m.value_in(units.MEarth))**(1./3.) *
(rho.value_in(rhoEarth)**(-1./3.))
) | units.REarth
return radius
class Resolve_Encounters(object):
def __init__(
self,
encounters_0,
encounters_1,
primary = None,
convert_nbody = None,
G = constants.G,
epsilon_n = 0.1,
epsilon_t = 1,
f = 0.0,
time = 0.0 | units.yr,
):
self.all_encounters_A = encounters_0
self.all_encounters_B = encounters_1
self.number_of_collisions = len(self.all_encounters_A)
self.primary = primary
self.particles_modified = Particles()
self.particles_removed = Particles()
self.epsilon_n = epsilon_n
self.epsilon_t = epsilon_t
self.G = G
self.f = f
self.time = time
# Velocity changes only v_n -> can be done for ALL particles!
self.update_velocities()
# Should resolve collision immediately since the in-between state is unphysical
# and may cause mergers within Roche radius
self.resolve_rebounders()
if self.f > 0.0:
# These are used to determine if a merger will take place
self.get_hill_radius()
self.get_jacobi_energy()
self.get_encounter_type()
# Resolve.
# NOTE: need to take ongoing changes into account...
# First: collisions that don't result in a merger
# Then: mergers
if len(self.merging) > 0:
self.resolve_mergers()
def update_velocities(
self,
):
A = self.all_encounters_A
B = self.all_encounters_B
if self.epsilon_t != 1.0:
return -1
r = B.position - A.position
v = B.velocity - A.velocity
n = r/r.lengths().reshape((self.number_of_collisions,1))
v_n = (
v[:,0]*n[:,0] +
v[:,1]*n[:,1] +
v[:,2]*n[:,2]
).reshape((len(n),1)) * n
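        # v_n is the component of the relative velocity (B - A) along the line
        # of centres. The kicks below conserve momentum and reverse and damp
        # that normal component by epsilon_n: the post-collision normal
        # relative velocity is -epsilon_n * v_n.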
self.v_A_orig = A.velocity
self.v_B_orig = B.velocity
A.velocity += (1+self.epsilon_n) * v_n * (B.mass / (A.mass+B.mass)).reshape((self.number_of_collisions,1))
B.velocity += -(1+self.epsilon_n) * v_n * (A.mass / (A.mass+B.mass)).reshape((self.number_of_collisions,1))
def get_jacobi_energy(
self,
):
"""
Taken from Canup & Esposito (1995/1994), with cues from Kokubo, Ida &
Makino (2000)
"""
A = self.all_encounters_A
B = self.all_encounters_B
# Constants
m_A = A.mass
m_B = B.mass
M = m_A + m_B
r_A = A.position
r_B = B.position
r = (
r_A * m_A.reshape((self.number_of_collisions,1)) +
r_B * m_B.reshape((self.number_of_collisions,1))
) / M.reshape((self.number_of_collisions,1))
r_p = self.primary.position
r_orb = r - r_p
v_A = A.velocity
v_B = B.velocity
v_c = (
v_A * m_A.reshape((self.number_of_collisions,1)) +
v_B * m_B.reshape((self.number_of_collisions,1))
) / M.reshape((self.number_of_collisions,1))
v_d = v_B - v_A
v_p = self.primary.velocity
v_orb = (v_c - v_p)
# Derived
x_hat = VectorQuantity(
(
r_orb /
r_orb.lengths().reshape((self.number_of_collisions,1))
),
units.none,
)
v_orb_hat = VectorQuantity(
(
v_orb /
v_orb.lengths().reshape((self.number_of_collisions,1))
),
units.none,
)
z_hat = x_hat.cross(v_orb_hat)
y_hat = x_hat.cross(z_hat)
x = (
r[:,0] * x_hat[:,0] +
r[:,1] * x_hat[:,1] +
r[:,2] * x_hat[:,2]
)
z = (
r[:,0] * z_hat[:,0] +
r[:,1] * z_hat[:,1] +
r[:,2] * z_hat[:,2]
)
Omega = (
v_orb[:,0] * y_hat[:,0] +
v_orb[:,1] * y_hat[:,1] +
v_orb[:,2] * y_hat[:,2]
) / (2*np.pi * r_orb.lengths())
# Remember this is a potential, not really an energy
# But since mass is always > 0, no problem.
self.E_J = (
0.5 * v_d.lengths_squared() -
1.5 * x**2 * Omega**2 +
0.5 * z**2 * Omega**2 -
self.G*M/r.lengths() +
4.5 * self.radius_Hill**2 * Omega**2
)
def get_hill_radius(
self,
):
A = self.all_encounters_A
B = self.all_encounters_B
m_A = A.mass
m_B = B.mass
M = m_A + m_B
r_A = A.position
r_B = B.position
r = (
r_A * m_A.reshape((self.number_of_collisions,1)) +
r_B * m_B.reshape((self.number_of_collisions,1))
) / M.reshape((self.number_of_collisions,1))
r_p = self.primary.position
r_orb = r - r_p
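        # Mutual Hill radius of the pair about the primary,
        # r_H = (M / (3 * M_primary))**(1./3.) * |r_orb|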
self.radius_Hill = (
M /
(3 * self.primary.mass)
)**(1./3) * r_orb.lengths()
def get_encounter_type(
self,
energy_unit = units.erg,
mass_unit = units.kg,
length_unit = units.km,
time_unit = units.s,
):
A = self.all_encounters_A
B = self.all_encounters_B
interaction_includes_planet = (
(self.primary.key == A.key) ^
(self.primary.key == B.key)
)
if self.f > 0.0:
jacobi_energy_negative = (
self.E_J < (0 | energy_unit / mass_unit)
)
within_hill_radius = (
(A.radius + B.radius) < (self.f * self.radius_Hill)
)
merging = (
interaction_includes_planet ^
(
jacobi_energy_negative &
within_hill_radius
)
)
else:
merging = interaction_includes_planet
        not_merging = np.logical_not(merging)
self.merging = np.where(merging)[0]
self.not_merging = np.where(not_merging)[0]
self.colliding = self.not_merging#np.where(not_merging)[0]
#self.colliding = np.where(not_merging & approaching)[0]
def resolve_rebounders(
self,
move_particles = True,
correct_for_multiple_collisions = False,
):
A = self.all_encounters_A
B = self.all_encounters_B
if move_particles:
# Make sure the radii no longer overlap
# This introduces an additional kick, but it prevents singularities...
m_A = A.mass
m_B = B.mass
M = m_A + m_B
r = B.position - A.position
#Distance the particles are overlapping:
d = r.lengths() - B.radius - A.radius
n_hat = VectorQuantity(
(
r /
r.lengths().reshape((self.number_of_collisions,1))
),
units.none,
)
#Displacement post-velocity change:
disp = d.reshape((self.number_of_collisions,1)) * n_hat
A.position += (m_B/M).reshape((self.number_of_collisions,1)) * disp
B.position += -(m_A/M).reshape((self.number_of_collisions,1)) * disp
# Sync
self.particles_modified.add_particles(A)
self.particles_modified.add_particles(B)
def resolve_mergers(
self,
):
# Conserve position and velocity of center-of-mass
# Combine total mass in the most massive particle
# Choose the first one if masses are equal
A = self.all_encounters_A
B = self.all_encounters_B
# This has to be a for loop, since we have to account for multiple collisions with one object in one timestep.
for i in range(len(self.merging)):
index = self.merging[i]
if B[index].mass > A[index].mass:
seed = B[index]
merge_with = A[index]
else:
seed = A[index]
merge_with = B[index]
dist = (seed.position-merge_with.position).lengths()
if merge_with.key in self.particles_removed.key:
print("already merged!")
break
if seed.key in self.particles_removed.key:
print("This should never happen!")
print(seed.key)
break
if seed.key in self.particles_modified.key:
# Particle already exists in modified form,
# probably had a collision.
# Use the modified form
# and remove it from the already-done list!
seed = self.particles_modified.select(
lambda x: x == seed.key,["key"])[0].copy()
self.particles_modified.remove_particle(seed)
rho = seed.mass / (4/3. * np.pi * seed.radius**3)
if merge_with.key in self.particles_modified.key:
merge_with = self.particles_modified.select(
lambda x: x == merge_with.key, ["key"])[0].copy()
self.particles_modified.remove_particle(merge_with)
particles_to_merge = Particles()
particles_to_merge.add_particle(seed)
particles_to_merge.add_particle(merge_with)
new_particle = seed.copy()
new_particle.position = particles_to_merge.center_of_mass()
new_particle.velocity = particles_to_merge.center_of_mass_velocity()
new_particle.mass = particles_to_merge.mass.sum()
new_particle.radius = particle_radius(new_particle.mass, rho)
self.particles_removed.add_particle(merge_with)
self.particles_modified.add_particle(new_particle)
class Planetary_Disc(object):
"""
Class to resolve encounters and collisions in a disc around a planet.
Collisions with the planet are also taken into account.
"""
def __init__(self, options):
"""
Initialise particles, identify subgroups.
"""
self.options = options
convert_nbody = self.options["converter"]
self.f = 0. if self.options["rubblepile"] else 1.0
        self.converter = convert_nbody if convert_nbody is not None else (
nbody_system.nbody_to_si(
1|nbody_system.length,
1|nbody_system.mass,
)
)
self.particles = Particles()
self.integrators = []
self.encounters = []
self.sync_attributes = ["mass", "radius", "x", "y", "z", "vx", "vy", "vz"]
self.length_unit = units.AU
self.mass_unit = units.kg
self.speed_unit = units.kms
self.energy_unit = units.erg
self.time_unit = units.yr
self.particles.collection_attributes.nbody_length = self.converter.to_si(1|nbody_system.length)
self.particles.collection_attributes.nbody_mass = self.converter.to_si(1|nbody_system.mass)
self.time_margin = 0 | self.time_unit
self.model_time = 0 | self.time_unit
self.kinetic_energy = 0 | self.energy_unit
self.potential_energy = 0 | self.energy_unit
self.timestep = self.options["timestep"]
self.CollisionResolver = BotsRots()
def exit_graceful(self):
self.write_backup()
exit()
def write_backup(self, filename="continue.hdf5"):
self.particles.collection_attributes.time = self.model_time
if self.options["gravity"]=="Rebound":
self.particles.collection_attributes.timestep = self.integrator.model_time / self.timestep
if self.options["gravity"]=="Bonsai":
self.particles.collection_attributes.timestep = self.integrator.model_time / self.timestep
#self.particles.collection_attributes.grav_parameters = self.integrator.parameters
write_set_to_file(self.particles,filename,"amuse")
def evolve_model(self,time):
if options["verbose"]>0:
print("#Evolving to %s"%(time/self.timestep))
if time > self.model_time:
#print(self.particles[0])
number_of_encounters = 0
last_encounter_0 = -1
last_encounter_1 = -1
if options["verbose"]>0:
print("#%s > %s, evolving..."%(
time/self.timestep,
self.model_time/self.timestep,
))
self.integrator.evolve_model(time + 0.000001*self.timestep)
if options["verbose"]>0:
print("#integrator now at %s"%(self.integrator.model_time/self.timestep))
# Detect an error, save data in that case
            if np.isnan(self.integrator.particles[0].x.number):
self.exit_graceful()
else:
if options["verbose"]>0:
print("#Updating model")
self.from_integrator_to_particles.copy()
if options["verbose"]>0:
print("#Getting energies from model")
self.model_time = self.integrator.model_time
self.kinetic_energy = self.integrator.kinetic_energy
self.potential_energy = self.integrator.potential_energy
if (
self.options["gravity"]=="Rebound" or
self.options["gravity"]=="Bonsai"
):
if self.options["verbose"]>0:
print("#Timesteps completed: %s"%(self.integrator.model_time / self.timestep))
if options["verbose"]>0:
print("#Handling collisions")
if self.collision_detection.is_set():
number_of_loops = 0
if self.options["gravity"] == "ph4":
max_number_of_loops = len(self.particles)
else:
max_number_of_loops = 1
while (len(self.collision_detection.particles(0)) > 0) and number_of_loops < max_number_of_loops:
number_of_loops += 1
this_encounter_0 = self.collision_detection.particles(0)[0].key
this_encounter_1 = self.collision_detection.particles(1)[0].key
if (this_encounter_0 == last_encounter_0 and this_encounter_1 == last_encounter_1):
p0 = self.collision_detection.particles(0)[0]
p1 = self.collision_detection.particles(1)[0]
last_encounter_0 = this_encounter_0
last_encounter_1 = this_encounter_1
number_of_encounters += len(self.collision_detection.particles(0))
#m_before = self.integrator.particles.mass.sum()
self.resolve_encounters()
#m_after = self.integrator.particles.mass.sum()
#if (
# np.abs((m_after - m_before).value_in(units.MEarth)) >
# self.converter.to_si(
# (1e-10|nbody_system.mass)
# ).value_in(units.MEarth)
# ):
# print("#Mass changed!", (m_after - m_before).as_quantity_in(units.MEarth))
self.integrator.evolve_model(time + 0.000001*self.timestep)
if self.options["verbose"]>0:
print("#Handled %i encounters this timestep"%(number_of_encounters))
if options["verbose"]>0:
print("#Done")
def define_subgroups(self):
#self.star = self.particles[0]
#self.planet = self.particles[1]
#self.disc = self.particles[2:]
self.star = self.particles.select(lambda x: x == "star", ["type"])
self.planet = self.particles.select(lambda x: x == "planet", ["type"])
#self.moon = self.particles.select(lambda x: x == "moon", ["type"])
self.disc = self.particles.select(lambda x: x == "disc", ["type"])
def add_particle(self, particle):
self.add_particles(particle.as_set())
#self.particles.add_particle(particle)
#self.integrator.particles.add_particle(particle)
#self.define_subgroups()
def add_particles(self, particles):
self.particles.add_particles(particles)
self.integrator.particles.add_particles(particles)
self.define_subgroups()
def remove_particle(self, particle):
self.remove_particles(particle.as_set())
#self.particles.remove_particle(particle)
#self.integrator.particles.remove_particle(particle)
#self.define_subgroups()
def remove_particles(self, particles):
if len(particles) > 0:
if options["verbose"]>0:
print("#Removing %i particles"%(len(particles)))
#from_encounter_to_particles = \
# particles.new_channel_to(self.particles)
#from_encounter_to_particles.copy_attributes(self.sync_attributes)
#self.from_particles_to_integrator.copy_attributes(self.sync_attributes)
self.integrator.particles.remove_particles(particles)
self.particles.remove_particles(particles)
#print(len(self.particles),len(self.integrator.particles))
self.define_subgroups()
def add_integrator(self, integrator):
self.integrator = integrator
self.collision_detection = integrator.stopping_conditions.collision_detection
try:
self.integrator_timestep = integrator.parameters.timestep
self.time_margin = 0.5 * self.integrator_timestep
        except Exception:
self.integrator_timestep = False
if not options["disable_collisions"]:
self.collision_detection.enable()
self.from_integrator_to_particles = \
self.integrator.particles.new_channel_to(self.particles)
self.from_particles_to_integrator = \
self.particles.new_channel_to(self.integrator.particles)
def resolve_encounters(
self,
):
if options["verbose"]>1:
print("%d : Resolving encounters"%(clocktime.time()-starttime))
#f = 1.0 # fraction of the Hill radius
#print(self.integrator.particles[0])
#print(self.particles[0])
colliders_i = self.particles.get_indices_of_keys(self.collision_detection.particles(0).key)
colliders_j = self.particles.get_indices_of_keys(self.collision_detection.particles(1).key)
d_pos, d_vel = self.CollisionResolver.handle_collisions(self.particles,colliders_i,colliders_j)
self.particles.position += d_pos
self.particles.velocity += d_vel
self.from_particles_to_integrator.copy_attributes(["mass","x","y","z","vx","vy","vz"])
self.from_particles_to_integrator.copy_attributes(["radius"])
distance_to_planet = (self.disc.position - self.planet.position).lengths() - self.planet.radius - self.disc.radius
colliding_with_planet = np.where(distance_to_planet < 0|self.planet.x.unit)
planet_and_colliders = self.planet + self.disc[colliding_with_planet]
self.planet.position = planet_and_colliders.center_of_mass()
self.planet.velocity = planet_and_colliders.center_of_mass_velocity()
self.planet.mass = planet_and_colliders.mass.sum()
self.remove_particles(self.disc[colliding_with_planet])
#print(self.integrator.particles[0])
#print(self.particles[0])
#self.disc[colliding_with_planet].x *= 50
#self.disc[colliding_with_planet].mass *= 0
#self.disc[colliding_with_planet].radius *= 0
#self.from_particles_to_integrator.copy_attributes(["mass","x","y","z","vx","vy","vz"])
#self.from_particles_to_integrator.copy_attributes(["radius"])
def main(options):
starttime = clocktime.time()
now = clocktime.strftime("%Y%m%d%H%M%S")
# Read the initial conditions file provided. This uses "Giant Impact" units.
mass_unit = options["unit_mass"]
length_unit = options["unit_length"]
converter = nbody_system.nbody_to_si(1|mass_unit, 1|length_unit)
options["converter"] = converter
time = options["time_start"]
if options["verbose"]>1:
print("%d : Start reading particles"%(clocktime.time()-starttime))
if len(sys.argv) >= 2:
filename = sys.argv[1]
ext = filename.split('.')[-1]
if ext == "hdf5":
particles = read_set_from_file(filename, "amuse")
rundir = "./runs/" + filename.split('/')[-1][:-5]
else:
print("Unknown filetype")
exit()
else:
print("No initial conditions given")
exit()
if options["verbose"]>1:
print("%d : Read particles"%(clocktime.time()-starttime))
rundir += "-%s-%s"%(
now,
options["gravity"],
)
if options["rubblepile"]:
rundir += "-rubblepile"
backupdir = rundir + "/backups"
plotdir = rundir + "/plots"
try:
os.makedirs(rundir)
os.makedirs(backupdir)
os.makedirs(plotdir)
shutil.copy(sys.argv[0],rundir)
    except OSError:
#FIXME make a new dir in this case, to prevent overwriting old files
# use a datetime stamp
print("#directories already present")
exit()
particles[0].colour = "blue"
particles[1:].colour = "black"
kepler_time = converter.to_si(
2 * np.pi *
(
(1|nbody_system.length)**3 /
((1|nbody_system.mass) * nbody_system.G)
)**0.5
)
converter_earthunits = nbody_system.nbody_to_si(1|units.MEarth,1|units.REarth)
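    # kepler_time is the orbital period at one n-body length unit; the
    # integration timestep below is 1/16 of the corresponding dynamical
    # time 1/Omega (i.e. period / (2 pi)).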
options["timestep"] = (kepler_time/(2*np.pi))*(2**-4)
if options["verbose"]>1:
print("%d : Starting gravity"%(clocktime.time()-starttime))
# Start up gravity code
if options["gravity"] == "Rebound":
gravity = Rebound(converter,redirection="none")
gravity.parameters.timestep = options["timestep"]
gravity.parameters.integrator = options["integrator"]
gravity.parameters.solver = "compensated"
#gravity.parameters.solver = "tree"
#gravity.parameters.opening_angle2 = 0.25
#gravity.parameters.boundary = "open"
#gravity.parameters.boundary_size = 10|units.REarth
if options["whfast_corrector"]:
gravity.parameters.whfast_corrector = options["whfast_corrector"]
elif options["gravity"] == "Bonsai":
#gravity = Bonsai(converter,redirection="none")
gravity = Bonsai(converter,)
gravity.parameters.timestep = options["timestep"]
gravity.parameters.opening_angle = 0.5
#gravity.parameters.epsilon_squared = (0.1 * particles[-1].radius)**2
gravity.parameters.epsilon_squared = 0.0 | nbody_system.length**2
elif options["gravity"] == "Pikachu":
#gravity = Bonsai(converter,redirection="none")
gravity = Pikachu(converter,)
gravity.parameters.timestep = options["timestep"]
gravity.parameters.opening_angle = 0.5
#gravity.parameters.epsilon_squared = (0.1 * particles[-1].radius)**2
gravity.parameters.epsilon_squared = 0.0 | nbody_system.length**2
elif options["gravity"] == "ph4":
if options["use_gpu"]:
gravity = ph4(converter, mode="gpu", redirection="none")
else:
gravity = ph4(converter, redirection="none")
elif options["gravity"] == "phigrape":
if options["use_gpu"]:
gravity = PhiGRAPE(converter, mode="gpu")
else:
gravity = PhiGRAPE(converter)
elif options["gravity"] == "Hermite":
gravity = Hermite(converter, number_of_workers=6)
gravity.parameters.dt_min = options["timestep"]
gravity.parameters.dt_max = options["timestep"]
else:
print("Unknown gravity code")
exit()
print(gravity.parameters)
planetary_disc = Planetary_Disc(options)
planetary_disc.add_integrator(gravity)
planetary_disc.add_particles(particles)
t_start = time
plot_time = time
backup_time = time
timestep = options["timestep"]
plot_timestep = options["timestep_plot"]
backup_timestep = options["timestep_backup"]
t_end = options["time_end"]
backup = 0
plot = 0
log_time = VectorQuantity([],units.s)
log_kinetic_energy = VectorQuantity([],units.erg)
log_potential_energy = VectorQuantity([],units.erg)
log_angular_momentum = VectorQuantity([],units.AU**2 * units.MEarth * units.yr**-1)
log = open(rundir+"/log.txt",'w')
log.write("#1 time = %s\n"%(converter_earthunits.to_si(1|nbody_system.time)))
log.write("#1 length = %s\n"%(converter_earthunits.to_si(1|nbody_system.length)))
log.write("#1 mass = %s\n"%(converter_earthunits.to_si(1|nbody_system.mass)))
log.write("#1 energy = %s\n"%(converter_earthunits.to_si(1|nbody_system.energy)))
log.write("#Time N E_kin E_pot l2 M_disc a_mean a_sigma e_mean e_sigma inc_mean inc_sigma\n")
log.write("#%s n %s %s %s %s %s %s\n"%(
units.s,
nbody_system.energy,#s.erg,
nbody_system.energy,#s.erg,
(units.REarth**2 * units.MEarth * units.day**-1)**2,
units.MEarth,
units.REarth,
units.REarth,
)
)
log.flush()
time += options["timestep"]
if options["verbose"]>1:
print("%d : Starting loop"%(clocktime.time()-starttime))
while time < t_end:
if time >= plot_time:
if options["verbose"]>1:
print("%d : Making plot"%(clocktime.time()-starttime))
plot_system(
planetary_disc.particles,
"%s/plot-%05i.png"%(plotdir,plot),
center_on_most_massive=False,
center = planetary_disc.planet.position,
time = planetary_disc.model_time,
)
plot += 1
plot_time += plot_timestep
if time >= backup_time:
if options["verbose"]>1:
print("%d : Making backup"%(clocktime.time()-starttime))
planetary_disc.write_backup(filename="%s/savefile-%i.hdf5"%(backupdir,backup))
backup += 1
backup_time += backup_timestep
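        # Only advance the target time (and write a log entry) once the integrator
        # has caught up to within half a timestep of it; otherwise keep evolving.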
if (time - planetary_disc.model_time) <= 0.5 * timestep:
if options["verbose"]>0:
print("#Increasing timestep: %s - %s <= 0.5"%(
planetary_disc.model_time / planetary_disc.timestep,
time / planetary_disc.timestep,
))
time += timestep
kinetic_energy = planetary_disc.kinetic_energy
potential_energy = planetary_disc.potential_energy
angular_momentum = planetary_disc.particles.total_angular_momentum()
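            # Orbital elements of the disc particles relative to the planet, for logging.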
semimajor_axis, eccentricity, true_anomaly,inc, long_asc_node, arg_per_mat = orbital_elements_for_rel_posvel_arrays(
planetary_disc.disc.position - planetary_disc.planet.position,
planetary_disc.disc.velocity - planetary_disc.planet.velocity,
planetary_disc.planet.mass,#total_masses,
G=constants.G,
)
#FIXME kinetic energy per particle
#FIXME angular momentum per particle
log.write("%s %i %s %s %s %s %s %s %s %s %s %s\n"%(
planetary_disc.model_time.value_in(units.s),
len(planetary_disc.particles),
converter_earthunits.to_nbody(kinetic_energy).value_in(nbody_system.energy),
converter_earthunits.to_nbody(potential_energy).value_in(nbody_system.energy),
(
angular_momentum[0]**2 +
angular_momentum[1]**2 +
angular_momentum[2]**2
).value_in(units.REarth**4 * units.MEarth**2 * units.day**-2),
planetary_disc.disc.mass.sum().value_in(units.MEarth),
semimajor_axis.mean().value_in(units.REarth),
semimajor_axis.std().value_in(units.REarth),
eccentricity.mean(),
eccentricity.std(),
inc.mean(),
inc.std(),
)
)
log.flush()
else:
if options["verbose"]>0:
print("#Not increasing timestep: %s - %s > 0.5"%(
planetary_disc.model_time / planetary_disc.timestep,
time / planetary_disc.timestep,
))
planetary_disc.evolve_model(time)
gravity.stop()
log.close()
if __name__ == "__main__":
options = {}
options["verbose"] = 0
options["rubblepile"] = True
options["gravity"] = "Bonsai"
options["integrator"] = "leapfrog"
options["whfast_corrector"] = 0
options["use_gpu"] = True
options["time_start"] = 0. | units.yr
options["time_end"] = 10000. |units.yr
#options["timestep"] = 24 |units.hour
options["timestep_plot"] = 2 | units.day#0.275 | units.yr
options["timestep_backup"] = 365.25 |units.day
options["unit_mass"] = 80*units.MJupiter
options["disable_collisions"] = False
options["unit_length"] = 80 * units.RJupiter#get_roche_limit_radius(1.0|units.g * units.cm**-3).value_in(units.RJupiter) * units.RJupiter
main(options)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Utility to run Automated Human Readable Description (AHRD) pipeline.
<https://github.com/groupschoof/AHRD>
"""
import os.path as op
from os import symlink
import sys
import re
import logging
from jcvi.formats.base import must_open
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, glob
##### Compiled RegExps #####
# Cellular locations
loc_pat = re.compile(r",\s*(chloroplastic|cytoplasmic|mitochondrial).*?\s$", re.I)
# Any word that matches e.g. Os02g0234800
osg_pat = re.compile(r"\bOs\d{2}g\d{7}.*?\s", re.I)
# (fragment)
frag_pat = re.compile(r"\(fragment[s]?\)", re.I)
# Trailing protein numeric copy (e.g. Myb 1)
trail_pat = re.compile(r"(?<!type)\s\d+\s*$", re.I)
# UPF
upf_pat = re.compile(r"^UPF.*$")
# Remove 'DDB_G\d+' ID
ddb_pat = re.compile(r"\s+DDB_G\d+", re.I)
# Any AHRD that matches e.g. "AT5G54690-like protein"
atg_pat = re.compile(r"\bAT[1-5M]G\d{5}-like protein", re.I)
# remove 'arabidopsis thaliana'
atg_id_pat = re.compile(r"[_]*AT\d{1}G\d+[/]*", re.I)
athila_pat1 = re.compile(r"Belongs to|^Encodes|^Expression|^highly", re.I)
athila_pat2 = re.compile(r"^Arabidopsis thaliana ", re.I)
athila_pat3 = re.compile(r"^Arabidopsis ", re.I)
athila_pat4 = re.compile(r"BEST Arabidopsis thaliana protein match is: ", re.I)
# '? => '
apos_pat = re.compile(r"'\?")
# > => none
gt_pat = re.compile(r">")
# -like to -like protein
like_pat = re.compile(r"[-]like$", re.I)
# 'repeat$' to 'repeat protein'
repeat_pat = re.compile(r"repeat$", re.I)
# re used by the following 3 cases
Protein_pat = re.compile(r"Protein\s+", re.I)
# 'binding$' to 'binding protein'
binding_pat = re.compile(r"binding$", re.I)
# 'domain$' to 'domain-containing protein'
domain_pat = re.compile(r"domain$", re.I)
# 'related$' to '-like protein'
related_pat = re.compile(r"[,\s+]*[\s+|-]*related$", re.I)
# '[0-9]+ homolog' to '-like protein'
homolog_pat1 = re.compile(r"(?<!type)\s+\d+\s+homolog.*$", re.I)
# 'Protein\s+(.*)\s+homolog' to '$1-like protein'
homolog_pat2 = re.compile(r"^Protein([\s+\S+]+)\s+homolog.*", re.I)
# 'homolog protein' to '-like protein'
homolog_pat3 = re.compile(r"\s+homolog\s+protein.*", re.I)
# 'homolog \S+' to '-like protein'
homolog_pat4 = re.compile(r"\s+homolog\s+\S+$", re.I)
# 'homologue$' to '-like protein'
homolog_pat5 = re.compile(r"\s+homologue[\s+\S+]$", re.I)
# 'homolog$' to '-like protein'
homolog_pat6 = re.compile(r"\s+homolog$", re.I)
# 'Agenet domain-containing protein / bromo-adjacent homology (BAH) domain-containing protein'
# to 'Agenet and bromo-adjacent homology (BAH) domain-containing protein'
agenet_pat = re.compile(r"Agenet domain-containing protein \/ ", re.I)
# plural to singular
plural_pat = re.compile(r"[deinr]s$", re.I)
# 'like_TBP' or 'likeTBP' to 'like TBP'
tbp_pat = re.compile(r"like[_]*TBP", re.I)
# 'protein protein' to 'protein'
prot_pat = re.compile(r" protein protein", re.I)
# 'Candidate|Hypothetical|Novel|Predicted|Possible' to 'Putative'
put_pat = re.compile(r"Candidate|Hypothetical|Novel|Predicted|Possible", re.I)
# 'dimerisation' to 'dimerization'
dimer_pat = re.compile(r"dimerisation", re.I)
# '\s+LENGTH=\d+' to ''
length_pat = re.compile(r"\s+LENGTH\=\d+", re.I)
# disallowed words
disallow = ("genome", "annotation", "project")
disallow_pat = re.compile("|".join(str(x) for x in disallow))
# disallowed organism names
organism = ("thaliana", "rickettsia", "rice", "yeast")
organism_pat = re.compile("|".join("^.*{0}".format(str(x)) for x in organism))
# consolidate glycosidic links
glycosidic_link_pat = re.compile("\d+,\d+")
# Kevin Silverstein suggested names (exclude list)
spada = ("LCR", "RALF", "SCR")
spada_pat = re.compile("|".join("^{0}$".format(str(x)) for x in spada))
# names with all capital letters (maybe followed by numbers)
sym_pat = re.compile(r"^[A-Z]+[A-Z0-9\-]{0,}$")
lc_sym_pat = re.compile(r"^[A-Za-z]{1}[a-z]+[0-9]{1,}$")
eol_sym_pat = re.compile(r"\([A-Z]+[A-Z0-9\-]{0,}\)$")
# sulfer -> sulfur
sulfer_pat = re.compile(r"sulfer")
# assessory -> accessory
assessory_pat = re.compile(r"assessory")
# british to american spelling conversion
# -ise -> -ize
# -isation -> ization
ise_pat = re.compile(r"\b([A-Za-z]+)ise\b")
isation_pat = re.compile(r"\b([A-Za-z]+)isation\b")
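# Configuration template passed to AHRD. Placeholders are filled in batch():
# {0} AHRD resources dir, {1} file basename, {2} input protein FASTA,
# {3} output csv, {4}-{6} token score weights (bit/database/overlap),
# {7} optional InterProScan block (see iprscanTemplate below).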
Template = """
proteins_fasta: {2}
blast_dbs:
swissprot:
weight: 100
file: swissprot/{1}.swissprot.pairwise
blacklist: {0}/blacklist_descline.txt
filter: {0}/filter_descline_sprot.txt
token_blacklist: {0}/blacklist_token.txt
description_score_bit_score_weight: 0.2
tair:
weight: 50
file: tair/{1}.tair.pairwise
blacklist: {0}/blacklist_descline.txt
filter: {0}/filter_descline_tair.txt
token_blacklist: {0}/blacklist_token.txt
description_score_bit_score_weight: 0.4
trembl:
weight: 10
file: trembl/{1}.trembl.pairwise
blacklist: {0}/blacklist_descline.txt
filter: {0}/filter_descline_trembl.txt
token_blacklist: {0}/blacklist_token.txt
description_score_bit_score_weight: 0.4
{7}
token_score_bit_score_weight: {4}
token_score_database_score_weight: {5}
token_score_overlap_score_weight: {6}
description_score_relative_description_frequency_weight: 0.6
output: {3}
"""
iprscanTemplate = """
interpro_database: ./interpro.xml
interpro_result: {0}
"""
# Necessary for the script to know the location of `interpro.xml` and `interpro.dtd`
iprscan_datadir = "/usr/local/devel/ANNOTATION/iprscan/iprscan_v4.7/data"
def main():
actions = (
('batch', 'batch run AHRD'),
('merge', 'merge AHRD run results'),
('fix', 'fix AHRD names'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
Unknown = "Unknown protein"
Hypothetical = "hypothetical protein"
def fix_text(s):
# Fix descriptions like D7TDB1 (
s = re.sub("([A-Z0-9]){6} \(", "", s)
s = s.translate(None, "[]")
s = s.replace("(-)", "[-]")
s = s.replace("(+)", "[+]")
s = s.replace("(Uncharacterized protein)", "")
s = s.translate(None, "()")
# before trimming off at the first ";", check if name has glycosidic
# linkage information (e.g 1,3 or 1,4). If so, also check if multiple
# linkages are separated by ";". If so, replace ";" by "-"
m = re.findall(glycosidic_link_pat, s)
if m and ";" in s:
s = re.sub(";\s*", "-", s)
s = s.split(";")[0]
# Cellular locations
# Any word that matches e.g. AT5G54690
# Any word that matches e.g. Os02g0234800
# (fragment)
# Trailing protein numeric copy (e.g. Myb 1)
# UPF
# Remove 'DDB_G\d+' ID
# '_At[0-9]+g[0-9]+' to ''
for pat in (loc_pat, osg_pat, frag_pat, trail_pat, upf_pat, ddb_pat):
# below is a hack since word boundaries don't work on /
s = s.strip() + " "
s = re.sub(pat, "", s)
# '? => '
s = re.sub(apos_pat, "'", s)
# > => none
s = re.sub(gt_pat, "", s)
# reduce runs such as -- '''
s = re.sub(r"[-]+", "-", s)
s = re.sub(r"[']+", "'", s)
s = s.strip()
# -like to -like protein
s = re.sub(like_pat, "-like protein", s)
# 'repeat$' to 'repeat protein'
if re.search(repeat_pat, s):
s += "-containing protein"
# 'binding$' to 'binding protein'
if re.search(binding_pat, s):
s += " protein"
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'domain$' to 'domain-containing protein'
if re.search(domain_pat, s):
s += "-containing protein"
if re.search(r"-domain", s):
s = re.sub(r"-domain", " domain", s)
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'related$' to '-like protein'
if re.search(related_pat, s):
s = re.sub(related_pat, "-like protein", s)
if re.match(Protein_pat, s) and not re.match(r"Protein kinase", s):
s = re.sub(Protein_pat, "", s)
# '[0-9]+ homolog' to '-like protein'
if re.search(homolog_pat1, s):
s = re.sub(homolog_pat1, "-like protein", s)
if re.match(Protein_pat, s):
s = re.sub(Protein_pat, "", s)
# 'Protein\s+(.*)\s+homolog' to '$1-like protein'
match = re.search(homolog_pat2, s)
if match and not re.match(r"Protein kinase", s):
ret = match.group(1)
s = re.sub(homolog_pat2, ret + "-like protein", s)
s = re.sub(r"^\s+", "", s)
s = s.capitalize()
# 'homolog protein' to '-like protein'
# 'homolog \S+' to '-like protein'
# 'homologue$' to '-like protein'
# 'homolog$' to '-like protein'
for pat in (homolog_pat3, homolog_pat4, homolog_pat5, homolog_pat6):
if re.search(pat, s):
s = re.sub(pat, "-like protein", s)
# 'Agenet domain-containing protein / bromo-adjacent homology (BAH) domain-containing protein'
# to 'Agenet and bromo-adjacent homology (BAH) domain-containing protein'
if re.search(agenet_pat, s):
s = re.sub(agenet_pat, "Agenet and ", s)
# plural to singular
if re.search(plural_pat, s):
#if s.find('biogenesis') == -1 and s.find('Topors') == -1 and s.find('allergens') == -1:
if s.find('biogenesis') == -1 and s.find('Topors') == -1:
s = re.sub(r"s$", "", s)
# 'like_TBP' or 'likeTBP' to 'like TBP'
if re.search(tbp_pat, s):
s = re.sub(tbp_pat, "like TBP", s)
# 'protein protein' to 'protein'
if re.search(prot_pat, s):
s = re.sub(prot_pat, " protein", s)
# 'Candidate|Hypothetical|Novel|Predicted|Possible' to 'Putative'
if re.search(put_pat, s):
s = re.sub(put_pat, "Putative", s)
# 'dimerisation' to 'dimerization'
if re.search(dimer_pat, s):
s = re.sub(dimer_pat, "dimerization", s)
# Any AHRD that matches e.g. "AT5G54690-like protein"
# Any AHRD that contains the words '^Belongs|^Encoded|^Expression|^highly'
for pat in (atg_pat, athila_pat1):
if re.search(pat, s):
s = Unknown
# remove 'arabidopsis[ thaliana]' and/or embedded Atg IDs
for pat in (atg_id_pat, athila_pat2, athila_pat3, athila_pat4):
# below is a hack since word boundaries don't work on /
s = s.strip() + " "
s = re.sub(pat, "", s)
# remove "\s+LENGTH=\d+" from TAIR deflines
if re.search(length_pat, s):
s = re.sub(length_pat, "", s)
# if name has a dot followed by a space (". ") in it and contains multiple
# parts separated by a comma, strip name starting from first occurrence of ","
if re.search(r"\. ", s):
if re.search(r",", s):
s = s.split(",")[0]
# if name contains any of the disallowed words,
# remove word occurrence from name
# if name contains references to any other organism, trim name upto
# that occurrence
for pat in (disallow_pat, organism_pat):
if re.search(pat, s):
s = re.sub(pat, "", s)
s = s.strip()
# if name is entirely a gene symbol-like (all capital letters, maybe followed by numbers)
# add a "-like protein" at the end
if (re.search(sym_pat, s) or re.search(lc_sym_pat, s)) \
and not re.search(spada_pat, s):
s = s + "-like protein"
# if gene symbol in parantheses at EOL, remove symbol
if re.search(eol_sym_pat, s):
s = re.sub(eol_sym_pat, "", s)
# if name terminates at a symbol([^A-Za-z0-9_]), trim it off
if re.search(r"\W{1,}$", s) and not re.search(r"\)$", s):
s = re.sub("\W{1,}$", "", s)
# change sulfer to sulfur
if re.search(sulfer_pat, s):
s = re.sub(sulfer_pat, "sulfur", s)
# change assessory to accessory
if re.search(assessory_pat, s):
s = re.sub(assessory_pat, "accessory", s)
# change -ise/-isation to -ize/-ization
match = re.search(ise_pat, s)
if match:
ret = match.group(1)
s = re.sub(ise_pat, "{0}ize".format(ret), s)
match = re.search(isation_pat, s)
if match:
ret = match.group(1)
s = re.sub(isation_pat, "{0}ization".format(ret), s)
"""
case (qr/^Histone-lysine/) { $ahrd =~ s/,\s+H\d{1}\s+lysine\-\d+//gs; }
"""
sl = s.lower()
# Any mention of `clone` or `contig` is not informative
if "clone" in sl or "contig" in sl:
s = Unknown
# All that's left is `protein` is not informative
if sl in ("protein", "protein, putative", ""):
s = Unknown
if Unknown.lower() in sl:
s = Unknown
if "FUNCTIONS IN".lower() in sl and "unknown" in sl:
s = Unknown
if "uncharacterized" in sl:
s = "uncharacterized protein"
s = s.replace(", putative", "")
s = s.replace("Putative ", "")
if s == Unknown or s.strip() == "protein":
s = Hypothetical
# Compact all spaces
s = ' '.join(s.split())
assert s.strip()
return s
def fix(args):
"""
%prog fix ahrd.csv > ahrd.fixed.csv
Fix ugly names from Uniprot.
"""
p = OptionParser(fix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
csvfile, = args
fp = open(csvfile)
fw = must_open(opts.outfile, "w")
for row in fp:
if row[0] == '#':
continue
if row.strip() == "":
continue
atoms = row.rstrip("\r\n").split("\t")
name, hit, ahrd_code, desc = atoms[:4] \
if len(atoms) > 2 else \
                (atoms[0], None, None, atoms[-1])
newdesc = fix_text(desc)
if hit and hit.strip() != "" and newdesc == Hypothetical:
newdesc = "conserved " + newdesc
print >> fw, "\t".join(atoms[:4] + [newdesc] + atoms[4:])
def merge(args):
"""
%prog merge output/*.csv > ahrd.csv
    Merge AHRD results, removing redundant headers, empty lines, etc. If there
    are multiple lines containing the same ID (first column), only the first
    occurrence is retained.
"""
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
csvfiles = args
cf = csvfiles[0]
fp = open(cf)
for row in fp:
if row.startswith("Protein"):
break
header = row.rstrip()
print header
seen = set()
for cf in csvfiles:
fp = open(cf)
for row in fp:
if row[0] == '#':
continue
if row.strip() == "":
continue
if row.strip() == header:
continue
atoms = row.rstrip().split("\t")
id = atoms[0]
if id in seen:
logging.error("ID `{0}` ignored.".format(id))
continue
seen.add(id)
print row.strip()
def batch(args):
"""
%prog batch splits output
The arguments are two folders.
Input FASTA sequences are in splits/.
Output csv files are in output/.
Must have folders swissprot/, tair/, trembl/ that contains the respective
BLAST output. Once finished, you can run, for example:
$ parallel java -Xmx2g -jar ~/code/AHRD/dist/ahrd.jar {} ::: output/*.yml
"""
p = OptionParser(batch.__doc__)
ahrd_weights = { "blastp": [0.5, 0.3, 0.2],
"blastx": [0.6, 0.4, 0.0]
}
blast_progs = tuple(ahrd_weights.keys())
p.add_option("--path", default="~/code/AHRD/",
help="Path where AHRD is installed [default: %default]")
p.add_option("--blastprog", default="blastp", choices=blast_progs,
help="Specify the blast program being run. Based on this option," \
+ " the AHRD parameters (score_weights) will be modified." \
+ " [default: %default]")
p.add_option("--iprscan", default=None,
help="Specify path to InterProScan results file if available." \
+ " If specified, the yml conf file will be modified" \
+ " appropriately. [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
splits, output = args
mkdir(output)
bit_score, db_score, ovl_score = ahrd_weights[opts.blastprog]
for f in glob("{0}/*.fasta".format(splits)):
fb = op.basename(f).rsplit(".", 1)[0]
fw = open(op.join(output, fb + ".yml"), "w")
path = op.expanduser(opts.path)
dir = op.join(path, "test/resources")
outfile = op.join(output, fb + ".csv")
interpro = iprscanTemplate.format(opts.iprscan) if opts.iprscan else ""
print >> fw, Template.format(dir, fb, f, outfile, bit_score, db_score, ovl_score, interpro)
if opts.iprscan:
if not op.lexists("interpro.xml"):
symlink(op.join(iprscan_datadir, "interpro.xml"), "interpro.xml")
if not op.lexists("interpro.dtd"):
symlink(op.join(iprscan_datadir, "interpro.dtd"), "interpro.dtd")
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2016 GoDaddy Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import copy
import itertools
import os
import sys
import traceback
import jsonschema
from oslo_utils import reflection
import six
from failure import _utils as utils
class InvalidFormat(ValueError):
"""Exception raised when data is not in the right format."""
class NoActiveException(RuntimeError):
"""Exception raised when no current exception/exc_info() exists."""
class WrappedFailure(utils.StrMixin, Exception):
"""Wraps one or several failure objects.
When exception/s cannot be re-raised (for example, because the value and
traceback are lost in serialization) or there are several exceptions active
at the same time (due to more than one thread raising exceptions), we will
wrap the corresponding failure objects into this exception class and
*may* reraise this exception type to allow users to handle the contained
failures/causes as they see fit...
See the failure class documentation for a more comprehensive set of reasons
why this object *may* be reraised instead of the original exception.
:param causes: the :py:class:`~failure.Failure` objects
    that caused this exception to be raised.
"""
def __init__(self, causes):
super(WrappedFailure, self).__init__()
self._causes = []
for cause in causes:
if cause.check(type(self)) and cause.exception is not None:
# NOTE(imelnikov): flatten wrapped failures.
self._causes.extend(cause.exception)
else:
self._causes.append(cause)
def __iter__(self):
"""Iterate over failures that caused the exception."""
return iter(self._causes)
def __len__(self):
"""Return number of wrapped failures."""
return len(self._causes)
def check(self, *exc_classes):
"""Check if any of exception classes caused the failure/s.
:param exc_classes: exception types/exception type names to
search for.
If any of the contained failures were caused by an exception of a
given type, the corresponding argument that matched is returned. If
not then ``None`` is returned.
"""
if not exc_classes:
return None
for cause in self:
result = cause.check(*exc_classes)
if result is not None:
return result
return None
def __bytes__(self):
buf = six.BytesIO()
buf.write(b'WrappedFailure: [')
causes_gen = (six.binary_type(cause) for cause in self._causes)
buf.write(b", ".join(causes_gen))
buf.write(b']')
return buf.getvalue()
def __unicode__(self):
buf = six.StringIO()
buf.write(u'WrappedFailure: [')
causes_gen = (six.text_type(cause) for cause in self._causes)
buf.write(u", ".join(causes_gen))
buf.write(u']')
return buf.getvalue()
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
    For those who are curious, here are a few reasons why the original
    exception itself *may* not be reraised and a wrapped failure exception
    object may be raised instead. These explanations are *only* applicable
    when a failure object is serialized and deserialized (when it is retained
    inside the python process that the exception was created in, the original
    exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
        If the argument is an empty list/tuple/iterator, this method returns
        None. If the argument (after conversion to a list) contains a single
        ``Failure`` object, that failure is reraised. Otherwise, a
        :class:`~.WrappedFailure` exception is raised with the failure
        list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
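# A minimal round-trip sketch (illustrative only; it assumes `utils.extract_roots`
# reports the builtin base exception names that `validate` checks for):
#
#   try:
#       raise ValueError("boom")
#   except ValueError:
#       fail = Failure.from_exc_info()
#   data = fail.to_dict()
#   Failure.validate(data)
#   restored = Failure.from_dict(data)
#   assert restored.matches(fail)  # field-wise equal; exc_info is lost in the dict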
|
|
import unittest
from mock import Mock
from datetime import datetime
from tests.test_basefile import BaseFileTests
class CNTTests(BaseFileTests):
def setUp(self):
super(CNTTests, self).setUp()
self.filesys.open.return_value = MockFile()
from niprov.cnt import NeuroscanFile
self.constructor = NeuroscanFile
self.file = NeuroscanFile(self.path, dependencies=self.dependencies)
def test_Inspect_parses_experimental_basics(self):
out = self.file.inspect()
self.assertEqual(out['subject'], 'Jane Doe')
self.assertEqual(out['dimensions'], [32, 2080])
self.assertEqual(out['acquired'], datetime(2015,3,9,13,7,3))
self.assertEqual(out['sampling-frequency'], 1000)
self.assertEqual(out['duration'], 2080/1000.)
def test_Determines_modality(self):
out = self.file.inspect()
self.assertEqual(out['modality'], 'EEG')
def test_Preserves_modality_if_inherited(self):
pass # Doesn't have to preserve
class MockFile(object):
def __init__(self):
self.cursor = -1
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def read(self, nbytes):
self.cursor = self.cursor + 1
return _data[self.cursor]
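# Return values for successive MockFile.read() calls, one entry per header field
# in the Neuroscan .cnt layout; fields the tests never inspect are left as None.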
_data = [
None, #h.rev = self._fread(fid,12,'char')
None, #h.nextfile = self._fread(fid,1,'long')
None, #h.prevfile = self._fread(fid,1,'ulong')
None, #h.type = self._fread(fid,1,'char')
None, #h.id = self._fread(fid,20,'char')
None, #h.oper = self._fread(fid,20,'char')
None, #h.doctor = self._fread(fid,20,'char')
None, #h.referral = self._fread(fid,20,'char')
None, #h.hospital = self._fread(fid,20,'char')
#h.patient = self._fread(fid,20,'char')
b'Jane Doe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
None, #h.age = self._fread(fid,1,'short')
None, #h.sex = self._fread(fid,1,'char')
None, #h.hand = self._fread(fid,1,'char')
None, #h.med = self._fread(fid,20, 'char')
None, #h.category = self._fread(fid,20, 'char')
None, #h.state = self._fread(fid,20, 'char')
None, #h.label = self._fread(fid,20, 'char')
b'09/03/15' , #h.date = self._fread(fid,10, 'char')
b'13:07:03', #h.time = self._fread(fid,12, 'char')
None, #h.mean_age = self._fread(fid,1,'float')
None, #h.stdev = self._fread(fid,1,'float')
None, #h.n = self._fread(fid,1,'short')
None, #h.compfile = self._fread(fid,38,'char')
None, #h.spectwincomp = self._fread(fid,1,'float')
None, #h.meanaccuracy = self._fread(fid,1,'float')
None, #h.meanlatency = self._fread(fid,1,'float')
None, #h.sortfile = self._fread(fid,46,'char')
None, #h.numevents = self._fread(fid,1,'int')
None, #h.compoper = self._fread(fid,1,'char')
None, #h.avgmode = self._fread(fid,1,'char')
None, #h.review = self._fread(fid,1,'char')
None, #h.nsweeps = self._fread(fid,1,'ushort')
None, #h.compsweeps = self._fread(fid,1,'ushort')
None, #h.acceptcnt = self._fread(fid,1,'ushort')
None, #h.rejectcnt = self._fread(fid,1,'ushort')
None, #h.pnts = self._fread(fid,1,'ushort')
b' \x00', #h.nchannels = self._fread(fid,1,'ushort')
None, #h.avgupdate = self._fread(fid,1,'ushort')
None, #h.domain = self._fread(fid,1,'char')
None, #h.variance = self._fread(fid,1,'char')
b'\xe8\x03', #h.rate = self._fread(fid,1,'ushort')
None, #h.scale = self._fread(fid,1,'double')
None, #h.veogcorrect = self._fread(fid,1,'char')
None, #h.heogcorrect = self._fread(fid,1,'char')
None, #h.aux1correct = self._fread(fid,1,'char')
None, #h.aux2correct = self._fread(fid,1,'char')
None, #h.veogtrig = self._fread(fid,1,'float')
None, #h.heogtrig = self._fread(fid,1,'float')
None, #h.aux1trig = self._fread(fid,1,'float')
None, #h.aux2trig = self._fread(fid,1,'float')
None, #h.heogchnl = self._fread(fid,1,'short')
None, #h.veogchnl = self._fread(fid,1,'short')
None, #h.aux1chnl = self._fread(fid,1,'short')
None, #h.aux2chnl = self._fread(fid,1,'short')
None, #h.veogdir = self._fread(fid,1,'char')
None, #h.heogdir = self._fread(fid,1,'char')
None, #h.aux1dir = self._fread(fid,1,'char')
None, #h.aux2dir = self._fread(fid,1,'char')
None, #h.veog_n = self._fread(fid,1,'short')
None, #h.heog_n = self._fread(fid,1,'short')
None, #h.aux1_n = self._fread(fid,1,'short')
None, #h.aux2_n = self._fread(fid,1,'short')
None, #h.veogmaxcnt = self._fread(fid,1,'short')
None, #h.heogmaxcnt = self._fread(fid,1,'short')
None, #h.aux1maxcnt = self._fread(fid,1,'short')
None, #h.aux2maxcnt = self._fread(fid,1,'short')
None, #h.veogmethod = self._fread(fid,1,'char')
None, #h.heogmethod = self._fread(fid,1,'char')
None, #h.aux1method = self._fread(fid,1,'char')
None, #h.aux2method = self._fread(fid,1,'char')
None, #h.ampsensitivity = self._fread(fid,1,'float')
None, #h.lowpass = self._fread(fid,1,'char')
None, #h.highpass = self._fread(fid,1,'char')
None, #h.notch = self._fread(fid,1,'char')
None, #h.autoclipadd = self._fread(fid,1,'char')
None, #h.baseline = self._fread(fid,1,'char')
None, #h.offstart = self._fread(fid,1,'float')
None, #h.offstop = self._fread(fid,1,'float')
None, #h.reject = self._fread(fid,1,'char')
None, #h.rejstart = self._fread(fid,1,'float')
None, #h.rejstop = self._fread(fid,1,'float')
None, #h.rejmin = self._fread(fid,1,'float')
None, #h.rejmax = self._fread(fid,1,'float')
None, #h.trigtype = self._fread(fid,1,'char')
None, #h.trigval = self._fread(fid,1,'float')
None, #h.trigchnl = self._fread(fid,1,'char')
None, #h.trigmask = self._fread(fid,1,'short')
None, #h.trigisi = self._fread(fid,1,'float')
None, #h.trigmin = self._fread(fid,1,'float')
None, #h.trigmax = self._fread(fid,1,'float')
None, #h.trigdir = self._fread(fid,1,'char')
None, #h.autoscale = self._fread(fid,1,'char')
None, #h.n2 = self._fread(fid,1,'short')
None, #h.dir = self._fread(fid,1,'char')
None, #h.dispmin = self._fread(fid,1,'float')
None, #h.dispmax = self._fread(fid,1,'float')
None, #h.xmin = self._fread(fid,1,'float')
None, #h.xmax = self._fread(fid,1,'float')
None, #h.automin = self._fread(fid,1,'float')
None, #h.automax = self._fread(fid,1,'float')
None, #h.zmin = self._fread(fid,1,'float')
None, #h.zmax = self._fread(fid,1,'float')
None, #h.lowcut = self._fread(fid,1,'float')
None, #h.highcut = self._fread(fid,1,'float')
None, #h.common = self._fread(fid,1,'char')
None, #h.savemode = self._fread(fid,1,'char')
None, #h.manmode = self._fread(fid,1,'char')
None, #h.ref = self._fread(fid,10,'char')
None, #h.rectify = self._fread(fid,1,'char')
None, #h.displayxmin = self._fread(fid,1,'float')
None, #h.displayxmax = self._fread(fid,1,'float')
None, #h.phase = self._fread(fid,1,'char')
None, #h.screen = self._fread(fid,16,'char')
None, #h.calmode = self._fread(fid,1,'short')
None, #h.calmethod = self._fread(fid,1,'short')
None, #h.calupdate = self._fread(fid,1,'short')
None, #h.calbaseline = self._fread(fid,1,'short')
None, #h.calsweeps = self._fread(fid,1,'short')
None, #h.calattenuator = self._fread(fid,1,'float')
None, #h.calpulsevolt = self._fread(fid,1,'float')
None, #h.calpulsestart = self._fread(fid,1,'float')
None, #h.calpulsestop = self._fread(fid,1,'float')
None, #h.calfreq = self._fread(fid,1,'float')
None, #h.taskfile = self._fread(fid,34,'char')
None, #h.seqfile = self._fread(fid,34,'char')
None, #h.spectmethod = self._fread(fid,1,'char')
None, #h.spectscaling = self._fread(fid,1,'char')
None, #h.spectwindow = self._fread(fid,1,'char')
None, #h.spectwinlength = self._fread(fid,1,'float')
None, #h.spectorder = self._fread(fid,1,'char')
None, #h.notchfilter = self._fread(fid,1,'char')
None, #h.headgain = self._fread(fid,1,'short')
None, #h.additionalfiles = self._fread(fid,1,'int')
None, #h.unused = self._fread(fid,5,'char')
None, #h.fspstopmethod = self._fread(fid,1,'short')
None, #h.fspstopmode = self._fread(fid,1,'short')
None, #h.fspfvalue = self._fread(fid,1,'float')
None, #h.fsppoint = self._fread(fid,1,'short')
None, #h.fspblocksize = self._fread(fid,1,'short')
None, #h.fspp1 = self._fread(fid,1,'ushort')
None, #h.fspp2 = self._fread(fid,1,'ushort')
None, #h.fspalpha = self._fread(fid,1,'float')
None, #h.fspnoise = self._fread(fid,1,'float')
None, #h.fspv1 = self._fread(fid,1,'short')
None, #h.montage = self._fread(fid,40,'char')
None, #h.eventfile = self._fread(fid,40,'char')
None, #h.fratio = self._fread(fid,1,'float')
None, #h.minor_rev = self._fread(fid,1,'char')
None, #h.eegupdate = self._fread(fid,1,'short')
None, #h.compressed = self._fread(fid,1,'char')
None, #h.xscale = self._fread(fid,1,'float')
None, #h.yscale = self._fread(fid,1,'float')
None, #h.xsize = self._fread(fid,1,'float')
None, #h.ysize = self._fread(fid,1,'float')
None, #h.acmode = self._fread(fid,1,'char')
None, #h.commonchnl = self._fread(fid,1,'uchar')
None, #h.xtics = self._fread(fid,1,'char')
None, #h.xrange = self._fread(fid,1,'char')
None, #h.ytics = self._fread(fid,1,'char')
None, #h.yrange = self._fread(fid,1,'char')
None, #h.xscalevalue = self._fread(fid,1,'float')
None, #h.xscaleinterval = self._fread(fid,1,'float')
None, #h.yscalevalue = self._fread(fid,1,'float')
None, #h.yscaleinterval = self._fread(fid,1,'float')
None, #h.scaletoolx1 = self._fread(fid,1,'float')
None, #h.scaletooly1 = self._fread(fid,1,'float')
None, #h.scaletoolx2 = self._fread(fid,1,'float')
None, #h.scaletooly2 = self._fread(fid,1,'float')
None, #h.port = self._fread(fid,1,'short')
b' \x08\x00\x00', #h.numsamples = self._fread(fid,1,'ulong')
None, #h.filterflag = self._fread(fid,1,'char')
None, #h.lowcutoff = self._fread(fid,1,'float')
None, #h.lowpoles = self._fread(fid,1,'short')
None, #h.highcutoff = self._fread(fid,1,'float')
None, #h.highpoles = self._fread(fid,1,'short')
None, #h.filtertype = self._fread(fid,1,'char')
None, #h.filterdomain = self._fread(fid,1,'char')
None, #h.snrflag = self._fread(fid,1,'char')
None, #h.coherenceflag = self._fread(fid,1,'char')
None, #h.continuoustype = self._fread(fid,1,'char')
None, #h.eventtablepos = self._fread(fid,1,'ulong')
None, #h.continuousseconds = self._fread(fid,1,'float')
None, #h.channeloffset = self._fread(fid,1,'long')
None, #h.autocorrectflag = self._fread(fid,1,'char')
None, #h.dcthreshold = self._fread(fid,1,'uchar')
]
|
|
import sys
import Queue
import time
import threading
import collections
import json
import requests
import websocket
# =========================== logging =========================================
import logging
logger = logging.getLogger(__name__)
# =========================== classes =========================================
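# Recursively convert unicode strings inside nested dicts/lists to byte strings
# (Python 2), so JSON-decoded server responses can be handled uniformly.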
def convertToString(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convertToString, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convertToString, data))
else:
return data
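# Daemon thread that calls `func` every `period` seconds (1 s resolution);
# fireNow() forces the next call to happen within roughly one second.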
class DuplexClientPeriodicTimer(threading.Thread):
def __init__(self, period, func):
# store params
self.period = period
self.func = func
# local variables
self.dataLock = threading.RLock()
self.goOn = True
self.currentDelay = 0
# start the thread
threading.Thread.__init__(self)
self.name = 'DuplexClientPeriodicTimer'
self.daemon = True
self.start()
def run(self):
try:
self.currentDelay = 1
while self.goOn:
funcToCall = None
with self.dataLock:
self.currentDelay -= 1
if self.currentDelay == 0:
funcToCall = self.func
self.currentDelay = self.period
if funcToCall:
try:
funcToCall()
except Exception as err:
logger.critical("DuplexClientPeriodicTimer could not call {0}: {1}".format(funcToCall,err))
time.sleep(1)
except Exception as err:
logger.critical("DuplexClientPeriodicTimer crashed: {0}".format(err))
def fireNow(self):
with self.dataLock:
self.currentDelay = 1
def close(self):
self.goOn = False
class DuplexClient(object):
def __init__(self, kwargs):
self.from_server_cb = kwargs['from_server_cb']
self.id = kwargs['id']
self.token = kwargs['token']
@classmethod
def from_url(cls, **kwargs):
server_url = kwargs['server_url']
if server_url.startswith('http'):
return DuplexClientHttp(kwargs)
elif server_url.startswith('ws'):
return DuplexClientWs(kwargs)
else:
            raise SystemError("unsupported server_url scheme: {0}".format(server_url))
def to_server(self, o):
raise NotImplementedError()
def getStatus(self):
raise NotImplementedError()
def disconnect(self):
raise NotImplementedError()
class DuplexClientHttp(DuplexClient):
    MAXQUEUESIZE = 100
def __init__(self, kwargs):
"""
The following kwargs MUST be present:
- the required kwargs for the DuplexClient class
- 'server_url' URL of the server, e.g. "http://127.0.0.1:8080/api/v1/o.json"
- 'polling_period' the period (in seconds) with which the DuplexClientHttp instance polls the server
        - 'buffer_tx' a boolean. If True, objects passed through the to_server() method are buffered until the next polling period; if False, each call triggers an immediate poll.
"""
# store params
self.server_url = kwargs['server_url']
self.polling_period = kwargs['polling_period']
self.buffer_tx = kwargs['buffer_tx']
# local variables
self.dataLock = threading.RLock()
        self.toserverqueue = Queue.Queue(maxsize=self.MAXQUEUESIZE)
self.is_connected = False
self.lastheard_ts = None
self._set_is_connected(False)
# initialize parent
DuplexClient.__init__(self, kwargs)
# start periodic timer, and fire now
self.periodicTimer = DuplexClientPeriodicTimer(
period = self.polling_period,
func = self._poll_server,
)
self.periodicTimer.fireNow()
# ======================= public ==========================================
def to_server(self, o):
"""
'o' contains a single object ['b','yttywetywe']
"""
assert type(o) == str
# add to queue
self.toserverqueue.put(o, block=False)
# send now, if appropriate
if self.buffer_tx==False:
self.periodicTimer.fireNow()
def getStatus(self):
returnVal = {}
with self.dataLock:
returnVal['connectionmode'] = 'http_polling'
returnVal['is_connected'] = self.is_connected
returnVal['lastheard_ts'] = self.lastheard_ts
if self.lastheard_ts:
returnVal['lastheard_since'] = time.time()-self.lastheard_ts
returnVal['toserverqueue_fill'] = self.toserverqueue.qsize()
return returnVal
def disconnect(self):
        self.periodicTimer.close()
# ======================= private =========================================
def _poll_server(self, o=None):
"""
Send one HTTP POST request to server to
(1) send the elements from toserverqueue to the server
(2) receive the objects from the server and rearm.
"""
# send objects
try:
# create HTTP body
body = {
'id': self.id,
'token': self.token,
'ttl': self.polling_period+3,
}
# objects
o = []
while True:
try:
e = self.toserverqueue.get(block=False)
except Queue.Empty:
break
else:
o += [e]
if o:
body['o'] = o
# send to server
r = requests.post(
self.server_url,
json = body,
).json()
r = convertToString(r)
except requests.exceptions.ConnectionError as err:
self._set_is_connected(False)
logger.error(err)
except Exception as err:
self._set_is_connected(False)
logger.error(err)
else:
self._set_is_connected(True)
if r['o']:
self.from_server_cb(r['o'])
def _set_is_connected(self,newstate):
with self.dataLock:
self.is_connected = newstate
if self.is_connected:
self.lastheard_ts = time.time()
class DuplexClientWs(DuplexClient):
def __init__(self, kwargs):
# store params
self.server_url = kwargs['server_url']
# initialize parent
DuplexClient.__init__(self, kwargs)
# start Websocket
self.ws = websocket.WebSocketApp(self.server_url,
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
)
self.ws.on_open = self.on_open
wst = threading.Thread(target=self.ws.run_forever)
wst.daemon = True
wst.start()
def on_message(self, ws, message):
        # the server pushes a JSON-encoded body; decode it before looking at 'o'
        message = convertToString(json.loads(message))
        if message.get('o'):
            self.from_server_cb(message['o'])
def on_error(self, ws, error):
self.from_server_cb(error)
def on_close(self, ws):
logger.debug("Websocket closed for id: {0}.".format(self.id))
def on_open(self, ws):
logger.debug("Websocket open for id: {0}.".format(self.id))
def to_server(self, o):
body = {
'id': self.id,
'token': self.token,
'o': [o]
}
self.ws.send(json.dumps(body))
def getStatus(self):
raise NotImplementedError()
def disconnect(self):
self.ws.close()
# =========================== main ============================================
class CliDuplexClient(object):
def __init__(self):
# create server
self.duplexClient = DuplexClient.from_url(
server_url = 'http://api-dev.solsystem.io/api/v1/o.json',
id = 'testmanager',
token = '{"org":"lmayz4","token":"255L*XFQX?8ETDyBVs"}',
polling_period = 5,
buffer_tx = True,
from_server_cb = self.from_server_cb,
)
'''
self.duplexClient = DuplexClient.from_url(
server_url = 'ws://127.0.0.1:8080/api/v1/ws',
id = id,
token = 'mytoken',
from_server_cb = self.from_server_cb,
)
'''
# cli
self.cli = DustCli.DustCli(
'CliDuplexClient',
self._clihandle_quit,
)
self.cli.registerCommand(
name = 'tx',
alias = 'tx',
description = 'transmit to server',
params = ['msg'],
callback = self._clihandle_tx,
)
self.cli.registerCommand(
name = 'status',
alias = 'u',
description = 'check status',
params = [],
callback = self._clihandle_status,
)
def _clihandle_tx(self, params):
msg = params[0]
self.duplexClient.to_server([{'msg': msg}])
def _clihandle_status(self, params):
pp.pprint(self.duplexClient.getStatus())
def from_server_cb(self, o):
logger.debug('from server: {0}'.format(o))
def _clihandle_quit(self):
time.sleep(.3)
print "bye bye."
sys.exit(0)
def main():
client = CliDuplexClient()
if __name__ == "__main__":
import random
import string
import DustCli
import pprint
pp = pprint.PrettyPrinter(indent=4)
main()
|
|
# pylint:disable=missing-class-docstring,unused-argument
from collections import defaultdict
from typing import Optional, Any, Dict, TYPE_CHECKING
import ailment
from ailment import Expression, Block
from ailment.statement import Statement
from ..ailblock_walker import AILBlockWalker
from ..sequence_walker import SequenceWalker
from ..structurer_nodes import ConditionNode, ConditionalBreakNode, LoopNode
if TYPE_CHECKING:
from angr.sim_variable import SimVariable
from angr.knowledge_plugins.variables.variable_manager import VariableManagerInternal
class LocationBase:
__slots__ = ()
class StatementLocation(LocationBase):
__slots__ = ('block_addr', 'block_idx', 'stmt_idx', )
def __init__(self, block_addr, block_idx, stmt_idx):
self.block_addr = block_addr
self.block_idx = block_idx
self.stmt_idx = stmt_idx
def __repr__(self):
return f"Loc: Statement@{self.block_addr:x}.{self.block_idx}-{self.stmt_idx}"
class ExpressionLocation(LocationBase):
__slots__ = ('block_addr', 'block_idx', 'stmt_idx', 'expr_idx', )
def __init__(self, block_addr, block_idx, stmt_idx, expr_idx):
self.block_addr = block_addr
self.block_idx = block_idx
self.stmt_idx = stmt_idx
self.expr_idx = expr_idx
def __repr__(self):
return f"Loc: Expression@{self.block_addr:x}.{self.block_idx}-{self.stmt_idx}[{self.expr_idx}]"
class ConditionLocation(LocationBase):
__slots__ = ('node_addr', )
def __init__(self, cond_node_addr):
self.node_addr = cond_node_addr
def __repr__(self):
return f"Loc: ConditionNode@{self.node_addr:x}"
class ConditionalBreakLocation(LocationBase):
__slots__ = ('node_addr', )
def __init__(self, node_addr):
self.node_addr = node_addr
def __repr__(self):
return f"Loc: ConditionalBreakNode@{self.node_addr:x}"
class ExpressionUseFinder(AILBlockWalker):
def __init__(self):
super().__init__()
self.uses = defaultdict(set)
def _handle_expr(self, expr_idx: int, expr: Expression, stmt_idx: int, stmt: Optional[Statement],
block: Optional[Block]) -> Any:
if isinstance(expr, ailment.Register) and expr.variable is not None:
if not (isinstance(stmt, ailment.Stmt.Assignment) and stmt.dst is expr):
if block is not None:
self.uses[expr.variable].add((expr, ExpressionLocation(block.addr, block.idx, stmt_idx, expr_idx)))
else:
self.uses[expr.variable].add((expr, None))
return None
return super()._handle_expr(expr_idx, expr, stmt_idx, stmt, block)
class ExpressionCounter(SequenceWalker):
"""
    Collect every assignment to and every use of each variable, so that
    expressions assigned once and used only once can be identified.
"""
def __init__(self, node, variable_manager):
handlers = {
ConditionalBreakNode: self._handle_ConditionalBreak,
ConditionNode: self._handle_Condition,
LoopNode: self._handle_Loop,
ailment.Block: self._handle_Block,
}
self.assignments = defaultdict(set)
self.uses = { }
        self._variable_manager: 'VariableManagerInternal' = variable_manager
super().__init__(handlers)
self.walk(node)
def _u(self, v) -> Optional['SimVariable']:
"""
Get unified variable for a given variable.
"""
        return self._variable_manager.unified_variable(v)
def _handle_Block(self, node: ailment.Block, **kwargs):
# find assignments
for idx, stmt in enumerate(node.statements):
if isinstance(stmt, ailment.Stmt.Assignment):
if isinstance(stmt.dst, ailment.Expr.Register) and stmt.dst.variable is not None:
u = self._u(stmt.dst.variable)
if u is not None:
# dependency
dependency_finder = ExpressionUseFinder()
dependency_finder.walk_expression(stmt.src)
dependencies = set(self._u(v) for v in dependency_finder.uses)
self.assignments[u].add((stmt.src,
tuple(dependencies),
StatementLocation(node.addr, node.idx, idx)))
if (isinstance(stmt, ailment.Stmt.Call)
and isinstance(stmt.ret_expr, ailment.Expr.Register)
and stmt.ret_expr.variable is not None):
u = self._u(stmt.ret_expr.variable)
if u is not None:
dependency_finder = ExpressionUseFinder()
dependency_finder.walk_expression(stmt)
dependencies = set(self._u(v) for v in dependency_finder.uses)
self.assignments[u].add((stmt,
tuple(dependencies),
StatementLocation(node.addr, node.idx, idx)))
# walk the block and find uses of variables
use_finder = ExpressionUseFinder()
use_finder.walk(node)
for v, content in use_finder.uses.items():
u = self._u(v)
if u is not None:
if u not in self.uses:
self.uses[u] = set()
self.uses[u] |= content
def _collect_uses(self, expr: Expression, loc: LocationBase):
use_finder = ExpressionUseFinder()
use_finder.walk_expression(expr, stmt_idx=-1)
for var, uses in use_finder.uses.items():
u = self._u(var)
if u is None:
continue
for use in uses:
if u not in self.uses:
self.uses[u] = set()
self.uses[u].add((use[0], loc))
def _handle_ConditionalBreak(self, node: ConditionalBreakNode, **kwargs):
# collect uses on the condition expression
self._collect_uses(node.condition, ConditionalBreakLocation(node.addr))
return super()._handle_ConditionalBreak(node, **kwargs)
def _handle_Condition(self, node: ConditionNode, **kwargs):
# collect uses on the condition expression
self._collect_uses(node.condition, ConditionLocation(node.addr))
return super()._handle_Condition(node, **kwargs)
def _handle_Loop(self, node: LoopNode, **kwargs):
# collect uses on the condition expression
if node.initializer is not None:
self._collect_uses(node.initializer, ConditionLocation(node.addr))
if node.iterator is not None:
self._collect_uses(node.iterator, ConditionLocation(node.addr))
if node.condition is not None:
self._collect_uses(node.condition, ConditionLocation(node.addr))
return super()._handle_Loop(node, **kwargs)
class ExpressionReplacer(AILBlockWalker):
def __init__(self, assignments: Dict, uses: Dict):
super().__init__()
self._assignments = assignments
self._uses = uses
def _handle_expr(self, expr_idx: int, expr: Expression, stmt_idx: int, stmt: Optional[Statement],
block: Optional[Block]) -> Any:
if isinstance(expr, ailment.Register) and expr.variable is not None and expr.variable in self._uses:
replace_with, _ = self._assignments[expr.variable]
return replace_with
return super()._handle_expr(expr_idx, expr, stmt_idx, stmt, block)
class ExpressionFolder(SequenceWalker):
def __init__(self, assignments: Dict, uses: Dict, node):
handlers = {
ailment.Block: self._handle_Block,
ConditionNode: self._handle_Condition,
ConditionalBreakNode: self._handle_ConditionalBreak,
}
super().__init__(handlers)
self._assignments = assignments
self._uses = uses
self.walk(node)
def _handle_Block(self, node: ailment.Block, **kwargs):
# Walk the block to remove each assignment and replace uses of each variable
new_stmts = [ ]
for stmt in node.statements:
if isinstance(stmt, ailment.Stmt.Assignment):
if isinstance(stmt.dst, ailment.Expr.Register) and stmt.dst.variable is not None:
if stmt.dst.variable in self._assignments:
# remove this statement
continue
if (isinstance(stmt, ailment.Stmt.Call)
and isinstance(stmt.ret_expr, ailment.Expr.Register)
and stmt.ret_expr.variable is not None
and stmt.ret_expr.variable in self._assignments):
# remove this statement
continue
new_stmts.append(stmt)
node.statements = new_stmts
# Walk the block to replace the use of each variable
replacer = ExpressionReplacer(self._assignments, self._uses)
replacer.walk(node)
def _handle_ConditionalBreak(self, node: ConditionalBreakNode, **kwargs):
replacer = ExpressionReplacer(self._assignments, self._uses)
r = replacer.walk_expression(node.condition)
if r is not None and r is not node.condition:
node.condition = r
return super()._handle_ConditionalBreak(node, **kwargs)
def _handle_Condition(self, node: ConditionNode, **kwargs):
replacer = ExpressionReplacer(self._assignments, self._uses)
r = replacer.walk_expression(node.condition)
if r is not None and r is not node.condition:
node.condition = r
return super()._handle_Condition(node, **kwargs)
def _handle_Loop(self, node: LoopNode, **kwargs):
replacer = ExpressionReplacer(self._assignments, self._uses)
# iterator
if node.iterator is not None:
r = replacer.walk_expression(node.iterator)
if r is not None and r is not node.iterator:
node.iterator = r
# initializer
if node.initializer is not None:
r = replacer.walk_expression(node.initializer)
if r is not None and r is not node.initializer:
node.initializer = r
# condition
if node.condition is not None:
r = replacer.walk_expression(node.condition)
if r is not None and r is not node.condition:
node.condition = r
return super()._handle_Loop(node, **kwargs)
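# Hypothetical driver sketch (not part of this module): the intended flow is to
# count definitions and uses with ExpressionCounter, keep only variables that
# are assigned exactly once and used exactly once, then fold those single
# definitions into their use sites with ExpressionFolder. The real caller may
# filter differently; the shapes below simply match what ExpressionReplacer
# unpacks (a (source expression, location) pair per variable).
#
#     counter = ExpressionCounter(seq_node, variable_manager)
#     foldable = {}
#     for var, defs in counter.assignments.items():
#         if len(defs) == 1 and len(counter.uses.get(var, ())) == 1:
#             src, _deps, loc = next(iter(defs))
#             foldable[var] = (src, loc)
#     ExpressionFolder(foldable, counter.uses, seq_node)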
|
|
# pylint: disable=too-many-lines,redefined-outer-name,redefined-builtin
from asyncio import Future
from typing import (
Any,
Callable,
Iterable,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
from . import abc, typing
from ._version import __version__
from .internal.utils import alias
from .notification import Notification
from .observable import ConnectableObservable, GroupedObservable, Observable
from .observer import Observer
from .pipe import compose, pipe
from .subject import Subject
_T = TypeVar("_T")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_TKey = TypeVar("_TKey")
_TState = TypeVar("_TState")
_A = TypeVar("_A")
_B = TypeVar("_B")
_C = TypeVar("_C")
_D = TypeVar("_D")
_E = TypeVar("_E")
_F = TypeVar("_F")
_G = TypeVar("_G")
def amb(*sources: Observable[_T]) -> Observable[_T]:
"""Propagates the observable sequence that emits first.
.. marble::
:alt: amb
---8--6--9-----------|
--1--2--3---5--------|
----------10-20-30---|
[ amb() ]
--1--2--3---5--------|
Example:
>>> winner = reactivex.amb(xs, ys, zs)
Args:
sources: Sequence of observables to monitor for first emission.
Returns:
An observable sequence that surfaces any of the given sequences,
whichever emitted the first element.
"""
from .observable.amb import amb_ as amb_
return amb_(*sources)
def case(
mapper: Callable[[], _TKey],
sources: Mapping[_TKey, Observable[_T]],
default_source: Optional[Union[Observable[_T], "Future[_T]"]] = None,
) -> Observable[_T]:
"""Uses mapper to determine which source in sources to use.
.. marble::
:alt: case
--1---------------|
a--1--2--3--4--|
b--10-20-30---|
[case(mapper, { 1: a, 2: b })]
---1--2--3--4--|
Examples:
>>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 })
>>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 }, obs0)
Args:
        mapper: The function which extracts the value to test in a
            case statement.
sources: An object which has keys which correspond to the case
statement labels.
default_source: [Optional] The observable sequence or Future that will
be run if the sources are not matched. If this is not provided,
it defaults to :func:`empty`.
Returns:
An observable sequence which is determined by a case statement.
"""
from .observable.case import case_
return case_(mapper, sources, default_source)
def catch(*sources: Observable[_T]) -> Observable[_T]:
"""Continues observable sequences which are terminated with an
exception by switching over to the next observable sequence.
.. marble::
:alt: catch
---1---2---3-*
a-7-8-|
[ catch(a) ]
---1---2---3---7-8-|
Examples:
>>> res = reactivex.catch(xs, ys, zs)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing elements from consecutive observables
from the sequence of sources until one of them terminates successfully.
"""
from .observable.catch import catch_with_iterable_
return catch_with_iterable_(sources)
def catch_with_iterable(sources: Iterable[Observable[_T]]) -> Observable[_T]:
"""Continues observable sequences that are terminated with an
exception by switching over to the next observable sequence.
.. marble::
:alt: catch
---1---2---3-*
a-7-8-|
[ catch(a) ]
---1---2---3---7-8-|
Examples:
>>> res = reactivex.catch([xs, ys, zs])
>>> res = reactivex.catch(src for src in [xs, ys, zs])
Args:
sources: An Iterable of observables; thus, a generator can also
be used here.
Returns:
An observable sequence containing elements from consecutive observables
from the sequence of sources until one of them terminates successfully.
"""
from .observable.catch import catch_with_iterable_
return catch_with_iterable_(sources)
def create(subscribe: typing.Subscription[_T]) -> Observable[_T]:
"""Creates an observable sequence object from the specified
subscription function.
.. marble::
:alt: create
[ create(a) ]
---1---2---3---4---|
Args:
subscribe: Subscription function.
Returns:
An observable sequence that can be subscribed to via the given
subscription function.
"""
return Observable(subscribe)
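# A minimal usage sketch for create() (an assumed example, not part of the API
# reference): the subscription function receives an observer and an optional
# scheduler, pushes values, and returns a disposable used for teardown.
#
#     from reactivex.disposable import Disposable
#
#     def subscribe(observer, scheduler=None):
#         observer.on_next(1)
#         observer.on_next(2)
#         observer.on_completed()
#         return Disposable()
#
#     create(subscribe).subscribe(print)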
@overload
def combine_latest(
__a: Observable[_A], __b: Observable[_B]
) -> Observable[Tuple[_A, _B]]:
...
@overload
def combine_latest(
__a: Observable[_A], __b: Observable[_B], __c: Observable[_C]
) -> Observable[Tuple[_A, _B, _C]]:
...
@overload
def combine_latest(
__a: Observable[_A], __b: Observable[_B], __c: Observable[_C], __d: Observable[_D]
) -> Observable[Tuple[_A, _B, _C, _D]]:
...
def combine_latest(*__sources: Observable[Any]) -> Observable[Any]:
"""Merges the specified observable sequences into one observable
sequence by creating a tuple whenever any of the observable
sequences emits an element.
.. marble::
:alt: combine_latest
---a-----b--c------|
--1---2--------3---|
[ combine_latest() ]
---a1-a2-b2-c2-c3--|
Examples:
>>> obs = rx.combine_latest(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing the result of combining elements from
each source in given sequence.
"""
from .observable.combinelatest import combine_latest_
return combine_latest_(*__sources)
def concat(*sources: Observable[_T]) -> Observable[_T]:
"""Concatenates all of the specified observable sequences.
.. marble::
:alt: concat
---1--2--3--|
--6--8--|
[ concat() ]
---1--2--3----6--8-|
Examples:
>>> res = reactivex.concat(xs, ys, zs)
Args:
sources: Sequence of observables.
Returns:
An observable sequence that contains the elements of each source in
the given sequence, in sequential order.
"""
from .observable.concat import concat_with_iterable_
return concat_with_iterable_(sources)
def concat_with_iterable(sources: Iterable[Observable[_T]]) -> Observable[_T]:
"""Concatenates all of the specified observable sequences.
.. marble::
:alt: concat
---1--2--3--|
--6--8--|
[ concat() ]
---1--2--3----6--8-|
Examples:
>>> res = reactivex.concat_with_iterable([xs, ys, zs])
        >>> res = reactivex.concat_with_iterable(src for src in [xs, ys, zs])
Args:
sources: An Iterable of observables; thus, a generator can also
be used here.
Returns:
An observable sequence that contains the elements of each given
sequence, in sequential order.
"""
from .observable.concat import concat_with_iterable_
return concat_with_iterable_(sources)
def defer(
factory: Callable[[abc.SchedulerBase], Union[Observable[_T], "Future[_T]"]]
) -> Observable[_T]:
"""Returns an observable sequence that invokes the specified
factory function whenever a new observer subscribes.
.. marble::
:alt: defer
[ defer(1,2,3) ]
---1--2--3--|
---1--2--3--|
Example:
>>> res = reactivex.defer(lambda scheduler: of(1, 2, 3))
Args:
factory: Observable factory function to invoke for each observer
which invokes :func:`subscribe()
<reactivex.Observable.subscribe>` on the resulting sequence.
The factory takes a single argument, the scheduler used.
Returns:
An observable sequence whose observers trigger an invocation
of the given factory function.
"""
from .observable.defer import defer_
return defer_(factory)
def empty(scheduler: Optional[abc.SchedulerBase] = None) -> Observable[Any]:
"""Returns an empty observable sequence.
.. marble::
:alt: empty
[ empty() ]
--|
Example:
>>> obs = reactivex.empty()
Args:
scheduler: [Optional] Scheduler instance to send the termination call
on. By default, this will use an instance of
:class:`ImmediateScheduler <reactivex.scheduler.ImmediateScheduler>`.
Returns:
An observable sequence with no elements.
"""
from .observable.empty import empty_
return empty_(scheduler)
def for_in(
values: Iterable[_T1], mapper: typing.Mapper[_T1, Observable[_T2]]
) -> Observable[_T2]:
"""Concatenates the observable sequences obtained by running the
specified result mapper for each element in the specified values.
.. marble::
:alt: for_in
a--1--2-|
b--10--20-|
[for_in((a, b), lambda i: i+1)]
---2--3--11--21-|
Note:
This is just a wrapper for
:func:`reactivex.concat(map(mapper, values)) <reactivex.concat>`
Args:
values: An Iterable of values to turn into an observable
source.
mapper: A function to apply to each item in the values list to turn
it into an observable sequence; this should return instances of
:class:`reactivex.Observable`.
Returns:
An observable sequence from the concatenated observable
sequences.
"""
mapped: Iterable[Observable[_T2]] = map(mapper, values)
return concat_with_iterable(mapped)
@overload
def fork_join(__a: Observable[_A], __b: Observable[_B]) -> Observable[Tuple[_A, _B]]:
...
@overload
def fork_join(
__a: Observable[_A], __b: Observable[_B], __c: Observable[_C]
) -> Observable[Tuple[_A, _B, _C]]:
...
@overload
def fork_join(
__a: Observable[_A], __b: Observable[_B], __c: Observable[_C], __d: Observable[_D]
) -> Observable[Tuple[_A, _B, _C, _D]]:
...
@overload
def fork_join(
__a: Observable[_A],
__b: Observable[_B],
__c: Observable[_C],
__d: Observable[_D],
__e: Observable[_E],
) -> Observable[Tuple[_A, _B, _C, _D, _E]]:
...
def fork_join(*sources: Observable[Any]) -> Observable[Any]:
"""Wait for observables to complete and then combine last values
    they emitted into a tuple. Whenever any of those observables completes
    without emitting any value, the result sequence will complete at that moment as well.
.. marble::
:alt: fork_join
---a-----b--c---d-|
--1---2------3-4---|
-a---------b---|
[ fork_join() ]
--------------------d4b|
Examples:
>>> obs = reactivex.fork_join(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing the result of combining last element from
each source in given sequence.
"""
from .observable.forkjoin import fork_join_
return fork_join_(*sources)
def from_callable(
supplier: Callable[[], _T], scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[_T]:
"""Returns an observable sequence that contains a single element generated
by the given supplier, using the specified scheduler to send out observer
messages.
.. marble::
:alt: from_callable
[ from_callable() ]
--1--|
Examples:
>>> res = reactivex.from_callable(lambda: calculate_value())
>>> res = reactivex.from_callable(lambda: 1 / 0) # emits an error
Args:
supplier: Function which is invoked to obtain the single element.
scheduler: [Optional] Scheduler instance to schedule the values on.
If not specified, the default is to use an instance of
:class:`CurrentThreadScheduler
<reactivex.scheduler.CurrentThreadScheduler>`.
Returns:
An observable sequence containing the single element obtained by
invoking the given supplier function.
"""
from .observable.returnvalue import from_callable_
return from_callable_(supplier, scheduler)
def from_callback(
func: Callable[..., Callable[..., None]],
mapper: Optional[typing.Mapper[Any, Any]] = None,
) -> Callable[[], Observable[Any]]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on
next.
Returns:
        A function that, when executed with the required arguments minus
        the callback, produces an Observable sequence emitting a single
        value: the arguments passed to the callback, as a list.
"""
from .observable.fromcallback import from_callback_
return from_callback_(func, mapper)
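# A minimal usage sketch for from_callback() (an assumed example): wrap a
# function whose last argument is a callback, then call the wrapper with the
# remaining arguments to get an observable that emits what the callback
# receives.
#
#     def add_async(x, y, callback):
#         callback(x + y)
#
#     add = from_callback(add_async)
#     add(2, 3).subscribe(print)  # emits the value passed to the callback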
def from_future(future: "Future[_T]") -> Observable[_T]:
"""Converts a Future to an Observable sequence
.. marble::
:alt: from_future
[ from_future() ]
------1|
Args:
future: A Python 3 compatible future.
https://docs.python.org/3/library/asyncio-task.html#future
Returns:
An observable sequence which wraps the existing future success
and failure.
"""
from .observable.fromfuture import from_future_
return from_future_(future)
def from_iterable(
iterable: Iterable[_T], scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[_T]:
"""Converts an iterable to an observable sequence.
.. marble::
:alt: from_iterable
[ from_iterable(1,2,3) ]
---1--2--3--|
Example:
>>> reactivex.from_iterable([1,2,3])
Args:
iterable: An Iterable to change into an observable sequence.
scheduler: [Optional] Scheduler instance to schedule the values on.
If not specified, the default is to use an instance of
:class:`CurrentThreadScheduler
<reactivex.scheduler.CurrentThreadScheduler>`.
Returns:
The observable sequence whose elements are pulled from the
given iterable sequence.
"""
from .observable.fromiterable import from_iterable_ as from_iterable_
return from_iterable_(iterable, scheduler)
from_ = alias("from_", "Alias for :func:`reactivex.from_iterable`.", from_iterable)
from_list = alias(
"from_list", "Alias for :func:`reactivex.from_iterable`.", from_iterable
)
def from_marbles(
string: str,
timespan: typing.RelativeTime = 0.1,
scheduler: Optional[abc.SchedulerBase] = None,
lookup: Optional[Mapping[Union[str, float], Any]] = None,
error: Optional[Exception] = None,
) -> Observable[Any]:
"""Convert a marble diagram string to a cold observable sequence, using
an optional scheduler to enumerate the events.
.. marble::
:alt: from_marbles
[ from_marbles(-1-2-3-) ]
-1-2-3-|
Each character in the string will advance time by timespan
(except for space). Characters that are not special (see the table below)
will be interpreted as a value to be emitted. Numbers will be cast
to int or float.
Special characters:
+------------+--------------------------------------------------------+
| :code:`-` | advance time by timespan |
+------------+--------------------------------------------------------+
| :code:`#` | on_error() |
+------------+--------------------------------------------------------+
| :code:`|` | on_completed() |
+------------+--------------------------------------------------------+
| :code:`(` | open a group of marbles sharing the same timestamp |
+------------+--------------------------------------------------------+
| :code:`)` | close a group of marbles |
+------------+--------------------------------------------------------+
| :code:`,` | separate elements in a group |
+------------+--------------------------------------------------------+
| <space> | used to align multiple diagrams, does not advance time |
+------------+--------------------------------------------------------+
In a group of elements, the position of the initial :code:`(` determines the
timestamp at which grouped elements will be emitted.
E.g. :code:`--(12,3,4)--` will emit 12, 3, 4 at 2 * timespan and then
advance virtual time by 8 * timespan.
Examples:
>>> from_marbles('--1--(2,3)-4--|')
>>> from_marbles('a--b--c-', lookup={'a': 1, 'b': 2, 'c': 3})
>>> from_marbles('a--b---#', error=ValueError('foo'))
Args:
string: String with marble diagram
timespan: [Optional] Duration of each character in seconds.
If not specified, defaults to :code:`0.1`.
        scheduler: [Optional] Scheduler to run the input sequence
            on. If not specified, defaults to the subscribe scheduler
            if defined, else to an instance of
            :class:`NewThreadScheduler <reactivex.scheduler.NewThreadScheduler>`.
lookup: [Optional] A dict used to convert an element into a specified
value. If not specified, defaults to :code:`{}`.
        error: [Optional] Exception that will be used in place of the :code:`#`
symbol. If not specified, defaults to :code:`Exception('error')`.
Returns:
The observable sequence whose elements are pulled from the
given marble diagram string.
"""
from .observable.marbles import from_marbles as _from_marbles
return _from_marbles(
string, timespan, lookup=lookup, error=error, scheduler=scheduler
)
cold = alias("cold", "Alias for :func:`reactivex.from_marbles`.", from_marbles)
def generate_with_relative_time(
initial_state: _TState,
condition: typing.Predicate[_TState],
iterate: typing.Mapper[_TState, _TState],
time_mapper: Callable[[_TState], typing.RelativeTime],
) -> Observable[_TState]:
"""Generates an observable sequence by iterating a state from an
initial state until the condition fails.
.. marble::
:alt: generate_with_relative_time
[generate_with_relative_time()]
-1-2-3-4-|
Example:
>>> res = reactivex.generate_with_relative_time(
0, lambda x: True, lambda x: x + 1, lambda x: 0.5
)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
:code:`False`).
iterate: Iteration step function.
time_mapper: Time mapper function to control the speed of
values being produced each iteration, returning relative times, i.e.
either a :class:`float` denoting seconds, or an instance of
:class:`timedelta`.
Returns:
The generated sequence.
"""
from .observable.generatewithrelativetime import generate_with_relative_time_
return generate_with_relative_time_(initial_state, condition, iterate, time_mapper)
def generate(
initial_state: _TState,
condition: typing.Predicate[_TState],
iterate: typing.Mapper[_TState, _TState],
) -> Observable[_TState]:
"""Generates an observable sequence by running a state-driven loop
producing the sequence's elements.
.. marble::
:alt: generate
[ generate() ]
-1-2-3-4-|
Example:
>>> res = reactivex.generate(0, lambda x: x < 10, lambda x: x + 1)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
:code:`False`).
iterate: Iteration step function.
Returns:
The generated sequence.
"""
from .observable.generate import generate_
return generate_(initial_state, condition, iterate)
def hot(
string: str,
timespan: typing.RelativeTime = 0.1,
duetime: typing.AbsoluteOrRelativeTime = 0.0,
scheduler: Optional[abc.SchedulerBase] = None,
lookup: Optional[Mapping[Union[str, float], Any]] = None,
error: Optional[Exception] = None,
) -> Observable[Any]:
"""Convert a marble diagram string to a hot observable sequence, using
an optional scheduler to enumerate the events.
.. marble::
:alt: hot
[ from_marbles(-1-2-3-) ]
-1-2-3-|
-2-3-|
Each character in the string will advance time by timespan
(except for space). Characters that are not special (see the table below)
will be interpreted as a value to be emitted. Numbers will be cast
to int or float.
Special characters:
+------------+--------------------------------------------------------+
| :code:`-` | advance time by timespan |
+------------+--------------------------------------------------------+
| :code:`#` | on_error() |
+------------+--------------------------------------------------------+
| :code:`|` | on_completed() |
+------------+--------------------------------------------------------+
| :code:`(` | open a group of elements sharing the same timestamp |
+------------+--------------------------------------------------------+
| :code:`)` | close a group of elements |
+------------+--------------------------------------------------------+
| :code:`,` | separate elements in a group |
+------------+--------------------------------------------------------+
| <space> | used to align multiple diagrams, does not advance time |
+------------+--------------------------------------------------------+
In a group of elements, the position of the initial :code:`(` determines the
timestamp at which grouped elements will be emitted.
E.g. :code:`--(12,3,4)--` will emit 12, 3, 4 at 2 * timespan and then
advance virtual time by 8 * timespan.
Examples:
>>> hot("--1--(2,3)-4--|")
>>> hot("a--b--c-", lookup={'a': 1, 'b': 2, 'c': 3})
>>> hot("a--b---#", error=ValueError("foo"))
Args:
string: String with marble diagram
timespan: [Optional] Duration of each character in seconds.
If not specified, defaults to :code:`0.1`.
duetime: [Optional] Absolute datetime or timedelta from now that
determines when to start the emission of elements.
        scheduler: [Optional] Scheduler to run the input sequence
on. If not specified, defaults to an instance of
:class:`NewThreadScheduler <reactivex.scheduler.NewThreadScheduler>`.
lookup: [Optional] A dict used to convert an element into a specified
value. If not specified, defaults to :code:`{}`.
        error: [Optional] Exception that will be used in place of the :code:`#`
symbol. If not specified, defaults to :code:`Exception('error')`.
Returns:
The observable sequence whose elements are pulled from the
given marble diagram string.
"""
from .observable.marbles import hot as _hot
return _hot(
string, timespan, duetime, lookup=lookup, error=error, scheduler=scheduler
)
def if_then(
condition: Callable[[], bool],
then_source: Union[Observable[_T], "Future[_T]"],
else_source: Union[None, Observable[_T], "Future[_T]"] = None,
) -> Observable[_T]:
"""Determines whether an observable collection contains values.
.. marble::
:alt: if_then
---1--2--3--|
--6--8--|
[ if_then() ]
---1--2--3--|
Examples:
>>> res = reactivex.if_then(condition, obs1)
>>> res = reactivex.if_then(condition, obs1, obs2)
Args:
condition: The condition which determines if the then_source or
else_source will be run.
then_source: The observable sequence or :class:`Future` that
will be run if the condition function returns :code:`True`.
else_source: [Optional] The observable sequence or :class:`Future`
that will be run if the condition function returns :code:`False`.
If this is not provided, it defaults to :func:`empty() <reactivex.empty>`.
Returns:
An observable sequence which is either the then_source or
else_source.
"""
from .observable.ifthen import if_then_
return if_then_(condition, then_source, else_source)
def interval(
period: typing.RelativeTime, scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[int]:
"""Returns an observable sequence that produces a value after each period.
.. marble::
:alt: interval
[ interval() ]
---1---2---3---4--->
Example:
>>> res = reactivex.interval(1.0)
Args:
period: Period for producing the values in the resulting sequence
(specified as a :class:`float` denoting seconds or an instance of
:class:`timedelta`).
scheduler: Scheduler to run the interval on. If not specified, an
instance of :class:`TimeoutScheduler <reactivex.scheduler.TimeoutScheduler>`
is used.
Returns:
An observable sequence that produces a value after each period.
"""
from .observable.interval import interval_
return interval_(period, scheduler)
def merge(*sources: Observable[Any]) -> Observable[Any]:
"""Merges all the observable sequences into a single observable sequence.
.. marble::
:alt: merge
---1---2---3---4-|
-a---b---c---d--|
[ merge() ]
-a-1-b-2-c-3-d-4-|
Example:
>>> res = reactivex.merge(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
The observable sequence that merges the elements of the
observable sequences.
"""
from .observable.merge import merge_
return merge_(*sources)
def never() -> Observable[Any]:
"""Returns a non-terminating observable sequence, which can be used
to denote an infinite duration (e.g. when using reactive joins).
.. marble::
:alt: never
[ never() ]
-->
Returns:
An observable sequence whose observers will never get called.
"""
from .observable.never import never_
return never_()
def of(*args: _T) -> Observable[_T]:
"""This method creates a new observable sequence whose elements are taken
from the arguments.
.. marble::
:alt: of
[ of(1,2,3) ]
---1--2--3--|
Note:
This is just a wrapper for
:func:`reactivex.from_iterable(args) <reactivex.from_iterable>`
Example:
>>> res = reactivex.of(1,2,3)
Args:
        args: The variable number of elements to emit from the observable.
Returns:
The observable sequence whose elements are pulled from the
given arguments
"""
return from_iterable(args)
def on_error_resume_next(
*sources: Union[
Observable[_T], "Future[_T]", Callable[[Optional[Exception]], Observable[_T]]
]
) -> Observable[_T]:
"""Continues an observable sequence that is terminated normally or
by an exception with the next observable sequence.
.. marble::
:alt: on_error_resume_next
--1--2--*
a--3--4--*
b--6-|
[on_error_resume_next(a,b)]
--1--2----3--4----6-|
Examples:
>>> res = reactivex.on_error_resume_next(xs, ys, zs)
Args:
sources: Sequence of sources, each of which is expected to be an
instance of either :class:`Observable` or :class:`Future`.
Returns:
An observable sequence that concatenates the source sequences,
even if a sequence terminates with an exception.
"""
from .observable.onerrorresumenext import on_error_resume_next_
return on_error_resume_next_(*sources)
def range(
start: int,
stop: Optional[int] = None,
step: Optional[int] = None,
scheduler: Optional[abc.SchedulerBase] = None,
) -> Observable[int]:
"""Generates an observable sequence of integral numbers within a
specified range, using the specified scheduler to send out observer
messages.
.. marble::
:alt: range
[ range(4) ]
--0--1--2--3--|
Examples:
>>> res = reactivex.range(10)
>>> res = reactivex.range(0, 10)
>>> res = reactivex.range(0, 10, 1)
Args:
start: The value of the first integer in the sequence.
        stop: [Optional] Generate numbers up to, but not including, the stop
            value. Defaults to `sys.maxsize`.
step: [Optional] The step to be used (default is 1).
scheduler: [Optional] The scheduler to schedule the values on.
If not specified, the default is to use an instance of
:class:`CurrentThreadScheduler
<reactivex.scheduler.CurrentThreadScheduler>`.
Returns:
An observable sequence that contains a range of sequential
integral numbers.
"""
from .observable.range import range_
return range_(start, stop, step, scheduler)
def return_value(
value: _T, scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[_T]:
"""Returns an observable sequence that contains a single element,
using the specified scheduler to send out observer messages.
There is an alias called 'just'.
.. marble::
:alt: return_value
[ return_value(4) ]
-4-|
Examples:
>>> res = reactivex.return_value(42)
>>> res = reactivex.return_value(42, timeout_scheduler)
Args:
value: Single element in the resulting observable sequence.
Returns:
An observable sequence containing the single specified element.
"""
from .observable.returnvalue import return_value_
return return_value_(value, scheduler)
just = alias("just", "Alias for :func:`reactivex.return_value`.", return_value)
def repeat_value(value: _T, repeat_count: Optional[int] = None) -> Observable[_T]:
"""Generates an observable sequence that repeats the given element
the specified number of times.
.. marble::
:alt: repeat_value
[ repeat_value(4) ]
-4-4-4-4->
Examples:
>>> res = reactivex.repeat_value(42)
>>> res = reactivex.repeat_value(42, 4)
Args:
value: Element to repeat.
repeat_count: [Optional] Number of times to repeat the element.
If not specified, repeats indefinitely.
Returns:
An observable sequence that repeats the given element the
specified number of times.
"""
from .observable.repeat import repeat_value_
return repeat_value_(value, repeat_count)
def start(
func: Callable[[], _T], scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[_T]:
"""Invokes the specified function asynchronously on the specified
scheduler, surfacing the result through an observable sequence.
.. marble::
:alt: start
        [ start(lambda: 4) ]
-4-|
-4-|
Note:
The function is called immediately, not during the subscription
of the resulting sequence. Multiple subscriptions to the
resulting sequence can observe the function's result.
Example:
>>> res = reactivex.start(lambda: pprint('hello'))
>>> res = reactivex.start(lambda: pprint('hello'), rx.Scheduler.timeout)
Args:
func: Function to run asynchronously.
scheduler: [Optional] Scheduler to run the function on. If
not specified, defaults to an instance of
:class:`TimeoutScheduler <reactivex.scheduler.TimeoutScheduler>`.
Returns:
An observable sequence exposing the function's result value,
or an exception.
"""
from .observable.start import start_
return start_(func, scheduler)
def start_async(function_async: Callable[[], "Future[_T]"]) -> Observable[_T]:
"""Invokes the asynchronous function, surfacing the result through
an observable sequence.
.. marble::
:alt: start_async
[ start_async() ]
------1|
Args:
function_async: Asynchronous function which returns a :class:`Future`
to run.
Returns:
An observable sequence exposing the function's result value,
or an exception.
"""
from .observable.startasync import start_async_
return start_async_(function_async)
def throw(
exception: Union[str, Exception], scheduler: Optional[abc.SchedulerBase] = None
) -> Observable[Any]:
"""Returns an observable sequence that terminates with an exception,
using the specified scheduler to send out the single OnError message.
.. marble::
:alt: throw
[ throw() ]
-*
Example:
>>> res = reactivex.throw(Exception('Error'))
Args:
exception: An object used for the sequence's termination.
scheduler: [Optional] Scheduler to schedule the error notification on.
If not specified, the default is to use an instance of
:class:`ImmediateScheduler <reactivex.scheduler.ImmediateScheduler>`.
Returns:
The observable sequence that terminates exceptionally with the
specified exception object.
"""
from .observable.throw import throw_
return throw_(exception, scheduler)
def timer(
duetime: typing.AbsoluteOrRelativeTime,
period: Optional[typing.RelativeTime] = None,
scheduler: Optional[abc.SchedulerBase] = None,
) -> Observable[int]:
"""Returns an observable sequence that produces a value after
duetime has elapsed and then after each period.
.. marble::
:alt: timer
[ timer(2) ]
--0-|
Examples:
>>> res = reactivex.timer(datetime(...))
>>> res = reactivex.timer(datetime(...), 0.1)
>>> res = reactivex.timer(5.0)
>>> res = reactivex.timer(5.0, 1.0)
Args:
duetime: Absolute (specified as a datetime object) or relative time
(specified as a float denoting seconds or an instance of timedelta)
at which to produce the first value.
period: [Optional] Period to produce subsequent values (specified as a
float denoting seconds or an instance of timedelta).
If not specified, the resulting timer is not recurring.
scheduler: [Optional] Scheduler to run the timer on. If not specified,
the default is to use an instance of
:class:`TimeoutScheduler <reactivex.scheduler.TimeoutScheduler>`.
Returns:
An observable sequence that produces a value after due time has
elapsed and then each period.
"""
from .observable.timer import timer_
return timer_(duetime, period, scheduler)
def to_async(
func: Callable[..., _T], scheduler: Optional[abc.SchedulerBase] = None
) -> Callable[..., Observable[_T]]:
"""Converts the function into an asynchronous function. Each
invocation of the resulting asynchronous function causes an
invocation of the original synchronous function on the specified
scheduler.
.. marble::
:alt: to_async
[ to_async()() ]
------1|
Examples:
>>> res = reactivex.to_async(lambda x, y: x + y)(4, 3)
>>> res = reactivex.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
>>> res = reactivex.to_async(lambda x: log.debug(x), Scheduler.timeout)('hello')
Args:
func: Function to convert to an asynchronous function.
scheduler: [Optional] Scheduler to run the function on. If not
specified, defaults to an instance of
:class:`TimeoutScheduler <reactivex.scheduler.TimeoutScheduler>`.
Returns:
Asynchronous function.
"""
from .observable.toasync import to_async_
return to_async_(func, scheduler)
def using(
resource_factory: Callable[[], abc.DisposableBase],
observable_factory: Callable[[abc.DisposableBase], Observable[_T]],
) -> Observable[_T]:
"""Constructs an observable sequence that depends on a resource
object, whose lifetime is tied to the resulting observable
sequence's lifetime.
Example:
        >>> res = reactivex.using(lambda: AsyncSubject(), lambda s: s)
Args:
resource_factory: Factory function to obtain a resource object.
observable_factory: Factory function to obtain an observable
sequence that depends on the obtained resource.
Returns:
An observable sequence whose lifetime controls the lifetime
of the dependent resource object.
"""
from .observable.using import using_
return using_(resource_factory, observable_factory)
def with_latest_from(*sources: Observable[Any]) -> Observable[Tuple[Any, ...]]:
"""Merges the specified observable sequences into one observable
sequence by creating a :class:`tuple` only when the first
observable sequence produces an element.
.. marble::
:alt: with_latest_from
---1---2---3----4-|
--a-----b----c-d----|
[with_latest_from() ]
---1,a-2,a-3,b--4,d-|
Examples:
>>> obs = rx.with_latest_from(obs1)
>>> obs = rx.with_latest_from([obs1, obs2, obs3])
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing the result of combining
elements of the sources into a :class:`tuple`.
"""
from .observable.withlatestfrom import with_latest_from_
return with_latest_from_(*sources)
def zip(*args: Observable[Any]) -> Observable[Tuple[Any, ...]]:
"""Merges the specified observable sequences into one observable
sequence by creating a :class:`tuple` whenever all of the
observable sequences have produced an element at a corresponding
index.
.. marble::
:alt: zip
--1--2---3-----4---|
-a----b----c-d------|
[ zip() ]
--1,a-2,b--3,c-4,d-|
Example:
>>> res = rx.zip(obs1, obs2)
Args:
args: Observable sources to zip.
Returns:
An observable sequence containing the result of combining
elements of the sources as a :class:`tuple`.
"""
from .observable.zip import zip_
return zip_(*args)
__all__ = [
"abc",
"amb",
"case",
"catch",
"catch_with_iterable",
"create",
"combine_latest",
"compose",
"concat",
"concat_with_iterable",
"ConnectableObservable",
"defer",
"empty",
"fork_join",
"from_callable",
"from_callback",
"from_future",
"from_iterable",
"GroupedObservable",
"never",
"Notification",
"on_error_resume_next",
"of",
"Observable",
"Observer",
"return_value",
"pipe",
"range",
"repeat_value",
"Subject",
"start",
"start_async",
"throw",
"timer",
"typing",
"to_async",
"using",
"with_latest_from",
"zip",
"__version__",
]
|
|
'''@file train_asr.py
this file will do the asr training'''
import os
from functools import partial
import tensorflow as tf
from six.moves import configparser
from nabu.distributed import create_server
from nabu.processing import batchdispenser, feature_reader, target_coder
from nabu.neuralnetworks.classifiers.asr import asr_factory
from nabu.neuralnetworks.trainers import trainer_factory
from nabu.neuralnetworks.decoders import decoder_factory
def train_asr(clusterfile,
job_name,
task_index,
ssh_command,
expdir):
''' does everything for asr training
Args:
        clusterfile: the file where all the machines in the cluster are
            specified; if None, local training will be done
job_name: one of ps or worker in the case of distributed training
task_index: the task index in this job
ssh_command: the command to use for ssh, if 'None' no tunnel will be
created
expdir: the experiments directory
'''
#read the database config file
parsed_database_cfg = configparser.ConfigParser()
parsed_database_cfg.read(os.path.join(expdir, 'database.cfg'))
database_cfg = dict(parsed_database_cfg.items('database'))
#read the features config file
parsed_feat_cfg = configparser.ConfigParser()
parsed_feat_cfg.read(os.path.join(expdir, 'model', 'features.cfg'))
feat_cfg = dict(parsed_feat_cfg.items('features'))
#read the asr config file
parsed_nnet_cfg = configparser.ConfigParser()
parsed_nnet_cfg.read(os.path.join(expdir, 'model', 'asr.cfg'))
nnet_cfg = dict(parsed_nnet_cfg.items('asr'))
#read the trainer config file
parsed_trainer_cfg = configparser.ConfigParser()
parsed_trainer_cfg.read(os.path.join(expdir, 'trainer.cfg'))
trainer_cfg = dict(parsed_trainer_cfg.items('trainer'))
#read the decoder config file
parsed_decoder_cfg = configparser.ConfigParser()
parsed_decoder_cfg.read(os.path.join(expdir, 'model', 'decoder.cfg'))
decoder_cfg = dict(parsed_decoder_cfg.items('decoder'))
    #distinguish between the three implemented training modes
if database_cfg['train_mode'] == 'supervised':
nonsupervised = False
elif database_cfg['train_mode'] == 'nonsupervised' or\
database_cfg['train_mode'] == 'semisupervised':
nonsupervised = True
else:
raise Exception('Wrong kind of training mode')
    #when (partly) nonsupervised, determine which features are used for the
    #reconstruction; currently two options are implemented
if nonsupervised:
if trainer_cfg['reconstruction_features'] == 'audio_samples':
audio_used = True
elif trainer_cfg['reconstruction_features'] == 'input_features':
audio_used = False
else:
raise Exception(
'Unknown specification for the reconstruction features')
#read the quant config file if nonsupervised training and samples used
if nonsupervised:
if audio_used:
parsed_quant_cfg = configparser.ConfigParser()
parsed_quant_cfg.read(os.path.join(expdir,
'model', 'quantization.cfg'))
quant_cfg = dict(parsed_quant_cfg.items('features'))
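    # For reference, the configuration files read above are assumed to live in
    # the experiments directory roughly as follows (layout inferred from the
    # reads above, not an authoritative spec):
    #   expdir/database.cfg            [database]  train_mode, train_dir, ...
    #   expdir/trainer.cfg             [trainer]   batch_size, ...
    #   expdir/model/features.cfg      [features]  name, winlen, winstep, ...
    #   expdir/model/asr.cfg           [asr]       listener_numlayers, ...
    #   expdir/model/decoder.cfg       [decoder]
    #   expdir/model/quantization.cfg  [features]  quant_rate, quant_levels, ...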
#based on the other settings, compute and overwrite samples_per_hlfeature
#and unpredictable_samples in the classifier config dictionary
if nonsupervised:
if audio_used:
rate_after_quant = int(quant_cfg['quant_rate'])
            win_length = float(feat_cfg['winlen'])
            win_shift = float(feat_cfg['winstep'])
            samples_one_window = int(win_length*rate_after_quant)
samples_one_shift = int(win_shift*rate_after_quant)
            #### THIS IS ONLY RELEVANT WHEN USING A LISTENER WITH A PYRAMIDAL STRUCTURE
# and this line should be adapted otherwise
time_compression = 2**int(nnet_cfg['listener_numlayers'])
#store values in config dictionary
nnet_cfg['samples_per_hlfeature'] = samples_one_shift\
*time_compression
nnet_cfg['unpredictable_samples'] = (samples_one_window+\
(time_compression-1)\
*samples_one_shift)-nnet_cfg['samples_per_hlfeature']
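            # Worked example (hypothetical values, for illustration only): with
            # winlen=0.025 s, winstep=0.01 s, quant_rate=16000 Hz and
            # listener_numlayers=3, this gives samples_one_window=400,
            # samples_one_shift=160 and time_compression=2**3=8, so
            # samples_per_hlfeature = 160*8 = 1280 and
            # unpredictable_samples = (400 + 7*160) - 1280 = 240.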
#create the cluster and server
server = create_server.create_server(
clusterfile=clusterfile,
job_name=job_name,
task_index=task_index,
expdir=expdir,
ssh_command=ssh_command)
#the ps should just wait
if job_name == 'ps':
server.join()
# path to where the training samples are stored
featdir = os.path.join(database_cfg['train_dir'], feat_cfg['name'])
#create the coder
with open(os.path.join(database_cfg['train_dir'], 'alphabet')) as fid:
alphabet = fid.read().split(' ')
coder = target_coder.TargetCoder(alphabet)
#create a feature reader for the training data
with open(featdir + '/maxlength', 'r') as fid:
max_length = int(fid.read())
featreader = feature_reader.FeatureReader(
scpfile=featdir + '/feats_shuffled.scp',
cmvnfile=featdir + '/cmvn.scp',
utt2spkfile=featdir + '/utt2spk',
max_length=max_length)
#read the feature dimension
with open(featdir + '/dim', 'r') as fid:
input_dim = int(fid.read())
#the path to the text file
textfile = os.path.join(database_cfg['train_dir'], 'targets')
    # If nonsupervised and audio is used, we also need to read samples;
    # this can be done with a second feature reader
if nonsupervised:
if audio_used:
featdir2 = os.path.join(database_cfg['train_dir'],
quant_cfg['name'])
with open(featdir2 + '/maxlength', 'r') as fid:
max_length_audio = int(fid.read())
audioreader = feature_reader.FeatureReader(
scpfile=featdir2 + '/feats_shuffled.scp',
cmvnfile=None,
utt2spkfile=None,
max_length=max_length_audio)
## create a batch dispenser, depending on which situation we're in
if not nonsupervised:
# in the normal supervised training mode, regular dispenser is needed
if 'las_ignoring_mode' in trainer_cfg:
if trainer_cfg['las_ignoring_mode'] == 'True':
# if we ignore unlabeled examples
dispenser = batchdispenser.AsrTextBatchDispenser(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
elif trainer_cfg['las_ignoring_mode'] == 'False':
# if we choose to process the unlabeled examples
if 'fixed_ratio' in trainer_cfg:
if trainer_cfg['fixed_ratio'] == 'True':
# if we choose to process with batches with fixed
# labeled/unlabeled ratio
dispenser = \
batchdispenser.AsrTextBatchDispenserAltFixRatio(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile,
percentage_unlabeled=1-float(
database_cfg['part_labeled']))
elif trainer_cfg['fixed_ratio'] == 'False':
# if the fixed ratio is not used
dispenser = batchdispenser.AsrTextBatchDispenserAlt(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
raise Exception('wrong information in fixed_ratio var')
else:
# if fixed ratio is not specified, we choose to do without it
dispenser = batchdispenser.AsrTextBatchDispenserAlt(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
raise Exception('wrong information in LAS_ignoring_mode var')
else:
# if no specification is made about the ignoring, ignore the unlabeled
dispenser = batchdispenser.AsrTextBatchDispenser(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
# when doing (partly) nonsupervised extra reconstruction features needed
if audio_used:
# when the audio is the reconstruction feature
if 'fixed_ratio' in trainer_cfg:
if trainer_cfg['fixed_ratio'] == 'True':
# if specified to work with fixed lab/unlab ratio batches
dispenser = \
batchdispenser.AsrTextAndAudioBatchDispenserFixRatio(
feature_reader=featreader,
audio_reader=audioreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile,
percentage_unlabeled=1-float(
database_cfg['part_labeled']))
elif trainer_cfg['fixed_ratio'] == 'False':
# if specified to not use the fixed ratio
dispenser = batchdispenser.AsrTextAndAudioBatchDispenser(
feature_reader=featreader,
audio_reader=audioreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
raise Exception('wrong information in fixed_ratio var')
else:
# without specification, suppose no fixed ratio batches
dispenser = batchdispenser.AsrTextAndAudioBatchDispenser(
feature_reader=featreader,
audio_reader=audioreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
# if no audio is used, the input features are used
if 'fixed_ratio' in trainer_cfg:
if trainer_cfg['fixed_ratio'] == 'True':
                    # if specified to work with fixed labeled/unlabeled ratio batches
dispenser = \
batchdispenser.AsrTextAndFeatBatchDispenserFixRatio(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile,
percentage_unlabeled=1-float(
database_cfg['part_labeled']))
elif trainer_cfg['fixed_ratio'] == 'False':
# if specified to not use the fixed ratio
dispenser = batchdispenser.AsrTextAndFeatBatchDispenser(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
else:
raise Exception('wrong information in fixed_ratio var')
else:
# without specification, suppose no fixed ratio batches
dispenser = batchdispenser.AsrTextAndFeatBatchDispenser(
feature_reader=featreader,
target_coder=coder,
size=int(trainer_cfg['batch_size']),
target_path=textfile)
    # read validation data. Text targets only matter for the validation data;
    # if training is purely nonsupervised, we must validate on the
    # reconstructed features
if 'dev_data' in database_cfg:
# create a reader for the validation inputs
featdir = database_cfg['dev_dir'] + '/' + feat_cfg['name']
with open(featdir + '/maxlength', 'r') as fid:
max_length = int(fid.read())
val_reader = feature_reader.FeatureReader(
scpfile=featdir + '/feats.scp',
cmvnfile=featdir + '/cmvn.scp',
utt2spkfile=featdir + '/utt2spk',
max_length=max_length)
textfile = os.path.join(database_cfg['dev_dir'], 'targets')
#read the validation text targets
with open(textfile) as fid:
lines = fid.readlines()
val_text_targets = dict()
for line in lines:
splitline = line.strip().split(' ')
val_text_targets[splitline[0]] = ' '.join(splitline[1:])
if nonsupervised:
#also store the reconstruction targets
val_rec_targets = dict()
if audio_used:
audiodir = database_cfg['dev_dir'] + '/' + quant_cfg['name']
with open(audiodir + '/maxlength', 'r') as fid:
max_length_audio = int(fid.read())
val_audio_reader = feature_reader.FeatureReader(
scpfile=audiodir + '/feats.scp',
cmvnfile=None,
utt2spkfile=audiodir + '/utt2spk',
max_length=max_length_audio)
for _ in range(val_audio_reader.num_utt):
utt_id, audio, _ = val_audio_reader.get_utt()
val_rec_targets[utt_id] = audio
else: #input features are used
for _ in range(val_reader.num_utt):
utt_id, feat, _ = val_reader.get_utt()
val_rec_targets[utt_id] = feat
else:
with open(textfile) as fid:
lines = fid.readlines()
val_rec_targets = dict()
for line in lines:
splitline = line.strip().split(' ')
val_rec_targets[splitline[0]] = None
val_targets = dict()
for utt_id in val_text_targets:
val_targets[utt_id] = (val_text_targets[utt_id],
val_rec_targets[utt_id])
else:
if int(trainer_cfg['valid_utt']) > 0:
val_dispenser = dispenser.split(int(trainer_cfg['valid_utt']))
val_reader = val_dispenser.feature_reader
val_targets = val_dispenser.target_dict
else:
val_reader = None
val_targets = None
#encode the validation targets
if val_targets is not None:
for utt in val_targets:
val_targets[utt] = (dispenser.target_coder.encode(
val_targets[utt][0]), val_targets[utt][1])
#create the classifier
if nonsupervised:
if audio_used:
output_dim_second_el = int(quant_cfg['quant_levels'])
else: # input features used
output_dim_second_el = input_dim
else: # only supervised training
output_dim_second_el = None
classifier = asr_factory.factory(
conf=nnet_cfg,
output_dim=(coder.num_labels, output_dim_second_el))
#create the callable for the decoder
decoder = partial(
decoder_factory.factory,
conf=decoder_cfg,
classifier=classifier,
input_dim=input_dim,
max_input_length=val_reader.max_length,
coder=coder,
expdir=expdir)
#create the trainer
if nonsupervised:
if audio_used:
reconstruction_dim = 1
else:
reconstruction_dim = input_dim
else:
reconstruction_dim = 1
tr = trainer_factory.factory(
conf=trainer_cfg,
decoder=decoder,
classifier=classifier,
input_dim=input_dim,
reconstruction_dim=reconstruction_dim,
dispenser=dispenser,
val_reader=val_reader,
val_targets=val_targets,
expdir=expdir,
server=server,
task_index=task_index)
print 'starting training'
#train the classifier
tr.train()
if __name__ == '__main__':
#define the FLAGS
tf.app.flags.DEFINE_string('clusterfile', None,
'The file containing the cluster')
tf.app.flags.DEFINE_string('job_name', 'local', 'One of ps, worker')
tf.app.flags.DEFINE_integer('task_index', 0, 'The task index')
tf.app.flags.DEFINE_string(
'ssh_command', 'None',
'the command that should be used to create ssh tunnels')
tf.app.flags.DEFINE_string('expdir', 'expdir', 'The experimental directory')
FLAGS = tf.app.flags.FLAGS
train_asr(
clusterfile=FLAGS.clusterfile,
job_name=FLAGS.job_name,
task_index=FLAGS.task_index,
ssh_command=FLAGS.ssh_command,
expdir=FLAGS.expdir)
|
|
# -*- coding: utf-8 -*-
import pytest
from tests.utils import TestCaseBase
import sqlparse
from sqlparse.exceptions import SQLParseError
class TestFormat(TestCaseBase):
def test_keywordcase(self):
sql = 'select * from bar; -- select foo\n'
res = sqlparse.format(sql, keyword_case='upper')
self.ndiffAssertEqual(res, 'SELECT * FROM bar; -- select foo\n')
res = sqlparse.format(sql, keyword_case='capitalize')
self.ndiffAssertEqual(res, 'Select * From bar; -- select foo\n')
res = sqlparse.format(sql.upper(), keyword_case='lower')
self.ndiffAssertEqual(res, 'select * from BAR; -- SELECT FOO\n')
self.assertRaises(SQLParseError, sqlparse.format, sql,
keyword_case='foo')
def test_identifiercase(self):
sql = 'select * from bar; -- select foo\n'
res = sqlparse.format(sql, identifier_case='upper')
self.ndiffAssertEqual(res, 'select * from BAR; -- select foo\n')
res = sqlparse.format(sql, identifier_case='capitalize')
self.ndiffAssertEqual(res, 'select * from Bar; -- select foo\n')
res = sqlparse.format(sql.upper(), identifier_case='lower')
self.ndiffAssertEqual(res, 'SELECT * FROM bar; -- SELECT FOO\n')
self.assertRaises(SQLParseError, sqlparse.format, sql,
identifier_case='foo')
sql = 'select * from "foo"."bar"'
res = sqlparse.format(sql, identifier_case="upper")
self.ndiffAssertEqual(res, 'select * from "foo"."bar"')
def test_strip_comments_single(self):
sql = 'select *-- statement starts here\nfrom foo'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select * from foo')
sql = 'select * -- statement starts here\nfrom foo'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select * from foo')
sql = 'select-- foo\nfrom -- bar\nwhere'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select from where')
self.assertRaises(SQLParseError, sqlparse.format, sql,
strip_comments=None)
def test_strip_comments_multi(self):
sql = '/* sql starts here */\nselect'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = '/* sql starts here */ select'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = '/*\n * sql starts here\n */\nselect'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = 'select (/* sql starts here */ select 2)'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select (select 2)')
def test_strip_ws(self):
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
s = 'select\n* from foo\n\twhere ( 1 = 2 )\n'
self.ndiffAssertEqual(f(s), 'select * from foo where (1 = 2)')
s = 'select -- foo\nfrom bar\n'
self.ndiffAssertEqual(f(s), 'select -- foo\nfrom bar')
self.assertRaises(SQLParseError, sqlparse.format, s,
strip_whitespace=None)
def test_preserve_ws(self):
# preserve at least one whitespace after subgroups
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
s = 'select\n* /* foo */ from bar '
self.ndiffAssertEqual(f(s), 'select * /* foo */ from bar')
def test_notransform_of_quoted_crlf(self):
# Make sure that CR/CR+LF characters inside string literals don't get
# affected by the formatter.
s1 = "SELECT some_column LIKE 'value\r'"
s2 = "SELECT some_column LIKE 'value\r'\r\nWHERE id = 1\n"
s3 = "SELECT some_column LIKE 'value\\'\r' WHERE id = 1\r"
s4 = "SELECT some_column LIKE 'value\\\\\\'\r' WHERE id = 1\r\n"
f = lambda x: sqlparse.format(x)
# Line endings outside string literals may be normalized to \n, but the \r inside the quoted values must be preserved.
self.ndiffAssertEqual(f(s1), "SELECT some_column LIKE 'value\r'")
self.ndiffAssertEqual(f(s2), "SELECT some_column LIKE 'value\r'\nWHERE id = 1\n")
self.ndiffAssertEqual(f(s3), "SELECT some_column LIKE 'value\\'\r' WHERE id = 1\n")
self.ndiffAssertEqual(f(s4), "SELECT some_column LIKE 'value\\\\\\'\r' WHERE id = 1\n")
def test_outputformat(self):
sql = 'select * from foo;'
self.assertRaises(SQLParseError, sqlparse.format, sql,
output_format='foo')
class TestFormatReindent(TestCaseBase):
def test_option(self):
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
reindent=2)
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
indent_tabs=2)
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
reindent=True, indent_width='foo')
self.assertRaises(SQLParseError, sqlparse.format, 'foo',
reindent=True, indent_width=-12)
def test_stmts(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo; select bar'
self.ndiffAssertEqual(f(s), 'select foo;\n\nselect bar')
s = 'select foo'
self.ndiffAssertEqual(f(s), 'select foo')
s = 'select foo; -- test\n select bar'
self.ndiffAssertEqual(f(s), 'select foo; -- test\n\nselect bar')
def test_keywords(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo union select * from bar;'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'union',
'select *',
'from bar;']))
def test_keywords_between(self): # issue 14
# don't break AND after BETWEEN
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'and foo between 1 and 2 and bar = 3'
self.ndiffAssertEqual(f(s), '\n'.join(['',
'and foo between 1 and 2',
'and bar = 3']))
def test_parenthesis(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select count(*) from (select * from foo);'
self.ndiffAssertEqual(f(s),
'\n'.join(['select count(*)',
'from',
' (select *',
' from foo);',
])
)
def test_where(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo where bar = 1 and baz = 2 or bzz = 3;'
self.ndiffAssertEqual(f(s), ('select *\nfrom foo\n'
'where bar = 1\n'
' and baz = 2\n'
' or bzz = 3;'))
s = 'select * from foo where bar = 1 and (baz = 2 or bzz = 3);'
self.ndiffAssertEqual(f(s), ('select *\nfrom foo\n'
'where bar = 1\n'
' and (baz = 2\n'
' or bzz = 3);'))
def test_join(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'join bar on 1 = 2']))
s = 'select * from foo inner join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'inner join bar on 1 = 2']))
s = 'select * from foo left outer join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'left outer join bar on 1 = 2']
))
s = 'select * from foo straight_join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'straight_join bar on 1 = 2']
))
def test_identifier_list(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo, bar, baz from table1, table2 where 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select foo,',
' bar,',
' baz',
'from table1,',
' table2',
'where 1 = 2']))
s = 'select a.*, b.id from a, b'
self.ndiffAssertEqual(f(s), '\n'.join(['select a.*,',
' b.id',
'from a,',
' b']))
def test_identifier_list_with_functions(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = ("select 'abc' as foo, coalesce(col1, col2)||col3 as bar,"
"col3 from my_table")
self.ndiffAssertEqual(f(s), '\n'.join(
["select 'abc' as foo,",
" coalesce(col1, col2)||col3 as bar,",
" col3",
"from my_table"]))
def test_case(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'case when foo = 1 then 2 when foo = 3 then 4 else 5 end'
self.ndiffAssertEqual(f(s), '\n'.join(['case',
' when foo = 1 then 2',
' when foo = 3 then 4',
' else 5',
'end']))
def test_case2(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'case(foo) when bar = 1 then 2 else 3 end'
self.ndiffAssertEqual(f(s), '\n'.join(['case(foo)',
' when bar = 1 then 2',
' else 3',
'end']))
def test_nested_identifier_list(self): # issue4
f = lambda sql: sqlparse.format(sql, reindent=True)
s = '(foo as bar, bar1, bar2 as bar3, b4 as b5)'
self.ndiffAssertEqual(f(s), '\n'.join(['(foo as bar,',
' bar1,',
' bar2 as bar3,',
' b4 as b5)']))
def test_duplicate_linebreaks(self): # issue3
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select c1 -- column1\nfrom foo'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1 -- column1',
'from foo']))
s = 'select c1 -- column1\nfrom foo'
r = sqlparse.format(s, reindent=True, strip_comments=True)
self.ndiffAssertEqual(r, '\n'.join(['select c1',
'from foo']))
s = 'select c1\nfrom foo\norder by c1'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1',
'from foo',
'order by c1']))
s = 'select c1 from t1 where (c1 = 1) order by c1'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1',
'from t1',
'where (c1 = 1)',
'order by c1']))
def test_keywordfunctions(self): # issue36
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select max(a) b, foo, bar'
self.ndiffAssertEqual(f(s), '\n'.join(['select max(a) b,',
' foo,',
' bar']))
def test_identifier_and_functions(self): # issue45
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo.bar, nvl(1) from dual'
self.ndiffAssertEqual(f(s), '\n'.join(['select foo.bar,',
' nvl(1)',
'from dual']))
class TestOutputFormat(TestCaseBase):
def test_python(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='python')
self.ndiffAssertEqual(f(sql), "sql = 'select * from foo;'")
f = lambda sql: sqlparse.format(sql, output_format='python',
reindent=True)
self.ndiffAssertEqual(f(sql), ("sql = ('select * '\n"
" 'from foo;')"))
def test_php(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='php')
self.ndiffAssertEqual(f(sql), '$sql = "select * from foo;";')
f = lambda sql: sqlparse.format(sql, output_format='php',
reindent=True)
self.ndiffAssertEqual(f(sql), ('$sql = "select * ";\n'
'$sql .= "from foo;";'))
def test_sql(self): # "sql" is an allowed option but has no effect
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='sql')
self.ndiffAssertEqual(f(sql), 'select * from foo;')
def test_format_column_ordering(): # issue89
sql = 'select * from foo order by c1 desc, c2, c3;'
formatted = sqlparse.format(sql, reindent=True)
expected = '\n'.join(['select *',
'from foo',
'order by c1 desc,',
' c2,',
' c3;'])
assert formatted == expected
def test_truncate_strings():
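# A 1000-character string literal should be cut to `truncate_strings` characters, with the truncation marker appended.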
sql = 'update foo set value = \'' + 'x' * 1000 + '\';'
formatted = sqlparse.format(sql, truncate_strings=10)
assert formatted == 'update foo set value = \'xxxxxxxxxx[...]\';'
formatted = sqlparse.format(sql, truncate_strings=3, truncate_char='YYY')
assert formatted == 'update foo set value = \'xxxYYY\';'
def test_truncate_strings_invalid_option():
pytest.raises(SQLParseError, sqlparse.format,
'foo', truncate_strings='bar')
pytest.raises(SQLParseError, sqlparse.format,
'foo', truncate_strings=-1)
pytest.raises(SQLParseError, sqlparse.format,
'foo', truncate_strings=0)
@pytest.mark.parametrize('sql', ['select verrrylongcolumn from foo',
'select "verrrylongcolumn" from "foo"'])
def test_truncate_strings_doesnt_truncate_identifiers(sql):
formatted = sqlparse.format(sql, truncate_strings=2)
assert formatted == sql
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for specs.tensor_spec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
TYPE_PARAMETERS = (
("tf.int32", tf.int32),
("tf.int64", tf.int64),
("tf.float32", tf.float32),
("tf.float64", tf.float64),
("tf.uint8", tf.uint8),
("tf.string", tf.string),
("tf.bool", tf.bool),
)
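# Helper: builds a nested structure (dict/tuple/list) of ArraySpecs and BoundedArraySpecs used throughout the tests below.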
def example_nested_array_spec(dtype):
return {
"spec_1":
array_spec.ArraySpec((2, 3), dtype),
"bounded_spec_1":
array_spec.BoundedArraySpec((2, 3), dtype, -10, 10),
"bounded_spec_2":
array_spec.BoundedArraySpec((2, 3), dtype, -10, -10),
"bounded_array_spec_3":
array_spec.BoundedArraySpec((2,), dtype, [-10, -10], [10, 10]),
"bounded_array_spec_4":
array_spec.BoundedArraySpec((2,), dtype, [-10, -9], [10, 9]),
"dict_spec": {
"spec_2": array_spec.ArraySpec((2, 3), dtype),
"bounded_spec_2": array_spec.BoundedArraySpec((2, 3), dtype, -10, 10)
},
"tuple_spec": (
array_spec.ArraySpec((2, 3), dtype),
array_spec.BoundedArraySpec((2, 3), dtype, -10, 10),
),
"list_spec": [
array_spec.ArraySpec((2, 3), dtype),
(array_spec.ArraySpec(
(2, 3), dtype), array_spec.BoundedArraySpec((2, 3), dtype, -10,
10)),
],
}
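# TensorSpec analogue of the array specs above; uint8 uses its full [0, 255] range so the bounds stay representable.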
def example_nested_tensor_spec(dtype, outer_dims=()):
minval = 0 if dtype == tf.uint8 else -10
maxval = 255 if dtype == tf.uint8 else 10
return {
"spec_1":
tensor_spec.TensorSpec(outer_dims + (2, 3), dtype),
"bounded_spec_1":
tensor_spec.BoundedTensorSpec(outer_dims + (2, 3), dtype, minval,
maxval),
"bounded_spec_2":
tensor_spec.BoundedTensorSpec(outer_dims + (2, 3), dtype, minval,
minval),
"bounded_array_spec_3":
tensor_spec.BoundedTensorSpec(outer_dims + (2,), dtype,
[minval, minval], [maxval, maxval]),
"bounded_array_spec_4":
tensor_spec.BoundedTensorSpec(outer_dims + (2,), dtype,
[minval, minval + 1],
[maxval, maxval - 1]),
"dict_spec": {
"spec_2":
tensor_spec.TensorSpec(outer_dims + (2, 3), dtype),
"bounded_spec_2":
tensor_spec.BoundedTensorSpec(outer_dims + (2, 3), dtype, minval,
maxval)
},
"tuple_spec": (
tensor_spec.TensorSpec(outer_dims + (2, 3), dtype),
tensor_spec.BoundedTensorSpec(outer_dims + (2, 3), dtype, minval,
maxval),
),
"list_spec": [
tensor_spec.TensorSpec(outer_dims + (2, 3), dtype),
(tensor_spec.TensorSpec(outer_dims + (2, 3), dtype),
tensor_spec.BoundedTensorSpec(outer_dims + (2, 3), dtype, minval,
maxval)),
],
}
@parameterized.named_parameters(*TYPE_PARAMETERS)
class BoundedTensorSpecSampleTest(tf.test.TestCase, parameterized.TestCase):
def testIntegerSamplesIncludeUpperBound(self, dtype):
if not dtype.is_integer: # Only test on integer dtypes.
return
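# Degenerate range [3, 3]: every sampled value must equal the (inclusive) upper bound.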
spec = tensor_spec.BoundedTensorSpec((2, 3), dtype, 3, 3)
sample = tensor_spec.sample_spec_nest(spec)
sample_ = self.evaluate(sample)
self.assertEqual(sample_.shape, (2, 3))
self.assertTrue(np.all(sample_ == 3))
def testIntegerSamplesExcludeMaxOfDtype(self, dtype):
# Exclude non integer types and uint8 (has special sampling logic).
if not dtype.is_integer or dtype == tf.uint8:
return
spec = tensor_spec.BoundedTensorSpec((2, 3), dtype, dtype.max - 1,
dtype.max)
sample = tensor_spec.sample_spec_nest(spec)
sample_ = self.evaluate(sample)
self.assertEqual(sample_.shape, (2, 3))
self.assertTrue(np.all(sample_ == dtype.max - 1))
def testSampleWithArrayInBounds(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
spec = tensor_spec.BoundedTensorSpec((2, 3), dtype, (0, 0, 0), 3)
sample = tensor_spec.sample_spec_nest(spec)
self.assertEqual((2, 3), sample.shape)
sample_ = self.evaluate(sample)
self.assertEqual((2, 3), sample_.shape)
self.assertTrue(np.all(sample_ <= 3))
self.assertTrue(np.all(0 <= sample_))
def testTensorSpecSample(self, dtype):
if dtype == tf.string or dtype == tf.bool:
self.skipTest("Not compatible with string or bool type.")
spec = tensor_spec.TensorSpec((2, 3), dtype)
sample = tensor_spec.sample_spec_nest(spec)
bounded = tensor_spec.BoundedTensorSpec.from_spec(spec)
sample_ = self.evaluate(sample)
self.assertTrue(
np.all(sample_ >= bounded.minimum), (sample_.min(), sample_.max()))
self.assertTrue(
np.all(sample_ <= bounded.maximum), (sample_.min(), sample_.max()))
def testBoundedTensorSpecSample(self, dtype):
if dtype == tf.string or dtype == tf.bool:
self.skipTest("Not compatible with string or bool type.")
spec = tensor_spec.BoundedTensorSpec((2, 3), dtype, 2, 7)
sample = tensor_spec.sample_spec_nest(spec)
sample_ = self.evaluate(sample)
self.assertTrue(np.all(sample_ >= 2))
self.assertTrue(np.all(sample_ <= 7))
def testOuterDimsNestAddsDimensionsToSpecs(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
outer_dims = (4, 3)
self.assertEqual(
tensor_spec.add_outer_dims_nest(nested_spec, outer_dims),
example_nested_tensor_spec(dtype, outer_dims))
def testAddOuterShapeWhenNotTupleOrListThrows(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
with self.assertRaises(ValueError):
tensor_spec.add_outer_dims_nest(1, example_nested_tensor_spec(dtype))
def testOuterDimsNestRemovesDimensionsFromSpecs(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
larger_spec = tensor_spec.add_outer_dims_nest(nested_spec, (3, 4))
removed_spec = tensor_spec.remove_outer_dims_nest(larger_spec, 2)
self.assertEqual(nested_spec, removed_spec)
def testOuterDimsNestRemovesDimensionsFromSpecsThrows(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
with self.assertRaises(ValueError):
tensor_spec.remove_outer_dims_nest(nested_spec, 10)
def testAddOuterDimToSpecs(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
outer_dim = 4
self.assertEqual(
tensor_spec.add_outer_dim(nested_spec, outer_dim),
example_nested_tensor_spec(dtype, (outer_dim,)))
def testAddOuterDimNoneToSpecs(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
outer_dim = None
self.assertEqual(
tensor_spec.add_outer_dim(nested_spec, outer_dim),
example_nested_tensor_spec(dtype, (outer_dim,)))
def testNestSample(self, dtype):
if dtype == tf.string or dtype == tf.bool:
self.skipTest("Not compatible with string or bool type.")
nested_spec = example_nested_tensor_spec(dtype)
sample = tensor_spec.sample_spec_nest(nested_spec)
spec_1 = tensor_spec.BoundedTensorSpec.from_spec(nested_spec["spec_1"])
bounded_spec_1 = nested_spec["bounded_spec_1"]
sample_ = self.evaluate(sample)
self.assertTrue(np.all(sample_["spec_1"] >= spec_1.minimum))
self.assertTrue(np.all(sample_["spec_1"] <= spec_1.maximum))
self.assertTrue(np.all(sample_["bounded_spec_1"] >= bounded_spec_1.minimum))
self.assertTrue(np.all(sample_["bounded_spec_1"] <= bounded_spec_1.maximum))
self.assertIn("spec_2", sample_["dict_spec"])
tensor_spec_2 = sample_["dict_spec"]["spec_2"]
self.assertTrue(np.all(tensor_spec_2 >= spec_1.minimum))
self.assertTrue(np.all(tensor_spec_2 <= spec_1.maximum))
self.assertIn("bounded_spec_2", sample_["dict_spec"])
sampled_bounded_spec_2 = sample_["dict_spec"]["bounded_spec_2"]
self.assertTrue(np.all(sampled_bounded_spec_2 >= spec_1.minimum))
self.assertTrue(np.all(sampled_bounded_spec_2 <= spec_1.maximum))
self.assertIn("tuple_spec", sample_)
self.assertTrue(np.all(sample_["tuple_spec"][0] >= spec_1.minimum))
self.assertTrue(np.all(sample_["tuple_spec"][0] <= spec_1.maximum))
self.assertTrue(np.all(sample_["tuple_spec"][1] >= bounded_spec_1.minimum))
self.assertTrue(np.all(sample_["tuple_spec"][1] <= bounded_spec_1.maximum))
self.assertIn("list_spec", sample_)
self.assertTrue(np.all(sample_["list_spec"][0] >= spec_1.minimum))
self.assertTrue(np.all(sample_["list_spec"][0] <= spec_1.maximum))
self.assertTrue(np.all(sample_["list_spec"][1][0] >= spec_1.minimum))
self.assertTrue(np.all(sample_["list_spec"][1][0] <= spec_1.maximum))
self.assertTrue(
np.all(sample_["list_spec"][1][1] >= bounded_spec_1.minimum))
self.assertTrue(
np.all(sample_["list_spec"][1][1] <= bounded_spec_1.maximum))
def testNestSampleOuterDims(self, dtype):
# Can't add another level of parameterized args because the test class is
# already parameterized on dtype.
if dtype == tf.string or dtype == tf.bool:
self.skipTest("Not compatible with string or bool type.")
self._testNestSampleOuterDims(dtype, use_tensor=False)
self._testNestSampleOuterDims(dtype, use_tensor=True)
def _testNestSampleOuterDims(self, dtype, use_tensor):
nested_spec = example_nested_tensor_spec(dtype)
if use_tensor:
outer_dims = tf.constant([2, 3], dtype=tf.int32)
else:
outer_dims = (2, 3)
sample = tensor_spec.sample_spec_nest(nested_spec, outer_dims=outer_dims)
bounded = tensor_spec.BoundedTensorSpec.from_spec(nested_spec["spec_1"])
sample_ = self.evaluate(sample)
self.assertEqual((2, 3) + tuple(nested_spec["spec_1"].shape.as_list()),
sample_["spec_1"].shape)
self.assertTrue(np.all(sample_["spec_1"] >= bounded.minimum))
self.assertTrue(np.all(sample_["spec_1"] <= bounded.maximum))
bounded_spec_1 = nested_spec["bounded_spec_1"]
self.assertEqual((2, 3) + tuple(bounded_spec_1.shape.as_list()),
sample_["bounded_spec_1"].shape)
self.assertTrue(np.all(sample_["bounded_spec_1"] >= bounded_spec_1.minimum))
self.assertTrue(np.all(sample_["bounded_spec_1"] <= bounded_spec_1.maximum))
self.assertIn("spec_2", sample_["dict_spec"])
tensor_spec_2 = sample_["dict_spec"]["spec_2"]
self.assertEqual(
(2, 3) + tuple(nested_spec["dict_spec"]["spec_2"].shape.as_list()),
tensor_spec_2.shape)
self.assertTrue(np.all(tensor_spec_2 >= bounded.minimum))
self.assertTrue(np.all(tensor_spec_2 <= bounded.maximum))
self.assertIn("bounded_spec_2", sample_["dict_spec"])
sampled_bounded_spec_2 = sample_["dict_spec"]["bounded_spec_2"]
self.assertEqual(
(2, 3) +
tuple(nested_spec["dict_spec"]["bounded_spec_2"].shape.as_list()),
sampled_bounded_spec_2.shape)
self.assertTrue(np.all(sampled_bounded_spec_2 >= bounded.minimum))
self.assertTrue(np.all(sampled_bounded_spec_2 <= bounded.maximum))
self.assertIn("tuple_spec", sample_)
self.assertEqual(
(2, 3) + tuple(nested_spec["tuple_spec"][0].shape.as_list()),
sample_["tuple_spec"][0].shape)
self.assertTrue(np.all(sample_["tuple_spec"][0] >= bounded.minimum))
self.assertTrue(np.all(sample_["tuple_spec"][0] <= bounded.maximum))
tuple_bounded_spec = nested_spec["tuple_spec"][1]
self.assertEqual((2, 3) + tuple(tuple_bounded_spec.shape.as_list()),
sample_["tuple_spec"][1].shape)
self.assertTrue(
np.all(sample_["tuple_spec"][1] >= tuple_bounded_spec.minimum))
self.assertTrue(
np.all(sample_["tuple_spec"][1] <= tuple_bounded_spec.maximum))
self.assertIn("list_spec", sample_)
self.assertEqual(
(2, 3) + tuple(nested_spec["list_spec"][0].shape.as_list()),
sample_["list_spec"][0].shape)
self.assertTrue(np.all(sample_["list_spec"][0] >= bounded.minimum))
self.assertTrue(np.all(sample_["list_spec"][0] <= bounded.maximum))
self.assertEqual(
(2, 3) + tuple(nested_spec["list_spec"][1][0].shape.as_list()),
sample_["list_spec"][1][0].shape)
self.assertTrue(np.all(sample_["list_spec"][1][0] >= bounded.minimum))
self.assertTrue(np.all(sample_["list_spec"][1][0] <= bounded.maximum))
list_bounded_spec = nested_spec["list_spec"][1][1]
self.assertTrue(
np.all(sample_["list_spec"][1][1] >= list_bounded_spec.minimum))
self.assertTrue(
np.all(sample_["list_spec"][1][1] <= list_bounded_spec.maximum))
def _test_batched_shape(sample_, spec_):
# outer_dims may be a tf.Tensor here, so compare against the literal dims on the evaluated samples
self.assertSequenceEqual(sample_.shape, (2, 3) + tuple(spec_.shape))
tf.nest.map_structure(_test_batched_shape, sample_, nested_spec)
@parameterized.named_parameters(*TYPE_PARAMETERS)
class TensorSpecZeroTest(tf.test.TestCase, parameterized.TestCase):
def testNestZero(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
zeros = tensor_spec.zero_spec_nest(nested_spec)
zeros_ = self.evaluate(zeros)
def check_shape_and_zero(spec, value):
self.assertEqual(spec.shape, value.shape)
self.assertTrue(np.all(value == 0))
tf.nest.map_structure(check_shape_and_zero, nested_spec, zeros_)
def testNestZeroWithOuterDims(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
zeros = tensor_spec.zero_spec_nest(nested_spec, outer_dims=[4])
zeros_ = self.evaluate(zeros)
def check_shape_and_zero(spec, value):
self.assertEqual([4] + spec.shape, value.shape)
self.assertTrue(np.all(value == 0))
tf.nest.map_structure(check_shape_and_zero, nested_spec, zeros_)
def testNestZeroWithOuterDimsTensor(self, dtype):
if dtype == tf.string:
self.skipTest("Not compatible with string type.")
nested_spec = example_nested_tensor_spec(dtype)
zeros = tensor_spec.zero_spec_nest(
nested_spec, outer_dims=[tf.constant(8, dtype=tf.int32)])
zeros_ = self.evaluate(zeros)
def check_shape_and_zero(spec, value):
self.assertEqual([8] + spec.shape, value.shape)
self.assertTrue(np.all(value == 0))
tf.nest.map_structure(check_shape_and_zero, nested_spec, zeros_)
def testOnlyTensorSpecIsSupported(self, dtype):
sparse_spec = tf.SparseTensorSpec([1], tf.int32)
with self.assertRaisesRegex(NotImplementedError, "not supported.*Sparse"):
_ = tensor_spec.zero_spec_nest(sparse_spec)
ragged_spec = tf.RaggedTensorSpec(ragged_rank=0, shape=[3, 5])
with self.assertRaisesRegex(NotImplementedError, "not supported.*Ragged"):
_ = tensor_spec.zero_spec_nest(ragged_spec)
def testEmptySpec(self, dtype):
self.assertEqual((), tensor_spec.zero_spec_nest(()))
self.assertEqual([], tensor_spec.zero_spec_nest([]))
@parameterized.named_parameters(*TYPE_PARAMETERS)
class TensorSpecTypeTest(tf.test.TestCase, parameterized.TestCase):
def testIsDiscrete(self, dtype):
spec = tensor_spec.TensorSpec((2, 3), dtype=dtype)
self.assertIs(tensor_spec.is_discrete(spec), dtype.is_integer)
def testIsContinuous(self, dtype):
spec = tensor_spec.TensorSpec((2, 3), dtype=dtype)
self.assertIs(tensor_spec.is_continuous(spec), dtype.is_floating)
def testExclusive(self, dtype):
if dtype == tf.string or dtype == tf.bool:
self.skipTest("Not compatible with string or bool type.")
spec = tensor_spec.TensorSpec((2, 3), dtype=dtype)
self.assertIs(
tensor_spec.is_discrete(spec) ^ tensor_spec.is_continuous(spec), True)
class FromSpecTest(tf.test.TestCase):
def testFromSpec(self):
example_array_spec = example_nested_array_spec(np.int32)
example_tensor_spec = tensor_spec.from_spec(example_array_spec)
flat_tensor_spec = tf.nest.flatten(example_tensor_spec)
expected_tensor_spec = tf.nest.flatten(example_nested_tensor_spec(tf.int32))
for spec, expected_spec in zip(flat_tensor_spec, expected_tensor_spec):
self.assertEqual(type(expected_spec), type(spec))
self.assertEqual(expected_spec, spec)
def testFromStringSpec(self):
spec = tensor_spec.from_spec(array_spec.ArraySpec([1], np.string_))
self.assertEqual(tf.string, spec.dtype)
class ToPlaceholderTest(tf.test.TestCase):
def skipIfExecutingEagerly(self):
# With TF 2.0 (or when executing eagerly), these tests do not make sense.
if tf.executing_eagerly():
self.skipTest("Placeholders do not make sense when executing eagerly")
def testCreatePlaceholderFromTuple(self):
self.skipIfExecutingEagerly()
specs = (
tensor_spec.TensorSpec(shape=(), dtype=tf.float32, name="act_prob"),
tensor_spec.TensorSpec(shape=(), dtype=tf.float32, name="value_pred"),
)
ph = tensor_spec.to_nest_placeholder(specs)
self.assertEqual(2, len(ph))
self.assertEqual(ph[0].name, "act_prob:0")
self.assertEqual(ph[0].dtype, tf.float32)
self.assertEqual(ph[0].shape, tf.TensorShape([]))
self.assertEqual(ph[1].name, "value_pred:0")
self.assertEqual(ph[1].dtype, tf.float32)
self.assertEqual(ph[1].shape, tf.TensorShape([]))
def testCreatePlaceholderFromTimeStepSpec(self):
self.skipIfExecutingEagerly()
obs_spec = tensor_spec.TensorSpec([2], tf.float32, "obs")
time_step_spec = ts.time_step_spec(obs_spec)
ph = tensor_spec.to_nest_placeholder(time_step_spec)
self.assertIsInstance(ph, ts.TimeStep)
self.assertEqual(ph.observation.name, "obs:0")
self.assertEqual(ph.observation.dtype, tf.float32)
self.assertEqual(ph.observation.shape, tf.TensorShape([2]))
def testCreatePlaceholderWithNameScope(self):
self.skipIfExecutingEagerly()
obs_spec = tensor_spec.TensorSpec([2], tf.float32, "obs")
time_step_spec = ts.time_step_spec(obs_spec)
ph = tensor_spec.to_nest_placeholder(time_step_spec, name_scope="action")
self.assertEqual(ph.observation.name, "action/obs:0")
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_clissh.py``
`Unittests for cli<X>.py modules`
"""
import os
import re
import time
import socket
from unittest.mock import MagicMock
import paramiko
import pytest
from testlib import clissh
from testlib import clitelnet
from testlib import clinns
from testlib.custom_exceptions import CLIException
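# The cli_api/ssh_ip/ssh_user/ssh_pass pytest options read by the fixtures below are assumed
# to be registered by the suite's conftest; they are only consumed here.
# The create_ssh/create_telnet/create_nns helpers are looked up by name via globals()["create_<param>"].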
def create_ssh(request, host, login):
ssh_conn = clissh.CLISSH(host, username=login, sudo_prompt="[sudo] password")
return ssh_conn
def create_telnet(request, host, login):
telnet_conn = clitelnet.TelnetCMD(host, username=login, sudo_prompt="[sudo] password")
return telnet_conn
def create_nns(request, host, login):
os.system("sudo ip netns add %s" % host)
os.system("sudo ip netns exec %s ifconfig lo up" % host)
request.addfinalizer(lambda: os.system("sudo ip netns del %s" % host))
nns_obj = clinns.CLISSHNetNS(host, username=login, sudo_prompt="[sudo] password")
return nns_obj
@pytest.fixture(scope="session", params=["ssh", "telnet", "nns"])
def credentials(request):
if request.param not in request.config.option.cli_api:
pytest.skip("{0} API is skipped for test.".format(request.param.upper()))
if request.param == "nns" and os.getenv("USER") != "root":
pytest.fail("NNS unittests require root permissions.")
ipaddr = request.config.option.ssh_ip
username = request.config.option.ssh_user
password = request.config.option.ssh_pass
return ipaddr, username, password, request.param
@pytest.fixture
def cli_obj(request, credentials):
request.config.login = credentials[0]
obj = globals()["create_{0}".format(credentials[3])](request, credentials[0], credentials[1])
request.addfinalizer(obj.close)
obj.login(credentials[1], credentials[2], timeout=5)
return obj
@pytest.fixture
def request_object(request, credentials):
obj = globals()["create_{0}".format(credentials[3])](request, credentials[0], credentials[1])
return obj
@pytest.mark.unittests
class TestSSH(object):
"""CLISSH unittests.
"""
def test_login_true(self, credentials, request_object):
"""Verify login/logout.
"""
obj = request_object
obj.login(credentials[1], credentials[2], timeout=5)
obj.close()
def test_multiple_login_logout(self, credentials, request_object):
"""Verify login after logout multiple times.
"""
for i in range(5):
request_object.login(credentials[1], credentials[2], timeout=5)
request_object.close()
@pytest.mark.skipif("'telnet' not in config.option.cli_api", reason="Skip telnet testcase.")
def test_enter_exit_mode(self, cli_obj, credentials):
"""Verify enter/exit mode.
"""
message = "Telnet specific test case"
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip(message)
if isinstance(cli_obj, clissh.CLISSH):
pytest.skip(message)
assert cli_obj.enter_mode(cmd="python", new_prompt=">>>") == ">>>"
out, err, _ = cli_obj.exec_command("print('O' + 'K')")  # print() form works under both python2 and python3
assert "OK" in out
assert credentials[1] in cli_obj.exit_mode(exit_cmd="exit()")
@pytest.mark.skipif(True, reason="Skip this test because user doesn't have root permission")
def test_sudo_shell_command_ssh(self, cli_obj, credentials):
"""Verify sudo mode for ssh.
"""
message = "SSH specific test case"
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip(message)
if isinstance(cli_obj, clitelnet.TelnetCMD):
pytest.skip(message)
cli_obj.open_shell()
# Clear shell output
time.sleep(0.5)
cli_obj.shell_read()
cli_obj.password = credentials[2]
# cmd = "env | $(which grep) TTY"
cmd = "stty -a"
data, ret_code = cli_obj.shell_command(cmd, timeout=5, sudo=True)
assert ret_code == "0"
assert data
@pytest.mark.skipif("'telnet' not in config.option.cli_api", reason="Skip telnet testcase.")
def test_sudo_shell_command_telnet(self, cli_obj, credentials):
"""Verify sudo mode for telnet.
"""
message = "Telnet specific test case"
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip(message)
if isinstance(cli_obj, clissh.CLISSH):
pytest.skip(message)
cli_obj.password = credentials[2]
cmd = "ls"
data, ret_code = cli_obj.shell_command(cmd, timeout=5, sudo=True)
assert ret_code == "0"
assert data
def test_login_false_username_ssh(self, credentials):
"""Verify AuthenticationException in case Incorrect username for ssh object.
"""
ssh_conn = clissh.CLISSH(credentials[0])
with pytest.raises(paramiko.AuthenticationException):
ssh_conn = clissh.CLISSH(credentials[0])
ssh_conn.login(ssh_conn.randstr(30), credentials[2], timeout=5)
@pytest.mark.skipif("'telnet' not in config.option.cli_api", reason="Skip telnet testcase.")
def test_login_false_username_telnet(self, credentials):
"""Verify AuthenticationException in case Incorrect username for telnet object.
"""
telnet_conn = clitelnet.TelnetCMD(credentials[0])
with pytest.raises(CLIException):
telnet_conn = clitelnet.TelnetCMD(credentials[0])
telnet_conn.login(telnet_conn.randstr(30), credentials[2], timeout=5)
def test_login_false_userpass_ssh(self, credentials):
"""Verify AuthenticationException in case Incorrect password for ssh object.
"""
ssh_conn = clissh.CLISSH(credentials[0])
with pytest.raises(paramiko.AuthenticationException):
ssh_conn = clissh.CLISSH(credentials[0])
ssh_conn.login(credentials[1], ssh_conn.randstr(30), timeout=5)
@pytest.mark.skipif("'telnet' not in config.option.cli_api", reason="Skip telnet testcase.")
def test_login_false_userpass_telnet(self, credentials):
"""Verify AuthenticationException in case Incorrect password for telnet object.
"""
telnet_conn = clitelnet.TelnetCMD(credentials[0])
with pytest.raises(CLIException):
telnet_conn = clitelnet.TelnetCMD(credentials[0])
telnet_conn.login(credentials[1], telnet_conn.randstr(30), timeout=5)
# Negative tests for the nns module aren't implemented, because the nns module is always in 'login' mode
def test_shell_command_1(self, cli_obj):
"""Non interactive shell command. No prompt is defined.
"""
cli_obj.open_shell()
# Clear shell output
time.sleep(0.5)
cli_obj.shell_read()
# cmd = "env | $(which grep) TTY"
cmd = "stty -a"
data, ret_code = cli_obj.shell_command(cmd, timeout=5, sudo=False)
assert ret_code == "0"
assert data
def test_shell_command_2(self, cli_obj):
"""Non interactive shell command. Read prompt and set prompt.
"""
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip("clinns objects don't have login procedure")
data = cli_obj.open_shell()
# Last line in the login greeting has to be the prompt.
# Wait 3 seconds for it.
# data = cli_obj.shell_read(3)
# Read prompt
prompt = data.split("\n")[-1]
# Set prompt to ssh object
assert prompt
cli_obj.prompt = prompt
# Execute command with ret_code=False
cmd = "stty -a"
data, ret_code = cli_obj.shell_command(cmd, timeout=5, ret_code=False)
# Return code isn't read
assert ret_code is None
assert data
# Check return code manually
cmd = "echo ENV_RET_CODE=$?"
data, _ = cli_obj.shell_command(cmd, timeout=5, ret_code=False)
assert "ENV_RET_CODE=0" in data
def test_shell_command_3(self, cli_obj):
"""Non interactive shell command. Non 0 exit code.
"""
cli_obj.open_shell()
# Execute command that has to exit with non 0 exit code
cmd = "test ! -d /"
data, ret_code = cli_obj.shell_command(cmd, timeout=5, expected_rc=1)
# Return code has to match expected_rc
assert ret_code == "1"
def test_put_file(self, cli_obj, credentials):
"""Copying file to remote host.
"""
if isinstance(cli_obj, clitelnet.TelnetCMD):
pytest.xfail("put_file in not supported by clitelnet objects")
# Test file is test module itself
src = os.path.abspath(__file__)
dst = "/tmp/testfile_for_taf_clissh_put_file_method_unittest_{0}".format(credentials[3])
# Get size of test file in bytes.
fsize = os.path.getsize(src)
# Remove testfile on remote host
rm_command = "rm {0}".format(dst)
_out, _err, _ = cli_obj.exec_command(rm_command, timeout=3)
# Verify that test file doesn't exist on remote host
command = "ls {0}".format(dst)
_out, _err, _ = cli_obj.exec_command(command, timeout=3)
assert "file_method_unittest" not in _out
# Copy file to remote host and verify that it exists
cli_obj.put_file(src, dst)
_out, _err, _ = cli_obj.exec_command(command, timeout=3)
assert "file_method_unittest" in _out
# Verify file size
command = "wc -c {0}".format(dst)
_out, _err, _ = cli_obj.exec_command(command, timeout=3)
r_fsize = _out.split(" ")[0]
assert str(fsize) == r_fsize
# Remove testfile on remote host
_out, _err, _ = cli_obj.exec_command(rm_command, timeout=3)
def test_get_file(self, tmpdir, cli_obj, credentials):
"""Copying file to remote host.
"""
if isinstance(cli_obj, clitelnet.TelnetCMD):
pytest.skip("get_file in not supported by clitelnet objects")
pid = os.getpid()
remote_file = "/tmp/testfile_for_taf_clissh_get_file_method_unittest_{0}_{1}_remote".format(credentials[3], pid)
local_file = tmpdir.join("testfile_for_taf_clissh_get_file_method_unittest_{0}_{1}_local".format(credentials[3], pid))
# Remove local file if it exists
try:
local_file.remove()
except EnvironmentError:
pass
assert not local_file.exists()
# Create testfile on remote host
cli_obj.open_shell()
command = "echo Some test data > {0}".format(remote_file)
_out, _rc = cli_obj.shell_command(command, timeout=3)
# Verify that test file exists on remote host
command = "ls {0}".format(remote_file)
time.sleep(0.3)
_out, _err, _ = cli_obj.exec_command(command, timeout=3)
assert "file_method_unittest" in _out
# Copy file from remote host and verify that it exists locally
cli_obj.get_file(remote_file, str(local_file))
assert local_file.exists()
# Verify file size (the echo command above creates a 15-byte file).
l_fsize = local_file.size()
assert l_fsize == 15
# Remove testfile on remote host
rm_command = "rm {0}".format(remote_file)
_out, _err, _ = cli_obj.exec_command(rm_command, timeout=3)
def test_interactive_command_1(self, cli_obj):
"""Interactive shell command with str actions.
"""
cli_obj.open_shell()
# Execute command
cmd = "python3"
# Add interactive commands
alternatives = []
# First: at the python prompt, send an undefined name to trigger a NameError
alternatives.append((">>>", "a", False, True))
# Second - exit
alternatives.append(("is not defined", "exit()", False, True))
data, ret_code = cli_obj.shell_command(cmd, alternatives=alternatives, timeout=5)
# Verify output
assert ret_code == "0"
# Verify that our commands are in output
assert [s for s in data.split("\n") if ">>> a" in s]
assert [s for s in data.split("\n") if ">>> exit()" in s]
def test_interactive_command_2(self, cli_obj):
"""Interactive shell command with func action.
"""
flag = []
def append_flag():
"""Append mutable object to verify that action is called and called only once.
"""
flag.append(1)
cli_obj.open_shell()
# Execute command
# cmd = "ping -c7 127.0.0.1"
cmd = "python3"
# Add interactive commands
alternatives = []
# At the python prompt, print a marker line ('abcd') and pause so the func action can fire
alternatives.append((">>>", "import time; print('1\\n2\\nabcd\\n'); time.sleep(2)", False, True))
alternatives.append(("abcd", append_flag, False, True))
alternatives.append((">>>", "exit()", False, True))
data, ret_code = cli_obj.shell_command(cmd, alternatives=alternatives, timeout=5)
# Verify output
assert ret_code == "0"
assert flag
def test_send_command(self, cli_obj):
"""Send command without waiting exit.
"""
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip("For clinns objects must be created child object first, then shell_read() can be used")
cli_obj.open_shell()
# Clear shell buffer
time.sleep(1)
cli_obj.shell_read()
# Execute command with ret_code=False
cmd = "ping -c3 127.0.0.1"
cli_obj.send_command(cmd)
# Wait until command is executed
time.sleep(5)
# Verify output
out = cli_obj.shell_read()
# original was icmp_req=3 which is not in the output
# the standard ping output on Linux is icmp_seq=3 so use re to be safe
assert re.search(r"icmp_[rs]eq=3", out)
assert "rtt min/avg/max/mdev" in out
def test_cleared_shell_buffer(self, cli_obj):
"""Cleared buffer after open_shell().
"""
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.skip("For clinns objects open_shell() is not implemented")
cli_obj.open_shell()
# Execute command with ret_code=False
cmd = "ping -c3 127.0.0.1"
cli_obj.send_command(cmd)
# Wait until command is executed
time.sleep(5)
# Verify output
out = cli_obj.shell_read()
# original was icmp_req=3 which is not in the output
# the standard ping output on Linux is icmp_seq=3 so use re to be safe
assert re.search(r"icmp_[rs]eq=3", out)
assert "rtt min/avg/max/mdev" in out
assert "Last login:" not in out
def test_exec_command_timeout_telnet(self, cli_obj):
"""Verify timeout for exec_command.
"""
if isinstance(cli_obj, clinns.CLISSHNetNS) or isinstance(cli_obj, clissh.CLISSH):
pytest.skip("CLISSHException raises only for clitelnet objects")
# The following ping command requires 10s to execute.
cmd = "ping -i1 -c10 127.0.0.1"
# Set timeout to 1s
with pytest.raises(CLIException):
cli_obj.exec_command(cmd, timeout=1)
def test_exec_command_timeout_ssh(self, cli_obj):
"""Verify timeout for exec_command.
"""
if isinstance(cli_obj, clinns.CLISSHNetNS) or isinstance(cli_obj, clitelnet.TelnetCMD):
pytest.skip("CLISSHException raises only for clitelnet and clinns objects")
# The following ping command requires 10s to execute.
cmd = "ping -i1 -c10 127.0.0.1"
# Set timeout to 1s
with pytest.raises(socket.timeout):
cli_obj.exec_command(cmd, timeout=0.5)
def test_shell_command_timeout(self, cli_obj):
"""Verify timeout for shell_command.
"""
cli_obj.open_shell()
# The following ping command requires 5s to execute.
cmd = "ping -i1 -c5 127.0.0.1"
# Set timeout to 1s
with pytest.raises(CLIException):
cli_obj.shell_command(cmd, timeout=1)
def test_quiet_1(self, cli_obj):
"""Verify raising an exception on return code != 0.
"""
cli_obj.open_shell()
# The command has to return exit code 2.
cmd = "ping -l"
with pytest.raises(CLIException):
cli_obj.shell_command(cmd)
def test_quiet_2(self, cli_obj):
"""Check expected_rc parameter.
"""
cli_obj.open_shell()
# The command has to return exit code 2.
cmd = "ping -l"
cli_obj.shell_command(cmd, expected_rc="2")
def test_quiet_3(self, cli_obj, monkeypatch):
"""Verify an exception isn't raised on return code != 0 and default quiet option.
"""
cli_obj.open_shell()
# The command has to return exit code 2.
cmd = "ping -l"
monkeypatch.setattr(cli_obj, "quiet", True)
out, rc = cli_obj.shell_command(cmd)
assert rc == "2"
def test_alter_in_command(self, cli_obj):
"""Verify if prompt present in command it doesn't influence on finding prompt in output data.
"""
cli_obj.open_shell()
cmd = "echo some_test_data"
# Add interactive commands
alternatives = []
# The expected pattern also appears in the command line itself
alternatives.append(("some_test_data", None, True, False))
data, ret_code = cli_obj.shell_command(cmd, alternatives=alternatives, timeout=5)
# Verify output
assert ret_code == "0"
assert len(data.split("\n")) == 2
@pytest.mark.skipif(True, reason="Stupid fails intermittently due to incomplete reads")
def test_send_command_continuous_output(self, cli_obj):
"""Send command without waiting exit and read continuous output.
"""
if isinstance(cli_obj, clinns.CLISSHNetNS):
pytest.xfail("For clinns objects must be created child object first, then shell_read() can be used")
if isinstance(cli_obj, clitelnet.TelnetCMD):
pytest.xfail("For clitelnet objects commands with continuous output must be launched in batch mode")
cli_obj.open_shell()
# Clear shell buffer
time.sleep(1)
cli_obj.shell_read()
# Execute command with ret_code=False
cmd = "top -d1"
cli_obj.send_command(cmd)
# Wait some time while the command is running
time.sleep(2)
# Verify output
out1 = cli_obj.shell_read()
# Verify that output contains top headers
# pytest bug with % in compound assert so split them
# https://bitbucket.org/hpk42/pytest/issue/604/valueerror-unsupported-format-character-in
# the column headers are not re-output because of curses
# only the load average header is re-written for each update
assert "%" "Cpu" in out1
# this fails intermittently, possibly increase the sleep or implement a better
# select poll or read loop. But we don't care right now
assert "users," in out1
assert "load average" in out1
# Save time from top output
regexp = re.compile(r"top - \d\d:\d\d:\d\d")
time1 = regexp.search(out1)
assert time1 is not None
time1 = time1.group()
time.sleep(2)
out2 = cli_obj.shell_read()
open("/tmp/out2", "w").write(out2)
assert "%" "Cpu" in out2
assert "users," in out2
assert "load average" in out2
# But out2 should contain differ data
time2 = regexp.search(out2)
assert time2 is not None
time2 = time2.group()
assert time1 != time2
# Send Ctrl + C
cli_obj.send_command(cli_obj.Raw("\x03"))
out3 = cli_obj.shell_read()
# Verify that there is no top output
assert "%" "Cpu" not in out3
assert "users," not in out3
assert "load average" not in out3
# Verify that there is no top output again
time.sleep(1)
out4 = cli_obj.shell_read()
assert "%" "Cpu" not in out4
assert "users," not in out4
assert "load average" not in out4
@pytest.mark.unittests
class TestCLISSH(object):
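# Throwaway 2048-bit RSA key without a passphrase, used only to exercise private-key parsing below.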
EMPTY_PASSWORD_KEY = """\
-----BEGIN RSA PRIVATE KEY-----
MIIEpgIBAAKCAQEAybpzWXae7rYCORumvBc6f+J77fhZ/WU2fiqLgv62DojfWFqY
92U0Bo8NtynU4NcVwQBrNCCpinMD3JdDcLSXsN70ON5z5FLm1Ms4gvpICei7TegC
FVTEMsa9gfiMygDAOAapLlsZP6v1F/r/zQtsV9Nqm5pTlZ5gF6e/FmlQbg/sF52K
A3sB762eBKfwq9p5/l2XfAELY4ypvGaAS+alVStuop3hhax5D6RUy1hG7IsMfT1x
tFfwqgKqbO0AahjojakTlKZ+VBrGQYb9SUWSEOTN/EdU2wYDK9u08ilSCYW1HbN7
rV4yX4ZZXZBuddll8DRVQIs5fZP1xKQBKfiSZQIDAQABAoIBAQCaIPwrGafbKXNP
YOInCfRna4tWyg8vvVpCUY1gm+5L8qX7ItWHCGsUq85F6Q8+bvevC/vcyyvenXwQ
2f3sKf9QYzjkDosro2+8nDzkTggmkgwyPRcCZ060oQaAPICNgr9azzQKOA51iJPu
K5eweY7hF6Z3lxVP1r8Cs+cbX4HVZLbmva+98476zw9MD+XEd2qkjgldLAmVCsiR
E3H862GU0yk+mReSs0Qz8OYjyWAGXPN9SmMPIv4qZg4qVLw6y17pN+A4aMmL1ThK
h9jPoAsXL+5lpi3T1rHUes1ene9hQLyv46B6TKTiTRPnP2aUeJF7xXS4aZSEVy23
0zTphoatAoGBAPMosVDw+iT8bMzmG5cFHEA2CAd/EdXKkPEvWUCf4uFcqzslImUW
Vq0asuFyOHBCHsVxsA5BmYRsCcQdn8jUuTHM3GAUwf29rmhW584W5Jznv1bByNMQ
Og35zXNYm+CgxWOaD8ayXtAZ+3GgjKA+JbpsIh1wqwrv/q4atiha1CTvAoGBANRh
pnCBFgLt6l5TsnmqB4yUiSC+SIluuz5csoQnCSgb4DBKe6yjDzqXTStHQOBv5l7o
wcuXzziX3rXZ5ym14aU20Dix7H+fjjTDCOT/A4r44PhZBKcC53RnndJ3a283BrAK
s2p4gn0iGPtG24UG9UokDD56vDChED3Bc8a3ngXrAoGBAOJXIpbBeVcsUOp513zA
GQf8Q4UW1zc2k6yt8lqhecNlS06GxnlqTcxcad5JQBfetF398W+TyJ7nIkAXg0Ci
IrEkjI4zRFA5XDtrieLglHUpk4XiZFlzZVbVDFUuSgrSHGsWYVEHgBId3VxrofsX
Xm8lcKwO0Ggh9eOCocT2pzqpAoGBAJFfIekiQqnQpjrYuXKT0sUEKvTRqp7/v4UJ
OFxCx/6/Te5gHVVm65akV/sGs76seZh/Y59zEzFeqt/4/kTLrV9ELLSR/RrCYTl2
QpFUiN1IS91SOWAEGd/QyPN2MICYvqgjOvnm8RKsE0N0FfBxeda84/CkXEpBBPfw
gcoEh1LvAoGBAN0Tv5gSXLkEaUfL6BTeOKr+PxKrdmUfLHSKTTT0MlA/oAig9FmZ
dVcroxqKsqWhQmY4EgXH20IOyNdRX4d8oMyTEA1Xyorq78DVfPQAhE2y6wO0Z8K/
mAvF7/+hRzNa4l25lailJFHR7VgwLPo24xNlWgyjn9T5JNnor8TIimoy
-----END RSA PRIVATE KEY-----
"""
def test_invalid_pkey_raises_SSHException(self, credentials):
ipaddr, username, password, param = credentials
if param == "ssh":
with pytest.raises(paramiko.SSHException):
ssh_conn = clissh.CLISSH(ipaddr, username=username, pkey="")
assert ssh_conn.pkey
def test_pkey(self, credentials):
ipaddr, username, password, param = credentials
if param == "ssh":
ssh_conn = clissh.CLISSH(ipaddr, username=username, pkey=self.EMPTY_PASSWORD_KEY)
assert ssh_conn.pkey
assert ssh_conn.pkey.get_bits() == 2048
def test_probe_port_1(self):
"""Test probe_port function.
"""
assert clissh.probe_port('127.0.0.1', 22, MagicMock()) is True
def test_probe_port_2(self):
"""Test probe_port function negative.
"""
assert clissh.probe_port('8.8.8.8', 8081, MagicMock()) is False
|
|
#!/usr/bin/env python3
"""
ingest.py: source client for Voctomix.
Copyright: 2015-2020 Carl F. Karsten <carl@nextdayvideo.com>,
Ryan Verner <ryan@nextdayvideo.com.au>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
Features:
Retrieves audio and video caps config from core,
as well as this client's config.
Uses core's clock.
Mix and match audio and video sources muxed into one stream.
Can display video locally, including frame count and fps.
Defaults to test audio and video sent to local core.
"""
import argparse
from pprint import pprint
import socket
import sys
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstNet', '1.0')
from gi.repository import Gst, GstNet, GObject, GLib
from lib.connection import Connection
def mk_video_src(args, videocaps):
# make video source part of pipeline
# args come from command line
# videocaps come from voctocore
if args.video_source == 'dv':
video_src = """
dv1394src name=videosrc {attribs} !
dvdemux name=demux !
queue max-size-time=4000000000 !
dvdec !
"""
elif args.video_source == 'hdv':
video_src = """
hdv1394src {attribs} name=videosrc !
tsdemux !
queue max-size-time=4000000000 !
decodebin !
"""
elif args.video_source == 'hdmi2usb':
# https://hdmi2usb.tv
# Note: this code works with 720p
video_src = """
v4l2src {attribs} name=videosrc !
queue max-size-time=4000000000 !
image/jpeg,width=1280,height=720 !
jpegdec !
"""
elif args.video_source == 'ximage':
# startx=0 starty=0 endx=1919 endy=1079 !
video_src = """
ximagesrc {attribs} name=videosrc
use-damage=false !
"""
elif args.video_source == 'blackmagic':
video_src = """
decklinkvideosrc {attribs} name=videosrc !
queue max-size-time=4000000000 max-size-bytes=209715200 !
"""
# yadif !
# deinterlace
elif args.video_source == 'png':
video_src = """
multifilesrc {attribs}
caps="image/png" !
pngdec !
"""
elif args.video_source == 'file':
video_src = """
multifilesrc {attribs} !
decodebin name=src
src. !
queue !
"""
elif args.video_source == 'test':
video_src = """
videotestsrc name=videosrc {attribs} !
"""
# things to render as text on top of the test video
video_src += """
clockoverlay
text="Source: {hostname}\nCaps: {videocaps}\nAttribs: {attribs}\n"
halignment=left line-alignment=left !
""".format(hostname=socket.gethostname(),
videocaps=videocaps,
attribs=args.video_attribs)
elif args.video_source == 'spacescope':
# Stereo visualizer
# pair up with test beep for a handy AV sync test.
video_src = """
audio_tee. ! queue !
spacescope shader=none style=lines {attribs} !
"""
if args.monitor:
if args.debug:
videosink="fpsdisplaysink"
else:
videosink="autovideosink"
video_src += """
tee name=t ! queue !
videoconvert ! {videosink} sync=false
t. ! queue !
""".format(videosink=videosink)
if args.video_elements:
video_src += args.video_elements + " !\n"
video_src += videocaps + " !\n"
video_src = video_src.format(attribs=args.video_attribs)
return video_src
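# make audio source part of pipeline
# attribs come from the command line, audiocaps from voctocore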
def mk_audio_src(args, audiocaps):
d = {
'attribs': args.audio_attribs,
'base_audio_attribs':
'provide-clock=false slave-method=re-timestamp',
'audiocaps': audiocaps,
}
if args.audio_source in ['dv', 'hdv']:
# this only works if video is also from DV
# (or some other gst source that gets demuxed)
audio_src = """
demux.audio !
queue !
"""
elif args.audio_source == 'file':
# this only works if video comes from some gst source that gets demuxed as well
audio_src = """
src. !
queue !
"""
elif args.audio_source == 'pulse':
audio_src = """
pulsesrc {attribs} {base_audio_attribs} name=audiosrc !
queue max-size-time=4000000000 ! audiorate !
"""
elif args.audio_source == 'alsa':
audio_src = """
alsasrc {attribs} {base_audio_attribs} name=audiosrc !
queue max-size-time=4000000000 ! audiorate !
"""
elif args.audio_source == 'blackmagic':
audio_src = """
decklinkaudiosrc name=audiosrc {attribs} !
queue max-size-time=4000000000 !
"""
elif args.audio_source == 'test':
audio_src = """
audiotestsrc wave=ticks freq=330 {attribs} name=audiosrc !
"""
audio_src += """
{audiocaps} !
tee name=audio_tee
audio_tee. ! queue !
"""
audio_src = audio_src.format(**d)
return audio_src
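# sink end of the pipeline: ship the muxed stream to voctocore over TCP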
def mk_client(core_ip, port):
client = "tcpclientsink host={host} port={port}".format(
host=core_ip, port=port)
return client
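# assemble the full pipeline string: A/V sources muxed into matroska and sent to core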
def mk_pipeline(args, server_caps, core_ip):
if args.src:
src = args.src.format(**server_caps)
else:
video_src = mk_video_src(args, server_caps['videocaps'])
audio_src = mk_audio_src(args, server_caps['audiocaps'])
src = """
{video_src}
mux.
{audio_src}
mux.
matroskamux name=mux !
""".format(video_src=video_src, audio_src=audio_src)
client = mk_client(core_ip, args.port)
pipeline = """
{src}
{client}
""".format(src=src, client=client)
# remove blank lines to make it more human readable
while "\n\n" in pipeline:
pipeline = pipeline.replace("\n\n", "\n")
print(pipeline)
if args.debug:
# print something to run in a shell
gst_cmd = "gst-launch-1.0 {}".format(pipeline)
# escape the ! because bash
# asl2: ! is interpreted as a command history metacharacter
gst_cmd = gst_cmd.replace("!", " \! ")
# remove all the \n to make it easy to cut/paste into shell
gst_cmd = gst_cmd.replace("\n", " ")
while " " in gst_cmd:
gst_cmd = gst_cmd.replace(" ", " ")
print("-"*78)
print(gst_cmd)
print("-"*78)
return pipeline
def get_server_conf(core_ip, source_id, args):
# establish a synchronous connection to server
conn = Connection(core_ip)
# fetch config from server
server_config = conn.fetch_config()
# Pull out the configs relevant to this client
server_conf = {
'videocaps': server_config['mix']['videocaps'],
'audiocaps': server_config['mix']['audiocaps']
}
if source_id is not None:
# get conf from server for this source,
d=server_config[source_id]
if args.debug:
pprint(d)
# stomp all over command line values
# this is backwards: command line should override conf file.
for k in d:
if args.debug:
print('--{}="{}"'.format(k,d[k]))
# python argparse converts a-b to a_b, so we will too.
args.__setattr__(k.replace("-", "_"),d[k])
return server_conf, args
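# slave this sender's pipeline clock to voctocore's network clock so all sources stay in sync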
def get_clock(core_ip, core_clock_port=9998):
clock = GstNet.NetClientClock.new(
'voctocore', core_ip, core_clock_port, 0)
print('obtained NetClientClock from host: {ip}:{port}'.format(
ip=core_ip, port=core_clock_port))
print('waiting for NetClientClock to sync...')
clock.wait_for_sync(Gst.CLOCK_TIME_NONE)
print('synced with NetClientClock.')
return clock
def run_pipeline(pipeline, clock, audio_delay=0, video_delay=0):
def on_eos(bus, message):
print('Received EOS-Signal')
sys.exit(0)
def on_error(bus, message):
print('Received Error-Signal')
(error, debug) = message.parse_error()
print('Error-Details: #%u: %s' % (error.code, debug))
sys.exit(2)
print('starting pipeline...')
senderPipeline = Gst.parse_launch(pipeline)
if clock is not None:
senderPipeline.use_clock(clock)
# Delay video/audio if required
MS_TO_NS = 1000000  # 1 ms = 1,000,000 ns; pad offsets are in nanoseconds
if video_delay > 0:
print('Adjusting video sync: [{} milliseconds]'.format(video_delay))
video_delay = video_delay * MS_TO_NS
videosrc = senderPipeline.get_by_name('videosrc')
videosrc.get_static_pad('src').set_offset(video_delay)
if audio_delay > 0:
print('Adjusting audio sync: [{} milliseconds]'.format(audio_delay))
audio_delay = audio_delay * MS_TO_NS
audiosrc = senderPipeline.get_by_name('audiosrc')
audiosrc.get_static_pad('src').set_offset(audio_delay)
# Bind End-of-Stream and Error signals on the source pipeline
senderPipeline.bus.add_signal_watch()
senderPipeline.bus.connect("message::eos", on_eos)
senderPipeline.bus.connect("message::error", on_error)
print("playing...")
senderPipeline.set_state(Gst.State.PLAYING)
mainloop = GLib.MainLoop()
try:
mainloop.run()
except KeyboardInterrupt:
print('Terminated via Ctrl-C')
print('Shutting down...')
senderPipeline.set_state(Gst.State.NULL)
print('Done.')
return
def get_args():
parser = argparse.ArgumentParser(
description='''Vocto-ingest Client with Net-time support.
Gst caps are retrieved from the server.
Run without parameters: send test av to localhost:10000
''')
parser.add_argument(
'-v', '--verbose', action='count', default=0,
help="Also print INFO and DEBUG messages.")
parser.add_argument('--source-id', action='store',
help="get config from server using this id.")
parser.add_argument(
'--src', action='store', default='',
help="gst source pipeline")
parser.add_argument(
'--video-source', action='store',
choices=[
'dv', 'hdv', 'udp_h264', 'hdmi2usb', 'blackmagic',
'ximage', 'png', 'file', 'test', 'spacescope'],
default='test',
help="Where to get video from")
parser.add_argument(
'--video-attribs', action='store', default='',
help="misc video attributes for gst")
parser.add_argument(
'--video-delay', action='store',
default=0,
type=int,
help="delay video by this many milliseconds")
parser.add_argument(
'--video-elements', action='store',
default='videoconvert ! videorate ! videoscale',
help="gst video elments ! after src")
parser.add_argument(
'--audio-source', action='store',
choices=['dv', 'hdv', 'file',
'alsa', 'pulse', 'blackmagic', 'test', ],
default='test',
help="Where to get audio from")
parser.add_argument(
'--audio-attribs', action='store',
default='',
help="misc audio attributes for gst")
parser.add_argument(
'--audio-delay', action='store',
default=0,
type=int,
help="delay audio by this many milliseconds")
parser.add_argument(
'--audio-elements', action='store',
default="audioconvert ! audioresample ! audiorate",
help="gst audio elments ! after src")
parser.add_argument(
'-m', '--monitor', action='store_true',
help="local display sink")
parser.add_argument(
'--host', action='store',
default='localhost',
help="hostname of vocto core")
parser.add_argument(
'--port', action='store',
default='10000',
help="port of vocto core")
parser.add_argument(
'--no-clock', action='store_true',
help="Don't use core's clock. (danger)")
parser.add_argument(
'--debug', action='store_true',
help="debugging things, like dump a gst-launch-1.0 command")
args = parser.parse_args()
return args
def main():
GObject.threads_init()
Gst.init([])
args = get_args()
core_ip = socket.gethostbyname(args.host)
server_caps, args = get_server_conf(core_ip, args.source_id, args)
pipeline = mk_pipeline(args, server_caps, core_ip)
if args.no_clock:
clock = None
else:
clock = get_clock(core_ip)
run_pipeline(pipeline, clock, args.audio_delay, args.video_delay)
if __name__ == '__main__':
main()
|