gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import numpy as np
import scipy.signal
import cv2
import math
import part0
def blend(lapl_pyr_white, lapl_pyr_black, gauss_pyr_mask):
    '''Blend the two laplacian pyramids by weighting them with the gaussian mask.

    lapl_pyr_white - a laplacian pyramid of one image.
    lapl_pyr_black - a laplacian pyramid of another image.
    gauss_pyr_mask - a gaussian pyramid of the mask; every value is in [0, 1].

    All three pyramids have the same number of levels and matching layer
    shapes. Returns a laplacian pyramid of the same dimensions where each
    layer is a per-pixel alpha blend of the corresponding input layers:
    pixels where the mask is 1 come entirely from the white image, pixels
    where the mask is 0 come entirely from the black image.
    '''
    blended_pyr = []
    for white, black, mask in zip(lapl_pyr_white, lapl_pyr_black, gauss_pyr_mask):
        # mask weights the white layer; its complement weights the black layer.
        blended_pyr.append(mask * white + (1.0 - mask) * black)
    return blended_pyr
def collapse(lapl_pyr):
    '''Reconstruct the image from its laplacian pyramid.

    lapl_pyr - a laplacian pyramid, as constructed by the lapl_pyramid
    function or returned by the blend function.

    Returns an image of the same shape as the base layer of the pyramid,
    dtype float. Starting from the smallest layer, each layer is expanded
    (upsample by two, then smooth) and added to the next larger layer.
    When the expanded image is larger than the next layer (e.g. 3x4
    expands to 6x8 but the next layer is 5x7) it is cropped to fit.

    NOTE(review): the expand step below uses the standard 5-tap binomial
    generating kernel (a = 0.4) scaled by 4 to preserve brightness after
    zero-upsampling - confirm it matches the expand used by lapl_pyramid.
    '''
    kernel_1d = np.array([0.05, 0.25, 0.4, 0.25, 0.05])
    kernel = 4.0 * np.outer(kernel_1d, kernel_1d)
    output = lapl_pyr[-1].astype(float)
    # Walk from the second-smallest layer up to the base layer.
    for layer in lapl_pyr[-2::-1]:
        expanded = np.zeros((2 * output.shape[0], 2 * output.shape[1]))
        expanded[::2, ::2] = output
        expanded = scipy.signal.convolve2d(expanded, kernel, 'same')
        # Crop in case the doubled size overshoots the next layer.
        output = expanded[:layer.shape[0], :layer.shape[1]] + layer
    return output
def test():
    '''Unit-test blend() and collapse() against hand-computed fixtures and
    print diagnostics when run as a script.
    '''
    # --- fixtures: laplacian pyramids, gaussian mask pyramids, and the
    # --- expected blended pyramids / collapsed images for two cases.
    lapl_pyr11 = [np.array([[0., 0., 0., 0.],
                            [0., 0., 0., 0.],
                            [0., 0., 0., 0.]]),
                  np.array([[0., 0.],
                            [0., 0.]])]
    lapl_pyr12 = [np.array([[149.77, 122.46, 121.66, 178.69],
                            [138.08, 107.74, 106.84, 170.21],
                            [149.77, 122.46, 121.66, 178.69]]),
                  np.array([[124.95, 169.58],
                            [124.95, 169.57]])]
    lapl_pyr21 = [np.array([[149., 118.4, 99.2, 94.3, 99.2, 118.4, 149.],
                            [137.2, 103.3, 81.9, 76.5, 81.9, 103.3, 137.2],
                            [148.1, 117.4, 97.9, 93.1, 97.9, 117.4, 148.1],
                            [-63.1, -81.3, -92.8, -95.6, -92.8, -81.3, -63.1],
                            [-18.5, -23.8, -27.2, -28., -27.2, -23.8, -18.5]]),
                  np.array([[70.4, 107.1, 104.5, 82.3],
                            [76.7, 115.4, 113.1, 87.3],
                            [-23.3, -29.4, -31., -16.3]]),
                  np.array([[67.7, 100.3],
                            [34., 50.4]])]
    lapl_pyr22 = [np.array([[-5., -25.2, -56.4, 149.8, 110.3, 116.2, 144.8],
                            [-6.5, -32.5, -72.6, 119.5, 68.6, 76.2, 113.],
                            [-7.2, -36., -80.3, 105.2, 48.9, 57.2, 98.],
                            [-6.5, -32.5, -72.6, 119.5, 68.6, 76.2, 113.],
                            [-5., -25.2, -56.4, 149.8, 110.3, 116.2, 144.8]]),
                  np.array([[-20.9, 4.8, 102.6, 84.1],
                            [-23.2, 22.3, 167.9, 133.1],
                            [-20.9, 4.8, 102.6, 84.1]]),
                  np.array([[17.6, 90.8],
                            [17.6, 90.8]])]
    mask_pyr1 = [np.array([[0., 0., 1., 1.],
                           [0., 0., 1., 1.],
                           [0., 0., 1., 1.]]),
                 np.array([[0.03, 0.46],
                           [0.03, 0.46]])]
    mask_pyr2 = [np.array([[0., 0., 0., 0., 1., 1., 1.],
                           [0., 0., 0., 0., 1., 1., 1.],
                           [0., 0., 0., 0., 1., 1., 1.],
                           [0., 0., 0., 0., 1., 1., 1.],
                           [0., 0., 0., 0., 1., 1., 1.]]),
                 np.array([[0., 0., 0.5, 0.5],
                           [0., 0., 0.7, 0.7],
                           [0., 0., 0.5, 0.5]]),
                 np.array([[0., 0.3],
                           [0., 0.3]])]
    out_pyr1 = [np.array([[149.77, 122.46, 0., 0.],
                          [138.08, 107.74, 0., 0.],
                          [149.77, 122.46, 0., 0.]]),
                np.array([[120.58, 92.42],
                          [120.58, 92.42]])]
    out_pyr2 = [np.array([[-5., -25.2, -56.4, 149.8, 99.2, 118.4, 149.],
                          [-6.5, -32.5, -72.6, 119.5, 81.9, 103.3, 137.2],
                          [-7.2, -36., -80.3, 105.2, 97.9, 117.4, 148.1],
                          [-6.5, -32.5, -72.6, 119.5, -92.8, -81.3, -63.1],
                          [-5., -25.2, -56.4, 149.8, -27.2, -23.8, -18.5]]),
                np.array([[-20.9, 4.8, 103.5, 83.2],
                          [-23.2, 22.3, 129.5, 101.],
                          [-20.9, 4.8, 35.8, 33.9]]),
                np.array([[17.6, 93.6],
                          [17.6, 78.7]])]
    outimg1 = np.array([[244.91, 218.31, 77.39, 41.59],
                        [243.79, 214.24, 85.99, 46.21],
                        [244.91, 218.31, 77.39, 41.59]])
    outimg2 = np.array([[0.1, 0.1, -0.1, 253.7, 241.3, 254., 256.],
                        [-0.3, -0.5, -2.7, 244.4, 250.3, 263.3, 263.2],
                        [-0.6, -1.4, -6., 233.4, 267.8, 278.2, 274.6],
                        [-0.9, -2.1, -8.7, 224.1, 42.2, 46.1, 37.3],
                        [-1., -2.4, -9.6, 221.2, 61.5, 59.5, 47.5]])
    # NOTE(review): the dump of this file lost its indentation; the nesting
    # below (diagnostics printed only under __main__, failures returning
    # False) is reconstructed from the visible guard/print/return pattern.
    if __name__ == "__main__":
        print('Evaluating blend.')
    for left_pyr, right_pyr, mask_pyr, out_pyr in ((lapl_pyr11, lapl_pyr12, mask_pyr1, out_pyr1),
                                                   (lapl_pyr21, lapl_pyr22, mask_pyr2, out_pyr2)):
        usr_out = blend(left_pyr, right_pyr, mask_pyr)
        if not type(usr_out) == type(out_pyr):
            if __name__ == "__main__":
                print("Error- output layer has type {}. Expected type is {}.".format(
                    type(usr_out), type(out_pyr)))
            return False
        if not len(usr_out) == len(out_pyr):
            if __name__ == "__main__":
                print("Error- blend out has len {}. Expected len is {}.".format(
                    len(usr_out), len(out_pyr)))
            return False
        for usr_layer, true_layer, left_layer, right_layer, mask_layer in zip(usr_out, out_pyr,
                                                                              left_pyr, right_pyr, mask_pyr):
            if not type(usr_layer) == type(true_layer):
                if __name__ == "__main__":
                    print("Error- blend out has type {}. Expected type is {}.".format(
                        type(usr_layer), type(true_layer)))
                return False
            if not usr_layer.shape == true_layer.shape:
                if __name__ == "__main__":
                    print("Error- blend output layer has shape {}. Expected shape is {}.".format(
                        usr_layer.shape, true_layer.shape))
                return False
            if not usr_layer.dtype == true_layer.dtype:
                if __name__ == "__main__":
                    print("Error- blend output layer has dtype {}. Expected dtype is {}.".format(
                        usr_layer.dtype, true_layer.dtype))
                return False
            if not np.all(np.abs(usr_layer - true_layer) < 1):
                if __name__ == "__main__":
                    print("Error- blend output layer has value:\n{}\nExpected value:\n{}\nInput left:\n{}\nInput right:\n{}\nInput mask:\n{}".format(
                        usr_layer, true_layer, left_layer, right_layer, mask_layer))
                return False
    if __name__ == "__main__":
        print("blend passed.\n")
        print("Evaluating collapse.")
    for pyr, img in ((out_pyr1, outimg1), (out_pyr2, outimg2)):
        if __name__ == "__main__":
            print("input:\n{}".format(pyr))
        usr_out = collapse(pyr)
        if not type(usr_out) == type(img):
            if __name__ == "__main__":
                print("Error- collapse out has type {}. Expected type is {}.".format(
                    type(usr_out), type(img)))
            return False
        if not usr_out.shape == img.shape:
            if __name__ == "__main__":
                print("Error- collapse out has shape {}. Expected shape is {}.".format(
                    usr_out.shape, img.shape))
            return False
        if not usr_out.dtype == img.dtype:
            if __name__ == "__main__":
                print("Error- collapse out has dtype {}. Expected dtype is {}.".format(
                    usr_out.dtype, img.dtype))
            return False
        if not np.all(np.abs(usr_out - img) < 1):
            if __name__ == "__main__":
                print("Error- collapse out has value:\n{}\nExpected value:\n{}".format(
                    usr_out, img))
            return False
    if __name__ == "__main__":
        print("collapse passed.")
    if __name__ == "__main__":
        print("All unit tests successful.")
    return True

if __name__ == "__main__":
    print("Performing unit tests. Your functions will be accepted if your result is\
 within 2 of the correct output.")
    np.set_printoptions(precision=1, suppress=True)
    test()
| |
import numpy as np
from scipy.fftpack import fft, ifft, fftshift
__all__ = ['cwt', 'ccwt', 'icwt', 'SDG', 'Morlet']
class MotherWavelet(object):
    """Base class for mother wavelets.

    Subclasses must implement `get_coefs` and set `coi_coef`, `scales`,
    `len_signal`, and `coefs`; the cone-of-influence helpers defined here
    then work unchanged. Used by the cwt-related functions in this module.
    """

    def get_coefs(self):
        """Compute the mother wavelet coefficients.

        Must be overridden by subclasses.

        FIX: this was declared @staticmethod while still taking `self`,
        which is contradictory; subclasses override and call it as a plain
        instance method, so it is one now.
        """
        raise NotImplementedError('get_coefs needs to be implemented for the mother wavelet')

    @staticmethod
    def get_coi_coef(sampf):
        """Raise error if the Cone of Influence coefficient is not set in
        the subclass wavelet.

        To follow the convention in the literature, define the COI coef as
        a function of period, not scale - this ensures compatibility with
        the scalogram method.
        """
        raise NotImplementedError('coi_coef needs to be implemented in subclass wavelet')

    def get_coi(self):
        """Compute the cone of influence (a rising then falling ramp over
        the signal length). Caches the result on self.coi.
        """
        y1 = self.coi_coef * np.arange(0, self.len_signal / 2)
        y2 = -self.coi_coef * np.arange(0, self.len_signal / 2) + y1[-1]
        coi = np.r_[y1, y2]
        self.coi = coi
        return coi

    def get_mask(self):
        """Get the mask for the cone of influence.

        Sets self.mask as an array of bools (True inside the COI) for use
        in np.ma.array('', mask=mask), and returns it.
        """
        mask = np.ones(self.coefs.shape)
        masks = self.coi_coef * self.scales
        for s in range(0, len(self.scales)):
            # BUGFIX: round up *before* truncating. The original computed
            # np.ceil(int(masks[s])), where int() had already discarded the
            # fraction, making ceil a no-op - note the guard condition
            # already used the correct int(np.ceil(...)) order.
            width = int(np.ceil(masks[s]))
            if (s != 0) and (width < mask.shape[1]):
                mask[s, width:-width] = 0
        self.mask = mask.astype(bool)
        return self.mask
class SDG(MotherWavelet):
    """Second Derivative of a Gaussian ("Mexican hat") mother wavelet
    (a subclass of MotherWavelet).

    SDG(len_signal=None, pad_to=None, scales=None, sampf=1,
        normalize=True, fc='bandpass')

    Parameters
    ----------
    len_signal : int
        Length of the time series to be decomposed.
    pad_to : int
        Pad the time series to this total length with zeros (used during
        the FFT convolution in cwt); defaults to no padding.
    scales : array
        Array of scales used to dilate the mother wavelet.
    sampf : float
        Sample frequency of the time series to be decomposed.
    normalize : bool
        If True, use the unit-energy (normalized) version of the wavelet.
    fc : string
        Use the 'bandpass' or 'center' frequency of the Fourier spectrum
        of the wavelet to relate scale to period (default 'bandpass').

    Returns
    -------
    An instance usable by the cwt and icwt functions.

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.
    Taylor and Francis Group, New York/London. 353 pp.
    """

    def __init__(self, len_signal=None, pad_to=None, scales=None, sampf=1,
                 normalize=True, fc='bandpass'):
        """Initialize the SDG mother wavelet and precompute its coefficients."""
        self.name = 'second degree of a Gaussian (mexican hat)'
        self.sampf = sampf
        self.scales = scales
        self.len_signal = len_signal
        self.normalize = normalize
        # Total wavelet length accounts for optional zero padding.
        if pad_to is None:
            self.len_wavelet = len_signal
        else:
            self.len_wavelet = pad_to
        # Admissibility constant (analytic for the SDG).
        if normalize:
            self.cg = 4 * np.sqrt(np.pi) / 3.
        else:
            self.cg = np.pi
        # Characteristic frequency. BUGFIX: compare strings with `==`; the
        # original used `is`, which only works by CPython interning accident.
        if fc == 'bandpass':
            self.fc = np.sqrt(5. / 2.) * self.sampf / (2 * np.pi)
        elif fc == 'center':
            self.fc = np.sqrt(2.) * self.sampf / (2 * np.pi)
        else:
            # BUGFIX: CharacteristicFrequencyError was never defined and the
            # raise itself would have been a NameError; raise ValueError.
            raise ValueError("fc = %s not defined" % (fc,))
        # coi_coef defined under the assumption that period is used, not
        # scale - Torrence and Compo 1998.
        self.coi_coef = 2 * np.pi * np.sqrt(2. / 5.) * self.fc
        # Compute coefficients for the dilated mother wavelet.
        self.coefs = self.get_coefs()

    def get_coefs(self):
        """Calculate the SDG coefficients at each scale (one row per scale)."""
        # Points at which the wavelet function is evaluated.
        xi = np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.)
        xsd = -xi * xi / (self.scales[:, np.newaxis] * self.scales[:, np.newaxis])
        if self.normalize is True:
            c = 2. / (np.sqrt(3) * np.power(np.pi, 0.25))
        else:
            c = 1.
        mw = c * (1. + xsd) * np.exp(xsd / 2.)
        self.coefs = mw
        return mw
class Morlet(MotherWavelet):
    """Morlet mother wavelet (a subclass of MotherWavelet).

    Morlet(len_signal=None, pad_to=None, scales=None, sampf=1, f0=0.849)

    Parameters
    ----------
    len_signal : int
        Length of the time series to be decomposed.
    pad_to : int
        Pad the time series to this total length with zeros (used during
        the FFT convolution in cwt); defaults to no padding.
    scales : array
        Array of scales used to dilate the mother wavelet.
    sampf : float
        Sample frequency of the time series to be decomposed.
    f0 : float
        Central frequency of the Morlet mother wavelet. Its Fourier
        spectrum is a Gaussian centered on f0; defaults to 0.849
        (angular frequency ~5.336).

    Notes
    -----
    * The Morlet wavelet is defined with unit energy, so `normalize` is
      forced to True.
    * f0 doubles as the characteristic frequency fc.

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.
    Taylor and Francis Group, New York/London. 353 pp.
    """

    def __init__(self, len_signal=None, pad_to=None, scales=None, sampf=1,
                 normalize=True, f0=0.849):
        """Initialize the Morlet mother wavelet and precompute coefficients."""
        from scipy.integrate import quad
        self.sampf = sampf
        self.scales = scales
        self.len_signal = len_signal
        # Morlet is defined with unit energy; normalization is always on.
        self.normalize = True
        self.name = 'Morlet'
        # Total wavelet length accounts for optional zero padding.
        if pad_to is None:
            self.len_wavelet = len_signal
        else:
            self.len_wavelet = pad_to
        # f0 is also the characteristic frequency.
        self.fc = f0
        # Cone of influence coefficient - Torrence and Compo 1998 (in code).
        self.coi_coef = 2. * self.sampf / (self.fc + np.sqrt(2. + self.fc**2) *
                                           np.sqrt(2))
        # Admissibility constant, from the simplified Morlet wavelet energy
        # spectrum in Addison (2002), eqn (2.39) - should be ok for f0 > 0.84.
        # BUGFIX: `Inf` is no longer importable from scipy.integrate; use
        # np.inf for the integration limits.
        self.cg = quad(lambda x: 2. * np.sqrt(np.pi) * np.exp(-np.power((2. *
                       np.pi * x - 2. * np.pi * f0), 2)), -np.inf, np.inf)[0]
        # Compute coefficients for the dilated mother wavelet.
        self.coefs = self.get_coefs()

    def get_coefs(self):
        """Calculate the Morlet coefficients at each scale (one row per scale)."""
        # Points at which the wavelet function is evaluated.
        xi = np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.)
        xsd = xi / (self.scales[:, np.newaxis])
        # BUGFIX: np.complex() was removed from NumPy; the literal 1j is the
        # direct equivalent of np.complex(1j).
        mw = np.power(np.pi, -0.25) * \
            (np.exp(1j * 2. * np.pi * self.fc * xsd) -
             np.exp(-np.power((2. * np.pi * self.fc), 2) / 2.)) * \
            np.exp(-np.power(xsd, 2) / 2.)
        self.coefs = mw
        return mw
class Wavelet(object):
    """Container for the result of a continuous wavelet transform.

    Holds the wavelet coefficients together with the mother wavelet object
    and weighting function that produced them, plus spectrum helpers and a
    scalogram plotting routine.
    """

    def __init__(self, wt, wavelet, weighting_function, signal_dtype, deep_copy=True):
        """Initialize a Wavelet object.

        Parameters
        ----------
        wt : array
            Array of wavelet coefficients.
        wavelet : object
            Mother wavelet object used in the creation of `wt`.
        weighting_function : function
            Function used in the creation of `wt`.
        signal_dtype : dtype
            dtype of the signal used in the creation of `wt`.
        deep_copy : bool
            If True (default), fully copy the mother wavelet object into
            self.motherwavelet; if False, keep a reference (saves memory
            but mutations to the mother wavelet become visible here).
        """
        from copy import deepcopy
        # Keep only the in-signal coefficients; stash any zero-padding tail
        # separately so icwt can reassemble the full transform.
        self.coefs = wt[:, 0:wavelet.len_signal]
        if wavelet.len_signal != wavelet.len_wavelet:
            self._pad_coefs = wt[:, wavelet.len_signal:]
        else:
            self._pad_coefs = None
        if deep_copy:
            self.motherwavelet = deepcopy(wavelet)
        else:
            self.motherwavelet = wavelet
        self.weighting_function = weighting_function
        self._signal_dtype = signal_dtype

    def get_gws(self):
        """Calculate the Global Wavelet Spectrum (alias of the wavelet
        variance).

        References
        ----------
        Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
        Analysis. Bulletin of the American Meteorological Society, 79, 1,
        pp. 61-78.
        """
        gws = self.get_wavelet_var()
        return gws

    def get_wes(self):
        """Calculate the Wavelet Energy Spectrum (per-scale integral of
        squared coefficient magnitude).

        References
        ----------
        Torrence and Compo 1998 (see get_gws).
        """
        from scipy.integrate import trapz
        coef = 1. / (self.motherwavelet.fc * self.motherwavelet.cg)
        wes = coef * trapz(np.power(np.abs(self.coefs), 2), axis=1)
        return wes

    def get_wps(self):
        """Calculate the Wavelet Power Spectrum (energy spectrum per sample).

        References
        ----------
        Torrence and Compo 1998 (see get_gws).
        """
        wps = (1. / self.motherwavelet.len_signal) * self.get_wes()
        return wps

    def get_wavelet_var(self):
        """Calculate the Wavelet Variance (a.k.a. the Global Wavelet
        Spectrum of Torrence and Compo (1998)).

        References
        ----------
        Torrence and Compo 1998 (see get_gws).
        """
        coef = self.motherwavelet.cg * self.motherwavelet.fc
        wvar = (coef / self.motherwavelet.len_signal) * self.get_wes()
        return wvar

    def scalogram(self, show_coi=False, show_wps=False, ts=None, time=None,
                  use_period=True, ylog_base=None, xlog_base=None,
                  origin='top', figname=None):
        """Scalogram plotting routine.

        Creates a simple scalogram, with optional wavelet power spectrum
        and time series plots of the transformed signal.

        Parameters
        ----------
        show_coi : bool
            Set to True to see the Cone of Influence.
        show_wps : bool
            Set to True to see the Wavelet Power Spectrum.
        ts : array
            1D time series used in the transform; plotted when given.
        time : array of datetime objects
            1D array containing time information.
        use_period : bool
            Set to True to label axes with period instead of scale.
        ylog_base : float
            Base for a log-scaled y axis (e.g. 10), or None for linear.
        xlog_base : float
            Base for a log-scaled x axis of the wavelet power spectrum
            figure only, or None for linear.
        origin : 'top' or 'bottom'
            Put the origin of the scale axis at the top or bottom.
        figname : str
            If given, save the figure to this path instead of showing it.

        Returns
        -------
        None
        """
        import matplotlib.pyplot as plt
        import matplotlib.cm as cm
        # NOTE(review): poly_between was removed from pylab in modern
        # matplotlib; only needed when show_coi is True.
        from pylab import poly_between
        if ts is not None:
            show_ts = True
        else:
            show_ts = False
        # Choose the subplot grid for the requested combination of panels.
        if not show_wps and not show_ts:
            # only show scalogram
            figrow = 1
            figcol = 1
        elif show_wps and not show_ts:
            # show scalogram and wps
            figrow = 1
            figcol = 4
        elif not show_wps and show_ts:
            # show scalogram and ts
            figrow = 2
            figcol = 1
        else:
            # show scalogram, wps, and ts
            figrow = 2
            figcol = 4
        if time is None:
            x = np.arange(self.motherwavelet.len_signal)
        else:
            x = time
        if use_period:
            y = self.motherwavelet.scales / self.motherwavelet.fc
        else:
            y = self.motherwavelet.scales
        fig = plt.figure(figsize=(16, 12), dpi=160)
        ax1 = fig.add_subplot(figrow, figcol, 1)
        # If showing wps, give 3/4 of the width to the scalogram, 1/4 to wps.
        if show_wps:
            # create temp axis at 3 or 4 col of row 1
            axt = fig.add_subplot(figrow, figcol, 3)
            # get location of axtmp and ax1
            axt_pos = axt.get_position()
            ax1_pos = ax1.get_position()
            axt_points = axt_pos.get_points()
            ax1_points = ax1_pos.get_points()
            # set axt_pos left bound to that of ax1
            axt_points[0][0] = ax1_points[0][0]
            ax1.set_position(axt_pos)
            fig.delaxes(axt)
        if show_coi:
            # coi_coef is defined using the assumption that you are using
            # period, not scale, in plotting - this handles that behavior
            if use_period:
                coi = self.motherwavelet.get_coi() / self.motherwavelet.fc / self.motherwavelet.sampf
            else:
                coi = self.motherwavelet.get_coi()
            coi[coi == 0] = y.min() - 0.1 * y.min()
            xs, ys = poly_between(np.arange(0, len(coi)), np.max(y), coi)
            ax1.fill(xs, ys, 'k', alpha=0.4, zorder=2)
        contf = ax1.contourf(x, y, np.abs(self.coefs)**2)
        fig.colorbar(contf, ax=ax1, orientation='vertical', format='%2.1f')
        if ylog_base is not None:
            ax1.axes.set_yscale('log', basey=ylog_base)
        # BUGFIX: compare strings with `==`, not `is` (identity).
        if origin == 'top':
            ax1.set_ylim((y[-1], y[0]))
        elif origin == 'bottom':
            ax1.set_ylim((y[0], y[-1]))
        else:
            raise OriginError('`origin` must be set to "top" or "bottom"')
        ax1.set_xlim((x[0], x[-1]))
        ax1.set_title('scalogram')
        # (removed a dead set_ylabel('time') call that was always
        # overwritten by one of the branches below)
        if use_period:
            ax1.set_ylabel('period')
            ax1.set_xlabel('time')
        else:
            ax1.set_ylabel('scales')
            if time is not None:
                ax1.set_xlabel('time')
            else:
                ax1.set_xlabel('sample')
        if show_wps:
            ax2 = fig.add_subplot(figrow, figcol, 4, sharey=ax1)
            if use_period:
                ax2.plot(self.get_wps(), y, 'k')
            else:
                ax2.plot(self.motherwavelet.fc * self.get_wps(), y, 'k')
            if ylog_base is not None:
                ax2.axes.set_yscale('log', basey=ylog_base)
            if xlog_base is not None:
                # BUGFIX: the x-axis keyword is basex; the original passed
                # basey to set_xscale, which raises.
                ax2.axes.set_xscale('log', basex=xlog_base)
            if origin == 'top':
                ax2.set_ylim((y[-1], y[0]))
            else:
                ax2.set_ylim((y[0], y[-1]))
            if use_period:
                ax2.set_ylabel('period')
            else:
                ax2.set_ylabel('scales')
            ax2.grid()
            ax2.set_title('wavelet power spectrum')
        if show_ts:
            ax3 = fig.add_subplot(figrow, 2, 3, sharex=ax1)
            ax3.plot(x, ts)
            ax3.set_xlim((x[0], x[-1]))
            ax3.legend(['time series'])
            ax3.grid()
            # align time series fig with scalogram fig
            t = ax3.get_position()
            ax3pos = t.get_points()
            ax3pos[1][0] = ax1.get_position().get_points()[1][0]
            t.set_points(ax3pos)
            ax3.set_position(t)
            if (time is not None) or use_period:
                ax3.set_xlabel('time')
            else:
                ax3.set_xlabel('sample')
        if figname is None:
            plt.show()
        else:
            plt.savefig(figname)
            plt.close('all')
def cwt(x, wavelet, weighting_function=lambda x: x**(-0.5), deep_copy=True):
    """Continuous wavelet transform of `x` using the mother wavelet `wavelet`.

    The cwt, T(a,b) = w(a) * integral x(t) psi*((t-b)/a) dt, is a
    convolution; it is evaluated here as a multiplication in the Fourier
    domain.

    Parameters
    ----------
    x : 1D array
        Time series to be transformed.
    wavelet : MotherWavelet instance
        Mother wavelet for a particular wavelet family.
    weighting_function : function
        Per-scale weight w(a); the default a**(-0.5) gives every scale the
        same energy.
    deep_copy : bool
        If True (default), the returned Wavelet stores a full copy of the
        mother wavelet object; if False, a shared reference (saves memory).

    Returns
    -------
    An instance of the Wavelet class holding the transform coefficients.

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.
    Taylor and Francis Group, New York/London. 353 pp.
    """
    signal_dtype = x.dtype
    # Zero-pad the signal up to the (possibly padded) wavelet length.
    if len(x) < wavelet.len_wavelet:
        orig_len = len(x)
        x = np.resize(x, (wavelet.len_wavelet,))
        x[orig_len:] = 0
    # Convolution theorem: multiply spectra instead of convolving in time.
    signal_spectrum = fft(x)
    wavelet_spectrum = fft(wavelet.coefs.conj(), axis=1)
    convolved = ifft(wavelet_spectrum * signal_spectrum[np.newaxis, :], axis=1)
    # Undo the fft ordering and apply the per-scale weighting.
    wt = fftshift(convolved, axes=[1]) * weighting_function(wavelet.scales[:, np.newaxis])
    # If mother wavelet and signal are both real, keep only the real part.
    wt = wt.astype(np.lib.common_type(wavelet.coefs, x))
    return Wavelet(wt, wavelet, weighting_function, signal_dtype, deep_copy)
def ccwt(x1, x2, wavelet):
    """Continuous cross-wavelet transform of `x1` and `x2`.

    Parameters
    ----------
    x1, x2 : 1D array
        Time series used to compute the cross-wavelet transform.
    wavelet : MotherWavelet instance
        Mother wavelet for a particular wavelet family.

    Returns
    -------
    The product cwt(x1) * conj(cwt(x2)).
    """
    # NOTE(review): this multiplies two Wavelet objects directly; Wavelet
    # defines no __mul__, so this matches the original behavior verbatim
    # but looks like it was meant to combine the .coefs arrays - verify.
    return cwt(x1, wavelet) * np.conjugate(cwt(x2, wavelet))
def icwt(wavelet):
    """Inverse continuous wavelet transform.

    Parameters
    ----------
    wavelet : Wavelet instance
        Result of a forward cwt (holds coefficients plus the mother
        wavelet used to produce them).

    Returns
    -------
    1D array: the reconstructed signal, cast back to the dtype of the
    signal originally transformed. The reconstruction is approximate -
    icwt(cwt(data)) is close to, but not exactly, data.

    References
    ----------
    Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook.
    Taylor and Francis Group, New York/London. 353 pp.
    """
    from scipy.integrate import trapz
    mother = wavelet.motherwavelet
    # If the forward transform used zero padding, re-attach the padding
    # coefficients that Wavelet.__init__ truncated off.
    if mother.len_signal != mother.len_wavelet:
        full_wc = np.c_[wavelet.coefs, wavelet._pad_coefs]
    else:
        full_wc = wavelet.coefs
    # Back to the Fourier domain for both the coefficients and the mother
    # wavelet, invert the convolution, then integrate over scale.
    wcf = fft(full_wc, axis=1)
    mwf = fft(mother.coefs, axis=1)
    x = (1. / mother.cg) * trapz(
        fftshift(ifft(wcf * mwf, axis=1), axes=[1]) /
        (mother.scales[:, np.newaxis]**2),
        dx=1. / mother.sampf, axis=0)
    return x[0:mother.len_signal].astype(wavelet._signal_dtype)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import json
import click
import os
import config
import sqlite3
import time
import flask_debugtoolbar
import qrcode
import StringIO
from flask import Flask
from flask import request
from flask import g
from flask import redirect
from flask import abort
from flask import render_template
from flask import send_file
from flask import flash
from werkzeug.exceptions import HTTPException
app = Flask(__name__)
# Load default config and override config from an environment variable
# (all values come from the local `config` module; the five DEBUG_TB_*
# flags tie the flask-debugtoolbar behavior to the same DEBUG switch).
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, config.DATABASE_PATH),
    DEBUG=config.DEBUG,
    DEBUG_TB_ENABLED=config.DEBUG,
    DEBUG_TB_INTERCEPT_REDIRECTS=config.DEBUG,
    DEBUG_TB_PROFILER_ENABLED=config.DEBUG,
    DEBUG_TB_TEMPLATE_EDITOR_ENABLED=config.DEBUG,
    SECRET_KEY=config.SECRET_KEY,
))
# The debug toolbar is only attached in debug deployments.
if config.DEBUG is True:
    toolbar = flask_debugtoolbar.DebugToolbarExtension(app)
class JSONException(HTTPException):
    """HTTPException variant whose body is rendered as JSON."""

    def __init__(self, message, status_code=None, response=None):
        self.message = message
        self.response = response
        # Default to 400 (bad request) when no explicit status is given.
        self.code = 400 if status_code is None else status_code

    def get_body(self, environ):
        """Serialize the error message and status code as the body."""
        payload = {
            "error": self.message,
            "code": self.code
        }
        return json.dumps(payload)

    def get_headers(self, environ):
        """Force the JSON content type on the response."""
        return [('Content-Type', 'application/json')]
# Base-56 alphabet: digits/letters minus visually ambiguous ones (0/O, 1/l/I).
ALPHABET = "23456789abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ"

def base62_encode(num, alphabet=ALPHABET):
    """Encode a non-negative integer in base len(alphabet).

    `num`: the number to encode.
    `alphabet`: the digit alphabet to use for encoding.
    """
    if num == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    # Peel off least-significant digits, then reverse to most-significant order.
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))

def base62_decode(string, alphabet=ALPHABET):
    """Decode a string produced by base62_encode back into the integer.

    `string`: the encoded string.
    `alphabet`: the digit alphabet used for encoding.
    """
    base = len(alphabet)
    num = 0
    # Horner's scheme over the digits, most-significant first.
    for char in string:
        num = num * base + alphabet.index(char)
    return num
def connect_db():
    """Connect to the configured SQLite database, yielding dict-style rows."""
    def dict_row(cursor, row):
        # Pair each column name from the cursor description with its value.
        return {col[0]: value for col, value in zip(cursor.description, row)}
    conn = sqlite3.connect(app.config['DATABASE'])
    conn.row_factory = dict_row
    return conn
def init_db():
    """Initializes the database."""
    # Replays the v1 schema script; app.open_resource resolves the path
    # relative to the application package root.
    db = get_db()
    with app.open_resource('schema/v1.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context, caching it on `g`.
    """
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
def lookup_url(link_id):
    """Return the stored URL for the short key `link_id`, or None if unknown."""
    cur = get_db().execute('SELECT url FROM urls WHERE key = ? LIMIT 1',
                           (link_id,))
    row = cur.fetchone()
    return None if row is None else row['url']
def lookup_stats(link_id):
    """Return the list of click timestamps recorded for the key `link_id`."""
    cur = get_db().execute("""SELECT time FROM stats WHERE link_id IN (
SELECT id FROM urls WHERE key = ?)""",
                           (link_id,))
    return [row['time'] for row in cur.fetchall()]
def save_key(key, url, api_key=None, customized=False):
    """Insert a key -> url mapping; `customized` marks user-chosen aliases."""
    db = get_db()
    # The urls.custom column stores the flag as 0/1.
    custom = 1 if customized is True else 0
    if api_key is not None:
        db.execute("""INSERT INTO urls (key, url, api_key, custom)
VALUES (?, ?, ?, ?)""", (key, url, api_key, custom))
    else:
        db.execute("""INSERT INTO urls (key, url, custom)
VALUES (?, ?, ?)""", (key, url, custom))
    db.commit()
def save_url(url, wish=None, api_key=None):
    """Store `url` and return the short key it was saved under.

    url -- the long URL to shorten
    wish -- optional user-requested key, used as-is when available
    api_key -- optional API key to associate with the mapping

    Returns the wish, an existing key for the same URL, or a freshly
    generated base62 key.
    """
    db = get_db()
    if wish is not None:
        exists = lookup_url(wish)
        if exists is not None:
            # NOTE(review): the existing key may point at a *different*
            # URL, yet the wish is returned as if saved -- callers seem to
            # pre-check with lookup_url; confirm.
            return wish
        else:
            save_key(wish, url, api_key, customized=True)
            return wish
    else:
        # Reuse the key if this exact URL was shortened before.
        cur = db.execute('SELECT key FROM urls WHERE url = ?', (url,))
        key_exists = cur.fetchone()
        if key_exists is not None and 'key' in key_exists:
            return key_exists['key']
        # Otherwise continue the sequence from the newest generated key
        # (custom keys are excluded so wishes don't disturb the sequence).
        cur = db.execute("""SELECT key FROM urls WHERE custom = 0 ORDER BY id
                        DESC LIMIT 1""")
        last_key = cur.fetchone()
        if last_key is None or 'key' not in last_key or last_key['key'] == '':
            # No generated key yet: start the sequence by encoding 8.
            # NOTE(review): 8 looks like an arbitrary seed value -- confirm.
            key = base62_encode(8)
        else:
            key = base62_encode(base62_decode(last_key['key']) + 1)
        save_key(key, url, api_key)
        return key
def validate_api_key(key):
    """Return True when `key` is a registered API key, else False."""
    db = get_db()
    # `limit` is a reserved SQL keyword and the column is actually named
    # `dlimit` (see add_api_key's INSERT); the old query could not run.
    cur = db.execute('SELECT id, dlimit FROM api WHERE key = ?', (key,))
    res = cur.fetchone()
    return bool(res and 'id' in res and res['id'] > 0)
@app.cli.command('initdb')
@click.option('--upgrade', default='no', help='Only upgrade to a newer version')
def initdb_command(upgrade):
    """Creates the database tables."""
    # The docstring used to sit inside the else branch, where it was a
    # dead string statement; at function level click also shows it as the
    # command's help text.
    if upgrade == 'yes':
        print('Not implemented')
        return
    init_db()
    print('Initialized the database.')
@app.cli.command('addkey')
@click.option('--key', help='The API key to add')
@click.option('--limit', default=10000, help='Maximum requests per day')
def add_api_key(key, limit):
    """Adds an API key for authorization."""
    # `len(key) is not 32` compared identity rather than value; it only
    # appeared to work because CPython caches small ints. Use != instead.
    if key is None or len(key) != 32:
        print('Keys must be exactly 32 characters long!')
        return
    if limit == 0:
        print('INFO: Limit set to 0 - setting no limit.')
    if limit < 0:
        print('INFO: Limit is less than 0, disabling account')
    db = get_db()
    db.execute('INSERT OR REPLACE INTO api (key, dlimit) VALUES (?, ?)',
               (key, limit))
    db.commit()
    print('Key "%s" added to the database.' % key)
@app.teardown_appcontext
def close_db(error):
    """Close the app-context database connection, if one was opened."""
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
@app.route('/')
def index():
    """Render the landing page with the configured base URL."""
    return render_template('index.html', base_url=config.URL)
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    return render_template('404.html'), 404
@app.route('/<link_id>')
def short_link(link_id):
    """Redirect a short key to its stored URL, recording the click time."""
    url = lookup_url(link_id)
    if url is None:
        abort(404)
    # Record a click timestamp against the link's numeric id.
    db = get_db()
    row = db.execute('SELECT id FROM urls WHERE key = ? LIMIT 1',
                     (link_id,)).fetchone()
    db.execute('INSERT INTO stats (link_id, time) VALUES(?, ?)',
               (row['id'], int(time.time()),))
    db.commit()
    return redirect(url, code=301)
@app.route('/save', methods=['POST'])
def save_link():
    """Handle the shorten form: validate input, save, redirect to info page."""
    if not len(request.form['url']):
        flash('No URL supplied')
        return redirect('/')
    wish = None
    if len(request.form['wish']):
        # A requested key is only honoured when it is still free.
        if lookup_url(request.form['wish']) is not None:
            flash('URL already in use')
            return redirect('/')
        wish = request.form['wish']
    key = save_url(request.form['url'], wish=wish)
    if key is None:
        abort(500)
    return redirect('/' + key + '+')
@app.route('/<link_id>+')
def link_info(link_id):
    """Show the info page (target URL and click stats) for a short link."""
    target = lookup_url(link_id)
    if target is None:
        abort(404)
    return render_template('info.html', url=target, link_id=link_id,
                           stats=lookup_stats(link_id), base_url=config.URL)
@app.route('/<img_id>.png')
def qr_code(img_id):
    """Serve a QR code PNG for the given short key (404 when unknown)."""
    link_url = lookup_url(img_id)
    if link_url is None:
        abort(404)
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4
    )
    qr.add_data(link_url)
    qr.make(fit=True)
    image = qr.make_image()
    # PNG data is binary: StringIO only worked on Python 2 where str is
    # bytes. io.BytesIO is correct on both Python 2 and 3.
    from io import BytesIO
    image_io = BytesIO()
    image.save(image_io, 'PNG')
    image_io.seek(0)
    return send_file(image_io, mimetype='image/png')
@app.route('/api/v1/short', methods=['POST'])
def api_v1_short():
    """API endpoint: shorten a URL.

    Expects a JSON body with `key` (API key), `url`, and optional `wish`.
    Returns JSON with the long URL, the short URL and the wish.
    Raises JSONException on missing/invalid input or server errors.
    """
    if not request.json:
        raise JSONException(message="No data supplied", status_code=400)
    # Validate the key the same way /api/v1/long does (was a TODO here).
    if 'key' not in request.json or not validate_api_key(request.json['key']):
        raise JSONException(message="No valid API key supplied",
                            status_code=401)
    # TODO: url validation
    if 'url' not in request.json:
        raise JSONException(message="No URL supplied", status_code=400)
    wish = request.json.get('wish')
    try:
        short_key = save_url(request.json['url'], wish,
                             api_key=request.json['key'])
        return json.dumps({
            "url_long": request.json['url'],
            "url_short": config.URL + short_key,
            "wish": wish
        })
    except Exception:
        # The original *returned* the exception instance, handing Flask an
        # object instead of a response; it must be raised.
        raise JSONException(message="Internal server error", status_code=500)
@app.route('/api/v1/long', methods=['POST'])
def api_v1_long():
    """API endpoint: resolve a short key back to its long URL.

    Expects JSON with `key` (API key), `id` (the short key) and optional
    boolean `statistics`. Returns JSON with both URLs and, when requested,
    the recorded click timestamps.
    """
    if not request.json:
        raise JSONException(message="No data supplied", status_code=400)
    if 'key' not in request.json or not validate_api_key(request.json['key']):
        raise JSONException(message="No valid API key supplied",
                            status_code=401)
    if 'id' not in request.json:
        raise JSONException(message="No URL id supplied", status_code=400)
    try:
        long_link = lookup_url(request.json['id'])
        if 'statistics' in request.json and request.json['statistics'] is True:
            statistics = lookup_stats(request.json['id'])
        else:
            statistics = []
    except Exception:
        # Was `return JSONException(...)`; an exception must be raised.
        raise JSONException(message="Internal server error", status_code=500)
    # Raised outside the try block so the blanket handler above cannot
    # turn this 404 into a 500 (the original built the exception but
    # never raised it at all).
    if long_link is None:
        raise JSONException(message="Link information not found",
                            status_code=404)
    return json.dumps({
        "url_short": config.URL + request.json['id'],
        "url_long": long_link,
        "statistics": statistics
    })
# Development entry point; in production the app is served through WSGI.
if __name__ == "__main__":
    app.run(host='localhost', port=9002)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Shared documentation fragment appended to `Binomial._prob` and
# `Binomial._log_prob` via `distribution_util.AppendDocstring`.
_binomial_prob_note = """
For each batch member of counts `value`, `P[counts]` is the probability that
after sampling `n` draws from this Binomial distribution, the number of
successes is `k`. Note that different sequences of draws can result in the
same counts, thus the probability includes a combinatorial coefficient.
`value` must be a non-negative tensor with dtype `dtype` and whose shape
can be broadcast with `self.p` and `self.n`. `counts` is only legal if it is
less than or equal to `n` and its components are equal to integer
values.
"""
class Binomial(distribution.Distribution):
  """Binomial distribution.

  This distribution is parameterized by a vector `p` of probabilities and `n`,
  the total counts.

  #### Mathematical details

  The Binomial is a distribution over the number of successes in `n` independent
  trials, with each trial having the same probability of success `p`.

  The probability mass function (pmf):

  ```pmf(k) = n! / (k! * (n - k)!) * (p)^k * (1 - p)^(n - k)```

  #### Examples

  Create a single distribution, corresponding to 5 coin flips.

  ```python
  dist = Binomial(n=5., p=.5)
  ```

  Create a single distribution (using logits), corresponding to 5 coin flips.

  ```python
  dist = Binomial(n=5., logits=0.)
  ```

  Creates 3 distributions with the third distribution most likely to have
  successes.

  ```python
  p = [.2, .3, .8]
  # n will be broadcast to [4., 4., 4.], to match p.
  dist = Binomial(n=4., p=p)
  ```

  The distribution functions can be evaluated on counts.

  ```python
  # counts same shape as p.
  counts = [1., 2, 3]
  dist.prob(counts)  # Shape [3]

  # p will be broadcast to [[.2, .3, .8], [.2, .3, .8]] to match counts.
  counts = [[1., 2, 1], [2, 2, 4]]
  dist.prob(counts)  # Shape [2, 3]

  # p will be broadcast to shape [5, 7, 3] to match counts.
  counts = [[...]]  # Shape [5, 7, 3]
  dist.prob(counts)  # Shape [5, 7, 3]
  ```
  """

  def __init__(self,
               n,
               logits=None,
               p=None,
               validate_args=False,
               allow_nan_stats=True,
               name="Binomial"):
    """Initialize a batch of Binomial distributions.

    Args:
      n: Non-negative floating point tensor with shape broadcastable to
        `[N1,..., Nm]` with `m >= 0` and the same dtype as `p` or `logits`.
        Defines this as a batch of `N1 x ... x Nm` different Binomial
        distributions. Its components should be equal to integer values.
      logits: Floating point tensor representing the log-odds of a
        positive event with shape broadcastable to `[N1,..., Nm]` `m >= 0`, and
        the same dtype as `n`. Each entry represents logits for the probability
        of success for independent Binomial distributions. Only one of
        `logits` or `p` should be passed in.
      p: Positive floating point tensor with shape broadcastable to
        `[N1,..., Nm]` `m >= 0`, `p in [0, 1]`. Each entry represents the
        probability of success for independent Binomial distributions. Only one
        of `logits` or `p` should be passed in.
      validate_args: `Boolean`, default `False`. Whether to assert valid values
        for parameters `n`, `p`, and `x` in `prob` and `log_prob`.
        If `False` and inputs are invalid, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Examples:

    ```python
    # Define 1-batch of a binomial distribution.
    dist = Binomial(n=2., p=.9)

    # Define a 2-batch.
    dist = Binomial(n=[4., 5], p=[.1, .3])
    ```
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[n]) as ns:
      # When validate_args is set, assert n is a non-negative whole number
      # before anything reads it; otherwise attach no dependencies.
      with ops.control_dependencies([
          check_ops.assert_non_negative(
              n, message="n has negative components."),
          distribution_util.assert_integer_form(
              n, message="n has non-integer components."),
      ] if validate_args else []):
        self._n = array_ops.identity(n, name="n")
        # Exactly one of logits/p may be given; the helper derives the other.
        self._logits, self._p = distribution_util.get_logits_and_prob(
            name=name, logits=logits, p=p, validate_args=validate_args)
      super(Binomial, self).__init__(
          dtype=self._p.dtype,
          is_continuous=False,
          is_reparameterized=False,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._n, self._p, self._logits],
          name=ns)

  @property
  def n(self):
    """Number of trials."""
    return self._n

  @property
  def logits(self):
    """Log-odds of success."""
    return self._logits

  @property
  def p(self):
    """Probability of success."""
    return self._p

  def _batch_shape(self):
    # Adding n and p broadcasts them, so the sum's shape is the batch shape.
    return array_ops.shape(self._n + self._p)

  def _get_batch_shape(self):
    # Static counterpart of _batch_shape.
    return common_shapes.broadcast_shape(self.n.get_shape(),
                                         self.p.get_shape())

  def _event_shape(self):
    # Scalar event: each sample is a single count.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  @distribution_util.AppendDocstring(_binomial_prob_note)
  def _log_prob(self, counts):
    counts = self._check_counts(counts)
    # log pmf = k*log(p) + (n-k)*log(1-p) + log(n choose k), with the
    # binomial coefficient expressed through lgamma.
    # NOTE(review): log(p) / log(1 - p) are non-finite when p is exactly
    # 0 or 1 -- confirm inputs are kept strictly inside (0, 1).
    prob_prob = (counts * math_ops.log(self.p) +
                 (self.n - counts) * math_ops.log(1. - self.p))
    combinations = (math_ops.lgamma(self.n + 1) -
                    math_ops.lgamma(counts + 1) -
                    math_ops.lgamma(self.n - counts + 1))
    log_prob = prob_prob + combinations
    return log_prob

  @distribution_util.AppendDocstring(_binomial_prob_note)
  def _prob(self, counts):
    return math_ops.exp(self._log_prob(counts))

  def _mean(self):
    # E[X] = n * p
    return self._n * self._p

  def _variance(self):
    # Var[X] = n * p * (1 - p)
    return self._n * self._p * (1 - self._p)

  def _std(self):
    return math_ops.sqrt(self._variance())

  @distribution_util.AppendDocstring(
      """Note that when `(n + 1) * p` is an integer, there are actually two
      modes.  Namely, `(n + 1) * p` and `(n + 1) * p - 1` are both modes. Here
      we return only the larger of the two modes.""")
  def _mode(self):
    return math_ops.floor((self._n + 1) * self._p)

  @distribution_util.AppendDocstring(
      """Check counts for proper shape, values, then return tensor version.""")
  def _check_counts(self, counts):
    counts = ops.convert_to_tensor(counts, name="counts_before_deps")
    # Validation is opt-in; skip the assert ops entirely when disabled.
    if not self.validate_args:
      return counts
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(
            counts, message="counts has negative components."),
        check_ops.assert_less_equal(
            counts, self._n, message="counts are not less than or equal to n."),
        distribution_util.assert_integer_form(
            counts, message="counts have non-integer components.")], counts)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import before_class
from proboscis.check import Check
from proboscis import test
from troveclient.compat import exceptions
from trove.tests.api.instances import create_new_instance
from trove.tests.api.instances import CreateInstance
from trove.tests.config import CONFIG
from trove.tests import DBAAS_API
from trove.tests import INSTANCES
from trove.tests import PRE_INSTANCES
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
# Proboscis group name shared by the host-management API tests below.
GROUP = "dbaas.api.mgmt.hosts"
def percent_boundary(used_ram, total_ram):
    """Return a (lower, upper) bound for the percent of RAM used."""
    # Same arithmetic as the API presumably performs: truncate the float
    # percentage, then widen by +/- 2 to absorb rounding differences.
    calc = int((1.0 * used_ram / total_ram) * 100)
    return calc - 2, calc + 2
@test(groups=[DBAAS_API, GROUP, PRE_INSTANCES],
      depends_on_groups=["services.initialize"],
      enabled=create_new_instance())
class HostsBeforeInstanceCreation(object):
    """Mgmt host API checks executed before any instance exists."""

    @before_class
    def setUp(self):
        # The hosts endpoints require an admin user.
        self.user = CONFIG.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
        self.host = None

    @test
    def test_empty_index_host_list(self):
        host_index_result = self.client.hosts.index()
        assert_not_equal(host_index_result, None,
                         "list hosts call should not be empty: %s" %
                         str(host_index_result))
        assert_true(len(host_index_result) > 0,
                    "list hosts length should be greater than zero: %r" %
                    host_index_result)
        # Remember one host for the dependent single-host test below.
        self.host = host_index_result[0]
        assert_true(self.host is not None, "Expected to find a host.")

    @test(depends_on=[test_empty_index_host_list])
    def test_empty_index_host_list_single(self):
        # Escape the dots in the host name before passing it to the API.
        self.host.name = self.host.name.replace(".", "\.")
        result = self.client.hosts.get(self.host)
        assert_not_equal(result, None,
                         "Get host should not be empty for: %s" % self.host)
        with Check() as check:
            used_ram = int(result.usedRAM)
            total_ram = int(result.totalRAM)
            percent_used = int(result.percentUsed)
            # The reported percentage must agree (within rounding) with
            # the reported RAM numbers.
            lower, upper = percent_boundary(used_ram, total_ram)
            check.true(percent_used > lower,
                       "percentUsed %r is below the lower boundary %r"
                       % (percent_used, lower))
            check.true(percent_used < upper,
                       "percentUsed %r is above the upper boundary %r"
                       % (percent_used, upper))
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            check.true(percent_used < 100,
                       "percentUsed should be less than 100 but was %r"
                       % percent_used)
            check.true(total_ram > 0,
                       "totalRAM should be greater than 0 but was %r"
                       % total_ram)
            # NOTE(review): duplicate of the usedRAM < totalRAM check above.
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
@test(groups=[INSTANCES, GROUP],
      depends_on=[CreateInstance],
      enabled=create_new_instance())
class HostsMgmtCommands(object):
    """Mgmt host API checks executed after instances have been created."""

    @before_class
    def setUp(self):
        # The hosts endpoints require an admin user.
        self.user = CONFIG.users.find_user(Requirements(is_admin=True))
        self.client = create_dbaas_client(self.user)
        self.host = None

    @test
    def test_index_host_list(self):
        result = self.client.hosts.index()
        assert_not_equal(len(result), 0,
                         "list hosts should not be empty: %s" % str(result))
        hosts = []
        # Find a host with an instanceCount > 0
        for host in result:
            msg = 'Host: %s, Count: %s' % (host.name, host.instanceCount)
            hosts.append(msg)
            if int(host.instanceCount) > 0:
                self.host = host
                break
        msg = "Unable to find a host with instances: %r" % hosts
        assert_not_equal(self.host, None, msg)

    @test(depends_on=[test_index_host_list])
    def test_index_host_list_single(self):
        # Escape the dots in the host name before passing it to the API.
        self.host.name = self.host.name.replace(".", "\.")
        result = self.client.hosts.get(self.host)
        assert_not_equal(result, None,
                         "list hosts should not be empty: %s" % str(result))
        assert_true(len(result.instances) > 0,
                    "instance list on the host should not be empty: %r"
                    % result.instances)
        with Check() as check:
            used_ram = int(result.usedRAM)
            total_ram = int(result.totalRAM)
            percent_used = int(result.percentUsed)
            # The reported percentage must agree (within rounding) with
            # the reported RAM numbers.
            lower, upper = percent_boundary(used_ram, total_ram)
            check.true(percent_used > lower,
                       "percentUsed %r is below the lower boundary %r"
                       % (percent_used, lower))
            check.true(percent_used < upper,
                       "percentUsed %r is above the upper boundary %r"
                       % (percent_used, upper))
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            check.true(percent_used < 100,
                       "percentUsed should be less than 100 but was %r"
                       % percent_used)
            check.true(total_ram > 0,
                       "totalRAM should be greater than 0 but was %r"
                       % total_ram)
            # NOTE(review): duplicate of the usedRAM < totalRAM check above.
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            # Check all active instances and validate all the fields exist
            active_instance = None
            for instance in result.instances:
                print("instance: %s" % instance)
                if instance['status'] != 'ACTIVE':
                    continue
                active_instance = instance
                check.is_not_none(instance['id'])
                check.is_not_none(instance['name'])
                check.is_not_none(instance['status'])
                check.is_not_none(instance['server_id'])
                check.is_not_none(instance['tenant_id'])
            check.true(active_instance is not None, "No active instances")

    def _get_ids(self):
        """Get all the ids of instances that are ACTIVE."""
        ids = []
        results = self.client.hosts.index()
        for host in results:
            result = self.client.hosts.get(host)
            for instance in result.instances:
                if instance['status'] == 'ACTIVE':
                    ids.append(instance['id'])
        return ids

    @test
    def test_update_hosts(self):
        ids = self._get_ids()
        assert_not_equal(ids, [], "No active instances found")
        # Capture guest versions before the update so we can see movement.
        before_versions = {}
        for _id in ids:
            diagnostics = self.client.diagnostics.get(_id)
            before_versions[_id] = diagnostics.version
        hosts = self.client.hosts.index()
        for host in hosts:
            self.client.hosts.update_all(host.name)
        after_versions = {}
        for _id in ids:
            diagnostics = self.client.diagnostics.get(_id)
            after_versions[_id] = diagnostics.version
        assert_not_equal(before_versions, {},
                         "No versions found before update")
        assert_not_equal(after_versions, {},
                         "No versions found after update")
        # NOTE(review): presumably the fake-mode guest bumps its version on
        # every update call, so the version must differ -- confirm.
        if CONFIG.fake_mode:
            for _id in after_versions:
                assert_not_equal(before_versions[_id], after_versions[_id])

    @test
    def test_host_not_found(self):
        hostname = "host@$%3dne"
        assert_raises(exceptions.NotFound, self.client.hosts.get, hostname)
| |
#!/usr/bin/env python
"""
Creates lammps data files from lammps dump files, given a template lammps data file.
"""
from __future__ import print_function, absolute_import
import os
import logging
import re
import sys
import argparse
import numpy as np
from collections import defaultdict
from md_utils.md_common import (list_to_file, InvalidDataError, create_out_fname, pbc_dist,
warning, process_cfg, find_dump_section_state)
try:
# noinspection PyCompatibility
from ConfigParser import ConfigParser
except ImportError:
# noinspection PyCompatibility
from configparser import ConfigParser
__author__ = 'hmayes'

# Logging
logger = logging.getLogger('evbd2d')
# logging.basicConfig(filename='evbd2d.log', filemode='w', level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)

# Error Codes
# The good status code
GOOD_RET = 0
INPUT_ERROR = 1
IO_ERROR = 2
INVALID_DATA = 3

# Constants #

# Config File Sections
MAIN_SEC = 'main'

# Config keys (names of entries in the ini file's [main] section)
DATA_TPL_FILE = 'data_tpl_file'
DUMPS_FILE = 'dump_list_file'
DUMP_FILE = 'dump_file'
WAT_O_TYPE = 'water_o_type'
WAT_H_TYPE = 'water_h_type'
H3O_O_TYPE = 'h3o_o_type'
H3O_H_TYPE = 'h3o_h_type'
PROT_RES_MOL_ID = 'prot_res_mol_id'
PROT_H_TYPE = 'prot_h_type'
PROT_H_IGNORE = 'prot_ignore_h_atom_nums'
PROT_TYPE_IGNORE_ATOMS = 'prot_res_type_ignore_atoms'
OUT_BASE_DIR = 'output_directory'
REPROD_TPL = 'reproduce_tpl_flag'
PRE_RES = 'pre_prot_res'
PROT_RES = 'prot_res'
POST_RES = 'post_prot_res'
EXCESS_H = 'excess_proton'
HYD_MOL = 'h3o_mol'
WAT_MOL = 'wat_mol'
POST_WAT = 'post_wat'

# Config keys to allow calculating charge at intermediate points:
LAST_P1 = 'last_p1'
LAST_P2 = 'last_p2'
LONE_ION = 'lone_ion'
LAST_LIPID = 'last_lipid'
LAST_HYD = 'last_hyd'
LAST_WATER = 'last_water'
LAST_ION1 = 'last_ion1'

# Defaults
DEF_CFG_FILE = 'evbd2d.ini'
# Set notation
DEF_DUMP_LIST_FILE = 'dump_list.txt'
# Values assumed when a key is absent from the config file (-1 disables
# the corresponding intermediate charge check).
DEF_CFG_VALS = {DUMPS_FILE: DEF_DUMP_LIST_FILE,
                DUMP_FILE: None,
                PROT_H_IGNORE: [],
                OUT_BASE_DIR: None,
                LAST_P1: -1,
                LAST_P2: -1,
                LONE_ION: -1,
                LAST_LIPID: -1,
                LAST_HYD: -1,
                LAST_WATER: -1,
                LAST_ION1: -1,
                REPROD_TPL: False,
                PROT_TYPE_IGNORE_ATOMS: [],
                }
# Keys that must appear in the config file, with their expected types.
REQ_KEYS = {DATA_TPL_FILE: str,
            WAT_O_TYPE: int,
            WAT_H_TYPE: int,
            H3O_O_TYPE: int,
            H3O_H_TYPE: int,
            PROT_RES_MOL_ID: int,
            PROT_H_TYPE: int, }

# From data template file
NUM_ATOMS = 'num_atoms'
TAIL_CONTENT = 'tail_content'
ATOMS_CONTENT = 'atoms_content'
HEAD_CONTENT = 'head_content'
H3O_MOL = 'hydronium_molecule'
H3O_O_CHARGE = 'hydronium_o_charge'
H3O_H_CHARGE = 'hydronium_h_charge'
FIRST_H3O_H_INDEX = 'first h3o hydrogen index'
PROT_RES_MOL = 'protonatable_residue_molecule'
WATER_MOLS = 'template_water_molecules'
# For data template file processing
SEC_HEAD = 'head_section'
SEC_ATOMS = 'atoms_section'
SEC_TAIL = 'tail_section'
# For dump file processing
SEC_TIMESTEP = 'timestep'
SEC_NUM_ATOMS = 'dump_num_atoms'
SEC_BOX_SIZE = 'dump_box_size'

# For deciding if a float is close enough to a value
TOL = 0.000001
# Bundle of headers for calculating charge
CALC_CHARGE_NAMES = [LAST_P1, LAST_P2, LONE_ION, LAST_LIPID, LAST_HYD, LAST_WATER, LAST_ION1]
def read_cfg(f_loc, cfg_proc=process_cfg):
    """
    Reads the given configuration file, returning a dict with the converted
    values supplemented by default values.

    :param f_loc: The location of the file to read.
    :param cfg_proc: The processor to use for the raw configuration values.
        Uses default values when the raw value is missing.
    :return: A dict of the processed configuration file's data.
    """
    parser = ConfigParser()
    # ConfigParser.read returns the list of files it managed to parse;
    # an empty list means the file could not be read.
    if not parser.read(f_loc):
        raise IOError('Could not read file {}'.format(f_loc))
    return cfg_proc(dict(parser.items(MAIN_SEC)), DEF_CFG_VALS, REQ_KEYS)
def parse_cmdline(argv):
    """
    Returns the parsed argument list and return code.
    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]
    # initialize the parser object:
    parser = argparse.ArgumentParser(description='Creates lammps data files from lammps dump files, given a template '
                                                 'lammps data file. The required input file provides the location of '
                                                 'the data template file, a file with a list of dump files to convert, '
                                                 'and information about the configuration of the data file, needed to '
                                                 'process the dump file to produce data files matching the template '
                                                 '(consistent ID for the hydronium ion, protonatable residue always'
                                                 'deprotonated, etc.). Currently, this script expects only one '
                                                 'protonatable residue.')
    # The config argument is parsed straight into a processed config dict.
    parser.add_argument("-c", "--config", help="The location of the configuration file in ini "
                                               "The default file name is {}, located in the "
                                               "base directory where the program as run.".format(DEF_CFG_FILE),
                        default=DEF_CFG_FILE, type=read_cfg)
    try:
        parsed_args = parser.parse_args(argv)
    except IOError as e:
        warning("Problems reading file:", e)
        parser.print_help()
        return None, IO_ERROR
    except KeyError as e:
        warning("Input data missing:", e)
        parser.print_help()
        return None, INPUT_ERROR
    except InvalidDataError as e:
        warning(e)
        return None, INVALID_DATA
    return parsed_args, GOOD_RET
def process_data_tpl(cfg):
    """Parse the lammps data template file into head/atoms/tail sections.

    cfg -- processed configuration dict (file locations and atom types)

    Returns a dict holding the header lines, per-atom structures
    [atom_num, mol_num, atom_type, charge, x, y, z, description] (with
    dummy coordinates), the tail lines, plus the hydronium, protonatable
    residue, and water molecules identified by type/mol id.
    Raises InvalidDataError when the template is inconsistent.
    """
    tpl_loc = cfg[DATA_TPL_FILE]
    tpl_data = {HEAD_CONTENT: [], ATOMS_CONTENT: [], TAIL_CONTENT: [], PROT_RES_MOL: [], H3O_MOL: [],
                WATER_MOLS: defaultdict(list), FIRST_H3O_H_INDEX: None}
    section = SEC_HEAD
    num_atoms_pat = re.compile(r"(\d+).*atoms$")
    atoms_pat = re.compile(r"^Atoms.*")
    # put in dummy x y z
    x = 0.0
    y = 0.0
    z = 0.0
    total_charge = 0.0
    # For debugging total charge
    calc_charge_atom_nums = {}
    for name in CALC_CHARGE_NAMES:
        calc_charge_atom_nums[cfg[name]] = name
    with open(tpl_loc) as f:
        for line in f:
            line = line.strip()
            # head_content to contain Everything before 'Atoms' section
            # also capture the number of atoms
            if section == SEC_HEAD:
                tpl_data[HEAD_CONTENT].append(line)
                if NUM_ATOMS not in tpl_data:
                    atoms_match = num_atoms_pat.match(line)
                    if atoms_match:
                        # regex is 1-based
                        tpl_data[NUM_ATOMS] = int(atoms_match.group(1))
                if atoms_pat.match(line):
                    section = SEC_ATOMS
                    tpl_data[HEAD_CONTENT].append('')
            # atoms_content to contain everything but the xyz: atom_num, mol_num, atom_type, charge, type'
            elif section == SEC_ATOMS:
                if len(line) == 0:
                    continue
                split_line = line.split()
                atom_num = int(split_line[0])
                mol_num = int(split_line[1])
                atom_type = int(split_line[2])
                charge = float(split_line[3])
                description = ' '.join(split_line[7:])
                atom_struct = [atom_num, mol_num, atom_type, charge, x, y, z, description]
                tpl_data[ATOMS_CONTENT].append(atom_struct)
                total_charge += charge
                # Classify the atom: hydronium first (by type), then the
                # protonatable residue (by mol id), then water (by type).
                if atom_type == cfg[H3O_O_TYPE]:
                    tpl_data[H3O_MOL].append(atom_struct)
                    tpl_data[H3O_O_CHARGE] = charge
                elif atom_type == cfg[H3O_H_TYPE]:
                    if tpl_data[FIRST_H3O_H_INDEX] is None:
                        tpl_data[FIRST_H3O_H_INDEX] = len(tpl_data[H3O_MOL])
                    tpl_data[H3O_MOL].append(atom_struct)
                    tpl_data[H3O_H_CHARGE] = charge
                elif mol_num == cfg[PROT_RES_MOL_ID]:
                    tpl_data[PROT_RES_MOL].append(atom_struct)
                elif atom_type == cfg[WAT_O_TYPE] or atom_type == cfg[WAT_H_TYPE]:
                    tpl_data[WATER_MOLS][mol_num].append(atom_struct)
                if atom_num == tpl_data[NUM_ATOMS]:
                    # Reached the declared last atom: switch to the tail
                    # section and run the consistency checks now.
                    section = SEC_TAIL
                    # Perform checks total charge
                    if abs(total_charge) < TOL:
                        print('The data file system is neutral (total charge {:.2e})'.format(total_charge))
                    else:
                        warning('The data file system is not neutral. Total charge {0:.6f}'.format(total_charge))
                    if len(tpl_data[PROT_RES_MOL]) == 0:
                        # NOTE(review): cfg[PROT_RES_MOL] looks like it should
                        # be cfg[PROT_RES_MOL_ID]; 'protonatable_residue_molecule'
                        # is a template-data key, not a config key -- confirm.
                        raise InvalidDataError('Did not find the input {} ({}).'.format(PROT_RES_MOL,
                                                                                        cfg[PROT_RES_MOL]))
                    for mol_list in [H3O_MOL, WATER_MOLS]:
                        if len(tpl_data[mol_list]) == 0:
                            raise InvalidDataError('In reading the data file, found no {}. Check the data file and '
                                                   'the input atom types: \n{} = {}\n{} = {}\n{} = {}\n'
                                                   '{} = {}\n{} = {}.'
                                                   ''.format(mol_list,
                                                             PROT_H_TYPE, cfg[PROT_H_TYPE],
                                                             H3O_O_TYPE, cfg[H3O_O_TYPE],
                                                             H3O_H_TYPE, cfg[H3O_H_TYPE],
                                                             WAT_O_TYPE, cfg[WAT_O_TYPE],
                                                             WAT_H_TYPE, cfg[WAT_H_TYPE]))
                elif atom_num in calc_charge_atom_nums:
                    print('After atom {0} ({1}), the total charge is: {2:.3f}'.format(atom_num,
                                                                                      calc_charge_atom_nums[atom_num],
                                                                                      total_charge))
            # tail_content to contain everything after the 'Atoms' section
            elif section == SEC_TAIL:
                tpl_data[TAIL_CONTENT].append(line)
    # Validate data section
    if len(tpl_data[ATOMS_CONTENT]) != tpl_data[NUM_ATOMS]:
        raise InvalidDataError('In the file {}, The length of the "Atoms" section ({}) does not equal '
                               'the number of atoms ({}).'.format(tpl_loc,
                                                                  len(tpl_data[ATOMS_CONTENT]),
                                                                  tpl_data[NUM_ATOMS]))
    if cfg[REPROD_TPL]:
        # Optionally write the parsed template back out for verification.
        f_out = create_out_fname('reproduced_tpl', base_dir=cfg[OUT_BASE_DIR], ext='.data')
        list_to_file(tpl_data[HEAD_CONTENT] + tpl_data[ATOMS_CONTENT][:] + tpl_data[TAIL_CONTENT],
                     f_out)
    return tpl_data
def deprotonate(cfg, protonatable_res, excess_proton, dump_h3o_mol, water_mol_dict, box, tpl_data):
    """
    Deprotonate the residue and assign the excess proton to the closest water
    molecule (turning it into a hydronium) so the output matches the template.

    cfg -- configuration dict (atom-type keys)
    protonatable_res -- atom structs of the protonatable residue
    excess_proton -- atom struct of the excess proton (mutated in place)
    dump_h3o_mol -- list that collects the new hydronium's atoms (mutated)
    water_mol_dict -- water molecules keyed by mol id; the chosen one is removed
    box -- periodic box dimensions (length-3 array)
    tpl_data -- parsed template data (target mol number and charges)
    """
    # Convert excess proton to a hydronium proton
    excess_proton[1] = tpl_data[H3O_MOL][0][1]  # molecule number
    excess_proton[2] = cfg[H3O_H_TYPE]  # type
    excess_proton[3] = tpl_data[H3O_H_CHARGE]  # charge
    dump_h3o_mol.append(excess_proton)
    min_dist_id = None
    # Start from an upper bound no periodic image can exceed: |box|.
    min_dist = np.linalg.norm(box)
    for mol_id, molecule in water_mol_dict.items():
        for atom in molecule:
            if atom[2] == cfg[WAT_O_TYPE]:
                # Proton-to-oxygen distance under periodic boundary conditions.
                dist = pbc_dist(np.asarray(excess_proton[4:7]), np.asarray(atom[4:7]), box)
                if dist < min_dist:
                    min_dist_id = mol_id
                    min_dist = dist
    logger.debug('Deprotonated residue: the molecule ID of the closest water '
                 '(to become a hydronium) is {}.'.format(min_dist_id))
    # Now that have the closest water, add its atoms to the hydronium list
    for atom in water_mol_dict[min_dist_id]:
        dump_h3o_mol.append(atom)
    # Remove the closest water from the dictionary of water molecules, and convert it to a hydronium
    del water_mol_dict[min_dist_id]
    for atom in dump_h3o_mol:
        if atom[2] == cfg[WAT_O_TYPE]:
            atom[2] = cfg[H3O_O_TYPE]
            atom[3] = tpl_data[H3O_O_CHARGE]
        elif atom[2] == cfg[WAT_H_TYPE]:
            atom[2] = cfg[H3O_H_TYPE]
            atom[3] = tpl_data[H3O_H_CHARGE]
    # Make the atom type and charge of the protonatable residue the same as for the template file (switching
    # from protonated to deprotonated residue)
    # NOTE(review): only a length check follows this comment; the actual
    # type/charge reassignment for the residue appears to be missing -- confirm.
    if len(tpl_data[PROT_RES_MOL]) != len(protonatable_res):
        raise InvalidDataError('Encountered dump file in which the number of atoms in the '
                               'protonatable residue does not equal the number of atoms in the template data file.')
def sort_wat_mols(cfg, water_dict):
    """
    Sorts waters molecules
    @param cfg: configuration for run. Used for getting atom types.
    @param water_dict: dictionary of water atoms by molecule key
    @return: a list that is ordered so all water atoms appear consecutively, oxygen first.
    """
    ordered_atoms = []
    for mol_key in water_dict:
        mol_atoms = water_dict[mol_key]
        # Per molecule: oxygen atoms first, then the hydrogens, each group
        # keeping its original encounter order.
        ordered_atoms.extend(a for a in mol_atoms if a[2] == cfg[WAT_O_TYPE])
        ordered_atoms.extend(a for a in mol_atoms if a[2] != cfg[WAT_O_TYPE])
    return ordered_atoms
def assign_hyd_mol(cfg, hyd_mol):
    """Return the hydronium's atoms reordered so the oxygen comes first."""
    oxygens = [atom for atom in hyd_mol if atom[2] == cfg[H3O_O_TYPE]]
    hydrogens = [atom for atom in hyd_mol if atom[2] != cfg[H3O_O_TYPE]]
    return oxygens + hydrogens
def process_dump_file(cfg, data_tpl_content, dump_file):
    """
    Read one LAMMPS dump file and, for every complete timestep found, write a
    new data file whose atoms are renumbered/retyped to match the data template
    (deprotonating the protonatable residue into a hydronium when needed).

    @param cfg: configuration for the run (atom types, molecule ids, output dir).
    @param data_tpl_content: parsed data template (header, atoms section, tail).
    @param dump_file: path of the dump file to read.
    @raise InvalidDataError: unexpected line, atom-count mismatch, missing
        waters, proton/hydronium inconsistency, or template charge mismatch.
    """
    section = None           # dump section currently being parsed
    box = np.zeros((3,))     # box edge lengths (x, y, z)
    counter = 1              # line counter within the current section
    # Output ordering of the reassembled atom list.
    atom_list_order = [PRE_RES, PROT_RES, POST_RES, HYD_MOL, WAT_MOL, POST_WAT]
    dump_atom_data = []
    atom_lists = {PRE_RES: [],
                  PROT_RES: [],
                  POST_RES: [],
                  HYD_MOL: [],
                  WAT_MOL: [],
                  POST_WAT: []
                  }
    with open(dump_file) as d:
        for line in d:
            line = line.strip()
            if section is None:
                section = find_dump_section_state(line)
                if section is None:
                    raise InvalidDataError('Unexpected line in file {}: {}'.format(dump_file, line))
            elif section == SEC_TIMESTEP:
                timestep = line
                # Reset variables for the new timestep
                water_dict = defaultdict(list)
                dump_atom_data = []
                excess_proton = None
                hydronium = []
                for a_list in atom_lists:
                    atom_lists[a_list] = []
                section = None
            elif section == SEC_NUM_ATOMS:
                if data_tpl_content[NUM_ATOMS] != int(line):
                    raise InvalidDataError('At timestep {} in file {}, the listed number of atoms ({}) does '
                                           'not equal the number of atoms in the template data file '
                                           '({}).'.format(timestep, dump_file, line, data_tpl_content[NUM_ATOMS]))
                section = None
            elif section == SEC_BOX_SIZE:
                # Three lines: lo/hi bounds per dimension; store the edge length.
                split_line = line.split()
                diff = float(split_line[1]) - float(split_line[0])
                box[counter - 1] = diff
                if counter == 3:
                    counter = 0
                    section = None
                counter += 1
            elif section == SEC_ATOMS:
                split_line = line.split()
                # If there is an incomplete line in a dump file, move on to the next file
                # (the skipped increment makes the truncation detectable below).
                if len(split_line) < 7:
                    continue
                atom_num = int(split_line[0])
                mol_num = int(split_line[1])
                atom_type = int(split_line[2])
                charge = float(split_line[3])
                x, y, z = map(float, split_line[4:7])
                description = ''
                atom_struct = [atom_num, mol_num, atom_type, charge, x, y, z, description]
                # Keep track of separate portions of the system to allow sorting and processing
                if mol_num == cfg[PROT_RES_MOL_ID]:
                    if atom_type == cfg[PROT_H_TYPE] and atom_num not in cfg[PROT_H_IGNORE]:
                        excess_proton = atom_struct
                    else:
                        atom_lists[PROT_RES].append(atom_struct)
                elif atom_type == cfg[H3O_O_TYPE] or atom_type == cfg[H3O_H_TYPE]:
                    hydronium.append(atom_struct)
                elif atom_type == cfg[WAT_O_TYPE] or atom_type == cfg[WAT_H_TYPE]:
                    water_dict[mol_num].append(atom_struct)
                # Save everything else in three chunks for recombining sections post-processing
                elif len(atom_lists[PROT_RES]) == 0:
                    atom_lists[PRE_RES].append(atom_struct)
                elif len(water_dict) == 0:
                    atom_lists[POST_RES].append(atom_struct)
                else:
                    atom_lists[POST_WAT].append(atom_struct)
                if counter == data_tpl_content[NUM_ATOMS]:
                    counter = 0
                    section = None
                    # Now that finished reading all atom lines...
                    # Check and process!
                    if len(water_dict) == 0:
                        raise InvalidDataError('Found no water molecules. Check that the input types {} = {} '
                                               'and {} = {} are in the dump '
                                               'file.'.format(WAT_O_TYPE, cfg[WAT_O_TYPE],
                                                              WAT_H_TYPE, cfg[WAT_H_TYPE]))
                    if excess_proton is None:
                        # No excess proton: must already have exactly one hydronium (4 atoms).
                        if len(hydronium) != 4:
                            raise InvalidDataError('Did not find an excess proton or one hydronium ion. Check dump '
                                                   'file and input types: {} = {}; {} = {}; {} = {}'
                                                   .format(PROT_H_TYPE, cfg[PROT_H_TYPE],
                                                           H3O_O_TYPE, cfg[H3O_O_TYPE],
                                                           H3O_H_TYPE, cfg[H3O_H_TYPE]))
                    else:
                        if len(hydronium) != 0:
                            raise InvalidDataError('Found an excess proton and a hydronium atoms. Check dump file '
                                                   'and input types: {} = {}; {} = {}; {} = {}'
                                                   .format(PROT_H_TYPE, cfg[PROT_H_TYPE],
                                                           H3O_O_TYPE, cfg[H3O_O_TYPE],
                                                           H3O_H_TYPE, cfg[H3O_H_TYPE]))
                        # Move the proton onto the nearest water, creating the hydronium.
                        deprotonate(cfg, atom_lists[PROT_RES], excess_proton, hydronium,
                                    water_dict, box, data_tpl_content)
                    # Ensure in correct order for printing
                    atom_lists[HYD_MOL] = assign_hyd_mol(cfg, hydronium)
                    atom_lists[WAT_MOL] = sort_wat_mols(cfg, water_dict)
                    for a_list in atom_list_order:
                        dump_atom_data += atom_lists[a_list]
                    # overwrite atom_num, mol_num, atom_type, charge, then description
                    for index in range(len(dump_atom_data)):
                        if dump_atom_data[index][3] == data_tpl_content[ATOMS_CONTENT][index][3] or \
                                dump_atom_data[index][0] in cfg[PROT_TYPE_IGNORE_ATOMS]:
                            dump_atom_data[index][0:4] = data_tpl_content[ATOMS_CONTENT][index][0:4]
                            dump_atom_data[index][7] = ' '.join(data_tpl_content[ATOMS_CONTENT][index][7:])
                        else:
                            raise InvalidDataError("In reading file: {}\n found atom index {} with charge {} which "
                                                   "does not match the charge in the data template ({}). \n"
                                                   "To ignore this mis-match, list "
                                                   "the atom's index number in the keyword '{}' in the ini file."
                                                   "".format(dump_file,
                                                             dump_atom_data[index][0], dump_atom_data[index][3],
                                                             data_tpl_content[ATOMS_CONTENT][index][3],
                                                             PROT_TYPE_IGNORE_ATOMS))
                    d_out = create_out_fname(dump_file, suffix='_' + str(timestep),
                                             ext='.data', base_dir=cfg[OUT_BASE_DIR])
                    data_tpl_content[HEAD_CONTENT][0] = "Created by evbdump2data from {} " \
                                                        "timestep {}".format(dump_file, timestep)
                    list_to_file(data_tpl_content[HEAD_CONTENT] + dump_atom_data + data_tpl_content[TAIL_CONTENT],
                                 d_out)
                counter += 1
    # counter == 1 means the last atoms section completed (reset to 0, then +1).
    if counter == 1:
        print("Completed reading dumpfile {}".format(dump_file))
    else:
        warning("Dump file {} step {} did not have the full list of atom numbers. "
                "Continuing program.".format(dump_file, timestep))
def process_dump_files(cfg, data_tpl_content):
    """
    Collect dump file names (single-file keyword plus optional list file) and
    process each one in turn.
    @param cfg: configuration for the run.
    @param data_tpl_content: parsed data template passed through to processing.
    @raise InvalidDataError: when no dump files are specified.
    """
    dump_file_list = [] if cfg[DUMP_FILE] is None else [cfg[DUMP_FILE]]
    if os.path.isfile(cfg[DUMPS_FILE]):
        with open(cfg[DUMPS_FILE]) as f:
            for raw_line in f:
                file_name = raw_line.strip()
                # ignore blank lines in dump file list
                if file_name:
                    dump_file_list.append(file_name)
    elif cfg[DUMPS_FILE] != DEF_DUMP_LIST_FILE:
        # Only warn when the user explicitly named a (missing) list file.
        warning("Did not find file: '{}'".format(cfg[DUMPS_FILE]))
    if not dump_file_list:
        raise InvalidDataError("Found no files to process. In the configuration file, specify one file "
                               "with the keyword '{}' or a list of "
                               "files with the keyword '{}'".format(DUMP_FILE, DUMPS_FILE))
    for dump_file in dump_file_list:
        process_dump_file(cfg, data_tpl_content, dump_file)
def main(argv=None):
    """
    Parse the command line, read the data template, then process dump files.
    @param argv: optional argument list (defaults to sys.argv).
    @return: GOOD_RET on success, otherwise an error return code.
    """
    args, ret = parse_cmdline(argv)
    if ret != GOOD_RET:
        return ret
    cfg = args.config
    try:
        data_tpl_content = process_data_tpl(cfg)
        process_dump_files(cfg, data_tpl_content)
    except IOError as e:
        warning("Problems reading file:", e)
        return IO_ERROR
    except InvalidDataError as e:
        warning("Problems reading data:", e)
        return INVALID_DATA
    return GOOD_RET  # success
if __name__ == '__main__':
    # Script entry point: exit with the status code main() returns.
    status = main()
    sys.exit(status)
| |
import requests
from unittest import mock
from core.flickr_api_integration import (
get_flickr_photo_list,
filter_landscape_photos,
get_random_photo_selection,
get_photo_files
)
@mock.patch('requests.get')
def test_get_flickr_photo_list_when_json_returned(mock_get):
    """The API's JSON payload should be returned unchanged."""
    mock_get.return_value.json.return_value = {'photos': {}, 'stat': 'ok'}
    assert get_flickr_photo_list() == {'photos': {}, 'stat': 'ok'}
@mock.patch('requests.get')
def test_get_flickr_photo_list_when_bad_request(mock_get):
    """A failed HTTP request should yield None."""
    mock_get.side_effect = requests.exceptions.RequestException
    assert get_flickr_photo_list() is None
def test_filter_landscape_photos_when_dict_has_photos():
    """Only photos whose width exceeds their height survive the filter."""
    def make_photo(photo_id, secret, title, width, height):
        # All sample photos share the same owner/server metadata.
        return {
            'id': photo_id,
            'owner': '128162583@N08',
            'secret': secret,
            'server': '65535',
            'farm': 66,
            'title': title,
            'ispublic': 1,
            'isfriend': 0,
            'isfamily': 0,
            'o_width': width,
            'o_height': height
        }

    landscape_1 = make_photo('49955170588', 'c6fcd9f6d8', '_MG_5772', '4882', '2888')
    portrait_1 = make_photo('49956011912', '13e1098854', '_MG_5367', '3456', '5184')
    landscape_2 = make_photo('49955956852', '10fe590daa', '_MG_5763', '3672', '2354')
    portrait_2 = make_photo('49955684711', '612c5f96a1', '_MG_5393', '3456', '5184')
    landscape_3 = make_photo('49955674596', 'bb357b3728', '_MG_5759', '4632', '2141')
    sample_photo_object = {
        'photos': {
            'page': 1,
            'pages': 192,
            'perpage': 100,
            'total': 19190,
            'photo': [landscape_1, portrait_1, landscape_2, portrait_2, landscape_3]
        },
        'stat': 'ok'
    }
    result = filter_landscape_photos(sample_photo_object)
    assert result == [landscape_1, landscape_2, landscape_3]
def test_filter_landscape_photos_when_dict_no_photos():
    """An API error payload (no 'photos' key) should yield None."""
    error_payload = {
        'stat': 'fail',
        'code': 112,
        'message': "Method 'flickr.people.getPublicPhtos' not found"}
    assert filter_landscape_photos(error_payload) is None
def test_filter_landscape_photos_when_empty_dict():
    """An empty dict should yield None."""
    assert filter_landscape_photos({}) is None
def test_filter_landscape_photos_when_string_passed():
    """A string argument should yield None."""
    assert filter_landscape_photos('some value') is None
def test_filter_landscape_photos_when_int_passed():
    """An int argument should yield None."""
    assert filter_landscape_photos(7) is None
def test_filter_landscape_photos_when_none_passed():
    """A None argument should yield None."""
    assert filter_landscape_photos(None) is None
@mock.patch('random.sample')
def test_get_random_photo_selection_when_list_present(mock_library):
    """Selection should delegate to random.sample."""
    get_random_photo_selection([], 2)
    assert mock_library.called
def test_get_random_photo_selection_when_list_empty():
    """An empty photo list should yield None."""
    assert get_random_photo_selection([], 2) is None
def test_get_random_photo_selection_when_string_passed():
    """A string argument should yield None."""
    assert get_random_photo_selection('', 2) is None
def test_get_random_photo_selection_when_none_passed():
    """A None argument should yield None."""
    assert get_random_photo_selection(None, 2) is None
def test_get_random_photo_selection_when_int_passed():
    """An int argument should yield None."""
    assert get_random_photo_selection(2, 2) is None
@mock.patch('io.BytesIO')
@mock.patch('core.flickr_api_integration.ImageFile')
@mock.patch('requests.get')
def test_get_photo_files_when_image_returned(mock_get, mock_image_file, mock_bytes_io):
    """A downloaded photo should be wrapped in an ImageFile named <id>.jpg."""
    photo = {
        'id': '49955674596',
        'owner': '128162583@N08',
        'secret': 'bb357b3728',
        'server': '65535',
        'farm': 66,
        'title': '_MG_5759',
        'ispublic': 1,
        'isfriend': 0,
        'isfamily': 0,
        'o_width': '4632',
        'o_height': '2141'
    }
    mock_get.return_value.content = b'some initial binary data: \x00\x01'
    mock_bytes_io.return_value = b'\x00\x01'
    result = get_photo_files([photo])
    mock_bytes_io.assert_called_once_with(b'some initial binary data: \x00\x01')
    mock_image_file.assert_called_once_with(b'\x00\x01', name='49955674596.jpg')
    assert isinstance(result, list)
    assert len(result) == 1
@mock.patch('requests.get')
def test_get_photo_files_when_bad_request(mock_get):
    """A failed download should yield None."""
    mock_get.side_effect = requests.exceptions.RequestException
    photo = {
        'id': '49955674596',
        'owner': '128162583@N08',
        'secret': 'bb357b3728',
        'server': '65535',
        'farm': 66,
        'title': '_MG_5759',
        'ispublic': 1,
        'isfriend': 0,
        'isfamily': 0,
        'o_width': '4632',
        'o_height': '2141'
    }
    assert get_photo_files([photo]) is None
def test_get_photo_files_when_empty_object():
    """A list containing an empty photo dict should yield None."""
    assert get_photo_files([{}]) is None
def test_get_photo_files_when_dict_passed():
    """A dict argument (not a list) should yield None."""
    assert get_photo_files({}) is None
def test_get_photo_files_when_string_passed():
    """A string argument should yield None."""
    assert get_photo_files('some string') is None
def test_get_photo_files_when_int_passed():
    """An int argument should yield None."""
    assert get_photo_files(3) is None
def test_get_photo_files_when_none_passed():
    """A None argument should yield None."""
    assert get_photo_files(None) is None
| |
#!/usr/bin/env python
# Copyright 2016 VMWare, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An example of connecting to (local) VSAN SIMS and fetching simple data
# from VSAN and VsphereContainerService
#
# Usage:
#
# 1. Drop VsphereContainerService*py files to
# /lib/python2.7/site-packages/pyMo/vim/vsan or
# /lib64/python3.5/site-packages/pyMo/vim/vsan
# Do not forget /etc/init.d/vsanmgmtd restart
#
# 1a. Drop the file below (vmodl_test.py) in local folder or wherever import works from
#
# 2. In Python, run the following:
# import vmodl_test
# stub = vmodl_test.connect_to_vcs()
# vmodl_test.get_tenants(stub) # print tenant list
import os, sys
# nothing to test until we put VMODL back in the VIB
# See https://github.com/vmware/docker-volume-vsphere/pull/975 for details.
if "INSTALL_VMODL" not in os.environ:
    # Exit the whole module with status 0 so the skip is not treated as a failure.
    print("Skipping VMODL test - INSTALL_VMODL is not defined")
    sys.exit(0)
import ssl
sys.path.append('/lib64/python3.5/site-packages/pyMo/vim/vsan')
sys.path.append('/lib/python2.7/site-packages/pyMo/vim/vsan')
import pyVim
import pyVim.connect
import pyVim.host
import pyVmomi
import pyVmomi.VmomiSupport
from pyVmomi import vim, vmodl
from vsanPerfPyMo import VsanPerformanceManager
import random
import unittest
import log_config
import vmdk_ops
import vmdk_ops_test
import vmdk_utils
import VsphereContainerService
# Module-level service-instance placeholder.
# NOTE(review): appears unused in this chunk (connect_to_vcs uses a local) — confirm.
si = None
TENANT_NAME = "TEST_TENANT_NAME"
TENANT_DESC = "TEST_TENANT_DESCRIPTION"
NEW_TENANT_NAME = "TEST_TENANT_NAME_2"
NEW_TENANT_DESC = "TEST_TENANT_DESCRIPTION_2"
# Prefix used to recognize (and clean up) tenants created by these tests.
TENANT_PREFIX = "TEST_TENANT_"
# 65 characters: one past the 64-character name limit the tests exercise.
LONG_TENANT_NAME = "01234567890123456789012345678901234567890123456789\
012345678901234"
# 257 characters: one past the 256-character description limit the tests exercise.
LONG_TENANT_DESC = "01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
01234567890123456789012345678901234567890123456789\
0123456"
VM_NOT_EXIST = "VM_NOT_EXIST"
DS_NOT_EXIST = "DS_NOT_EXIST"
def connect_to_vcs(host="localhost", port=443):
    """
    Connect to VCS - currently utilizing VSAN mgmt service on ESX (/vsan) - and return SOAP stub
    """
    service_instance = vmdk_ops.get_si()
    # pylint: disable=no-member
    host_system = pyVim.host.GetHostSystem(service_instance)
    token = host_system.configManager.vsanSystem.FetchVsanSharedSecret()
    version = pyVmomi.VmomiSupport.newestVersions.Get("vim")
    stub = pyVmomi.SoapStubAdapter(host=host,
                                   port=port,
                                   version=version,
                                   path="/vsan",
                                   poolSize=0)
    vpm = vim.cluster.VsanPerformanceManager("vsan-performance-manager", stub)
    # Disable certificate check during SSL communication
    disable_certificate_check()
    if not vpm.Login(token):
        print("Failed to get sims stub for host %s" % host)
        raise OSError("Failed to login to VSAN mgmt server")
    return stub
def disable_certificate_check():
    # Globally disable SSL certificate verification for HTTPS connections
    # made via the default context (the local mgmt server's cert would not
    # pass verification otherwise).
    ssl._create_default_https_context = ssl._create_unverified_context
def get_tenants(stub):
    """Return the tenant list from the VsphereContainerService behind *stub*."""
    vcs = vim.vcs.VsphereContainerService("vsphere-container-service", stub)
    return vcs.GetTenantManager().GetTenants()
class TestVsphereContainerService(unittest.TestCase):
    """
    Unit tests for VsphereContainerServiceImpl
    """
    # Shared service handles, populated once in setUpClass.
    vcs = None
    tenantMgr = None
    # Random suffixes keep the test VM names unique across runs.
    random_id = random.randint(0, 65536)
    vm1_name = 'vm1_name_' + str(random_id)
    vm1 = None
    random_id = random.randint(0, 65536)
    vm2_name = 'vm2_name_' + str(random_id)
    vm2 = None
    # Datastore names discovered in setup_datastore (second one is optional).
    datastore = None
    datastore2 = None
    @classmethod
    def setUpClass(cls):
        # One-time fixture: connect to the service, then create the shared
        # datastore references and test VMs used by all tests.
        stub = connect_to_vcs()
        cls.vcs = vim.vcs.VsphereContainerService("vsphere-container-service", stub)
        cls.tenantMgr = cls.vcs.GetTenantManager()
        cls.setup_datastore()
        cls.create_vms()
@classmethod
def setup_datastore(cls):
datastores = vmdk_utils.get_datastore_objects()
if datastores:
cls.datastore = datastores[0].info.name
if len(datastores) > 1:
cls.datastore2 = datastores[1].info.name
else:
cls.fail("Datastore is not available!")
@classmethod
def create_vms(cls):
si = vmdk_ops.get_si()
error, cls.vm1 = vmdk_ops_test.create_vm(si=si,
vm_name=cls.vm1_name,
datastore_name=cls.datastore)
if error:
cls.fail("Failed to create VM1!")
error, cls.vm2 = vmdk_ops_test.create_vm(si=si,
vm_name=cls.vm2_name,
datastore_name=cls.datastore)
if error:
cls.fail("Failed to create VM2!")
    @classmethod
    def tearDownClass(cls):
        """ Cleanup after all tests """
        # Delete the shared VMs created in setUpClass.
        cls.cleanup_vms()
@classmethod
def cleanup_vms(cls):
si = vmdk_ops.get_si()
vmdk_ops_test.remove_vm(si, cls.vm1)
vmdk_ops_test.remove_vm(si, cls.vm2)
    def tearDown(self):
        """ Cleanup after each test """
        # Remove any tenants a test created so tests stay independent.
        self.cleanup_tenants()
def cleanup_tenants(self):
tenants = self.tenantMgr.GetTenants()
for tenant in tenants:
if tenant.name.startswith(TENANT_PREFIX):
self.tenantMgr.RemoveTenant(tenant.name)
def test_create_tenant(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Verify the result
self.assertTrue(tenant)
self.assertEqual(tenant.name, TENANT_NAME)
self.assertEqual(tenant.description, TENANT_DESC)
def test_create_tenant_invalid_args(self):
# Create a tenant with empty name
empty_name = ""
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=empty_name)
# Create a tenant with name longer than 64 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=LONG_TENANT_NAME)
# Create a tenant with description longer than 256 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=LONG_TENANT_DESC)
def test_create_tenant_already_exists(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a tenant with same name
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
def test_get_tenant(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Get the tenant
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
# Verify the result
self.assertTrue(tenants)
self.assertEqual(tenants[0].name, TENANT_NAME)
self.assertEqual(tenants[0].description, TENANT_DESC)
def test_get_tenant_not_exists(self):
# Get the tenant
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
# Verify the result
self.assertFalse(tenants)
def test_get_all_tenants(self):
# Create 2 tenants
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
self.tenantMgr.CreateTenant(name=NEW_TENANT_NAME, description=NEW_TENANT_NAME)
# Get all tenants
tenants = self.tenantMgr.GetTenants()
# Verify the result
self.assertTrue(tenants)
self.assertEqual(len(tenants), 3) # plus DEFAULT tenant
def test_remove_tenant(self):
# Create a tenant
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Verify the result
tenants = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertFalse(tenants)
def test_remove_tenant_not_exists(self):
# Remove a tenant not exists
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
def test_update_tenant(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the tenant
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME, description=NEW_TENANT_DESC)
# Verify the result
tenants = self.tenantMgr.GetTenants(name=NEW_TENANT_NAME)
self.assertTrue(tenants)
self.assertEqual(tenants[0].name, NEW_TENANT_NAME)
self.assertEqual(tenants[0].description, NEW_TENANT_DESC)
def test_update_tenant_invalid_args(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the tenant with same name
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=TENANT_NAME)
# Update a tenant with empty name
empty_name = ""
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=empty_name)
# Update the tenant with new name longer than 64 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=LONG_TENANT_NAME)
# Create a tenant with new description longer than 256 characters
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME, description=LONG_TENANT_DESC)
def test_update_tenant_not_exists(self):
# Update a tenant not exists
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME)
def test_update_tenant_already_exists(self):
# Create 2 tenants
self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
self.tenantMgr.CreateTenant(name=NEW_TENANT_NAME, description=NEW_TENANT_DESC)
# Update one tenant with same name as the other tenant
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.UpdateTenant(name=TENANT_NAME, new_name=NEW_TENANT_NAME)
def test_add_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Verify the result
result=self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, vms)
def test_add_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Add a VM to the noon-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.AddVMs(tenant, vms)
def test_add_vms_already_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Add the same VM again
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.AddVMs(tenant, vms)
def test_add_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a non-existent VM to the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddVMs(tenant, vms)
def test_remove_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add 2 VMs to the tenant
vms = [self.vm1_name, self.vm2_name]
self.tenantMgr.AddVMs(tenant, vms)
# Remove the VMs from the tenant
self.tenantMgr.RemoveVMs(tenant, vms)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, [])
def test_remove_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Remove a VM from the non-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_remove_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a non-existent VM from the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_remove_vms_not_related(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a VM not belonging to this tenant
vms = [self.vm1_name]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemoveVMs(tenant, vms)
def test_get_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add 2 VMs to the tenant
vms = [self.vm1_name, self.vm2_name]
self.tenantMgr.AddVMs(tenant, vms)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertEqual(result[0].vms, vms)
def test_replace_vms(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add VM1 to the tenant
vm1 = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vm1)
# Replace with VM2
vm2 = [self.vm2_name]
self.tenantMgr.ReplaceVMs(tenant, vm2)
# Verify the result
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
vms = result[0].vms
self.assertEqual(vms, vm2)
def test_replace_vms_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Replace a VM for the non-existent tenant
vms = [self.vm1_name]
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.ReplaceVMs(tenant, vms)
def test_replace_vms_not_exist(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Add a VM to the tenant
vms = [self.vm1_name]
self.tenantMgr.AddVMs(tenant, vms)
# Replace a non-existent VM for the tenant
vms = [VM_NOT_EXIST]
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.ReplaceVMs(tenant, vms)
    def create_privilege(self):
        # Build a permissive privilege on the primary datastore
        # (create allowed, 512 MB per volume / 1024 MB total —
        # units presumed MB, TODO confirm against the service API).
        privilege = vim.vcs.storage.DatastoreAccessPrivilege()
        privilege.datastore = self.datastore
        privilege.allow_create = True
        privilege.volume_max_size = 512
        privilege.volume_total_size = 1024
        return privilege
    def create_privilege_2(self):
        # Build a restrictive privilege on the secondary datastore
        # (creation disallowed, larger size limits than create_privilege).
        privilege = vim.vcs.storage.DatastoreAccessPrivilege()
        privilege.datastore = self.datastore2
        privilege.allow_create = False
        privilege.volume_max_size = 1024
        privilege.volume_total_size = 2048
        return privilege
def test_add_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
p = result[0].privileges
self.assertTrue(p)
self.assertEqual(p[0].datastore, self.datastore)
self.assertEqual(p[0].allow_create, True)
self.assertEqual(p[0].volume_max_size, 512)
self.assertEqual(p[0].volume_total_size, 1024)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore)
def test_add_privilege_default_datastore_false(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add the 1st privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Add the 2nd privilege to the tenant, with default_datastore set to false
self.tenantMgr.AddPrivilege(tenant, p2, default_datastore=False)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore)
def test_add_privilege_default_datastore_true(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add the 1st privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Add the 2nd privilege to the tenant, with default_datastore set to true
self.tenantMgr.AddPrivilege(tenant, p2, default_datastore=True)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the default datastore
self.assertEqual(result[0].default_datastore, self.datastore2)
def test_add_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the non-existent tenant
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_already_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add the privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Add the same privilege to the tenant again
with self.assertRaises(vim.fault.AlreadyExists):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_invalid_datastore(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege with invalid datastore
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = DS_NOT_EXIST
privilege.allow_create = False
privilege.volume_max_size = 1024
privilege.volume_total_size = 2048
# Add the privilege to the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_add_privilege_invalid_volume_size(self):
""" Test add privilege with volume_total_size lesser than existing volume_max_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege with invalid volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = False
privilege.volume_max_size = 2048
privilege.volume_total_size = 1024
# Add the privilege to the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.AddPrivilege(tenant, privilege)
def test_remove_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove privilege from the tenant
self.tenantMgr.RemovePrivilege(tenant, self.datastore)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
self.assertFalse(result[0].privileges)
# Verify the default datastore
self.assertFalse(result[0].default_datastore)
def test_remove_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Remove privilege from the non-existent tenant
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.RemovePrivilege(tenant, privilege.datastore)
def test_remove_privilege_invalid_arg_1(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a privilege with non-existent datastore from the tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemovePrivilege(tenant, DS_NOT_EXIST)
def test_remove_privilege_invalid_arg_2(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Remove a privilege not associated with this tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.RemovePrivilege(tenant, self.datastore)
def test_update_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
# Verify the privilege
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
p = result[0].privileges
self.assertTrue(p)
self.assertEqual(p[0].datastore, self.datastore)
self.assertEqual(p[0].allow_create, False)
self.assertEqual(p[0].volume_max_size, 1024)
self.assertEqual(p[0].volume_total_size, 2048)
def test_update_privilege_with_invalid_volume_size(self):
""" Test privilege update with volume_max_size greater than volume_total_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048, volume_total_size=1024)
def test_update_privilege_with_invalid_total_size(self):
""" Test privilege update with volume_total_size lesser than existing volume_max_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
privilege.volume_max_size = 2048
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_total_size=1024)
def test_update_privilege_with_invalid_max_size(self):
""" Test privilege update with volume_max_size greater than existing volume_total_size """
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege without volume size settings
privilege = vim.vcs.storage.DatastoreAccessPrivilege()
privilege.datastore = self.datastore
privilege.allow_create = True
privilege.volume_total_size = 1024
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Update the privilege with invalid volume size
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048)
def test_update_privilege_tenant_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
privilege = self.create_privilege()
# Add privilege to the tenant
self.tenantMgr.AddPrivilege(tenant, privilege)
# Remove the tenant
self.tenantMgr.RemoveTenant(name=TENANT_NAME)
# Update the privilege
with self.assertRaises(vim.fault.NotFound):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_update_privilege_datastore_not_exists(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the privilege with non-existent datastore
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, DS_NOT_EXIST, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_update_privilege_datastore_not_related(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Update the privilege with a datastore not associated with this tenant
with self.assertRaises(vmodl.fault.InvalidArgument):
self.tenantMgr.UpdatePrivilege(tenant, self.datastore, allow_create=False, volume_max_size=1024, volume_total_size=2048)
def test_get_privilege(self):
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create a privilege
p1 = self.create_privilege()
# Add privileges to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the privilege
privileges = result[0].privileges
self.assertTrue(privileges)
self.assertEqual(len(privileges), 1)
privilege = privileges[0]
self.assertTrue(privilege)
self.assertEqual(privilege.allow_create, True)
self.assertEqual(privilege.volume_max_size, 512)
self.assertEqual(privilege.volume_total_size, 1024)
def test_get_privileges(self):
if not self.datastore2:
return
# Create a tenant
tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)
# Create 2 privileges
p1 = self.create_privilege()
p2 = self.create_privilege_2()
# Add privileges to the tenant
self.tenantMgr.AddPrivilege(tenant, p1)
self.tenantMgr.AddPrivilege(tenant, p2)
# Get the tenant
result = self.tenantMgr.GetTenants(name=TENANT_NAME)
self.assertTrue(result)
# Verify the privileges
privileges = result[0].privileges
self.assertTrue(privileges)
self.assertEqual(len(privileges), 2)
privilege1 = None
privilege2 = None
for privilege in privileges:
if privilege.datastore == self.datastore:
privilege1 = privilege
elif privilege.datastore == self.datastore2:
privilege2 = privilege
self.assertTrue(privilege1)
self.assertTrue(privilege2)
self.assertEqual(privilege1.allow_create, True)
self.assertEqual(privilege1.volume_max_size, 512)
self.assertEqual(privilege1.volume_total_size, 1024)
self.assertEqual(privilege2.allow_create, False)
self.assertEqual(privilege2.volume_max_size, 1024)
self.assertEqual(privilege2.volume_total_size, 2048)
if __name__ == "__main__":
    # Configure logging before handing control to the unittest runner.
    log_config.configure()
    unittest.main()
| |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase token minting and validation sub module."""
import datetime
import time
import cachecontrol
import requests
from google.auth import credentials
from google.auth import iam
from google.auth import jwt
from google.auth import transport
import google.auth.exceptions
import google.oauth2.id_token
import google.oauth2.service_account
from firebase_admin import exceptions
from firebase_admin import _auth_utils
from firebase_admin import _http_client
# ID token constants
ID_TOKEN_ISSUER_PREFIX = 'https://securetoken.google.com/'
ID_TOKEN_CERT_URI = ('https://www.googleapis.com/robot/v1/metadata/x509/'
                     'securetoken@system.gserviceaccount.com')
# Session cookie constants
COOKIE_ISSUER_PREFIX = 'https://session.firebase.google.com/'
COOKIE_CERT_URI = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/publicKeys'
# Allowed cookie lifetime range (5 minutes to 14 days); enforced by
# TokenGenerator.create_session_cookie below.
MIN_SESSION_COOKIE_DURATION_SECONDS = int(datetime.timedelta(minutes=5).total_seconds())
MAX_SESSION_COOKIE_DURATION_SECONDS = int(datetime.timedelta(days=14).total_seconds())
# Custom token constants
# Custom tokens expire one hour after issuance (see create_custom_token).
MAX_TOKEN_LIFETIME_SECONDS = int(datetime.timedelta(hours=1).total_seconds())
FIREBASE_AUDIENCE = ('https://identitytoolkit.googleapis.com/google.'
                     'identity.identitytoolkit.v1.IdentityToolkit')
# Registered JWT claim names that callers may not override via developer_claims.
RESERVED_CLAIMS = set([
    'acr', 'amr', 'at_hash', 'aud', 'auth_time', 'azp', 'cnf', 'c_hash',
    'exp', 'firebase', 'iat', 'iss', 'jti', 'nbf', 'nonce', 'sub'
])
# Metadata-server endpoint used to discover the default service account email.
METADATA_SERVICE_URL = ('http://metadata.google.internal/computeMetadata/v1/instance/'
                        'service-accounts/default/email')
ALGORITHM_RS256 = 'RS256'
# 'none' is only used for unsigned tokens in emulator mode.
ALGORITHM_NONE = 'none'
# Emulator fake account
AUTH_EMULATOR_EMAIL = 'firebase-auth-emulator@example.com'
class _EmulatedSigner(google.auth.crypt.Signer):
    # Signer used in emulator mode: it produces an empty signature, and
    # _SigningProvider.for_emulator pairs it with alg='none'.
    # NOTE(review): presumably the Auth emulator ignores signatures — confirm.
    key_id = None  # emulated tokens carry no 'kid' header

    def __init__(self):
        pass

    def sign(self, message):
        # Empty signature; the message content is deliberately ignored.
        return b''
class _SigningProvider:
    """Stores a reference to a google.auth.crypt.Signer plus signer metadata.

    Bundles the signer, the service account email used as the token issuer and
    subject, and the JWT signing algorithm.

    Fixes: the alternate constructors now build instances via ``cls(...)``
    instead of hard-coding ``_SigningProvider(...)``, so subclasses inherit
    working factories; the docstring typo ``crypto`` is corrected to ``crypt``.
    """

    def __init__(self, signer, signer_email, alg=ALGORITHM_RS256):
        self._signer = signer
        self._signer_email = signer_email
        self._alg = alg

    @property
    def signer(self):
        return self._signer

    @property
    def signer_email(self):
        return self._signer_email

    @property
    def alg(self):
        return self._alg

    @classmethod
    def from_credential(cls, google_cred):
        # Service-account credentials expose a signer and email directly.
        return cls(google_cred.signer, google_cred.signer_email)

    @classmethod
    def from_iam(cls, request, google_cred, service_account):
        # Sign remotely via the IAM signBlob API on behalf of service_account.
        signer = iam.Signer(request, google_cred, service_account)
        return cls(signer, service_account)

    @classmethod
    def for_emulator(cls):
        # Emulator tokens are unsigned (alg='none') under a fake account.
        return cls(_EmulatedSigner(), AUTH_EMULATOR_EMAIL, ALGORITHM_NONE)
class TokenGenerator:
    """Generates custom tokens and session cookies."""

    ID_TOOLKIT_URL = 'https://identitytoolkit.googleapis.com/v1'

    def __init__(self, app, http_client, url_override=None):
        # app: firebase_admin App whose credential/options drive signing.
        # http_client: client used for the createSessionCookie backend call.
        # url_override: optional alternate Identity Toolkit endpoint.
        self.app = app
        self.http_client = http_client
        self.request = transport.requests.Request()
        url_prefix = url_override or self.ID_TOOLKIT_URL
        self.base_url = '{0}/projects/{1}'.format(url_prefix, app.project_id)
        # Lazily initialized by the signing_provider property below.
        self._signing_provider = None

    def _init_signing_provider(self):
        """Initializes a signing provider by following the go/firebase-admin-sign protocol."""
        # Emulator mode requires no real credentials; tokens stay unsigned.
        if _auth_utils.is_emulated():
            return _SigningProvider.for_emulator()
        # If the SDK was initialized with a service account, use it to sign bytes.
        google_cred = self.app.credential.get_credential()
        if isinstance(google_cred, google.oauth2.service_account.Credentials):
            return _SigningProvider.from_credential(google_cred)
        # If the SDK was initialized with a service account email, use it with the IAM service
        # to sign bytes.
        service_account = self.app.options.get('serviceAccountId')
        if service_account:
            return _SigningProvider.from_iam(self.request, google_cred, service_account)
        # If the SDK was initialized with some other credential type that supports signing
        # (e.g. GAE credentials), use it to sign bytes.
        if isinstance(google_cred, credentials.Signing):
            return _SigningProvider.from_credential(google_cred)
        # Attempt to discover a service account email from the local Metadata service. Use it
        # with the IAM service to sign bytes.
        resp = self.request(url=METADATA_SERVICE_URL, headers={'Metadata-Flavor': 'Google'})
        if resp.status != 200:
            raise ValueError(
                'Failed to contact the local metadata service: {0}.'.format(resp.data.decode()))
        service_account = resp.data.decode()
        return _SigningProvider.from_iam(self.request, google_cred, service_account)

    @property
    def signing_provider(self):
        """Initializes and returns the SigningProvider instance to be used."""
        # Cached after first access: initialization may hit the network
        # (metadata service / IAM), so it is done at most once.
        if not self._signing_provider:
            try:
                self._signing_provider = self._init_signing_provider()
            except Exception as error:
                url = 'https://firebase.google.com/docs/auth/admin/create-custom-tokens'
                raise ValueError(
                    'Failed to determine service account: {0}. Make sure to initialize the SDK '
                    'with service account credentials or specify a service account ID with '
                    'iam.serviceAccounts.signBlob permission. Please refer to {1} for more '
                    'details on creating custom tokens.'.format(error, url))
        return self._signing_provider

    def create_custom_token(self, uid, developer_claims=None, tenant_id=None):
        """Builds and signs a Firebase custom auth token.

        Raises:
            ValueError: if uid or developer_claims are malformed.
            TokenSignError: if the signing transport fails.
        """
        # Reject developer claims that collide with registered JWT claims.
        if developer_claims is not None:
            if not isinstance(developer_claims, dict):
                raise ValueError('developer_claims must be a dictionary')
            disallowed_keys = set(developer_claims.keys()) & RESERVED_CLAIMS
            if disallowed_keys:
                if len(disallowed_keys) > 1:
                    error_message = ('Developer claims {0} are reserved and '
                                     'cannot be specified.'.format(
                                         ', '.join(disallowed_keys)))
                else:
                    error_message = ('Developer claim {0} is reserved and '
                                     'cannot be specified.'.format(
                                         ', '.join(disallowed_keys)))
                raise ValueError(error_message)
        if not uid or not isinstance(uid, str) or len(uid) > 128:
            raise ValueError('uid must be a string between 1 and 128 characters.')
        signing_provider = self.signing_provider
        now = int(time.time())
        payload = {
            'iss': signing_provider.signer_email,
            'sub': signing_provider.signer_email,
            'aud': FIREBASE_AUDIENCE,
            'uid': uid,
            'iat': now,
            # Custom tokens expire MAX_TOKEN_LIFETIME_SECONDS (1h) from now.
            'exp': now + MAX_TOKEN_LIFETIME_SECONDS,
        }
        if tenant_id:
            payload['tenant_id'] = tenant_id
        if developer_claims is not None:
            payload['claims'] = developer_claims
        header = {'alg': signing_provider.alg}
        try:
            return jwt.encode(signing_provider.signer, payload, header=header)
        except google.auth.exceptions.TransportError as error:
            # IAM-backed signing goes over the network; wrap transport errors.
            msg = 'Failed to sign custom token. {0}'.format(error)
            raise TokenSignError(msg, error)

    def create_session_cookie(self, id_token, expires_in):
        """Creates a session cookie from the provided ID token.

        expires_in may be an int number of seconds or a datetime.timedelta,
        and must fall within the MIN/MAX_SESSION_COOKIE_DURATION_SECONDS range.
        """
        id_token = id_token.decode('utf-8') if isinstance(id_token, bytes) else id_token
        if not isinstance(id_token, str) or not id_token:
            raise ValueError(
                'Illegal ID token provided: {0}. ID token must be a non-empty '
                'string.'.format(id_token))
        if isinstance(expires_in, datetime.timedelta):
            expires_in = int(expires_in.total_seconds())
        # bool is an int subclass, so it must be rejected explicitly.
        if isinstance(expires_in, bool) or not isinstance(expires_in, int):
            raise ValueError('Illegal expiry duration: {0}.'.format(expires_in))
        if expires_in < MIN_SESSION_COOKIE_DURATION_SECONDS:
            raise ValueError('Illegal expiry duration: {0}. Duration must be at least {1} '
                             'seconds.'.format(expires_in, MIN_SESSION_COOKIE_DURATION_SECONDS))
        if expires_in > MAX_SESSION_COOKIE_DURATION_SECONDS:
            raise ValueError('Illegal expiry duration: {0}. Duration must be at most {1} '
                             'seconds.'.format(expires_in, MAX_SESSION_COOKIE_DURATION_SECONDS))
        url = '{0}:createSessionCookie'.format(self.base_url)
        payload = {
            'idToken': id_token,
            'validDuration': expires_in,
        }
        try:
            body, http_resp = self.http_client.body_and_response('post', url, json=payload)
        except requests.exceptions.RequestException as error:
            # Translate transport/backend failures into Firebase exceptions.
            raise _auth_utils.handle_auth_backend_error(error)
        else:
            if not body or not body.get('sessionCookie'):
                raise _auth_utils.UnexpectedResponseError(
                    'Failed to create session cookie.', http_response=http_resp)
            return body.get('sessionCookie')
class CertificateFetchRequest(transport.Request):
    """A google-auth transport that supports HTTP cache-control.

    Also injects a timeout to each outgoing HTTP request.
    """

    def __init__(self, timeout_seconds=None):
        # Wrap a requests session in cache-control so public certificates are
        # refetched only when their cache headers require it.
        self._session = cachecontrol.CacheControl(requests.Session())
        self._delegate = transport.requests.Request(self.session)
        self._timeout_seconds = timeout_seconds

    @property
    def session(self):
        return self._session

    @property
    def timeout_seconds(self):
        return self._timeout_seconds

    def __call__(self, url, method='GET', body=None, headers=None, timeout=None, **kwargs):
        # A truthy per-call timeout wins; otherwise use the configured default.
        effective_timeout = timeout or self.timeout_seconds
        return self._delegate(
            url, method=method, body=body, headers=headers,
            timeout=effective_timeout, **kwargs)
class TokenVerifier:
    """Verifies ID tokens and session cookies."""

    def __init__(self, app):
        # One certificate-fetching transport (cached, app-configurable timeout)
        # is shared by both verifiers.
        timeout = app.options.get('httpTimeout', _http_client.DEFAULT_TIMEOUT_SECONDS)
        self.request = CertificateFetchRequest(timeout)
        # Verifier configured for Firebase ID tokens.
        self.id_token_verifier = _JWTVerifier(
            project_id=app.project_id, short_name='ID token',
            operation='verify_id_token()',
            doc_url='https://firebase.google.com/docs/auth/admin/verify-id-tokens',
            cert_url=ID_TOKEN_CERT_URI,
            issuer=ID_TOKEN_ISSUER_PREFIX,
            invalid_token_error=_auth_utils.InvalidIdTokenError,
            expired_token_error=ExpiredIdTokenError)
        # Verifier configured for Firebase session cookies (different issuer,
        # certificates, and exception types).
        self.cookie_verifier = _JWTVerifier(
            project_id=app.project_id, short_name='session cookie',
            operation='verify_session_cookie()',
            doc_url='https://firebase.google.com/docs/auth/admin/verify-id-tokens',
            cert_url=COOKIE_CERT_URI,
            issuer=COOKIE_ISSUER_PREFIX,
            invalid_token_error=InvalidSessionCookieError,
            expired_token_error=ExpiredSessionCookieError)

    def verify_id_token(self, id_token):
        # Delegates to the ID-token-configured verifier.
        return self.id_token_verifier.verify(id_token, self.request)

    def verify_session_cookie(self, cookie):
        # Delegates to the session-cookie-configured verifier.
        return self.cookie_verifier.verify(cookie, self.request)
class _JWTVerifier:
    """Verifies Firebase JWTs (ID tokens or session cookies)."""

    def __init__(self, **kwargs):
        # project_id: Firebase project the token must belong to.
        # short_name: human-readable token name used in error messages.
        # operation: public API name reported in error messages.
        # doc_url: documentation link included in error messages.
        # cert_url: URL of the public certificates used for verification.
        # issuer: expected 'iss' prefix; the project id is appended to it.
        self.project_id = kwargs.pop('project_id')
        self.short_name = kwargs.pop('short_name')
        self.operation = kwargs.pop('operation')
        self.url = kwargs.pop('doc_url')
        self.cert_url = kwargs.pop('cert_url')
        self.issuer = kwargs.pop('issuer')
        # Precompute 'a'/'an' so error messages read naturally.
        if self.short_name[0].lower() in 'aeiou':
            self.articled_short_name = 'an {0}'.format(self.short_name)
        else:
            self.articled_short_name = 'a {0}'.format(self.short_name)
        self._invalid_token_error = kwargs.pop('invalid_token_error')
        self._expired_token_error = kwargs.pop('expired_token_error')

    def verify(self, token, request):
        """Verifies the signature and data for the provided JWT."""
        token = token.encode('utf-8') if isinstance(token, str) else token
        if not isinstance(token, bytes) or not token:
            raise ValueError(
                'Illegal {0} provided: {1}. {0} must be a non-empty '
                'string.'.format(self.short_name, token))
        if not self.project_id:
            raise ValueError(
                'Failed to ascertain project ID from the credential or the environment. Project '
                'ID is required to call {0}. Initialize the app with a credentials.Certificate '
                'or set your Firebase project ID as an app option. Alternatively set the '
                'GOOGLE_CLOUD_PROJECT environment variable.'.format(self.operation))
        # Decode without signature verification first, so the checks below can
        # produce specific error messages before cryptographic verification.
        header, payload = self._decode_unverified(token)
        issuer = payload.get('iss')
        audience = payload.get('aud')
        subject = payload.get('sub')
        expected_issuer = self.issuer + self.project_id
        project_id_match_msg = (
            'Make sure the {0} comes from the same Firebase project as the service account used '
            'to authenticate this SDK.'.format(self.short_name))
        verify_id_token_msg = (
            'See {0} for details on how to retrieve {1}.'.format(self.url, self.short_name))
        # Emulator tokens are unsigned, so header ('kid'/'alg') checks are skipped.
        emulated = _auth_utils.is_emulated()
        error_message = None
        if audience == FIREBASE_AUDIENCE:
            # A custom token was passed where an ID token/cookie was expected.
            error_message = (
                '{0} expects {1}, but was given a custom '
                'token.'.format(self.operation, self.articled_short_name))
        elif not emulated and not header.get('kid'):
            # Distinguish legacy HS256 custom tokens from other malformed input.
            if header.get('alg') == 'HS256' and payload.get(
                    'v') == 0 and 'uid' in payload.get('d', {}):
                error_message = (
                    '{0} expects {1}, but was given a legacy custom '
                    'token.'.format(self.operation, self.articled_short_name))
            else:
                error_message = 'Firebase {0} has no "kid" claim.'.format(self.short_name)
        elif not emulated and header.get('alg') != 'RS256':
            error_message = (
                'Firebase {0} has incorrect algorithm. Expected "RS256" but got '
                '"{1}". {2}'.format(self.short_name, header.get('alg'), verify_id_token_msg))
        elif audience != self.project_id:
            error_message = (
                'Firebase {0} has incorrect "aud" (audience) claim. Expected "{1}" but '
                'got "{2}". {3} {4}'.format(self.short_name, self.project_id, audience,
                                            project_id_match_msg, verify_id_token_msg))
        elif issuer != expected_issuer:
            error_message = (
                'Firebase {0} has incorrect "iss" (issuer) claim. Expected "{1}" but '
                'got "{2}". {3} {4}'.format(self.short_name, expected_issuer, issuer,
                                            project_id_match_msg, verify_id_token_msg))
        elif subject is None or not isinstance(subject, str):
            error_message = (
                'Firebase {0} has no "sub" (subject) claim. '
                '{1}'.format(self.short_name, verify_id_token_msg))
        elif not subject:
            error_message = (
                'Firebase {0} has an empty string "sub" (subject) claim. '
                '{1}'.format(self.short_name, verify_id_token_msg))
        elif len(subject) > 128:
            error_message = (
                'Firebase {0} has a "sub" (subject) claim longer than 128 characters. '
                '{1}'.format(self.short_name, verify_id_token_msg))
        if error_message:
            raise self._invalid_token_error(error_message)
        try:
            if emulated:
                # Emulator tokens are unsigned; trust the decoded payload.
                verified_claims = payload
            else:
                verified_claims = google.oauth2.id_token.verify_token(
                    token,
                    request=request,
                    audience=self.project_id,
                    certs_url=self.cert_url)
            # Expose the subject under the Firebase-conventional 'uid' key.
            verified_claims['uid'] = verified_claims['sub']
            return verified_claims
        except google.auth.exceptions.TransportError as error:
            raise CertificateFetchError(str(error), cause=error)
        except ValueError as error:
            # google-auth reports expiry via ValueError; map it to the
            # configured expired-token error, everything else to invalid.
            if 'Token expired' in str(error):
                raise self._expired_token_error(str(error), cause=error)
            raise self._invalid_token_error(str(error), cause=error)

    def _decode_unverified(self, token):
        # Decode header and payload without verifying the signature; used only
        # to produce detailed validation errors before real verification.
        try:
            header = jwt.decode_header(token)
            payload = jwt.decode(token, verify=False)
            return header, payload
        except ValueError as error:
            raise self._invalid_token_error(str(error), cause=error)
class TokenSignError(exceptions.UnknownError):
    """Unexpected error while signing a Firebase custom token."""

    def __init__(self, message, cause):
        # Single-inheritance chain: super() resolves to exceptions.UnknownError.
        super().__init__(message, cause)
class CertificateFetchError(exceptions.UnknownError):
    """Failed to fetch some public key certificates required to verify a token."""

    def __init__(self, message, cause):
        super().__init__(message, cause)
class ExpiredIdTokenError(_auth_utils.InvalidIdTokenError):
    """The provided ID token is expired."""

    def __init__(self, message, cause):
        super().__init__(message, cause)
class RevokedIdTokenError(_auth_utils.InvalidIdTokenError):
    """The provided ID token has been revoked."""

    def __init__(self, message):
        # Revocation carries no underlying cause exception.
        super().__init__(message)
class InvalidSessionCookieError(exceptions.InvalidArgumentError):
    """The provided string is not a valid Firebase session cookie."""

    def __init__(self, message, cause=None):
        super().__init__(message, cause)
class ExpiredSessionCookieError(InvalidSessionCookieError):
    """The provided session cookie is expired."""

    def __init__(self, message, cause):
        super().__init__(message, cause)
class RevokedSessionCookieError(InvalidSessionCookieError):
    """The provided session cookie has been revoked."""

    def __init__(self, message):
        # Revocation carries no underlying cause exception.
        super().__init__(message)
| |
import builtins
import getpass
import sys
from datetime import date
from io import StringIO
from unittest import mock
from django.apps import apps
from django.contrib.auth import management
from django.contrib.auth.management import (
create_permissions, get_default_username,
)
from django.contrib.auth.management.commands import (
changepassword, createsuperuser,
)
from django.contrib.auth.models import Group, Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import migrations
from django.test import TestCase, override_settings
from django.utils.translation import gettext_lazy as _
from .models import (
CustomUser, CustomUserNonUniqueUsername, CustomUserWithFK, Email,
)
def mock_inputs(inputs):
    """
    Decorator to temporarily replace input/getpass to allow interactive
    createsuperuser.
    """
    def inner(test_func):
        def wrapped(*args):
            class mock_getpass:
                @staticmethod
                def getpass(prompt=b'Password: ', stream=None):
                    pwd = inputs['password']
                    return pwd() if callable(pwd) else pwd

            def mock_input(prompt):
                assert '__proxy__' not in prompt
                # Answer with the first configured input whose key appears in
                # the prompt; fall back to an empty response.
                answer = ''
                for key, val in inputs.items():
                    if key in prompt.lower():
                        answer = val() if callable(val) else val
                        break
                return answer

            saved_getpass = createsuperuser.getpass
            saved_input = builtins.input
            createsuperuser.getpass = mock_getpass
            builtins.input = mock_input
            try:
                test_func(*args)
            finally:
                # Always restore the real input/getpass, even on failure.
                createsuperuser.getpass = saved_getpass
                builtins.input = saved_input
        return wrapped
    return inner
class MockTTY:
    """
    Stand-in for stdin that claims to be a terminal, so interactive commands
    driven by mock_inputs do not skip their prompts.
    """
    def isatty(self):
        return True
class GetDefaultUsernameTestCase(TestCase):

    def setUp(self):
        # Remember the real system-username hook so each test can stub it.
        self._real_get_system_username = management.get_system_username

    def tearDown(self):
        management.get_system_username = self._real_get_system_username

    def test_actual_implementation(self):
        self.assertIsInstance(management.get_system_username(), str)

    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')

    def test_existing(self):
        # An existing user with the same name suppresses the suggestion,
        # unless the database check is explicitly disabled.
        User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')

    def test_i18n(self):
        # 'Julia' with accented 'u':
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@override_settings(AUTH_PASSWORD_VALIDATORS=[
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
])
class ChangepasswordManagementCommandTestCase(TestCase):
    # Tests for the `changepassword` management command. The numeric-password
    # validator configured above lets tests trigger validation failures.

    def setUp(self):
        self.user = User.objects.create_user(username='joe', password='qwerty')
        self.stdout = StringIO()
        self.stderr = StringIO()

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    @mock.patch.object(getpass, 'getpass', return_value='password')
    def test_get_pass(self, mock_get_pass):
        call_command('changepassword', username='joe', stdout=self.stdout)
        self.assertIs(User.objects.get(username='joe').check_password('password'), True)

    @mock.patch.object(getpass, 'getpass', return_value='')
    def test_get_pass_no_input(self, mock_get_pass):
        # Entering an empty password aborts the command.
        with self.assertRaisesMessage(CommandError, 'aborted'):
            call_command('changepassword', username='joe', stdout=self.stdout)

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='new_password')
    def test_system_username(self, mock_get_pass):
        """The system username is used if --username isn't provided."""
        username = getpass.getuser()
        User.objects.create_user(username=username, password='qwerty')
        call_command('changepassword', stdout=self.stdout)
        self.assertIs(User.objects.get(username=username).check_password('new_password'), True)

    def test_nonexistent_username(self):
        with self.assertRaisesMessage(CommandError, "user 'test' does not exist"):
            call_command('changepassword', username='test', stdout=self.stdout)

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_changes_joes_password(self, mock_get_pass):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        call_command('changepassword', username='joe', stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()
        self.assertEqual(
            command_output,
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        self.assertTrue(User.objects.get(username="joe").check_password("not qwerty"))

    @mock.patch.object(changepassword.Command, '_get_pass', side_effect=lambda *args: str(args))
    def test_that_max_tries_exits_1(self, mock_get_pass):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        # str(args) differs per prompt, so the two entries never match.
        msg = "Aborting password change for user 'joe' after 3 attempts"
        with self.assertRaisesMessage(CommandError, msg):
            call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='1234567890')
    def test_password_validation(self, mock_get_pass):
        """
        A CommandError should be raised if the user enters in passwords which
        fail validation three times.
        """
        abort_msg = "Aborting password change for user 'joe' after 3 attempts"
        with self.assertRaisesMessage(CommandError, abort_msg):
            call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr)
        # NumericPasswordValidator (class decorator) rejects '1234567890'.
        self.assertIn('This password is entirely numeric.', self.stderr.getvalue())

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_works_with_nonascii_output(self, mock_get_pass):
        """
        #21627 -- Executing the changepassword management command should allow
        non-ASCII characters from the User object representation.
        """
        # 'Julia' with accented 'u':
        User.objects.create_user(username='J\xfalia', password='qwerty')
        call_command('changepassword', username='J\xfalia', stdout=self.stdout)
class MultiDBChangepasswordManagementCommandTestCase(TestCase):
    multi_db = True

    @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty')
    def test_that_changepassword_command_with_database_option_uses_given_db(self, mock_get_pass):
        """
        changepassword --database should operate on the specified DB.
        """
        # Create the user only on the 'other' database.
        user = User.objects.db_manager('other').create_user(username='joe', password='qwerty')
        self.assertTrue(user.check_password('qwerty'))
        stdout = StringIO()
        call_command('changepassword', username='joe', database='other', stdout=stdout)
        self.assertEqual(
            stdout.getvalue().strip(),
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        # The change must be visible via the 'other' connection.
        self.assertTrue(User.objects.using('other').get(username="joe").check_password('not qwerty'))
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}],
)
class CreatesuperuserManagementCommandTestCase(TestCase):
def test_no_email_argument(self):
new_io = StringIO()
with self.assertRaisesMessage(CommandError, 'You must use --email with --noinput.'):
call_command('createsuperuser', interactive=False, username='joe', stdout=new_io)
def test_basic_usage(self):
"Check the operation of the createsuperuser management command"
# We can use the management command to create a superuser
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe",
email="joe@somewhere.org",
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = User.objects.get(username="joe")
self.assertEqual(u.email, 'joe@somewhere.org')
# created password should be unusable
self.assertFalse(u.has_usable_password())
@mock_inputs({
'password': "nopasswd",
'u\u017eivatel': 'foo', # username (cz)
'email': 'nolocale@somewhere.org'})
def test_non_ascii_verbose_name(self):
username_field = User._meta.get_field('username')
old_verbose_name = username_field.verbose_name
username_field.verbose_name = _('u\u017eivatel')
new_io = StringIO()
try:
call_command(
"createsuperuser",
interactive=True,
stdout=new_io,
stdin=MockTTY(),
)
finally:
username_field.verbose_name = old_verbose_name
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
def test_verbosity_zero(self):
# We can suppress output on the management command
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe2",
email="joe2@somewhere.org",
verbosity=0,
stdout=new_io
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '')
u = User.objects.get(username="joe2")
self.assertEqual(u.email, 'joe2@somewhere.org')
self.assertFalse(u.has_usable_password())
def test_email_in_username(self):
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
username="joe+admin@somewhere.org",
email="joe@somewhere.org",
stdout=new_io
)
u = User._default_manager.get(username="joe+admin@somewhere.org")
self.assertEqual(u.email, 'joe@somewhere.org')
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"A superuser can be created when a custom user model is in use"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
call_command(
"createsuperuser",
interactive=False,
email="joe@somewhere.org",
date_of_birth="1976-04-01",
stdout=new_io,
)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, 'Superuser created successfully.')
u = CustomUser._default_manager.get(email="joe@somewhere.org")
self.assertEqual(u.date_of_birth, date(1976, 4, 1))
# created password should be unusable
self.assertFalse(u.has_usable_password())
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user_missing_required_field(self):
"A Custom superuser won't be created when a required field isn't provided"
# We can use the management command to create a superuser
# We skip validation because the temporary substitution of the
# swappable User model messes with validation.
new_io = StringIO()
with self.assertRaisesMessage(CommandError, 'You must use --email with --noinput.'):
call_command(
"createsuperuser",
interactive=False,
stdout=new_io,
stderr=new_io,
)
self.assertEqual(CustomUser._default_manager.count(), 0)
@override_settings(
    AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername',
    AUTHENTICATION_BACKENDS=['my.custom.backend'],
)
def test_swappable_user_username_non_unique(self):
    """Two superusers may share a username when the model allows it."""
    @mock_inputs({
        'username': 'joe',
        'password': 'nopasswd',
    })
    def create_superuser():
        out = StringIO()
        call_command(
            "createsuperuser",
            interactive=True,
            email="joe@somewhere.org",
            stdout=out,
            stdin=MockTTY(),
        )
        self.assertEqual(out.getvalue().strip(),
                         'Superuser created successfully.')

    # Creating the same username twice must succeed.
    for _ in range(2):
        create_superuser()

    self.assertEqual(
        CustomUserNonUniqueUsername.objects.filter(username="joe").count(),
        2)
def test_skip_if_not_in_TTY(self):
    """
    If the command is not called from a TTY, it should be skipped and a
    message should be displayed (#7423).
    """
    class NonInteractiveStdin:
        """Stand-in for stdin whose isatty() reports no terminal."""

        def isatty(self):
            return False

    out = StringIO()
    call_command(
        "createsuperuser",
        stdin=NonInteractiveStdin(),
        stdout=out,
        interactive=True,
    )

    # The command must bail out without creating anyone.
    self.assertEqual(User._default_manager.count(), 0)
    self.assertIn("Superuser creation skipped", out.getvalue())
def test_passing_stdin(self):
    """
    You can pass a stdin object as an option and it should be
    available on self.stdin.
    If no such option is passed, it defaults to sys.stdin.
    """
    marker = object()
    cmd = createsuperuser.Command()
    call_command(
        cmd,
        stdin=marker,
        stdout=StringIO(),
        stderr=StringIO(),
        interactive=False,
        verbosity=0,
        username='janet',
        email='janet@example.com',
    )
    # The explicitly-passed object must be stored verbatim.
    self.assertIs(cmd.stdin, marker)

    cmd = createsuperuser.Command()
    call_command(
        cmd,
        stdout=StringIO(),
        stderr=StringIO(),
        interactive=False,
        verbosity=0,
        username='joe',
        email='joe@example.com',
    )
    # Without the option, stdin falls back to sys.stdin.
    self.assertIs(cmd.stdin, sys.stdin)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk(self):
    """Non-interactive creation resolves FK fields passed as PKs."""
    out = StringIO()
    group = Group.objects.create(name='mygroup')
    email = Email.objects.create(email='mymail@gmail.com')
    call_command(
        'createsuperuser',
        interactive=False,
        username=email.pk,
        email=email.email,
        group=group.pk,
        stdout=out,
    )
    self.assertEqual(out.getvalue().strip(),
                     'Superuser created successfully.')
    user = CustomUserWithFK._default_manager.get(email=email)
    self.assertEqual(user.username, email)
    self.assertEqual(user.group, group)

    # A value that doesn't resolve to an existing instance is rejected.
    non_existent_email = 'mymail2@gmail.com'
    msg = 'email instance with email %r does not exist.' % non_existent_email
    with self.assertRaisesMessage(CommandError, msg):
        call_command(
            'createsuperuser',
            interactive=False,
            username=email.pk,
            email=non_existent_email,
            stdout=out,
        )
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK')
def test_fields_with_fk_interactive(self):
    """Interactive createsuperuser resolves FK fields from entered PKs."""
    new_io = StringIO()
    group = Group.objects.create(name='mygroup')
    email = Email.objects.create(email='mymail@gmail.com')

    # The mock_inputs keys mirror the interactive prompt strings, which
    # appear to be derived from the field name plus the FK target
    # ("field (model.attribute)") — keep them in sync with the command.
    @mock_inputs({
        'password': 'nopasswd',
        'username (email.id)': email.pk,
        'email (email.email)': email.email,
        'group (group.id)': group.pk,
    })
    def test(self):
        call_command(
            'createsuperuser',
            interactive=True,
            stdout=new_io,
            stdin=MockTTY(),
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUserWithFK._default_manager.get(email=email)
        # The FK values entered at the prompts must have been resolved to
        # the actual related instances.
        self.assertEqual(u.username, email)
        self.assertEqual(u.group, group)

    test(self)
def test_default_username(self):
    """createsuperuser uses a default username when one isn't provided."""
    # Capture the default before any user exists.
    expected_username = get_default_username()
    out = StringIO()
    password_responses = ['password', 'password']

    @mock_inputs({
        'password': lambda: password_responses.pop(0),
        'username': '',
    })
    def test(self):
        call_command(
            'createsuperuser',
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )

    test(self)
    self.assertEqual(out.getvalue().strip(),
                     'Superuser created successfully.')
    self.assertTrue(User.objects.filter(username=expected_username).exists())
def test_password_validation(self):
    """
    Creation should fail if the password fails validation.
    """
    out = StringIO()
    # Feed an all-numeric (invalid) password twice, then 'password' for
    # every subsequent prompt.
    bad_passwords = ['1234567890', '1234567890']

    def bad_then_good_password():
        if bad_passwords:
            return bad_passwords.pop(0)
        return 'password'

    @mock_inputs({
        'password': bad_then_good_password,
        'username': 'joe1234567890',
    })
    def test(self):
        call_command(
            "createsuperuser",
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )
        self.assertEqual(
            out.getvalue().strip(),
            "This password is entirely numeric.\n"
            "Superuser created successfully."
        )

    test(self)
def test_blank_username(self):
    """Creation fails if --username is blank."""
    out = StringIO()

    def attempt(self):
        with self.assertRaisesMessage(CommandError, 'Username cannot be blank.'):
            call_command(
                'createsuperuser',
                username='',
                stdin=MockTTY(),
                stdout=out,
                stderr=out,
            )

    attempt(self)
def test_invalid_username(self):
    """Creation fails if the username fails validation."""
    user_field = User._meta.get_field(User.USERNAME_FIELD)
    out = StringIO()
    password_responses = ['password', 'password']
    # An overlong username first, then a valid one for the retry prompt.
    invalid_username = ('x' * user_field.max_length) + 'y'
    username_responses = [invalid_username, 'janet']

    @mock_inputs({
        'password': lambda: password_responses.pop(0),
        'username': lambda: username_responses.pop(0),
    })
    def test(self):
        call_command(
            'createsuperuser',
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )
        self.assertEqual(
            out.getvalue().strip(),
            'Error: Ensure this value has at most %s characters (it has %s).\n'
            'Superuser created successfully.' % (user_field.max_length, len(invalid_username))
        )

    test(self)
def test_existing_username(self):
    """Creation fails if the username already exists."""
    existing = User.objects.create(username='janet')
    out = StringIO()
    password_responses = ['password', 'password']
    # The taken username first, then a fresh one for the retry prompt.
    username_responses = [existing.username, 'joe']

    @mock_inputs({
        'password': lambda: password_responses.pop(0),
        'username': lambda: username_responses.pop(0),
    })
    def test(self):
        call_command(
            'createsuperuser',
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )
        self.assertEqual(
            out.getvalue().strip(),
            'Error: That username is already taken.\n'
            'Superuser created successfully.'
        )

    test(self)
def test_validation_mismatched_passwords(self):
    """
    Creation should fail if the user enters mismatched passwords.
    """
    out = StringIO()
    # The first pair mismatches; the second pair matches and is valid.
    password_responses = ["password", "not password", "password2", "password2"]

    @mock_inputs({
        'password': lambda: password_responses.pop(0),
        'username': 'joe1234567890',
    })
    def test(self):
        call_command(
            "createsuperuser",
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )
        self.assertEqual(
            out.getvalue().strip(),
            "Error: Your passwords didn't match.\n"
            "Superuser created successfully."
        )

    test(self)
def test_validation_blank_password_entered(self):
    """
    Creation should fail if the user enters blank passwords.
    """
    out = StringIO()
    # Two blank entries first, then a valid matching pair.
    password_responses = ["", "", "password2", "password2"]

    @mock_inputs({
        'password': lambda: password_responses.pop(0),
        'username': 'joe1234567890',
    })
    def test(self):
        call_command(
            "createsuperuser",
            interactive=True,
            stdin=MockTTY(),
            stdout=out,
            stderr=out,
        )
        self.assertEqual(
            out.getvalue().strip(),
            "Error: Blank passwords aren't allowed.\n"
            "Superuser created successfully."
        )

    test(self)
class MultiDBCreatesuperuserTestCase(TestCase):
    # Enable test databases for all configured DB aliases.
    multi_db = True

    def test_createsuperuser_command_with_database_option(self):
        """
        createsuperuser --database should operate on the specified DB.
        """
        new_io = StringIO()
        call_command(
            'createsuperuser',
            interactive=False,
            username='joe',
            email='joe@somewhere.org',
            database='other',
            stdout=new_io,
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        # The user must exist on the 'other' database, not the default one.
        user = User.objects.using('other').get(username='joe')
        self.assertEqual(user.email, 'joe@somewhere.org')
class CreatePermissionsTests(TestCase):
    """Tests for the create_permissions() post-migrate handler."""

    def setUp(self):
        # Snapshot the class-level Meta options so each test can mutate
        # them freely; tearDown restores them.
        self._original_permissions = Permission._meta.permissions[:]
        self._original_default_permissions = Permission._meta.default_permissions
        self.app_config = apps.get_app_config('auth')

    def tearDown(self):
        # Restore the mutated Meta state and drop any ContentType cache
        # entries created along the way.
        Permission._meta.permissions = self._original_permissions
        Permission._meta.default_permissions = self._original_default_permissions
        ContentType.objects.clear_cache()

    def test_default_permissions(self):
        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
        ]
        create_permissions(self.app_config, verbosity=0)

        # add/change/delete permission by default + custom permission
        self.assertEqual(Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 4)

        Permission.objects.filter(content_type=permission_content_type).delete()
        Permission._meta.default_permissions = []
        create_permissions(self.app_config, verbosity=0)

        # custom permission only since default permissions is empty
        self.assertEqual(Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 1)

    def test_unavailable_models(self):
        """
        #24075 - Permissions shouldn't be created or deleted if the ContentType
        or Permission models aren't available.
        """
        state = migrations.state.ProjectState()
        # Unavailable contenttypes.ContentType
        with self.assertNumQueries(0):
            create_permissions(self.app_config, verbosity=0, apps=state.apps)
        # Unavailable auth.Permission
        state = migrations.state.ProjectState(real_apps=['contenttypes'])
        with self.assertNumQueries(0):
            create_permissions(self.app_config, verbosity=0, apps=state.apps)
| |
from __future__ import unicode_literals
import io
import logging
import re
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext as _
from djblets.util.properties import AliasProperty, TypedProperty
from reviewboard.deprecation import RemovedInReviewBoard50Warning
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.scmtools.core import Revision
logger = logging.getLogger(__name__)
class ParsedDiffFile(object):
    """A parsed file from a diff.

    This stores information on a single file represented in a diff, including
    the contents of that file's diff, as parsed by :py:class:`DiffParser` or
    one of its subclasses.

    Parsers should set the attributes on this based on the contents of the
    diff, and should add any data found in the diff.

    This class is meant to be used internally and by subclasses of
    :py:class:`DiffParser`.

    Attributes:
        binary (bool):
            Whether this represents a binary file.

        copied (bool):
            Whether this represents a file that has been copied. The file
            may or may not be modified in the process.

        deleted (bool):
            Whether this represents a file that has been deleted.

        delete_count (int):
            The number of delete (``-``) lines found in the file.

        insert_count (int):
            The number of insert (``+``) lines found in the file.

        is_symlink (bool):
            Whether this represents a file that is a symbolic link to another
            file.

        moved (bool):
            Whether this represents a file that has been moved/renamed. The
            file may or may not be modified in the process.

        parser (DiffParser):
            The diff parser that parsed this file.

        skip (bool):
            Whether this file should be skipped by the parser. If any of the
            parser methods set this, the file will stop parsing and will be
            excluded from results.
    """

    #: The parsed original name of the file.
    #:
    #: Type:
    #:     bytes
    orig_filename = TypedProperty(bytes)

    #: The parsed file details of the original file.
    #:
    #: This will usually be a revision.
    #:
    #: Type:
    #:     bytes or reviewboard.scmtools.core.Revision
    orig_file_details = TypedProperty((bytes, Revision))

    #: The parsed modified name of the file.
    #:
    #: This may be the same as :py:attr:`orig_filename`.
    #:
    #: Type:
    #:     bytes
    modified_filename = TypedProperty(bytes)

    #: The parsed file details of the modified file.
    #:
    #: This will usually be a revision.
    #:
    #: Type:
    #:     bytes or reviewboard.scmtools.core.Revision
    modified_file_details = TypedProperty((bytes, Revision))

    #: The parsed value for an Index header.
    #:
    #: If present in the diff, this usually contains a filename, but may
    #: contain other content as well, depending on the variation of the diff
    #: format.
    #:
    #: Type:
    #:     bytes
    index_header_value = TypedProperty(bytes)

    #: The parsed original name of the file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`orig_filename` instead.
    origFile = AliasProperty('orig_filename',
                             convert_to_func=force_bytes,
                             deprecated=True,
                             deprecation_warning=RemovedInReviewBoard50Warning)

    #: The parsed file details of the original file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`orig_file_details` instead.
    origInfo = AliasProperty('orig_file_details',
                             convert_to_func=force_bytes,
                             deprecated=True,
                             deprecation_warning=RemovedInReviewBoard50Warning)

    #: The parsed original name of the file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`modified_filename` instead.
    newFile = AliasProperty('modified_filename',
                            convert_to_func=force_bytes,
                            deprecated=True,
                            deprecation_warning=RemovedInReviewBoard50Warning)

    #: The parsed file details of the modified file.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`modified_file_details` instead.
    newInfo = AliasProperty('modified_file_details',
                            convert_to_func=force_bytes,
                            deprecated=True,
                            deprecation_warning=RemovedInReviewBoard50Warning)

    #: The parsed value for an Index header.
    #:
    #: Deprecated:
    #:     4.0:
    #:     Use :py:attr:`index_header_value` instead.
    index = AliasProperty('index_header_value',
                          convert_to_func=force_bytes,
                          deprecated=True,
                          deprecation_warning=RemovedInReviewBoard50Warning)

    def __init__(self, parser=None):
        """Initialize the parsed file information.

        Args:
            parser (reviewboard.diffviewer.parser.DiffParser, optional):
                The diff parser that parsed this file.
        """
        if parser is None:
            # NOTE: The original message was missing a space between "when"
            # and "creating" due to implicit string concatenation.
            RemovedInReviewBoard50Warning.warn(
                'Diff parsers must pass themselves as a parameter when '
                'creating a ParsedDiffFile. This will be mandatory in '
                'Review Board 5.0.')

        self.parser = parser

        self.binary = False
        self.deleted = False
        self.moved = False
        self.copied = False
        self.is_symlink = False
        self.insert_count = 0
        self.delete_count = 0
        self.skip = False
        self.extra_data = {}

        # Diff content is accumulated in a buffer until finalize() is called.
        self._data_io = io.BytesIO()
        self._data = None

        # Mirrors values set through the deprecated dict-style API.
        self._deprecated_info = {}

    def __setitem__(self, key, value):
        """Set information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            key (str):
                The key to set.

            value (object):
                The value to set.
        """
        self._warn_old_usage_deprecation()
        self._deprecated_info[key] = value
        setattr(self, key, value)

    def __getitem__(self, key):
        """Return information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to access attributes
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            key (str):
                The key to retrieve.

        Returns:
            object:
            The resulting value.

        Raises:
            KeyError:
                The key is invalid.
        """
        self._warn_old_usage_deprecation()
        return self._deprecated_info[key]

    def __contains__(self, key):
        """Return whether an old parsed file key has been explicitly set.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to check attribute values
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            key (str):
                The key to check.

        Returns:
            bool:
            ``True`` if the key has been explicitly set by a diff parser.
            ``False`` if it has not.
        """
        self._warn_old_usage_deprecation()
        return key in self._deprecated_info

    def set(self, key, value):
        """Set information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            key (str):
                The key to set.

            value (object):
                The value to set.
        """
        self._warn_old_usage_deprecation()
        self._deprecated_info[key] = value
        setattr(self, key, value)

    def get(self, key, default=None):
        """Return information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to access attributes
        instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            key (str):
                The key to retrieve.

            default (object, optional):
                The default value to return.

        Returns:
            object:
            The resulting value.
        """
        self._warn_old_usage_deprecation()
        return self._deprecated_info.get(key, default)

    def update(self, items):
        """Update information on the parsed file from a diff.

        This is a legacy implementation used to help diff parsers retain
        compatibility with the old dictionary-based ways of setting parsed
        file information. Callers should be updated to set individual
        attributes instead.

        Deprecated:
            4.0:
            This will be removed in Review Board 5.0.

        Args:
            items (dict):
                The keys and values to set.
        """
        self._warn_old_usage_deprecation()

        for key, value in six.iteritems(items):
            self._deprecated_info[key] = value
            setattr(self, key, value)

    @property
    def data(self):
        """The data for this diff.

        This must be accessed after :py:meth:`finalize` has been called.
        """
        if self._data is None:
            raise ValueError('ParsedDiffFile.data cannot be accessed until '
                             'finalize() is called.')

        return self._data

    def finalize(self):
        """Finalize the parsed diff.

        This makes the diff data available to consumers and closes the buffer
        for writing.
        """
        self._data = self._data_io.getvalue()
        self._data_io.close()

    def prepend_data(self, data):
        """Prepend data to the buffer.

        Args:
            data (bytes):
                The data to prepend.
        """
        if data:
            new_data_io = io.BytesIO()
            new_data_io.write(data)
            new_data_io.write(self._data_io.getvalue())

            self._data_io.close()
            self._data_io = new_data_io

    def append_data(self, data):
        """Append data to the buffer.

        Args:
            data (bytes):
                The data to append.
        """
        if data:
            self._data_io.write(data)

    def _warn_old_usage_deprecation(self):
        """Warn that a DiffParser is populating information in an old way."""
        if self.parser is None:
            message = (
                'Diff parsers must be updated to populate attributes on a '
                'ParsedDiffFile, instead of setting the information in a '
                'dictionary. This will be required in Review Board 5.0.'
            )
        else:
            message = (
                '%r must be updated to populate attributes on a '
                'ParsedDiffFile, instead of setting the information in a '
                'dictionary. This will be required in Review Board 5.0.'
                % type(self.parser)
            )

        RemovedInReviewBoard50Warning.warn(message, stacklevel=3)
class DiffParser(object):
"""Parses diff files, allowing subclasses to specialize parsing behavior.
This class provides the base functionality for parsing Unified Diff files.
It looks for common information present in many variations of diffs,
such as ``Index:`` lines, in order to extract files and their modified
content from a diff.
Subclasses can extend the parsing behavior to extract additional metadata
or handle special representations of changes. They may want to override the
following methods:
* :py:meth:`parse_special_header`
* :py:meth:`parse_diff_header`
* :py:meth:`parse_filename_header`
* :py:meth:`parse_after_headers`
* :py:meth:`get_orig_commit_id`
* :py:meth:`normalize_diff_filename`
"""
#: A separator string below an Index header.
#:
#: This is commonly found immediately below an ``Index:`` header, meant
#: to help locate the beginning of the metadata or changes made to a file.
#:
#: Its presence and location is not guaranteed.
INDEX_SEP = b'=' * 67
def __init__(self, data):
    """Initialize the parser.

    Args:
        data (bytes):
            The diff content to parse.

    Raises:
        TypeError:
            The provided ``data`` argument was not a ``bytes`` type.
    """
    # Imported here to avoid a circular import with diffutils.
    from reviewboard.diffviewer.diffutils import split_line_endings

    if not isinstance(data, bytes):
        raise TypeError(
            _('%s expects bytes values for "data", not %s')
            % (type(self).__name__, type(data)))

    self.data = data
    self.lines = split_line_endings(data)
    self.base_commit_id = None
    self.new_commit_id = None
def parse(self):
    """Parse the diff.

    This will parse the content of the file, returning any files that
    were found.

    Returns:
        list of ParsedDiffFile:
        The resulting list of files.

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing part of the diff. This may be a
            corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    logger.debug('%s.parse: Beginning parse of diff, size = %s',
                 type(self).__name__, len(self.data))

    preamble = io.BytesIO()
    self.files = []
    parsed_file = None
    i = 0

    # Go through each line in the diff, looking for diff headers.
    while i < len(self.lines):
        next_linenum, new_file = self.parse_change_header(i)

        if new_file:
            # This line is the start of a new file diff.
            #
            # First, finalize the last one.
            if self.files:
                self.files[-1].finalize()

            parsed_file = new_file

            # We need to prepend the preamble, if we have one.
            parsed_file.prepend_data(preamble.getvalue())

            # Start a fresh preamble buffer for the next file.
            preamble.close()
            preamble = io.BytesIO()

            self.files.append(parsed_file)
            i = next_linenum
        else:
            if parsed_file:
                # A content line belonging to the current file's diff.
                i = self.parse_diff_line(i, parsed_file)
            else:
                # No file seen yet; buffer the line so it can be attached
                # to the first file found.
                preamble.write(self.lines[i])
                preamble.write(b'\n')
                i += 1

    # Finalize the last file, if any, so its data becomes readable.
    if self.files:
        self.files[-1].finalize()

    preamble.close()

    logger.debug('%s.parse: Finished parsing diff.', type(self).__name__)

    return self.files
def parse_diff_line(self, linenum, parsed_file):
    """Parse a line of data in a diff.

    This will append the line to the parsed file's data, and if the
    content represents active changes to a file, its insert/delete counts
    will be updated to reflect them.

    Args:
        linenum (int):
            The 0-based line number.

        parsed_file (ParsedDiffFile):
            The current parsed diff file info.

    Returns:
        int:
        The next line number to parse.
    """
    line = self.lines[linenum]

    # Only count changes once both filenames have been parsed, meaning
    # we're inside the file's actual diff content.
    in_file_content = (parsed_file.orig_filename is not None and
                       parsed_file.modified_filename is not None)

    if in_file_content:
        if line.startswith(b'+'):
            parsed_file.insert_count += 1
        elif line.startswith(b'-'):
            parsed_file.delete_count += 1

    parsed_file.append_data(line)
    parsed_file.append_data(b'\n')

    return linenum + 1
def parse_change_header(self, linenum):
    """Parse a header before a change to a file.

    This will attempt to parse the following information, starting at the
    specified line in the diff:

    1. Any special file headers (such as ``Index:`` lines) through
       :py:meth:`parse_special_header`
    2. A standard Unified Diff file header (through
       :py:meth:`parse_diff_header`)
    3. Any content after the header (through
       :py:meth:`parse_after_headers`)

    If the special or diff headers are able to populate the original and
    modified filenames and revisions/file details, and none of the methods
    above mark the file as skipped (by setting
    :py:attr:`ParsedDiffFile.skip`), then this will finish by appending
    all parsed data and returning a parsed file entry.

    Subclasses that need to control parsing logic should override one or
    more of the above methods.

    Args:
        linenum (int):
            The line number to begin parsing.

    Returns:
        tuple:
        A tuple containing the following:

        1. The next line number to parse
        2. The populated :py:class:`ParsedDiffFile` instance for this file

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing the change header. This may be
            a corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    parsed_file = ParsedDiffFile(parser=self)
    start = linenum

    # The hooks run in a fixed order; each may advance the line number
    # and populate attributes on parsed_file.
    linenum = self.parse_special_header(linenum, parsed_file)
    linenum = self.parse_diff_header(linenum, parsed_file)

    # Skip this entry if a hook requested it, or if the headers failed to
    # produce both filenames and both revisions/details.
    skip = (
        parsed_file.skip or
        parsed_file.orig_filename is None or
        parsed_file.orig_file_details is None or
        parsed_file.modified_filename is None or
        parsed_file.modified_file_details is None
    )

    if skip:
        return linenum, None

    # If we have enough information to represent a header, build the
    # file to return.
    if linenum < len(self.lines):
        linenum = self.parse_after_headers(linenum, parsed_file)

        if parsed_file.skip:
            return linenum, None

    # The header is part of the diff, so make sure it gets in the
    # diff content.
    for line in self.lines[start:linenum]:
        parsed_file.append_data(line)
        parsed_file.append_data(b'\n')

    return linenum, parsed_file
def parse_special_header(self, linenum, parsed_file):
    """Parse a special diff header marking the start of a new file's info.

    This attempts to locate an ``Index:`` line at the specified line
    number, which usually indicates the beginning of file's information in
    a diff (for Unified Diff variants that support it). By default, this
    method expects the line to be found at ``linenum``.

    If present, the value found immediately after the ``Index:`` will be
    stored in :py:attr:`ParsedDiffFile.index_header_value`, allowing
    subclasses to make a determination based on its contents (which may
    vary between types of diffs, but should include at least a filename.

    If the ``Index:`` line is not present, this won't do anything by
    default.

    Subclasses can override this to parse additional information before the
    standard diff header. They may also set :py:attr:`ParsedFileDiff.skip`
    to skip the rest of this file and begin parsing a new entry at the
    returned line number.

    Args:
        linenum (int):
            The line number to begin parsing.

        parsed_file (ParsedDiffFile):
            The file currently being parsed.

    Returns:
        int:
        The next line number to parse.

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing the special header. This may be
            a corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    try:
        index_line = self.lines[linenum]
        is_index = index_line.startswith(b'Index: ')
    except IndexError:
        is_index = False

    if is_index:
        # Try to find the "====" line.
        temp_linenum = linenum + 1

        while temp_linenum + 1 < len(self.lines):
            line = self.lines[temp_linenum]

            if line == self.INDEX_SEP:
                # We found the line. This is looking like a valid diff
                # for CVS, Subversion, and other systems. Try to parse
                # the data from the line.
                try:
                    parsed_file.index_header_value = \
                        index_line.split(None, 1)[1]

                    # Set these for backwards-compatibility.
                    #
                    # This should be removed in Review Board 5.0.
                    parsed_file._deprecated_info['index'] = \
                        parsed_file.index_header_value
                except ValueError:
                    # There was nothing after "Index:", so the header is
                    # malformed.
                    raise DiffParserError('Malformed Index line', linenum)

                linenum = temp_linenum + 1
                break
            elif line.startswith((b'---', b'+++')):
                # We never found that line, but we did hit the start of
                # a diff file. We can't treat the "Index:" line as special
                # in this case.
                break

            temp_linenum += 1

    return linenum
def parse_diff_header(self, linenum, parsed_file):
    """Parse a standard header before changes made to a file.

    This attempts to parse the ``---`` (original) and ``+++`` (modified)
    file lines, which are usually present right before any changes to the
    file. By default, this method expects the ``---`` line to be found at
    ``linenum``.

    If found, this will populate :py:attr:`ParsedDiffFile.orig_filename`,
    :py:attr:`ParsedDiffFile.orig_file_details`,
    :py:attr:`ParsedDiffFile.modified_filename`, and
    :py:attr:`ParsedDiffFile.modified_file_details`.

    This calls out to :py:meth:`parse_filename_header` to help parse
    the contents immediately after the ``---`` or ``+++``.

    Subclasses can override this to parse these lines differently, or to
    to process the results of these lines (such as converting special
    filenames to states like "deleted" or "new file"). They may also set
    :py:class:`ParsedFileDiff.skip` to skip the rest of this file and begin
    parsing a new entry at the returned line number.

    Args:
        linenum (int):
            The line number to begin parsing.

        parsed_file (ParsedDiffFile):
            The file currently being parsed.

    Returns:
        int:
        The next line number to parse.

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing the diff header. This may be a
            corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    try:
        line1 = self.lines[linenum]
        line2 = self.lines[linenum + 1]

        is_diff_header = (
            # Unified diff headers
            (line1.startswith(b'--- ') and line2.startswith(b'+++ ')) or

            # Context diff headers
            (line1.startswith(b'*** ') and line2.startswith(b'--- ') and
             not line1.endswith(b' ****'))
        )
    except IndexError:
        # There aren't enough lines left for a two-line header.
        is_diff_header = False

    if is_diff_header:
        # This is a unified or context diff header. Parse the
        # file and extra info.
        try:
            # Strip the 4-byte marker ("--- " / "+++ " / "*** ") before
            # parsing the filename and details.
            (parsed_file.orig_filename,
             parsed_file.orig_file_details) = \
                self.parse_filename_header(self.lines[linenum][4:],
                                           linenum)
            linenum += 1

            (parsed_file.modified_filename,
             parsed_file.modified_file_details) = \
                self.parse_filename_header(self.lines[linenum][4:],
                                           linenum)

            # Set these for backwards-compatibility.
            #
            # This should be removed in Review Board 5.0.
            parsed_file._deprecated_info['origFile'] = \
                parsed_file.orig_filename
            parsed_file._deprecated_info['origInfo'] = \
                parsed_file.orig_file_details
            parsed_file._deprecated_info['newFile'] = \
                parsed_file.modified_filename
            parsed_file._deprecated_info['newInfo'] = \
                parsed_file.modified_file_details

            linenum += 1
        except ValueError:
            raise DiffParserError(
                'The diff file is missing revision information',
                linenum)

    return linenum
def parse_after_headers(self, linenum, parsed_file):
    """Parse information after a diff header but before diff data.

    This hook runs after the diff header has been parsed but before any
    lines are gathered as diff content. A few diff formats place extra
    content here; the base implementation simply leaves the position
    unchanged.

    Subclasses can override this to consume any such lines. They may also
    set :py:class:`ParsedFileDiff.skip` to skip the rest of this file and
    begin parsing a new entry at the returned line number.

    Args:
        linenum (int):
            The line number to begin parsing.

        parsed_file (ParsedDiffFile):
            The file currently being parsed.

    Returns:
        int:
        The next line number to parse.

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing the diff header. This may be a
            corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    # No-op by default: nothing is consumed here.
    return linenum
def parse_filename_header(self, s, linenum):
    """Parse the filename found in a diff filename line.

    This parses the value after a ``---`` or ``+++`` indicator (or a
    special variant handled by a subclass), splitting it into the filename
    and any following file details (commonly a revision, though this
    depends on the diff variation).

    By default, a tab is tried first as the separator (the safest case,
    since it permits filenames containing spaces), falling back to runs of
    spaces. If neither is present, parsing fails.

    This must parse only the provided value, and cannot parse subsequent
    lines. Subclasses can override it to handle escaping or other
    diff-variant-specific filename forms.

    Args:
        s (bytes):
            The value to parse.

        linenum (int):
            The line number containing the value to parse.

    Returns:
        tuple:
        A tuple containing:

        1. The filename (as bytes)
        2. The additional file information (as bytes)

    Raises:
        reviewboard.diffviewer.errors.DiffParserError:
            There was an error parsing the diff header. This may be a
            corrupted diff, or an error in the parsing implementation.
            Details are in the error message.
    """
    # Best case: a tab separates filename and info, so filenames with
    # spaces survive intact.
    tab_index = s.find(b'\t')

    if tab_index != -1:
        return [s[:tab_index], s[tab_index + 1:]]

    # Fall back to spaces. This is technically ambiguous, so we assume
    # the filename has no consecutive spaces and split at the first run
    # of spaces.
    if b' ' in s:
        return re.split(br' +', s, 1)

    raise DiffParserError('No valid separator after the filename was '
                          'found in the diff header',
                          linenum)
def raw_diff(self, diffset_or_commit):
    """Return a raw diff as a string.

    Given a DiffSet or DiffCommit, build a single diff file representing
    all the changes made, suitable for serving to other tools.

    Args:
        diffset_or_commit (reviewboard.diffviewer.models.diffset.DiffSet or
                           reviewboard.diffviewer.models.diffcommit
                           .DiffCommit):
            The DiffSet (cumulative diff contents) or DiffCommit (that
            commit's file contents) to render.

    Returns:
        bytes:
        The diff composed of all the component FileDiffs.

    Raises:
        TypeError:
            The provided ``diffset_or_commit`` wasn't of a supported type.
    """
    # Distinguish the two model types by the attribute each exposes.
    if hasattr(diffset_or_commit, 'cumulative_files'):
        per_file_diffs = diffset_or_commit.cumulative_files
    elif hasattr(diffset_or_commit, 'files'):
        per_file_diffs = diffset_or_commit.files.all()
    else:
        raise TypeError('%r is not a valid value. Please pass a DiffSet '
                        'or DiffCommit.'
                        % diffset_or_commit)

    return b''.join(filediff.diff for filediff in per_file_diffs)
def get_orig_commit_id(self):
    """Return the commit ID of the original revision for the diff.

    The base implementation returns ``None``. Subclasses working with
    repositories that look up file changes by the commit ID (rather than
    a per-file revision) override this; a non-``None`` result overrides
    the value stored in :py:attr:`FileDiff.source_revision
    <reviewboard.diffviewer.models.filediff.FileDiff.source_revision>`.
    By the time this is called, all files will have been parsed already.

    Returns:
        bytes:
        The commit ID used to override the source revision of any
        created FileDiff instances, or ``None``.
    """
    return None
def normalize_diff_filename(self, filename):
    """Normalize filenames in diffs.

    Return a filename suitable for :py:attr:`FileDiff.source_file
    <reviewboard.diffviewer.models.filediff.FileDiff.source_file>`,
    :py:attr:`FileDiff.dest_file
    <reviewboard.diffviewer.models.filediff.FileDiff.dest_file>`, or for
    presentation in the UI.

    The base implementation strips a single leading slash, which can
    appear due to differences in diffing methods or APIs. Subclasses can
    override this to provide additional normalization.

    Args:
        filename (unicode):
            The filename to normalize.

    Returns:
        unicode:
        The normalized filename.
    """
    # Only one leading slash is removed, matching historical behavior.
    return filename[1:] if filename.startswith('/') else filename
| |
"""
Utility methods for testing.
"""
import os
import vcf
from Bio import SeqIO
from django.conf import settings
from django.contrib.auth.models import User
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import Project
from main.models import ReferenceGenome
from main.models import ExperimentSampleToAlignment
from utils.import_util import copy_and_add_dataset_source
from variants.vcf_parser import parse_alignment_group_vcf
from variants.vcf_parser import parse_vcf
# Credentials for the test user created by the helpers below.
TEST_USERNAME = 'gmcdev'
TEST_PASSWORD = 'g3n3d3z'
TEST_EMAIL = 'gmcdev@genomedesigner.freelogy.org'

# Labels for the test Project and ReferenceGenome.
TEST_PROJECT_NAME = 'recoli'
TEST_REF_GENOME_LABEL = 'mg1655'

# Root of the on-disk test data, relative to the Django settings PWD.
TEST_DATA_DIR = os.path.join(settings.PWD, 'test_data')

# VCF of SNPs called against the fake test genome; attached as a Dataset
# in create_common_entities_w_variants().
TEST_GENOME_SNPS = os.path.join(settings.PWD, 'test_data',
        'fake_genome_and_reads',
        'test_genome_snps.vcf')
# A set of data consisting of a small annotated genome, many samples, and some
# designed SNPs which are each in some of the samples.
class FullVCFTestSet:
    """Paths for the full VCF test set: a small annotated genome, several
    samples of paired-end reads, and a VCF of designed SNPs."""

    # Directory holding the full test data set.
    TEST_DIR = os.path.join(settings.PWD, 'test_data', 'full_vcf_test_set')

    # Number of simulated samples in the set.
    NUM_SAMPLES = 3

    # Annotated reference genome (GenBank format).
    TEST_GENBANK = os.path.join(TEST_DIR, 'mg1655_tolC_through_zupT.gb')

    # Paired-end FASTQ read files, one pair per sample.
    FASTQ1 = [os.path.join(TEST_DIR, 'sample%d.simLibrary.1.fq' % i)
            for i in range(NUM_SAMPLES)]
    FASTQ2 = [os.path.join(TEST_DIR, 'sample%d.simLibrary.2.fq' % i)
            for i in range(NUM_SAMPLES)]

    # VCF of the designed SNPs expected in some of the samples.
    TEST_DESIGNED_SNPS = os.path.join(TEST_DIR, 'designed_snps.vcf')
def create_common_entities():
    """Create the entities most tests need.

    Returns a dict with a related User, Project, ReferenceGenome,
    Chromosome, two ExperimentSamples, and an AlignmentGroup.
    """
    test_user = User.objects.create_user(
            TEST_USERNAME, password=TEST_PASSWORD, email=TEST_EMAIL)

    test_project = Project.objects.create(
            title=TEST_PROJECT_NAME, owner=test_user.get_profile())

    ref_genome = ReferenceGenome.objects.create(
            project=test_project,
            label=TEST_REF_GENOME_LABEL)

    test_chromosome = Chromosome.objects.create(
            reference_genome=ref_genome,
            label='Chromosome',
            num_bases=9001)

    es1 = ExperimentSample.objects.create(
            project=test_project,
            label='es1')
    es2 = ExperimentSample.objects.create(
            project=test_project,
            label='es2')

    ag1 = AlignmentGroup.objects.create(
            label='Alignment 1',
            reference_genome=ref_genome,
            aligner=AlignmentGroup.ALIGNER.BWA)

    return {
        'user': test_user,
        'project': test_project,
        'reference_genome': ref_genome,
        'chromosome': test_chromosome,
        'sample_1': es1,
        'sample_2': es2,
        'alignment_group_1': ag1
    }
def create_common_entities_w_variants():
    """Creates the most common entities for testing.

    Returns a dict with a User, Project, ReferenceGenome, Chromosome,
    ExperimentSamples and an AlignmentGroup that are all related, with
    variants parsed from the test VCF.
    """
    # this is the number of samples in the VCF file
    # NOTE(review): num_samples is never used below -- presumably
    # documentation only; consider removing.
    num_samples = 10

    user = User.objects.create_user(
            TEST_USERNAME, password=TEST_PASSWORD, email=TEST_EMAIL)
    project = Project.objects.create(
            title=TEST_PROJECT_NAME, owner=user.get_profile())
    reference_genome = ReferenceGenome.objects.create(
            project=project,
            label=TEST_REF_GENOME_LABEL)
    chromosome = Chromosome.objects.create(
            reference_genome=reference_genome,
            label='Chromosome',
            seqrecord_id='Chromosome',
            num_bases=9001)
    alignment_group = AlignmentGroup.objects.create(
            label='Alignment 1',
            reference_genome=reference_genome,
            aligner=AlignmentGroup.ALIGNER.BWA)
    # NOTE(review): this creates a second Chromosome with the same label
    # but different num_bases and no seqrecord_id -- looks unintentional;
    # confirm whether both are really needed.
    Chromosome.objects.create(
            reference_genome=reference_genome,
            label='Chromosome',
            num_bases=2000)

    # Attach the test VCF to the alignment group as a Dataset.
    VCF_DATATYPE = Dataset.TYPE.VCF_FREEBAYES
    copy_and_add_dataset_source(
            alignment_group, VCF_DATATYPE,
            VCF_DATATYPE, TEST_GENOME_SNPS)

    # Create experiment sample objects having UIDs that correspond to those
    # in the vcf file. This is a bit "fake" in that the actual pipeline we
    # will be generating the vcf file from the samples (see add_groups()
    # stage of pipeline.
    with open(TEST_GENOME_SNPS) as fh:
        reader = vcf.Reader(fh)
        experiment_sample_uids = reader.samples
    samples = [ExperimentSample.objects.create(
            uid=sample_uid,
            project=project,
            label='fakename:' + sample_uid)
            for sample_uid in experiment_sample_uids]

    # add samples to alignment group
    for sample in samples:
        ExperimentSampleToAlignment.objects.get_or_create(
                alignment_group=alignment_group,
                experiment_sample=sample)

    # Parse the vcf
    parse_alignment_group_vcf(alignment_group, VCF_DATATYPE)

    return {
        'user': user,
        'project': project,
        'reference_genome': reference_genome,
        'chromosome': chromosome,
        'samples': samples,
        'alignment_group': alignment_group
    }
def create_sample_and_alignment(
        project, alignment_group, sample_uid, bwa_alignment=None):
    """Create an ExperimentSample and link it to the AlignmentGroup.

    If bwa_alignment (a path to an alignment file) is given, it is
    attached to the new sample alignment as a BWA_ALIGN Dataset.

    Returns a dict with keys 'sample' and 'sample_alignment'.
    """
    new_sample = ExperimentSample.objects.create(
            uid=sample_uid, project=project, label=sample_uid)
    new_sample_alignment = ExperimentSampleToAlignment.objects.create(
            alignment_group=alignment_group, experiment_sample=new_sample)

    if bwa_alignment is not None:
        copy_and_add_dataset_source(
                new_sample_alignment, Dataset.TYPE.BWA_ALIGN,
                Dataset.TYPE.BWA_ALIGN, bwa_alignment)

    return {
        'sample': new_sample,
        'sample_alignment': new_sample_alignment
    }
def create_recoli_sv_data_from_vcf(project):
    """Populate database with SVs from lumpy vcf.

    Creates a ReferenceGenome, Chromosome and AlignmentGroup under the
    given project, four samples matching the sample ids in the bundled
    lumpy VCF, and then parses that VCF into the database.
    """
    VCF_PARSER_TEST_DATA_DIR = os.path.join(TEST_DATA_DIR, 'vcf_parser_test_data')
    LUMPY_4_SAMPLES_RECOLI_VCF = os.path.join(
            VCF_PARSER_TEST_DATA_DIR, 'lumpy_4_samples_recoli.vcf')

    # Sample UIDs corresponding to the sample ids in the VCF (see below).
    SAMPLE_1_UID = '3990b0f4'
    SAMPLE_2_UID = '0e250e34'
    SAMPLE_3_UID = '396ea926'
    SAMPLE_4_UID = '4a09d3dd'

    reference_genome = ReferenceGenome.objects.create(
            project=project, label='myref')
    Chromosome.objects.create(
            reference_genome=reference_genome,
            label='the chrom',
            seqrecord_id='U00096.2',
            num_bases=5000000000)
    alignment_group = AlignmentGroup.objects.create(
            label='Alignment 1', reference_genome=reference_genome,
            aligner=AlignmentGroup.ALIGNER.BWA)

    # Connect lumpy vcf as Dataset.
    lumpy_vcf_dataset = copy_and_add_dataset_source(
            alignment_group, Dataset.TYPE.VCF_LUMPY, Dataset.TYPE.VCF_LUMPY,
            LUMPY_4_SAMPLES_RECOLI_VCF)

    # Create samples corresponding to sample ids in vcf.
    create_sample_and_alignment(
            project, alignment_group, SAMPLE_1_UID)
    create_sample_and_alignment(
            project, alignment_group, SAMPLE_2_UID)
    create_sample_and_alignment(
            project, alignment_group, SAMPLE_3_UID)
    create_sample_and_alignment(
            project, alignment_group, SAMPLE_4_UID)

    # Now we have everything we need to parse the vcf.
    parse_vcf(lumpy_vcf_dataset, alignment_group)
def are_fastas_same(fasta_1, fasta_2):
    """Compare two single-record FASTA files.

    Args:
        fasta_1: Path to the first FASTA file.
        fasta_2: Path to the second FASTA file.

    Returns:
        Tuple of ((bool) fastas equal, (list) indexes of dissimilarity).
        The index list is empty when the sequences match.
    """
    with open(fasta_1, 'r') as fasta_1_fh, \
            open(fasta_2, 'r') as fasta_2_fh:
        fasta_1_seqrecord_list = list(SeqIO.parse(fasta_1_fh, 'fasta'))
        fasta_2_seqrecord_list = list(SeqIO.parse(fasta_2_fh, 'fasta'))

    # Only single-record FASTA files are supported.
    assert len(fasta_1_seqrecord_list) == 1
    assert len(fasta_2_seqrecord_list) == 1

    seq_1 = str(fasta_1_seqrecord_list[0].seq)
    seq_2 = str(fasta_2_seqrecord_list[0].seq)
    if seq_1 == seq_2:
        return (True, [])

    # The original used map(lambda x, y: x == y, ...) plus an `x == 0`
    # truthiness test, which relied on Python 2's None-padding semantics
    # for unequal lengths. Compare explicitly instead; zip() truncates to
    # the shorter sequence, so a pure length difference beyond the common
    # prefix yields no indexes but still returns False.
    indexes = [i for i, (base_1, base_2) in enumerate(zip(seq_1, seq_2))
               if base_1 != base_2]
    return (False, indexes)
| |
'''
A module containing some functions I found useful.
A few of which are used in the main fitting code
'''
from __future__ import division
import os
import sys
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import montage_wrapper as montage
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
def adxy(header, ra, dec):
    '''
    Converts RA and Dec (degrees) to x and y pixel positions using the
    WCS found in the given FITS header
    '''
    world_ra = np.array(ra)
    world_dec = np.array(dec)
    xpix, ypix = WCS(header).all_world2pix(world_ra, world_dec, 0)
    return xpix, ypix
def angsep(ra0, dec0, ra, dec, frame='icrs'):
    '''
    Gives angular separation between the input position and the position
    after fitting, in arcsec. Note: input should be in degrees
    '''
    coord_in = SkyCoord(np.array(ra0) * u.deg, np.array(dec0) * u.deg,
                        frame=frame)
    coord_fit = SkyCoord(np.array(ra) * u.deg, np.array(dec) * u.deg,
                         frame=frame)
    # Convert degrees to arcsec.
    return coord_in.separation(coord_fit).value * 3600.
def data2table(arrays=[], names=[], units=[], filename='d2table.fits'):
'''
Given a list of arrays, names and units, constructs table and writes
to file. Units (optional) should be given as astropy.units.
If extenstion of filename is .dat (.fits) it will be saved as an
ascii (FITS) table.
Returns the table that it saved to file
'''
t = Table()
if len(units) > 0:
for info in zip(names, arrays, units):
t.add_column(
Column(name=info[0], data=info[1], unit=info[2]))
else:
for nm, arr in zip(names, arrays):
t.add_column(Column(name=nm, data=arr))
if os.path.splitext(filename)[1] == '.dat':
t.write(filename, format='ipac')
elif os.path.splitext(filename)[1] == '.fits':
t.write(filename)
else:
print 'Unacceptable filename given. Table not being saved.'
return t
def get_pixscale(header):
    '''
    Retrieves pixelscale in arcsec/pix from the image header

    Keywords are checked in increasing order of precedence: PIXSCALE,
    PXSCAL1, CDELT1, CD1_1 -- the last one present wins, matching the
    original assignment order. Returns 0 when none are present.
    '''
    pixscale = 0
    keys = header.keys()
    # The original wrapped these tests in a loop over the keys, but each
    # condition tests membership rather than the loop variable, so a
    # single pass is equivalent (and avoids O(n) redundant re-checks).
    if 'PIXSCALE' in keys:
        pixscale = abs(header['PIXSCALE'])
    if 'PXSCAL1' in keys:
        pixscale = abs(header['PXSCAL1'])
    if 'CDELT1' in keys:
        pixscale = abs(header['CDELT1']) * 3600.  # in arcsec/pixel
    if 'CD1_1' in keys:
        pixscale = abs(header['CD1_1']) * 3600.  # in arcsec/pixel
    return pixscale
def nmatch(ra0, dec0, ra, dec, nneb, frame='icrs'):
    '''
    Returns indices of the nth nearest positional counterpart and the
    angular separation (arcsec) between them.
    Assumes both positions are in the same frame
    '''
    reference_cat = SkyCoord(np.array(ra0) * u.deg, np.array(dec0) * u.deg,
                             frame=frame)
    other_cat = SkyCoord(np.array(ra) * u.deg, np.array(dec) * u.deg,
                         frame=frame)
    # The physical separations returned by match_to_catalog_sky are
    # not needed here.
    indices, angseps, _ = reference_cat.match_to_catalog_sky(
        other_cat, nneb)
    return indices, angseps.to(u.arcsec).value
def matching(ra0, dec0, ra, dec, frame='icrs'):
    '''
    Returns indices of nearest positional counterpart and the angular
    separation (arcsec) between them.
    Assumes both positions are in the same frame
    '''
    # Nearest neighbour is simply the n=1 case of nmatch().
    return nmatch(ra0, dec0, ra, dec, 1, frame=frame)
def progress_bar(iterator, array, barLen=50):
    '''
    Displays progress bars for "for" loops

    iterator -- current 0-based loop index
    array -- the sequence being looped over (only its length is used)
    barLen -- width of the bar in characters
    '''
    fraction_done = float(iterator + 1) / len(array)
    # Build the bar in one step rather than char-by-char concatenation.
    n_filled = int(barLen * fraction_done)
    progress = "#" * n_filled + " " * (barLen - n_filled)
    sys.stdout.write("\r")
    sys.stdout.write('Progress ')
    sys.stdout.write("[ %s ] %.0f %%" % (progress, fraction_done * 100))
    sys.stdout.flush()
    if iterator == len(array)-1:
        # Was a Python 2 `print " "` statement (emits " \n"); written
        # explicitly so the function also runs under Python 3.
        sys.stdout.write(" \n")
def read_image(image_path, extension=0):
    '''
    Reads in an image and corresponding header
    '''
    # Use a context manager so the HDUList is closed (saves memory),
    # exactly as the explicit close() in the original.
    hdulist = fits.open(image_path)
    try:
        ext = hdulist[extension]
        image, header = ext.data, ext.header
    finally:
        hdulist.close()
    return image, header
def remove_existing(file_path, verbose=True):
    '''
    Overwrites file in the save directory that has a file name the same as
    the given filename

    file_path -- full path of the file to remove
    verbose -- if True, print the name of the removed file
    '''
    filename = os.path.basename(file_path)
    # Only remove the file when it is actually present in its directory,
    # avoiding an OSError from os.remove on a missing file.
    files = os.listdir(os.path.dirname(file_path))
    if filename in files:
        os.remove(file_path)
        if verbose:
            print 'Removing ', str(filename)
    return
def res_sum(x0, y0, chi, half_fwhm_pix):
    '''
    Sums the residual flux in the beam around the position at which
    extraction took place
    '''
    # Round the (possibly fractional) position to the nearest pixel.
    xc = int(x0 + 0.5)
    yc = int(y0 + 0.5)
    # Square window of half-width half_fwhm_pix centred on (xc, yc).
    window = chi[yc - half_fwhm_pix: yc + half_fwhm_pix + 1,
                 xc - half_fwhm_pix: xc + half_fwhm_pix + 1]
    return window.sum()
def subimage(image_path, ra, dec, boxsize, showsub=False):
    '''
    Returns a subimage and subheader. Boxsize should be given in arcsec
    This is to create a temporary subimage of a source for a quick view. To
    save the subimage, do it externally using
    astropy.io.fits.PrimaryHDU.writeto or montage_wrapper.mSubimage.
    '''
    _, header = read_image(image_path)
    x, y = adxy(header, ra, dec)
    # Center subimage on the pixel the source is located in (better centering)
    xpix, ypix = int(x + 0.5), int(y + 0.5)
    ra_center, dec_center = xyad(header, xpix, ypix)
    # boxsize / 3600 presumably converts arcsec to the degrees montage
    # expects for xsize -- TODO confirm against montage docs.
    # NOTE(review): 'temp.fits' is written to the current working
    # directory, so concurrent calls would clash; consider tempfile.
    montage.commands.mSubimage(in_image=image_path,
                               out_image='temp.fits',
                               ra=ra_center,
                               dec=dec_center,
                               xsize=boxsize / 3600)
    subimage, subheader = read_image('temp.fits')
    remove_existing(os.path.join(os.getcwd(),'temp.fits'), verbose=False)
    if showsub:
        # Quick-look display of the cutout.
        pdict = dict(interpolation='nearest',
                     origin='lower',
                     cmap='gray')
        plt.imshow(subimage, **pdict)
        plt.show()
    return subimage, subheader
def stamps2file(images=None, headers=None, names=None, file_path=None):
    '''
    Saves list of images, headers, and extension names to a FITS file

    images -- list of image arrays, one per extension
    headers -- optional matching list of headers (defaults to the empty
               primary header for every image)
    names -- optional matching list of extension names (defaults 'none')
    file_path -- destination FITS file (overwritten if present)

    Returns the HDUList that was written
    '''
    # Avoid mutable default arguments (a shared default list would
    # persist mutations across calls).
    images = [] if images is None else images
    headers = [] if headers is None else headers
    names = [] if names is None else names

    primary = fits.PrimaryHDU()
    if len(headers) == 0:
        headers = [primary.header] * len(images)
    if len(names) == 0:
        names = ['none'] * len(images)

    hdus = [primary]
    for im, hdr, nm in zip(images, headers, names):
        hdus.append(fits.ImageHDU(data=im, header=hdr, name=nm))

    hdulist = fits.HDUList(hdus)
    # NOTE(review): 'clobber' was renamed 'overwrite' in astropy 1.3 and
    # later removed; kept for compatibility with the version in use here.
    hdulist.writeto(file_path, clobber=True)
    return hdulist
def xyad(header, x, y):
    '''
    Converts x and y pixel positions to RA and Dec using the WCS found
    in the given FITS header
    '''
    xpix = np.array(x)
    ypix = np.array(y)
    world_ra, world_dec = WCS(header).all_pix2world(xpix, ypix, 0)
    return world_ra, world_dec
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django_openstack import api
from django_openstack.tests.view_tests import base
from glance.common import exception as glance_exception
from openstackx.api import exceptions as api_exceptions
from mox import IgnoreArg, IsA
class FakeQuota:
    # Minimal stand-in for the quota object; the tests below return this
    # class from the stubbed api.tenant_quota_get().
    ram = 100
class ImageViewTests(base.BaseViewTests):
    """Tests for the dashboard image index and launch views.

    All api.* calls are stubbed out with mox: each test records the
    expected call sequence, replays it, exercises the view via the test
    client, and verifies the stubs were called exactly as recorded.
    """

    def setUp(self):
        # Fixtures: one image that should appear in the index and one
        # that should not (presumably filtered because of its 'aki'
        # container format -- see test_index), plus a fake flavor and
        # keypair for the launch form tests.
        super(ImageViewTests, self).setUp()
        image_dict = {'name': 'visibleImage',
                      'container_format': 'novaImage'}
        self.visibleImage = api.Image(image_dict)

        image_dict = {'name': 'invisibleImage',
                      'container_format': 'aki'}
        self.invisibleImage = api.Image(image_dict)

        self.images = (self.visibleImage, self.invisibleImage)

        flavor = self.mox.CreateMock(api.Flavor)
        flavor.id = 1
        flavor.name = 'm1.massive'
        flavor.vcpus = 1000
        flavor.disk = 1024
        flavor.ram = 10000
        self.flavors = (flavor,)

        keypair = self.mox.CreateMock(api.KeyPair)
        keypair.key_name = 'keyName'
        self.keypairs = (keypair,)

    def test_index(self):
        """The index view lists only the visible image."""
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest), self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'image_list_detailed')
        api.image_list_detailed(IsA(http.HttpRequest)).AndReturn(self.images)

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images', args=[self.TEST_TENANT]))

        self.assertTemplateUsed(res, 'dash_images.html')
        self.assertIn('images', res.context)
        images = res.context['images']
        # Only visibleImage survives; invisibleImage is filtered out.
        self.assertEqual(len(images), 1)
        self.assertEqual(images[0].name, 'visibleImage')

        self.mox.VerifyAll()

    def test_index_no_images(self):
        """An empty image list renders the page with an info message."""
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest), self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'image_list_detailed')
        api.image_list_detailed(IsA(http.HttpRequest)).AndReturn([])
        self.mox.StubOutWithMock(messages, 'info')
        messages.info(IsA(http.HttpRequest), IsA(str))

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images', args=[self.TEST_TENANT]))

        self.assertTemplateUsed(res, 'dash_images.html')

        self.mox.VerifyAll()

    def test_index_client_conn_error(self):
        """A Glance connection error still renders, with an error message."""
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest), self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'image_list_detailed')
        exception = glance_exception.ClientConnectionError('clientConnError')
        api.image_list_detailed(IsA(http.HttpRequest)).AndRaise(exception)
        self.mox.StubOutWithMock(messages, 'error')
        messages.error(IsA(http.HttpRequest), IsA(str))

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images', args=[self.TEST_TENANT]))

        self.assertTemplateUsed(res, 'dash_images.html')

        self.mox.VerifyAll()

    def test_index_glance_error(self):
        """A generic Glance error still renders, with an error message."""
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest), self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'image_list_detailed')
        exception = glance_exception.Error('glanceError')
        api.image_list_detailed(IsA(http.HttpRequest)).AndRaise(exception)
        self.mox.StubOutWithMock(messages, 'error')
        messages.error(IsA(http.HttpRequest), IsA(str))

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images', args=[self.TEST_TENANT]))

        self.assertTemplateUsed(res, 'dash_images.html')

        self.mox.VerifyAll()

    def test_launch_get(self):
        """GET on the launch view renders the form with flavor and
        keypair choices populated from the API."""
        IMAGE_ID = '1'

        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IsA(http.HttpRequest),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'tenant_quota_get')
        api.tenant_quota_get(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(FakeQuota)
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'flavor_list')
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors)
        self.mox.StubOutWithMock(api, 'keypair_list')
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images_launch',
                                      args=[self.TEST_TENANT, IMAGE_ID]))

        self.assertTemplateUsed(res, 'dash_launch.html')

        image = res.context['image']
        self.assertEqual(image.name, self.visibleImage.name)

        self.assertEqual(res.context['tenant'], self.TEST_TENANT)

        form = res.context['form']

        form_flavorfield = form.fields['flavor']
        self.assertIn('m1.massive', form_flavorfield.choices[0][1])

        form_keyfield = form.fields['key_name']
        self.assertEqual(form_keyfield.choices[0][0],
                         self.keypairs[0].key_name)

        self.mox.VerifyAll()

    def test_launch_post(self):
        """A valid POST creates the server and redirects to instances."""
        FLAVOR_ID = self.flavors[0].id
        IMAGE_ID = '1'
        KEY_NAME = self.keypairs[0].key_name
        SERVER_NAME = 'serverName'
        USER_DATA = 'userData'

        form_data = {'method': 'LaunchForm',
                     'flavor': FLAVOR_ID,
                     'image_id': IMAGE_ID,
                     'key_name': KEY_NAME,
                     'name': SERVER_NAME,
                     'user_data': USER_DATA,
                     'tenant_id': self.TEST_TENANT,
                     }

        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IsA(http.HttpRequest),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'tenant_quota_get')
        api.tenant_quota_get(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(FakeQuota)
        self.mox.StubOutWithMock(api, 'flavor_list')
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors)
        self.mox.StubOutWithMock(api, 'keypair_list')
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)
        # called again by the form
        api.image_get(IsA(http.HttpRequest),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'flavor_get')
        api.flavor_get(IsA(http.HttpRequest),
                       IsA(unicode)).AndReturn(self.flavors[0])
        self.mox.StubOutWithMock(api, 'server_create')

        api.server_create(IsA(http.HttpRequest), SERVER_NAME,
                          self.visibleImage, self.flavors[0],
                          KEY_NAME, USER_DATA)

        self.mox.StubOutWithMock(messages, 'success')
        messages.success(IsA(http.HttpRequest), IsA(str))

        self.mox.ReplayAll()

        res = self.client.post(reverse('dash_images_launch',
                                       args=[self.TEST_TENANT, IMAGE_ID]),
                               form_data)

        self.assertRedirectsNoFollow(res, reverse('dash_instances',
                                                  args=[self.TEST_TENANT]))

        self.mox.VerifyAll()

    def test_launch_flavorlist_error(self):
        """If flavor listing fails, the form still renders and offers a
        default m1.tiny choice."""
        IMAGE_ID = '1'

        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IsA(http.HttpRequest),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'tenant_quota_get')
        api.tenant_quota_get(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(FakeQuota)

        exception = api_exceptions.ApiException('apiException')
        self.mox.StubOutWithMock(api, 'flavor_list')
        api.flavor_list(IsA(http.HttpRequest)).AndRaise(exception)

        self.mox.StubOutWithMock(api, 'keypair_list')
        api.keypair_list(IsA(http.HttpRequest)).AndReturn(self.keypairs)

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images_launch',
                                      args=[self.TEST_TENANT, IMAGE_ID]))

        self.assertTemplateUsed(res, 'dash_launch.html')

        form = res.context['form']

        form_flavorfield = form.fields['flavor']
        self.assertIn('m1.tiny', form_flavorfield.choices[0][1])

        self.mox.VerifyAll()

    def test_launch_keypairlist_error(self):
        """If keypair listing fails, the keypair field has no choices."""
        IMAGE_ID = '2'

        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IsA(http.HttpRequest),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'tenant_quota_get')
        api.tenant_quota_get(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(FakeQuota)
        self.mox.StubOutWithMock(api, 'flavor_list')
        api.flavor_list(IsA(http.HttpRequest)).AndReturn(self.flavors)

        exception = api_exceptions.ApiException('apiException')
        self.mox.StubOutWithMock(api, 'keypair_list')
        api.keypair_list(IsA(http.HttpRequest)).AndRaise(exception)

        self.mox.ReplayAll()

        res = self.client.get(reverse('dash_images_launch',
                                      args=[self.TEST_TENANT, IMAGE_ID]))

        self.assertTemplateUsed(res, 'dash_launch.html')

        form = res.context['form']

        form_keyfield = form.fields['key_name']
        self.assertEqual(len(form_keyfield.choices), 0)

        self.mox.VerifyAll()

    def test_launch_form_apiexception(self):
        """A server_create failure shows an error and re-renders the
        launch form."""
        FLAVOR_ID = self.flavors[0].id
        IMAGE_ID = '1'
        KEY_NAME = self.keypairs[0].key_name
        SERVER_NAME = 'serverName'
        USER_DATA = 'userData'

        form_data = {'method': 'LaunchForm',
                     'flavor': FLAVOR_ID,
                     'image_id': IMAGE_ID,
                     'key_name': KEY_NAME,
                     'name': SERVER_NAME,
                     'tenant_id': self.TEST_TENANT,
                     'user_data': USER_DATA,
                     }

        self.mox.StubOutWithMock(api, 'image_get')
        api.image_get(IgnoreArg(),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'token_get_tenant')
        api.token_get_tenant(IgnoreArg(),
                             self.TEST_TENANT).AndReturn(self.TEST_TENANT)
        self.mox.StubOutWithMock(api, 'tenant_quota_get')
        api.tenant_quota_get(IsA(http.HttpRequest),
                             self.TEST_TENANT).AndReturn(FakeQuota)
        self.mox.StubOutWithMock(api, 'flavor_list')
        api.flavor_list(IgnoreArg()).AndReturn(self.flavors)
        self.mox.StubOutWithMock(api, 'keypair_list')
        api.keypair_list(IgnoreArg()).AndReturn(self.keypairs)
        # called again by the form
        api.image_get(IgnoreArg(),
                      IMAGE_ID).AndReturn(self.visibleImage)
        self.mox.StubOutWithMock(api, 'flavor_get')
        api.flavor_get(IgnoreArg(),
                       IsA(unicode)).AndReturn(self.flavors[0])
        self.mox.StubOutWithMock(api, 'server_create')

        exception = api_exceptions.ApiException('apiException')
        api.server_create(IsA(http.HttpRequest), SERVER_NAME,
                          self.visibleImage, self.flavors[0],
                          KEY_NAME,
                          USER_DATA).AndRaise(exception)

        self.mox.StubOutWithMock(messages, 'error')
        messages.error(IsA(http.HttpRequest), IsA(str))

        self.mox.ReplayAll()

        url = reverse('dash_images_launch',
                      args=[self.TEST_TENANT, IMAGE_ID])
        res = self.client.post(url, form_data)

        self.assertTemplateUsed(res, 'dash_launch.html')

        self.mox.VerifyAll()
| |
# -*- coding:utf-8 -*-
"""
tests.backend
~~~~~~~~~~~~~
:copyright: (c) 2015 by Jason Lai.
:license: BSD, see LICENSE for more details.
"""
from faker import Factory
from functools import partial
fake = Factory.create()
def test_redis_backend_basic(rb, fake_manager, fake_coll):
    """Collection-index set/get round trips through the redis backend."""
    fake_manager.collmap = {'t1': fake_coll, 't2': fake_coll}
    for name, coll in fake_manager.collmap.items():
        rb.set_collection_index(name, coll)
    for name, coll in fake_manager.collmap.items():
        pair = rb.get_collection_index(name)
        # Stored pair is [collection name, collection class name].
        assert pair == [name, fake_coll.__class__.__name__]

    # ---------------------- check get all indexes ----------------------
    rv = rb.get_collection_indexes()
    # '_t' matches fake_coll.__class__.__name__ per the assertion above.
    matching = {'t1': '_t', 't2': '_t'}
    assert rv == matching

    # if name not exist get_collection_index should return None
    pair = rb.get_collection_index('not-exists')
    assert pair is None
def test_redis_backend_metadata(rb, fake_coll):
    """Metadata set / query / delete operations on one collection."""
    taggings = [fake.domain_name() for i in range(10)]
    # (expire_time, timestamp) pairs; expire is always ts + 100.
    ts_pairs = [(exp, exp-100) for exp in range(200, 300, 10)]
    first_ts, mid_ts, last_ts = ts_pairs[0][1], ts_pairs[4][1], ts_pairs[-1][1]
    # Extra positional args stored alongside the expire time.
    args = ['hello', 'world', 42]

    # ---------------- check metadata set and query operation ----------------
    for i, pair in enumerate(ts_pairs, 1):
        exp, ts = pair
        for t in taggings:
            rb.set_collection_metadata(fake_coll, t, exp, ts, *args)
        assert rb.get_collection_length(fake_coll) == [i]

        rv = rb.query_collection_metadata(fake_coll, t, 0, 1000)
        assert len(rv) == i
        # Each entry is ([expire_time, *args], timestamp).
        assert rv[i-1] == ([exp] + args, ts)

        rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000)
        assert len(rv) == i
        assert len(rv[ts]) == len(taggings)

        rv = rb.query_collection_metadata_all(fake_coll, 0, 1000)
        assert len(rv) == i
        assert len(rv[ts]) == len(taggings)
        for info in rv[ts].values():
            assert info == [exp] + args

    # ------------------- check metadata delete operations -------------------
    # delete one tagging info in first ts
    rb.del_collection_metadata_by_range(fake_coll, taggings[0],
                                        first_ts, first_ts)
    rv = rb.query_collection_metadata(fake_coll, t, 0, 1000)
    assert len(rv) == len(ts_pairs)
    rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000)
    assert len(rv) == len(ts_pairs)
    assert len(rv[first_ts]) == len(taggings) - 1
    assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings)
    assert rb.get_collection_length(fake_coll) == [len(taggings)]

    # delete all the taggings in first ts
    for t in taggings[1:]:
        rb.del_collection_metadata_by_range(fake_coll, t, first_ts, first_ts)
    rv = rb.query_collection_metadata(fake_coll, t, 0, 1000)
    assert len(rv) == len(ts_pairs) - 1
    rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000)
    assert len(rv) == len(ts_pairs) - 1
    assert first_ts not in rv
    assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings)
    assert rb.get_collection_length(fake_coll) == [len(taggings) - 1]

    # delete all taggings info in last five ts
    for exp, ts in ts_pairs[-5:]:
        for t in taggings:
            rb.del_collection_metadata_by_range(fake_coll, t, ts, ts)
    rv = rb.query_collection_metadata(fake_coll, t, 0, 1000)
    assert len(rv) == len(ts_pairs) - 6
    rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000)
    assert len(rv) == len(ts_pairs) - 6
    assert first_ts not in rv and last_ts not in rv
    assert len(rv[mid_ts]) == len(taggings)
    assert rb.get_collection_length(fake_coll) == [len(taggings) - 6]

    # ------------------ check no metadata exists situations ------------------
    # delete a nonexistent ts (must not raise)
    rb.del_collection_metadata_by_range(fake_coll, taggings[4], 9999, 9999)
    # deleting the same tagging in mid_ts twice must also be safe
    rb.del_collection_metadata_by_range(fake_coll, taggings[4], mid_ts, mid_ts)
    rb.del_collection_metadata_by_range(fake_coll, taggings[4], mid_ts, mid_ts)
    # queries over an empty range return None
    assert rb.query_collection_metadata(fake_coll, mid_ts, 9999, 9999) is None
    assert rb.query_collection_metadata_tagging(fake_coll, 9999, 9999) is None
    assert rb.query_collection_metadata_all(fake_coll, 9999, 9999) is None
def _add_inc_coll_item(rb, coll, tagging, ts, value):
    """Register metadata for (tagging, ts) and cache *value* under the
    corresponding inc-collection field key."""
    expire_ts = ts + 100
    rb.set_collection_metadata(coll, tagging, expire_ts, ts)
    field = _mk_inc_coll_field(tagging, ts)
    rb.inc_coll_cache_set(coll, field, value)
def _mk_inc_coll_field(tagging, ts):
field_key = '{}:{}'.format(ts, tagging)
return field_key
def _assert_inc_coll_cache_size(rb, coll, cache_len, md_len):
_md_len, _cache_len = rb.get_collection_length(coll, klass="IncreaseCollection")
assert _md_len == md_len
assert _cache_len == cache_len
def test_redis_backend_inc_coll(rb, fake_coll):
    """Cache set / get / delete for IncreaseCollection items."""
    tagging, other_tagging = 'day', 'for_diff'
    v = {i: i for i in range(20)}
    timestamps = [100, 110, 120, 130, 140]
    assert_cache_size = partial(_assert_inc_coll_cache_size, rb, fake_coll)

    # ---------------- check the operation of item adding ----------------
    for ts in timestamps:
        _add_inc_coll_item(rb, fake_coll, tagging, ts, v)
    # add the same items again to exercise the duplicate-handling logic
    for ts in timestamps:
        _add_inc_coll_item(rb, fake_coll, tagging, ts, v)
    # adding the other_tagging for the cache size check below
    for ts in timestamps:
        _add_inc_coll_item(rb, fake_coll, other_tagging, ts, v)
    print('Success Adding datas...\n\n\n')
    # 2 taggings x 5 timestamps in cache, 5 metadata entries.
    assert_cache_size(10, 5)

    # ------------------ check the cache data get operations ------------------
    fields = [_mk_inc_coll_field(tagging, ts) for ts in timestamps]
    rv = rb.inc_coll_caches_get(fake_coll, *fields)
    for r in rv:
        assert r == v
    rb.inc_coll_caches_del(fake_coll, *fields)
    rv = rb.inc_coll_caches_get(fake_coll, *fields)
    # Deleted fields read back as None.
    for r in rv:
        assert r is None
    assert_cache_size(5, 5)
    # if no fields specified
    assert rb.inc_coll_caches_get(fake_coll) == []

    # ---------------- check for the inc_coll_keys_delete ----------------
    assert_cache_size(5, 5)
    rb.delete_collection_keys(fake_coll, klass="IncreaseCollection")
    assert_cache_size(0, 0)
def test_redis_backend_unique_count_coll(rb, fake_coll):
    """Set / get / pop / delete for unique-count collection caches."""
    items_num = 200
    tagging = 'day'
    v = {fake.uuid4() for i in range(items_num)}
    timestamps = [100, 200, 300]

    # ----------- check the operation of item adding and getting ----------
    for ts in timestamps:
        rv = rb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v)
        # First insert reports all items added ...
        assert rv == items_num
        rv = rb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v)
        # ... a repeated insert adds nothing (set semantics).
        assert rv == 0
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps)
    for item in rv:
        assert item == v
        assert len(item) == items_num
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging,
                                      timestamps, count_only=True)
    for count in rv:
        assert count == items_num

    # ---------------- check for the operation of deleting ----------------
    rv = rb.uniq_count_coll_cache_del(fake_coll, tagging, timestamps[0:1])
    assert rv == 1
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[0:1])
    assert rv == [set()]
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[1:])
    for item in rv:
        assert item == v
        assert len(item) == items_num

    # uniq_count_coll_cache_pop removes and returns 50 items per timestamp
    rv = rb.uniq_count_coll_cache_pop(fake_coll, tagging, timestamps[1:], 50)
    for item in rv:
        assert len(item) == 50
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[1:])
    for item in rv:
        assert len(item) == items_num - 50

    # delete remain items
    rv = rb.uniq_count_coll_cache_del(fake_coll, tagging, timestamps[1:])
    assert rv == 2
    rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps)
    assert rv == [set(), set(), set()]
def test_redis_backend_sorted_count_coll(rb, fake_coll):
    """Round-trip test of the sorted-count (score-ordered) collection cache.

    Members carry integer scores; gets return (member, score) pairs ordered
    by ascending score.  `rb` and `fake_coll` are pytest fixtures.
    """
    tagging = 'day'
    v = {fake.uuid4(): i for i in range(200)}
    # expected result: the mapping flattened to score-sorted pairs
    v2 = [(member, score) for member, score in v.items()]
    v2 = sorted(v2, key=lambda x: x[1])
    timestamps = [100, 200, 300]
    # ----------- check the operation of item adding and getting ----------
    for ts in timestamps:
        rv = rb.sorted_count_coll_cache_set(fake_coll, ts, tagging, v)
        assert rv == 200
    rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps)
    for item in rv:
        assert item == v2
    # topN=100 keeps only the 100 highest-scoring members
    rv = rb.sorted_count_coll_cache_get(fake_coll, tagging,
                                        timestamps, topN=100)
    for item in rv:
        assert item == v2[100:]
    # ---------------- check for the operation of deleting ----------------
    rv = rb.sorted_count_coll_cache_del(fake_coll, tagging, timestamps[0:1])
    assert rv == 1
    rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps[0:1])
    assert rv == [[]]
    # remaining buckets are untouched by the single-bucket delete
    rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps[1:])
    for item in rv:
        assert item == v2
    rv = rb.sorted_count_coll_cache_del(fake_coll, tagging, timestamps[1:])
    assert rv == 2
    rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps)
    assert rv == [[], [], []]
| |
from __future__ import annotations
import http.client
import json
import os
import select
import socket as pysocket
import sys
import urllib.error
import urllib.parse
import urllib.request
from multiprocessing.pool import ThreadPool
import libtbx.phil
from dxtbx.model.crystal import CrystalFactory
from dxtbx.util import get_url_scheme
from libtbx.introspection import number_of_processors
from scitbx.array_family import flex
import dials.util
def work(host, port, filename, params):
    """Send one spotfinding request to the server and return the raw reply.

    Extra `params` are appended to the request path as `;key=value`
    segments (the server's parameter convention).  Returns the response
    body as bytes.
    """
    conn = http.client.HTTPConnection(host, port)
    try:
        path = filename
        for param in params:
            path += f";{param}"
        conn.request("GET", path)
        return conn.getresponse().read()
    finally:
        # Previously the connection was leaked; always release the socket.
        conn.close()
def _nproc():
    """Best-effort CPU count for this machine; -1 when it cannot be found."""
    detected = number_of_processors(return_value_if_unknown=-1)
    return detected
def response_to_xml(d):
    """Render a server JSON reply dict as the client's XML <response> blob.

    A reply without "n_spots_total" must carry an "error" key and is
    rendered as a bare error response.  Indexing ("lattices") and
    integration ("integrated_intensity") results are appended when present.
    """
    if "n_spots_total" not in d:
        # Error replies short-circuit: no per-image statistics available.
        assert "error" in d
        return f"<response>\n{d['error']}\n</response>"
    parts = [
        f"<image>{d['image']}</image>",
        f"<spot_count>{d['n_spots_total']}</spot_count>",
        f"<spot_count_no_ice>{d['n_spots_no_ice']}</spot_count_no_ice>",
        f"<d_min>{d['estimated_d_min']:.2f}</d_min>",
        f"<d_min_method_1>{d['d_min_distl_method_1']:.2f}</d_min_method_1>",
        f"<d_min_method_2>{d['d_min_distl_method_2']:.2f}</d_min_method_2>",
        f"<total_intensity>{d['total_intensity']:.0f}</total_intensity>",
    ]
    if "lattices" in d:
        for lattice in d["lattices"]:
            crystal = CrystalFactory.from_dict(lattice["crystal"])
            parts.append(
                "<unit_cell>%.6g %.6g %.6g %.6g %.6g %.6g</unit_cell>"
                % (crystal.get_unit_cell().parameters())
            )
        parts.append("<n_indexed>%i</n_indexed>" % d["n_indexed"])
        parts.append(
            "<fraction_indexed>%.2f</fraction_indexed>" % d["fraction_indexed"]
        )
    if "integrated_intensity" in d:
        parts.append(
            "<integrated_intensity>%.0f</integrated_intensity>"
            % d["integrated_intensity"]
        )
    body = "\n".join(parts)
    return f"<response>\n{body}\n</response>"
def work_all(
    host,
    port,
    filenames,
    params,
    plot=False,
    table=False,
    json_file=None,
    grid=None,
    nproc=None,
):
    """Spot-find every file in *filenames* against the server in parallel.

    Results are printed as XML as each worker finishes.  Optionally the raw
    JSON results are written to *json_file*, per-image statistics are
    plotted/tabulated, and a heat map is rendered when *grid* (rows, cols)
    is given.
    """
    if nproc is None:
        nproc = _nproc()
    with ThreadPool(processes=nproc) as pool:
        threads = {}
        for filename in filenames:
            threads[filename] = pool.apply_async(work, (host, port, filename, params))
        results = []
        for filename in filenames:
            response = threads[filename].get()
            d = json.loads(response)
            results.append(d)
            print(response_to_xml(d))
    if json_file is not None:
        # json.dump emits str, so the file must be opened in text mode;
        # the previous "wb" mode raised TypeError on the first write.
        with open(json_file, "w") as f:
            json.dump(results, f)
    if plot or table:
        from dials.algorithms.spot_finding.per_image_analysis import (
            StatsMultiImage,
            plot_stats,
        )

        estimated_d_min = flex.double()
        d_min_distl_method_1 = flex.double()
        d_min_distl_method_2 = flex.double()
        n_spots_total = flex.int()
        n_spots_no_ice = flex.int()
        total_intensity = flex.double()
        for d in results:
            estimated_d_min.append(d["estimated_d_min"])
            d_min_distl_method_1.append(d["d_min_distl_method_1"])
            d_min_distl_method_2.append(d["d_min_distl_method_2"])
            n_spots_total.append(d["n_spots_total"])
            n_spots_no_ice.append(d["n_spots_no_ice"])
            total_intensity.append(d["total_intensity"])
        stats = StatsMultiImage(
            n_spots_total=n_spots_total,
            n_spots_no_ice=n_spots_no_ice,
            n_spots_4A=None,
            total_intensity=total_intensity,
            estimated_d_min=estimated_d_min,
            d_min_distl_method_1=d_min_distl_method_1,
            d_min_distl_method_2=d_min_distl_method_2,
            noisiness_method_1=None,
            noisiness_method_2=None,
        )
        if plot:
            plot_stats(stats)
        if table:
            print(stats)
        if grid is not None:
            from matplotlib import pyplot

            # Reshape the flat per-image counts onto the scan grid.
            n_spots_no_ice.reshape(flex.grid(grid))
            print(n_spots_no_ice.size())
            pyplot.figure()
            pyplot.pcolormesh(n_spots_no_ice.as_numpy_array(), cmap=pyplot.cm.Reds)
            pyplot.savefig("spot_count.png")
def stop(host, port, nproc):
    """Ask up to *nproc* findspots server processes to shut down.

    Sends one /Ctrl-C request per expected process and returns how many
    are believed to have stopped.  Several error paths are deliberately
    counted as success because the server often dies before replying.
    """
    stopped = 0
    for j in range(nproc):
        try:
            url_request = urllib.request.Request(f"http://{host}:{port}/Ctrl-C")
            socket = urllib.request.urlopen(url_request, None, 3)
            # getcode() returns an int; the old comparison against the
            # string "200" could never match, so successes were miscounted.
            if socket.getcode() == 200:
                stopped = stopped + 1
            else:
                print("socket returned code", socket.getcode())
        except (pysocket.timeout, urllib.error.HTTPError) as e:
            print("error on stopping server:", e)
        except urllib.error.URLError as e:
            # errno 111 == connection refused: server already gone, stay quiet.
            if e.reason.errno != 111:
                print("error on stopping server:", e)
        except pysocket.error:
            # Assuming this means the server killed itself before the reply left the send buffer.
            stopped = stopped + 1
        except http.client.BadStatusLine:
            # Regular occurrence. Probably means the server stopped anyway.
            stopped = stopped + 1
    return stopped
phil_scope = libtbx.phil.parse(
"""\
nproc = Auto
.type = int(value_min=1)
host = localhost
.type = str
port = 1701
.type = int(value_min=1)
plot = False
.type = bool
table = False
.type = bool
json = None
.type = path
grid = None
.type = ints(size=2, value_min=1)
"""
)
@dials.util.show_mail_handle_errors()
def run(args=None):
    """Command-line entry point for the findspots client.

    Separates the argument list into filenames/URLs and PHIL parameters,
    then dispatches: "stop" shuts servers down, "ping" checks liveness,
    otherwise every file is submitted for spotfinding.
    """
    # Copy so we never mutate a caller-supplied argument list when
    # extending it with stdin content below.
    mixed_args = list(args) if args else sys.argv[1:]
    if os.name != "nt":
        # Non-blocking check for additional arguments piped in on stdin.
        r, w, x = select.select([sys.stdin], [], [], 0)
        if len(r) > 0:
            mixed_args.extend(
                [line.strip() for rr in r for line in rr.readlines()]
            )
    filenames = []
    args = []
    for arg in mixed_args:
        if get_url_scheme(arg):
            # Make this look like a path. If you squint. And are looking away.
            filenames.append("/" + urllib.parse.quote(arg))
        elif os.path.isfile(arg):
            filenames.append(arg)
        else:
            args.append(arg)
    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        args, custom_processor="collect_remaining"
    )
    params = params.extract()
    if params.nproc is libtbx.Auto:
        # Auto: let work_all() detect; 1024 only bounds the "stop" fan-out.
        nproc = None
        params.nproc = 1024
    else:
        nproc = params.nproc
    if len(unhandled) and unhandled[0] == "stop":
        stopped = stop(params.host, params.port, params.nproc)
        print("Stopped %d findspots processes" % stopped)
    elif len(unhandled) and unhandled[0] == "ping":
        url = "http://%s:%i" % (params.host, params.port)
        try:
            _ = urllib.request.urlopen(url).read()
            print("Success")
            sys.exit(0)
        except Exception:
            print("Failure")
            sys.exit(1)
    else:
        if len(filenames) == 1:
            response = work(params.host, params.port, filenames[0], unhandled)
            print(response_to_xml(json.loads(response)))
        else:
            work_all(
                params.host,
                params.port,
                filenames,
                unhandled,
                plot=params.plot,
                table=params.table,
                json_file=params.json,
                grid=params.grid,
                nproc=nproc,
            )
# Allow running this module directly as a script.
if __name__ == "__main__":
    run()
| |
# Copyright 2015 Oktay Sancak
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
from slacker.utils import get_item_id_by_name
# Endpoint template every Slack Web API call is formatted against.
API_BASE_URL = 'https://slack.com/api/{api}'

# Explicit public API of this module.
__all__ = ['Error', 'Response', 'BaseAPI', 'API', 'Auth', 'Users', 'Groups',
           'Channels', 'Chat', 'IM', 'IncomingWebhook', 'Search', 'Files',
           'Stars', 'Emoji', 'Presence', 'RTM', 'Team', 'OAuth', 'Slacker']
class Error(Exception):
    """Raised when the Slack API replies with ok=False; message is the
    API-reported error string."""
    pass
class Response(object):
    """Parsed Slack API reply.

    Keeps the raw JSON text (`raw`), the decoded dict (`body`), the `ok`
    flag (`successful`) and the optional `error` string.
    """

    def __init__(self, body):
        decoded = json.loads(body)
        self.raw = body
        self.body = decoded
        self.successful = decoded['ok']
        self.error = decoded.get('error')
class BaseAPI(object):
    """Shared HTTP plumbing for every API group.

    Injects the bearer token into the request params and converts
    unsuccessful replies into `Error`.
    """

    def __init__(self, token=None):
        self.token = token

    def _request(self, method, api, **kwargs):
        """Issue `method` against the given API endpoint and parse the reply."""
        if self.token:
            request_params = kwargs.setdefault('params', {})
            request_params['token'] = self.token
        raw = method(API_BASE_URL.format(api=api), **kwargs)
        assert raw.status_code == 200
        response = Response(raw.text)
        if not response.successful:
            raise Error(response.error)
        return response

    def get(self, api, **kwargs):
        """HTTP GET against a Web API method name such as 'users.list'."""
        return self._request(requests.get, api, **kwargs)

    def post(self, api, **kwargs):
        """HTTP POST against a Web API method name such as 'chat.postMessage'."""
        return self._request(requests.post, api, **kwargs)
class API(BaseAPI):
    """api.* methods: connectivity checks."""

    def info(self):
        pass  # placeholder comment removed — not present in original

    def test(self, error=None, **kwargs):
        # api.test echoes its arguments; `error` forces an error response.
        if error:
            kwargs['error'] = error
        return self.get('api.test', params=kwargs)
class Auth(BaseAPI):
    """auth.* methods: token validity checks."""

    def test(self):
        return self.get('auth.test')
class Users(BaseAPI):
    """users.* methods: lookup, listing, presence and activity."""

    def info(self, user):
        return self.get('users.info', params={'user': user})

    def list(self):
        return self.get('users.list')

    def set_active(self):
        return self.post('users.setActive')

    def get_presence(self, user):
        return self.get('users.getPresence', params={'user': user})

    def set_presence(self, presence):
        # Client-side validation against the two values Slack accepts.
        assert presence in Presence.TYPES, 'Invalid presence type'
        return self.post('users.setPresence', params={'presence': presence})

    def get_user_id(self, user_name):
        # Convenience helper: resolve a display name to the user's id.
        members = self.list().body['members']
        return get_item_id_by_name(members, user_name)
class Groups(BaseAPI):
    """groups.* methods: private group management, history and metadata."""

    def create(self, name):
        return self.post('groups.create', params={'name': name})

    def create_child(self, channel):
        return self.post('groups.createChild', params={'channel': channel})

    def info(self, channel):
        return self.get('groups.info', params={'channel': channel})

    def list(self, exclude_archived=None):
        return self.get('groups.list',
                        params={'exclude_archived': exclude_archived})

    def history(self, channel, latest=None, oldest=None, count=None,
                inclusive=None):
        return self.get('groups.history',
                        params={
                            'channel': channel,
                            'latest': latest,
                            'oldest': oldest,
                            'count': count,
                            'inclusive': inclusive
                        })

    def invite(self, channel, user):
        return self.post('groups.invite',
                         params={'channel': channel, 'user': user})

    def kick(self, channel, user):
        return self.post('groups.kick',
                         params={'channel': channel, 'user': user})

    def leave(self, channel):
        return self.post('groups.leave', params={'channel': channel})

    def mark(self, channel, ts):
        # Moves the read cursor to message timestamp `ts`.
        return self.post('groups.mark', params={'channel': channel, 'ts': ts})

    def rename(self, channel, name):
        return self.post('groups.rename',
                         params={'channel': channel, 'name': name})

    def archive(self, channel):
        return self.post('groups.archive', params={'channel': channel})

    def unarchive(self, channel):
        return self.post('groups.unarchive', params={'channel': channel})

    def open(self, channel):
        return self.post('groups.open', params={'channel': channel})

    def close(self, channel):
        return self.post('groups.close', params={'channel': channel})

    def set_purpose(self, channel, purpose):
        return self.post('groups.setPurpose',
                         params={'channel': channel, 'purpose': purpose})

    def set_topic(self, channel, topic):
        return self.post('groups.setTopic',
                         params={'channel': channel, 'topic': topic})
class Channels(BaseAPI):
    """channels.* methods: public channel management, history and metadata."""

    def create(self, name):
        return self.post('channels.create', params={'name': name})

    def info(self, channel):
        return self.get('channels.info', params={'channel': channel})

    def list(self, exclude_archived=None):
        return self.get('channels.list',
                        params={'exclude_archived': exclude_archived})

    def history(self, channel, latest=None, oldest=None, count=None,
                inclusive=None):
        return self.get('channels.history',
                        params={
                            'channel': channel,
                            'latest': latest,
                            'oldest': oldest,
                            'count': count,
                            'inclusive': inclusive
                        })

    def mark(self, channel, ts):
        # Moves the read cursor to message timestamp `ts`.
        return self.post('channels.mark',
                         params={'channel': channel, 'ts': ts})

    def join(self, name):
        return self.post('channels.join', params={'name': name})

    def leave(self, channel):
        return self.post('channels.leave', params={'channel': channel})

    def invite(self, channel, user):
        return self.post('channels.invite',
                         params={'channel': channel, 'user': user})

    def kick(self, channel, user):
        return self.post('channels.kick',
                         params={'channel': channel, 'user': user})

    def rename(self, channel, name):
        return self.post('channels.rename',
                         params={'channel': channel, 'name': name})

    def archive(self, channel):
        return self.post('channels.archive', params={'channel': channel})

    def unarchive(self, channel):
        return self.post('channels.unarchive', params={'channel': channel})

    def set_purpose(self, channel, purpose):
        return self.post('channels.setPurpose',
                         params={'channel': channel, 'purpose': purpose})

    def set_topic(self, channel, topic):
        return self.post('channels.setTopic',
                         params={'channel': channel, 'topic': topic})

    def get_channel_id(self, channel_name):
        # Convenience helper: resolve a channel name to its id.
        channels = self.list().body['channels']
        return get_item_id_by_name(channels, channel_name)
class Chat(BaseAPI):
    """chat.* methods: post, update and delete messages."""

    def post_message(self, channel, text, username=None, as_user=None, parse=None,
                     link_names=None, attachments=None, unfurl_links=None,
                     unfurl_media=None, icon_url=None, icon_emoji=None):
        """Post `text` to `channel`; returns the API Response."""
        # Ensure attachments are json encoded
        if attachments:
            if isinstance(attachments, list):
                attachments = json.dumps(attachments)
        return self.post('chat.postMessage',
                         params={
                             'channel': channel,
                             'text': text,
                             'username': username,
                             'as_user': as_user,
                             'parse': parse,
                             'link_names': link_names,
                             'attachments': attachments,
                             'unfurl_links': unfurl_links,
                             'unfurl_media': unfurl_media,
                             'icon_url': icon_url,
                             'icon_emoji': icon_emoji
                         })

    def update(self, channel, ts, text):
        """Edit the message at timestamp `ts`; returns the API Response.

        Previously the response was silently dropped — now returned for
        consistency with post_message.
        """
        return self.post('chat.update',
                         params={'channel': channel, 'ts': ts, 'text': text})

    def delete(self, channel, ts):
        """Delete the message at timestamp `ts`; returns the API Response
        (previously dropped)."""
        return self.post('chat.delete', params={'channel': channel, 'ts': ts})
class IM(BaseAPI):
    """im.* methods: direct-message channel management and history."""

    def list(self):
        return self.get('im.list')

    def history(self, channel, latest=None, oldest=None, count=None,
                inclusive=None):
        return self.get('im.history',
                        params={
                            'channel': channel,
                            'latest': latest,
                            'oldest': oldest,
                            'count': count,
                            'inclusive': inclusive
                        })

    def mark(self, channel, ts):
        # Moves the read cursor to message timestamp `ts`.
        return self.post('im.mark', params={'channel': channel, 'ts': ts})

    def open(self, user):
        return self.post('im.open', params={'user': user})

    def close(self, channel):
        return self.post('im.close', params={'channel': channel})
class Search(BaseAPI):
    """search.* methods: team-wide search over messages and files."""

    def all(self, query, sort=None, sort_dir=None, highlight=None, count=None,
            page=None):
        return self.get('search.all',
                        params={
                            'query': query,
                            'sort': sort,
                            'sort_dir': sort_dir,
                            'highlight': highlight,
                            'count': count,
                            'page': page
                        })

    def files(self, query, sort=None, sort_dir=None, highlight=None,
              count=None, page=None):
        return self.get('search.files',
                        params={
                            'query': query,
                            'sort': sort,
                            'sort_dir': sort_dir,
                            'highlight': highlight,
                            'count': count,
                            'page': page
                        })

    def messages(self, query, sort=None, sort_dir=None, highlight=None,
                 count=None, page=None):
        return self.get('search.messages',
                        params={
                            'query': query,
                            'sort': sort,
                            'sort_dir': sort_dir,
                            'highlight': highlight,
                            'count': count,
                            'page': page
                        })
class Files(BaseAPI):
    """files.* methods: listing, metadata, upload and deletion."""

    def list(self, user=None, ts_from=None, ts_to=None, types=None,
             count=None, page=None):
        return self.get('files.list',
                        params={
                            'user': user,
                            'ts_from': ts_from,
                            'ts_to': ts_to,
                            'types': types,
                            'count': count,
                            'page': page
                        })

    def info(self, file_, count=None, page=None):
        return self.get('files.info',
                        params={'file': file_, 'count': count, 'page': page})

    def upload(self, file_, content=None, filetype=None, filename=None,
               title=None, initial_comment=None, channels=None):
        # NOTE(review): file_ is always opened, even when `content` is
        # supplied instead — confirm callers always pass a readable path.
        with open(file_, 'rb') as f:
            if isinstance(channels, (tuple, list)):
                channels = ','.join(channels)
            return self.post('files.upload',
                             params={
                                 'content': content,
                                 'filetype': filetype,
                                 'filename': filename,
                                 'title': title,
                                 'initial_comment': initial_comment,
                                 'channels': channels
                             },
                             files={'file': f})

    def delete(self, file_):
        return self.post('files.delete', params={'file': file_})
class Stars(BaseAPI):
    """stars.* methods: items starred by a user."""

    def list(self, user=None, count=None, page=None):
        return self.get('stars.list',
                        params={'user': user, 'count': count, 'page': page})
class Emoji(BaseAPI):
    """emoji.* methods: custom team emoji."""

    def list(self):
        return self.get('emoji.list')
class Presence(BaseAPI):
    """presence.* methods; also hosts the valid presence constants."""

    # The only presence values the API accepts.
    AWAY = 'away'
    ACTIVE = 'active'
    TYPES = (AWAY, ACTIVE)

    def set(self, presence):
        assert presence in Presence.TYPES, 'Invalid presence type'
        return self.post('presence.set', params={'presence': presence})
class RTM(BaseAPI):
    """rtm.* methods: Real Time Messaging session bootstrap."""

    def start(self):
        return self.get('rtm.start')
class Team(BaseAPI):
    """team.* methods: team info and access logs."""

    def info(self):
        return self.get('team.info')

    def access_logs(self, count=None, page=None):
        return self.get('team.accessLogs',
                        params={'count': count, 'page': page})
class OAuth(BaseAPI):
    """oauth.* methods: exchange an authorization code for a token."""

    def access(self, client_id, client_secret, code, redirect_uri=None):
        return self.post('oauth.access',
                         params={
                             'client_id': client_id,
                             'client_secret': client_secret,
                             'code': code,
                             'redirect_uri': redirect_uri
                         })
class IncomingWebhook(object):
    """Posts JSON payloads to a Slack incoming-webhook URL (no token needed)."""

    def __init__(self, url=None):
        self.url = url

    def post(self, data):
        """
        Posts message with payload formatted in accordance with
        this documentation https://api.slack.com/incoming-webhooks
        """
        if not self.url:
            raise Error('URL for incoming webhook is undefined')
        payload = json.dumps(data)
        return requests.post(self.url, data=payload)
class Slacker(object):
    """Facade bundling every API group under one token."""

    # Class-level: oauth.access exchanges a code for a token, so it
    # deliberately carries no token of its own.
    oauth = OAuth()

    def __init__(self, token, incoming_webhook_url=None):
        self.im = IM(token=token)
        self.api = API(token=token)
        self.rtm = RTM(token=token)
        self.auth = Auth(token=token)
        self.chat = Chat(token=token)
        self.team = Team(token=token)
        self.users = Users(token=token)
        self.files = Files(token=token)
        self.stars = Stars(token=token)
        self.emoji = Emoji(token=token)
        self.search = Search(token=token)
        self.groups = Groups(token=token)
        self.channels = Channels(token=token)
        self.presence = Presence(token=token)
        self.incomingwebhook = IncomingWebhook(url=incoming_webhook_url)
| |
from gppylib.commands.base import Command, CommandResult, REMOTE
from mpp.lib.datagen import TINCTestDatabase
from mpp.lib.PSQL import PSQL, PSQLException
class TPCHDatabase(TINCTestDatabase):
    """TINC test database that generates and loads TPC-H tables via dbgen.

    Data is produced by external web tables that run $GPHOME/bin/dbgen on
    each primary segment, then inserted into heap/AO/CO/parquet tables
    depending on `storage_type`.
    """

    def __init__(self, database_name = "tpch_0.01_heap", scale_factor = 0.01, storage_type = 'heap', partition = False):
        # storage_type: one of 'heap', 'ao', 'co', 'parquet'; anything else
        # falls back to heap in setUp().
        self.db_name = database_name
        self.scale_factor = scale_factor
        self.storage_type = storage_type
        self.partition = partition
        # Lazily discovered in _form_dbgen_command_string(); -1 == unknown.
        self.primary_segments_count = -1
        super(TPCHDatabase, self).__init__(database_name)

    def setUp(self):
        """Create the database via the base class, then load TPC-H data.

        Returns True when the base class reports the database already
        exists (nothing reloaded); otherwise reloads all tables per
        `storage_type` and returns False.
        """
        if super(TPCHDatabase, self).setUp():
            return True
        #call reload_tpch
        if self.storage_type == 'heap':
            self.reload_TPCH()
        elif self.storage_type == 'ao':
            self.reload_AOTPCH()
        elif self.storage_type == 'co':
            self.reload_COTPCH()
        elif self.storage_type == 'parquet':
            self.reload_ParquetTPCH()
        else:
            self.reload_TPCH()
        return False

    def _form_dbgen_command_string(self, table_prefix):
        """Build the dbgen EXECUTE clause for an external web table.

        `table_prefix` selects the TPC-H table dbgen emits (-T flag).  The
        primary-segment count is queried once and cached on the instance.
        """
        if self.primary_segments_count < 0:
            #Find number of primary segments
            self.primary_segments_count = (PSQL.run_sql_command("select 'primary_segment' from gp_segment_configuration \
                where content >= 0 and role = 'p'", flags='')).count('primary_segment')
        db_gen_command = ''' execute 'bash -c \\"\$GPHOME/bin/dbgen -b \$GPHOME/bin/dists.dss -T %s -s %f -N %d -n \$((GP_SEGMENT_ID + 1))\\"'
            on %d format 'text' (delimiter '|')''' % ( table_prefix, self.scale_factor, self.primary_segments_count, self.primary_segments_count)
        return db_gen_command

    def reload_table(self, pTableName, pTableDefinition, pDbGenCmd,
                     withClause = "", preExecuteCmd = "", createExternalTable = True, pTablePartitionDefinition = ''):
        '''
        Drop a table; re-create it; and re-load the data into it.
        @param pTableName: The name of the table.
              pTableDefinition: The list of column names and data types,
              enclosed in parentheses, for example:
                 "(x int, y float)"
        @param pDbGenCmd: A command to generate data to insert into the table.
            This typically looks like:
                dbGenCmd = "execute 'bash -c \\"\$GPHOME/bin/dbgen -b \
                $GPHOME/bin/dists.dss -T r -s 1 \\"'
                on 1 format 'text' (delimiter '|')"
            I don't really understand this; I copied it blindly from an
            earlier version of gp.test.  I assume that the dbgen program
            is being called to generate data for the external tables.
        @param withClause: This is an optional string that may contain a "WITH"
            clause such as "WITH (APPENDONLY=True, ORIENTATION='column')".
            It could also contain other clauses, like "DISTRIBUTED BY...".
        @param preExecuteCmd: This is an optional string that will be executed
            prior to the CREATE TABLE statement.  For example, this string
            might contain "SET SEARCH_PATH TO AO_SCHEMA;" to set the
            search path to include the schema named "AO_SCHEMA".
        @param createExternalTable: set this to False if you have already created
            the external table and don't want to create it again.
        @param pTablePartitionDefinition: optional PARTITION BY clause appended
            after the WITH clause.
        '''
        ok = True
        if withClause == None:
            withClause = ""
        if preExecuteCmd == None:
            preExecuteCmd = ""
        # NOTE(review): this uses preExecuteCmd[-1].strip() != ";" while
        # reload_TPCH() checks preExecuteCmd[-1] != ";" — a command ending
        # in "; " gets a second semicolon appended here. Harmless to SQL,
        # but the two checks should probably agree.
        if len(preExecuteCmd) > 0 and preExecuteCmd[-1].strip() != ";":
            preExecuteCmd = preExecuteCmd + ";"
        res = {'rc':0, 'stderr':'', 'stdout':''}
        if createExternalTable:
            cmd = 'drop external web table if exists e_' + pTableName
            out = PSQL.run_sql_command(cmd , dbname = self.db_name)
            cmd = "create external web table e_" + pTableName + \
                pTableDefinition + " " + pDbGenCmd
            # Collapse the multi-line SQL into a single line before running.
            cmd = "".join( cmd.split( '\n' ) )
            out = PSQL.run_sql_command(cmd , dbname = self.db_name, results=res)
            if res['rc']:
                raise PSQLException(res['stderr'])
        cmd = 'drop table if exists ' + pTableName + ' cascade'
        out = PSQL.run_sql_command(preExecuteCmd + cmd , dbname = self.db_name, results=res)
        if res['rc']:
            raise PSQLException(res['stderr'])
        cmd = 'CREATE TABLE ' + pTableName + pTableDefinition
        cmd = "".join(cmd.split( '\n' ))
        cmd = cmd + withClause + ' ' + pTablePartitionDefinition
        out = PSQL.run_sql_command(preExecuteCmd + cmd, dbname = self.db_name, results=res)
        if res['rc']:
            raise PSQLException(res['stderr'])
        # Load from the dbgen-backed external table into the real table.
        cmd = 'insert into ' + pTableName + ' select * from e_' + pTableName
        out = PSQL.run_sql_command(preExecuteCmd + cmd, dbname = self.db_name, results=res)
        if res['rc']:
            raise PSQLException(res['stderr'])
        # 'analyse' is the PostgreSQL-accepted alternate spelling of ANALYZE.
        cmd = 'analyse ' + pTableName
        out = PSQL.run_sql_command(preExecuteCmd + cmd, dbname = self.db_name, results=res)
        if res['rc']:
            raise PSQLException(res['stderr'])

    def reload_Nation(self, withClause = "", preExecuteCmd = "",
                      createExternalTable = True):
        '''
        Reloads TPCH Nation
        '''
        tableName = "nation"
        tableDefinition = '''(N_NATIONKEY  INTEGER ,
                            N_NAME       CHAR(25) ,
                            N_REGIONKEY  INTEGER ,
                            N_COMMENT    VARCHAR(152))'''
        # Always use a fixed scale factor of 1
        # for Region and Nation as required by TPC-H standards.
        dbGenCmd = '''execute 'bash -c \\"\$GPHOME/bin/dbgen -b \$GPHOME/bin/dists.dss -T n -s 1  \\"'
                   on 1 format 'text' (delimiter '|')'''
        self.reload_table( tableName, tableDefinition, dbGenCmd,
                           withClause, preExecuteCmd, createExternalTable )

    def reload_Region(self, withClause = "", preExecuteCmd = "",
                      createExternalTable = True):
        '''
        Reloads TPCH Region
        '''
        tableName = "region"
        tableDefinition = '''( R_REGIONKEY  INTEGER ,
                            R_NAME       CHAR(25) ,
                            R_COMMENT    VARCHAR(152))'''
        # Always use a fixed scale factor of 1
        # for Region and Nation as required by TPC-H standards.
        dbGenCmd = '''execute 'bash -c \\"\$GPHOME/bin/dbgen -b \$GPHOME/bin/dists.dss -T r -s 1  \\"'
                   on 1 format 'text' (delimiter '|')'''
        self.reload_table( tableName, tableDefinition, dbGenCmd,
                           withClause, preExecuteCmd, createExternalTable )

    def reload_Part(self, withClause = "", preExecuteCmd = "",
                    createExternalTable = True):
        '''
        Reloads TPCH Part
        '''
        tableName = "Part"
        tableDefinition = '''(P_PARTKEY     INTEGER ,
                            P_NAME        VARCHAR(55) ,
                            P_MFGR        CHAR(25) ,
                            P_BRAND       CHAR(10) ,
                            P_TYPE        VARCHAR(25) ,
                            P_SIZE        INTEGER ,
                            P_CONTAINER   CHAR(10) ,
                            P_RETAILPRICE DECIMAL(15,2) ,
                            P_COMMENT     VARCHAR(23) )'''
        dbGenCmd = self._form_dbgen_command_string('P')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable)

    def reload_Supplier(self, withClause = "", preExecuteCmd = "",
                        createExternalTable = True):
        '''
        Reloads TPCH Supplier
        '''
        tableName = "Supplier"
        tableDefinition = '''(S_SUPPKEY     INTEGER ,
                            S_NAME        CHAR(25) ,
                            S_ADDRESS     VARCHAR(40) ,
                            S_NATIONKEY   INTEGER ,
                            S_PHONE       CHAR(15) ,
                            S_ACCTBAL     DECIMAL(15,2) ,
                            S_COMMENT     VARCHAR(101) )'''
        dbGenCmd = self._form_dbgen_command_string('s')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable)

    def reload_Partsupp(self, withClause = "", preExecuteCmd = "",
                        createExternalTable = True):
        '''
        Reloads TPCH Part Supply
        '''
        tableName = "Partsupp"
        tableDefinition = '''( PS_PARTKEY     INTEGER ,
                            PS_SUPPKEY     INTEGER ,
                            PS_AVAILQTY    INTEGER ,
                            PS_SUPPLYCOST  DECIMAL(15,2)  ,
                            PS_COMMENT     VARCHAR(199) )'''
        dbGenCmd = self._form_dbgen_command_string('S')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable)

    def reload_Customer(self, withClause = "", preExecuteCmd = "",
                        createExternalTable = True):
        '''
        Reloads TPCH Customer
        '''
        tableName = "Customer"
        tableDefinition = ''' ( C_CUSTKEY     INTEGER ,
                            C_NAME        VARCHAR(25) ,
                            C_ADDRESS     VARCHAR(40) ,
                            C_NATIONKEY   INTEGER ,
                            C_PHONE       CHAR(15) ,
                            C_ACCTBAL     DECIMAL(15,2)   ,
                            C_MKTSEGMENT  CHAR(10) ,
                            C_COMMENT     VARCHAR(117) )'''
        dbGenCmd = self._form_dbgen_command_string('c')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable)

    def reload_Orders(self, withClause = "", preExecuteCmd = "",
                      createExternalTable = True):
        '''
        Reloads TPCH Orders
        '''
        pTablePartitionDefinition = ''
        tableName = "Orders"
        tableDefinition = '''( O_ORDERKEY       INTEGER ,
                            O_CUSTKEY        INTEGER ,
                            O_ORDERSTATUS    CHAR(1) ,
                            O_TOTALPRICE     DECIMAL(15,2) ,
                            O_ORDERDATE      DATE ,
                            O_ORDERPRIORITY  CHAR(15) ,
                            O_CLERK          CHAR(15) ,
                            O_SHIPPRIORITY   INTEGER ,
                            O_COMMENT        VARCHAR(79) )'''
        # Monthly range partitions over the order-date domain when requested.
        if self.partition:
            pTablePartitionDefinition += " PARTITION BY RANGE(o_orderdate) (partition p1 start('1992-01-01') end('1998-08-03') every(interval '1 month'))"
        dbGenCmd = self._form_dbgen_command_string('O')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable, pTablePartitionDefinition)

    def reload_Lineitem( self, withClause = "", preExecuteCmd = "",
                         createExternalTable = True):
        '''
        Reloads TPCH Lineitem
        '''
        tableName = "Lineitem"
        pTablePartitionDefinition = ''
        tableDefinition = '''( L_ORDERKEY    INTEGER ,
                            L_PARTKEY     INTEGER ,
                            L_SUPPKEY     INTEGER ,
                            L_LINENUMBER  INTEGER ,
                            L_QUANTITY    DECIMAL(15,2) ,
                            L_EXTENDEDPRICE  DECIMAL(15,2) ,
                            L_DISCOUNT    DECIMAL(15,2) ,
                            L_TAX         DECIMAL(15,2) ,
                            L_RETURNFLAG  CHAR(1) ,
                            L_LINESTATUS  CHAR(1) ,
                            L_SHIPDATE    DATE ,
                            L_COMMITDATE  DATE ,
                            L_RECEIPTDATE DATE ,
                            L_SHIPINSTRUCT CHAR(25) ,
                            L_SHIPMODE     CHAR(10) ,
                            L_COMMENT      VARCHAR(44) )'''
        # Monthly range partitions over the ship-date domain when requested.
        if self.partition:
            pTablePartitionDefinition += "PARTITION by range(l_shipdate) (partition p1 start('1992-01-01') end('1998-12-02') every(interval '1 month'))"
        dbGenCmd = self._form_dbgen_command_string('L')
        self.reload_table(tableName, tableDefinition, dbGenCmd,
                          withClause, preExecuteCmd, createExternalTable, pTablePartitionDefinition)

    def reload_AOTPCH(self, withClause = "WITH (checksum=true,appendonly=true,compresstype=quicklz,compresslevel=1)", preExecuteCmd = "",
                      createExternalTable = True, forceCreateTable = False):
        '''
        Create Append-only TPCH1 database
        @param withClause: append only definition
        @param scaleFactor: scale factor, default = 1
        '''
        self.reload_TPCH(withClause, preExecuteCmd, createExternalTable, forceCreateTable)

    def reload_COTPCH(self, withClause = "WITH (checksum=false,appendonly=true,orientation = column,blocksize=49152,compresslevel=5)", preExecuteCmd = "",
                      createExternalTable = True, forceCreateTable = False):
        '''
        Create Column Append-only TPCH1 database
        @param withClause: append only definition
        @param scaleFactor: scale factor, default = 1
        '''
        self.reload_TPCH(withClause, preExecuteCmd, createExternalTable, forceCreateTable)

    def reload_ParquetTPCH(self, withClause = "WITH (appendonly=true,orientation=parquet)", preExecuteCmd = "",createExternalTable = True, forceCreateTable = False):
        '''
        Create Parquet TPCH database
        @param withClause: parquet definition
        @param scaleFactor: scale factor, default = 0.01
        '''
        self.reload_TPCH(withClause, preExecuteCmd, createExternalTable, forceCreateTable)

    def reload_TPCH(self, withClause = "", preExecuteCmd = "",
                    createExternalTable = True, forceCreateTable = False):
        '''
        Drop/create/reload the TPCH1 database
        @param withClause: This is an optional string that may contain a "WITH"
            clause such as "WITH (APPENDONLY=True, ORIENTATION='column')".
            It could also contain other clauses, like "DISTRIBUTED BY...".
        @param preExecuteCmd: This is an optional string that will be executed
            prior to the CREATE TABLE statement.  For example, this string
            might contain "SET SEARCH_PATH TO AO_SCHEMA;" to set the
            search path to include the schema named "AO_SCHEMA".
        @param createExternalTable: set this to False if you have already created
            the external table and don't want to create it again.
        @param forceCreateTable: Set to True if you want to force re-creation
            of the table even if it already exists and data hasn't changed.
            (You might do this if you want to drop a heap table and
            re-create it as an AO table, for example.)
        @change: 2010-05-20 mgilkey
            Added withClause, preExecuteCmd, and createExternalTable.
        @note: Uses PSQL to load TPCH1 data, problem with using pyODB causes SIGSEGV
            The results are different so it will always drop and load data. And since it gives SIGSEGV
            no data is loaded. We need to find a better way to do this.
        '''
        if withClause == None:
            withClause = ""
        if preExecuteCmd == None:
            preExecuteCmd = ""
        if len(preExecuteCmd) > 0 and preExecuteCmd[-1] != ";":
            preExecuteCmd = preExecuteCmd + ";"
        self.reload_Nation(withClause, preExecuteCmd, createExternalTable)
        self.reload_Region(withClause, preExecuteCmd, createExternalTable)
        self.reload_Part(withClause, preExecuteCmd, createExternalTable)
        self.reload_Supplier(withClause, preExecuteCmd, createExternalTable)
        self.reload_Partsupp(withClause, preExecuteCmd, createExternalTable)
        self.reload_Customer(withClause, preExecuteCmd, createExternalTable)
        self.reload_Orders(withClause, preExecuteCmd, createExternalTable)
        self.reload_Lineitem(withClause, preExecuteCmd, createExternalTable)
if __name__ == '__main__':
    # Quick manual check: print the number of primary segments.
    # NOTE(review): py2-style `print (expr).count(...)` — under Python 3 this
    # would call .count on print()'s None return; assumed to run on Python 2.
    print (PSQL.run_sql_command("select 'primary_segment' from gp_segment_configuration \
        where content >= 0 and role = 'p'", flags='')).count('primary_segment')
| |
#!/router/bin/python-2.7.4
import re
import misc_methods
class PlatformResponseMissmatch(Exception):
    """Raised when a platform query response contains none of the expected data."""
    def __init__(self, message):
        # Call the base class constructor with the parameters it needs
        super(PlatformResponseMissmatch, self).__init__(message + ' is not available for given platform state and data.\nPlease make sure the relevant features are turned on in the platform.')
class PlatformResponseAmbiguity(Exception):
    """Raised when a filename lookup on the platform matches more than one file."""
    def __init__(self, message):
        # Call the base class constructor with the parameters it needs
        super(PlatformResponseAmbiguity, self).__init__(message + ' found more than one file matching the provided filename.\nPlease provide more distinct filename.')
class CShowParser(object):
@staticmethod
def parse_drop_stats (query_response, interfaces_list):
res = {'total_drops' : 0}
response_lst = query_response.split('\r\n')
mtch_found = 0
for line in response_lst:
mtch = re.match("^\s*(\w+/\d/\d)\s+(\d+)\s+(\d+)", line)
if mtch:
mtch_found += 1
if (mtch.group(1) in interfaces_list):
res[mtch.group(1)] = (int(mtch.group(2)) + int(mtch.group(3)))
res['total_drops'] += (int(mtch.group(2)) + int(mtch.group(3)))
# if mtch_found == 0: # no matches found at all
# raise PlatformResponseMissmatch('Drop stats')
# else:
# return res
return res
    @staticmethod
    def parse_nbar_stats (query_response):
        """Parse NBAR protocol-discovery style output.

        Returns {'percentage': {...}, 'packets': {...}} where the packet
        value per protocol is the average of its in/out packet counts and
        percentages are normalized against the 'Total' row (assumes such a
        row is present in the output — KeyError otherwise).
        """
        response_lst = query_response.split('\r\n')
        stats = {}
        final_stats = {}
        mtch_found = 0
        for line in response_lst:
            mtch = re.match("\s*([\w-]+)\s*(\d+)\s*(\d+)\s+", line)
            if mtch:
                mtch_found += 1
                key = mtch.group(1)
                pkt_in = int(mtch.group(2))
                pkt_out = int(mtch.group(3))
                # NOTE(review): integer (truncating) division under Python 2
                # (shebang is python-2.7.4) — confirm that is intentional.
                avg_pkt_cnt = ( pkt_in + pkt_out )/2
                if avg_pkt_cnt == 0.0:
                    # escaping zero division case
                    continue
                if key in stats:
                    stats[key] += avg_pkt_cnt
                else:
                    stats[key] = avg_pkt_cnt
        # Normalize the results to percents
        for protocol in stats:
            protocol_norm_stat = int(stats[protocol]*10000/stats['Total'])/100.0 # round the result to x.xx format
            if (protocol_norm_stat != 0.0):
                final_stats[protocol] = protocol_norm_stat
        if mtch_found == 0: # no matches found at all
            raise PlatformResponseMissmatch('NBAR classification stats')
        else:
            return { 'percentage' : final_stats, 'packets' : stats }
@staticmethod
def parse_nat_stats (query_response):
response_lst = query_response.split('\r\n')
res = {}
mtch_found = 0
for line in response_lst:
mtch = re.match("Total (active translations):\s+(\d+).*(\d+)\s+static,\s+(\d+)\s+dynamic", line)
if mtch:
mtch_found += 1
res['total_active_trans'] = int(mtch.group(2))
res['static_active_trans'] = int(mtch.group(3))
res['dynamic_active_trans'] = int(mtch.group(4))
continue
mtch = re.match("(Hits):\s+(\d+)\s+(Misses):\s+(\d+)", line)
if mtch:
mtch_found += 1
res['num_of_hits'] = int(mtch.group(2))
res['num_of_misses'] = int(mtch.group(4))
if mtch_found == 0: # no matches found at all
raise PlatformResponseMissmatch('NAT translations stats')
else:
return res
@staticmethod
def parse_cpu_util_stats (query_response):
response_lst = query_response.split('\r\n')
res = { 'cpu0' : 0,
'cpu1' : 0 }
mtch_found = 0
for line in response_lst:
mtch = re.match("\W*Processing: Load\D*(\d+)\D*(\d+)\D*(\d+)\D*(\d+)\D*", line)
if mtch:
mtch_found += 1
res['cpu0'] += float(mtch.group(1))
res['cpu1'] += float(mtch.group(2))
if mtch_found == 0: # no matches found at all
raise PlatformResponseMissmatch('CPU utilization processing')
else:
res['cpu0'] = res['cpu0']/mtch_found
res['cpu1'] = res['cpu1']/mtch_found
return res
@staticmethod
def parse_cft_stats (query_response):
response_lst = query_response.split('\r\n')
res = {}
mtch_found = 0
for line in response_lst:
mtch = re.match("\W*(\w+)\W*([:]|[=])\W*(\d+)", line)
if mtch:
mtch_found += 1
res[ str( mix_string(m.group(1)) )] = float(m.group(3))
if mtch_found == 0: # no matches found at all
raise PlatformResponseMissmatch('CFT counters stats')
else:
return res
@staticmethod
def parse_cvla_memory_usage(query_response):
response_lst = query_response.split('\r\n')
res = {}
res2 = {}
cnt = 0
state = 0
name = ''
number = 0.0
for line in response_lst:
if state == 0:
mtch = re.match("\W*Entity name:\W*(\w[^\r\n]+)", line)
if mtch:
name = misc_methods.mix_string(mtch.group(1))
state = 1
cnt += 1
elif state == 1:
mtch = re.match("\W*Handle:\W*(\d+)", line)
if mtch:
state = state + 1
else:
state = 0;
elif state == 2:
mtch = re.match("\W*Number of allocations:\W*(\d+)", line)
if mtch:
state = state + 1
number=float(mtch.group(1))
else:
state = 0;
elif state == 3:
mtch = re.match("\W*Memory allocated:\W*(\d+)", line)
if mtch:
state = 0
res[name] = float(mtch.group(1))
res2[name] = number
else:
state = 0
if cnt == 0:
raise PlatformResponseMissmatch('CVLA memory usage stats')
return (res,res2)
@staticmethod
def parse_show_image_version(query_response):
response_lst = query_response.split('\r\n')
res = {}
for line in response_lst:
mtch = re.match("System image file is \"(\w+):(.*/)?(.+)\"", line)
if mtch:
res['drive'] = mtch.group(1)
res['image'] = mtch.group(3)
return res
raise PlatformResponseMissmatch('Running image info')
@staticmethod
def parse_image_existence(query_response, img_name):
response_lst = query_response.split('\r\n')
cnt = 0
for line in response_lst:
regex = re.compile(".* (?!include) %s" % img_name )
mtch = regex.match(line)
if mtch:
cnt += 1
if cnt == 1:
return True
elif cnt > 1:
raise PlatformResponseAmbiguity('Image existence')
else:
return False
@staticmethod
def parse_file_copy (query_response):
rev_response_lst = reversed(query_response.split('\r\n'))
lines_parsed = 0
for line in rev_response_lst:
mtch = re.match("\[OK - (\d+) bytes\]", line)
if mtch:
return True
lines_parsed += 1
if lines_parsed > 5:
return False
return False
if __name__ == "__main__":
    # Module is import-only; there is no standalone CLI behavior.
    pass
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Authors:
# Nilesh Bhosale <nilesh.bhosale@in.ibm.com>
# Sasikanth Eda <sasikanth.eda@in.ibm.com>
"""
IBM NAS Volume Driver.
Currently, it supports the following IBM Storage Systems:
1. IBM Scale Out NAS (SONAS)
2. IBM Storwize V7000 Unified
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
"""
import os
import re
from oslo.config import cfg
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
from cinder.volume.drivers.nfs import nas_opts
from cinder.volume.drivers.san import san
VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class IBMNAS_NFSDriver(nfs.NfsDriver, san.SanDriver):
    """IBM NAS NFS based cinder driver.

    Creates file on NFS share for using it as block device on hypervisor.
    Version history:
    1.0.0 - Initial driver
    """

    driver_volume_type = 'nfs'
    VERSION = VERSION

    def __init__(self, execute=utils.execute, *args, **kwargs):
        self._context = None
        super(IBMNAS_NFSDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(nas_opts)
        # Mirror the nas_* options into the san_* options so that
        # san.SanDriver's SSH machinery uses the NAS endpoint/credentials.
        self.configuration.san_ip = self.configuration.nas_ip
        self.configuration.san_login = self.configuration.nas_login
        self.configuration.san_password = self.configuration.nas_password
        self.configuration.san_private_key = \
            self.configuration.nas_private_key
        self.configuration.san_ssh_port = self.configuration.nas_ssh_port

    def set_execute(self, execute):
        # NOTE(review): the ``execute`` argument is deliberately ignored here;
        # this driver always shells out via utils.execute.
        self._execute = utils.execute

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(IBMNAS_NFSDriver, self).do_setup(context)
        self._context = context

    def check_for_setup_error(self):
        """Ensure that the flags are set properly."""
        required_flags = ['nas_ip', 'nas_ssh_port', 'nas_login']
        for flag in required_flags:
            if not self.configuration.safe_get(flag):
                raise exception.InvalidInput(reason=_('%s is not set') % flag)

        # Ensure that either password or keyfile were set
        if not (self.configuration.nas_password or
                self.configuration.nas_private_key):
            raise exception.InvalidInput(
                reason=_('Password or SSH private key is required for '
                         'authentication: set either nas_password or '
                         'nas_private_key option'))

    def _get_provider_location(self, volume_id):
        """Returns provider location for given volume."""
        LOG.debug("Enter _get_provider_location: volume_id %s" % volume_id)
        volume = self.db.volume_get(self._context, volume_id)
        LOG.debug("Exit _get_provider_location")
        return volume['provider_location']

    def _get_export_path(self, volume_id):
        """Returns NFS export path for the given volume."""
        LOG.debug("Enter _get_export_path: volume_id %s" % volume_id)
        # provider_location has the form '<host>:<export_path>'.
        return self._get_provider_location(volume_id).split(':')[1]

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Enter _update_volume_stats")
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or 'IBMNAS_NFS'
        data['vendor_name'] = 'IBM'
        data['driver_version'] = self.get_version()
        data['storage_protocol'] = self.driver_volume_type

        self._ensure_shares_mounted()

        # Aggregate capacity over all mounted shares (values in bytes).
        global_capacity = 0
        global_free = 0
        for share in self._mounted_shares:
            capacity, free, _used = self._get_capacity_info(share)
            global_capacity += capacity
            global_free += free

        data['total_capacity_gb'] = global_capacity / float(units.Gi)
        data['free_capacity_gb'] = global_free / float(units.Gi)
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        self._stats = data
        LOG.debug("Exit _update_volume_stats")

    def _create_ibmnas_snap(self, src, dest, mount_path):
        """Create volume clones and snapshots."""
        LOG.debug("Enter _create_ibmnas_snap: src %(src)s, dest %(dest)s"
                  % {'src': src, 'dest': dest})
        if mount_path is not None:
            # Snapshot case: clone via a temporary '.snap' file which is
            # removed locally afterwards.
            tmp_file_path = dest + '.snap'
            ssh_cmd = ['mkclone', '-p', dest, '-s', src, '-t', tmp_file_path]
            try:
                self._run_ssh(ssh_cmd)
            except processutils.ProcessExecutionError as e:
                msg = (_("Failed in _create_ibmnas_snap during "
                         "create_snapshot. Error: %s") % e.stderr)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            #Now remove the tmp file
            tmp_file_local_path = os.path.join(mount_path,
                                               os.path.basename(tmp_file_path))
            self._execute('rm', '-f', tmp_file_local_path, run_as_root=True)
        else:
            # Volume-from-snapshot case: plain clone, no temp file.
            ssh_cmd = ['mkclone', '-s', src, '-t', dest]
            try:
                self._run_ssh(ssh_cmd)
            except processutils.ProcessExecutionError as e:
                msg = (_("Failed in _create_ibmnas_snap during "
                         "create_volume_from_snapshot. Error: %s") % e.stderr)
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug("Exit _create_ibmnas_snap")

    def _create_ibmnas_copy(self, src, dest, snap):
        """Create a cloned volume, parent & the clone both remain writable."""
        LOG.debug('Enter _create_ibmnas_copy: src %(src)s, dest %(dest)s, '
                  'snap %(snap)s' % {'src': src,
                                     'dest': dest,
                                     'snap': snap})
        ssh_cmd = ['mkclone', '-p', snap, '-s', src, '-t', dest]
        try:
            self._run_ssh(ssh_cmd)
        except processutils.ProcessExecutionError as e:
            msg = (_("Failed in _create_ibmnas_copy. Error: %s") % e.stderr)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug("Exit _create_ibmnas_copy")

    def _resize_volume_file(self, path, new_size):
        """Resize the image file on share to new size."""
        LOG.info(_('Resizing file to %sG'), new_size)
        try:
            image_utils.resize_image(path, new_size, run_as_root=True)
        except processutils.ProcessExecutionError as e:
            msg = (_("Failed to resize volume "
                     "%(volume_id)s, error: %(error)s") %
                   {'volume_id': os.path.basename(path).split('-')[1],
                    'error': e.stderr})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return True

    def extend_volume(self, volume, new_size):
        """Extend an existing volume to the new size."""
        LOG.info(_('Extending volume %s.'), volume['name'])
        path = self.local_path(volume)
        self._resize_volume_file(path, new_size)

    def _delete_snapfiles(self, fchild, mount_point):
        """Delete a clone file and, recursively, its snapshot parents."""
        LOG.debug('Enter _delete_snapfiles: fchild %(fchild)s, '
                  'mount_point %(mount_point)s'
                  % {'fchild': fchild,
                     'mount_point': mount_point})
        ssh_cmd = ['lsclone', fchild]
        try:
            (out, _err) = self._run_ssh(ssh_cmd, check_exit_code=False)
        except processutils.ProcessExecutionError as e:
            msg = (_("Failed in _delete_snapfiles. Error: %s") % e.stderr)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Resolve the parent file (if any) via its inode number from the
        # lsclone output.
        fparent = None
        reInode = re.compile(
            r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
        match = reInode.match(out)
        if match:
            inode = match.group('inode')
            path = mount_point
            (out, _err) = self._execute('find', path, '-maxdepth', '1',
                                        '-inum', inode, run_as_root=True)
            if out:
                fparent = out.split('\n', 1)[0]
        fchild_local_path = os.path.join(mount_point, os.path.basename(fchild))
        self._execute(
            'rm', '-f', fchild_local_path, check_exit_code=False,
            run_as_root=True)

        # There is no need to check for volume references on this snapshot
        # because 'rm -f' itself serves as a simple and implicit check. If the
        # parent is referenced by another volume, system doesn't allow deleting
        # it. 'rm -f' silently fails and the subsequent check on the path
        # indicates whether there are any volumes derived from that snapshot.
        # If there are such volumes, we quit recursion and let the other
        # volumes delete the snapshot later. If there are no references, rm
        # would succeed and the snapshot is deleted.
        if not os.path.exists(fchild) and fparent:
            fpbase = os.path.basename(fparent)
            if (fpbase.endswith('.ts') or fpbase.endswith('.snap')):
                fparent_remote_path = os.path.join(os.path.dirname(fchild),
                                                   fpbase)
                self._delete_snapfiles(fparent_remote_path, mount_point)
        LOG.debug("Exit _delete_snapfiles")

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        if not volume['provider_location']:
            LOG.warn(_('Volume %s does not have provider_location specified, '
                       'skipping.'), volume['name'])
            return
        export_path = self._get_export_path(volume['id'])
        volume_name = volume['name']
        volume_path = os.path.join(export_path, volume_name)
        mount_point = os.path.dirname(self.local_path(volume))

        # Delete all dependent snapshots, the snapshot will get deleted
        # if the link count goes to zero, else rm will fail silently
        self._delete_snapfiles(volume_path, mount_point)

    def create_snapshot(self, snapshot):
        """Creates a volume snapshot."""
        export_path = self._get_export_path(snapshot['volume_id'])
        snapshot_path = os.path.join(export_path, snapshot['name'])
        volume_path = os.path.join(export_path, snapshot['volume_name'])
        nfs_share = self._get_provider_location(snapshot['volume_id'])
        mount_path = self._get_mount_point_for_share(nfs_share)
        self._create_ibmnas_snap(src=volume_path, dest=snapshot_path,
                                 mount_path=mount_path)

    def delete_snapshot(self, snapshot):
        """Deletes a volume snapshot."""
        # A snapshot file is deleted as a part of delete_volume when
        # all volumes derived from it are deleted.

        # Rename the deleted snapshot to indicate it no longer exists in
        # cinder db. Attempt to delete the snapshot. If the snapshot has
        # clone children, the delete will fail silently. When volumes that
        # are clone children are deleted in the future, the remaining ts
        # snapshots will also be deleted.
        nfs_share = self._get_provider_location(snapshot['volume_id'])
        mount_path = self._get_mount_point_for_share(nfs_share)
        snapshot_path = os.path.join(mount_path, snapshot['name'])
        snapshot_ts_path = '%s.ts' % snapshot_path
        self._execute('mv', '-f', snapshot_path, snapshot_ts_path,
                      check_exit_code=True, run_as_root=True)
        self._execute('rm', '-f', snapshot_ts_path,
                      check_exit_code=False, run_as_root=True)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from an existing volume snapshot.

        Extends the volume if the volume size is more than the snapshot size.
        """
        export_path = self._get_export_path(snapshot['volume_id'])
        # Bug fix: use dict-style access (snapshot['name']) for consistency
        # with the rest of the driver; attribute access fails for plain
        # dict snapshots.
        snapshot_path = os.path.join(export_path, snapshot['name'])
        volume_path = os.path.join(export_path, volume['name'])
        self._create_ibmnas_snap(snapshot_path, volume_path, None)

        volume['provider_location'] = self._find_share(volume['size'])
        volume_path = self.local_path(volume)
        self._set_rw_permissions_for_all(volume_path)

        #Extend the volume if required
        self._resize_volume_file(volume_path, volume['size'])
        return {'provider_location': volume['provider_location']}

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        Extends the volume if the new volume size is more than
        the source volume size.
        """
        export_path = self._get_export_path(src_vref['id'])
        src_vol_path = os.path.join(export_path, src_vref['name'])
        dest_vol_path = os.path.join(export_path, volume['name'])
        snap_file_name = volume['name']
        snap_file_name = snap_file_name + '.snap'
        snap_file_path = os.path.join(export_path, snap_file_name)
        self._create_ibmnas_copy(src_vol_path, dest_vol_path, snap_file_path)

        volume['provider_location'] = self._find_share(volume['size'])
        volume_path = self.local_path(volume)
        self._set_rw_permissions_for_all(volume_path)

        #Extend the volume if required
        self._resize_volume_file(volume_path, volume['size'])
        return {'provider_location': volume['provider_location']}
| |
# -*- coding: utf-8 -*-
# File: common.py
from __future__ import division
import six
import numpy as np
from copy import copy
import pprint
import itertools
from termcolor import colored
from collections import deque, defaultdict
from six.moves import range, map
import tqdm
from .base import DataFlow, ProxyDataFlow, RNGDataFlow, DataFlowReentrantGuard
from ..utils import logger
from ..utils.utils import get_tqdm, get_rng, get_tqdm_kwargs
from ..utils.develop import log_deprecated
__all__ = ['TestDataSpeed', 'PrintData', 'BatchData', 'BatchDataByShape', 'FixedSizeData', 'MapData',
'MapDataComponent', 'RepeatedData', 'RepeatedDataPoint', 'RandomChooseData',
'RandomMixData', 'JoinData', 'ConcatData', 'SelectComponent',
'LocallyShuffleData', 'CacheData']
class TestDataSpeed(ProxyDataFlow):
    """ Benchmark the speed of a DataFlow, then act as a pass-through. """
    def __init__(self, ds, size=5000, warmup=0):
        """
        Args:
            ds (DataFlow): the DataFlow to test.
            size (int): number of datapoints to fetch.
            warmup (int): warmup iterations
        """
        super(TestDataSpeed, self).__init__(ds)
        self.test_size = int(size)
        self.warmup = int(warmup)

    def __iter__(self):
        """ Will run testing at the beginning, then produce data normally. """
        self.start_test()
        for datapoint in self.ds:
            yield datapoint

    def start_test(self):
        """
        Start testing with a progress bar.
        """
        self.ds.reset_state()
        iterator = self.ds.__iter__()
        if self.warmup > 0:
            for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
                next(iterator)
        # add smoothing for speed benchmark
        with get_tqdm(total=self.test_size,
                      leave=True, smoothing=0.2) as pbar:
            for consumed, _dp in enumerate(iterator):
                pbar.update()
                if consumed == self.test_size - 1:
                    break

    def start(self):
        """
        Alias of start_test.
        """
        self.start_test()
class BatchData(ProxyDataFlow):
    """
    Stack datapoints into batches.
    It produces datapoints of the same number of components as ``ds``, but
    each component has one new extra dimension of size ``batch_size``.
    The batch can be either a list of original components, or (by default)
    a numpy array of original components.
    """
    def __init__(self, ds, batch_size, remainder=False, use_list=False):
        """
        Args:
            ds (DataFlow): When ``use_list=False``, the components of ``ds``
                must be either scalars or :class:`np.ndarray`, and have to be consistent in shapes.
            batch_size(int): batch size
            remainder (bool): When the remaining datapoints in ``ds`` is not
                enough to form a batch, whether or not to also produce the remaining
                data as a smaller batch.
                If set to False, all produced datapoints are guaranteed to have the same batch size.
                If set to True, `len(ds)` must be accurate.
            use_list (bool): if True, each component will contain a list
                of datapoints instead of an numpy array of an extra dimension.
        """
        super(BatchData, self).__init__(ds)
        if not remainder:
            try:
                # Sanity check: at least one full batch must be producible.
                # Dataflows without __len__() raise NotImplementedError, in
                # which case the check is skipped.
                assert batch_size <= len(ds)
            except NotImplementedError:
                pass
        self.batch_size = int(batch_size)
        self.remainder = remainder
        self.use_list = use_list

    def __len__(self):
        # Number of full batches, plus one partial batch when remainder
        # is enabled and the size doesn't divide evenly.
        ds_size = len(self.ds)
        div = ds_size // self.batch_size
        rem = ds_size % self.batch_size
        if rem == 0:
            return div
        return div + int(self.remainder)

    def __iter__(self):
        """
        Yields:
            Batched data by stacking each component on an extra 0th dimension.
        """
        holder = []
        for data in self.ds:
            holder.append(data)
            if len(holder) == self.batch_size:
                yield BatchData._aggregate_batch(holder, self.use_list)
                del holder[:]
        # Leftover datapoints form a smaller final batch if requested.
        if self.remainder and len(holder) > 0:
            yield BatchData._aggregate_batch(holder, self.use_list)

    @staticmethod
    def _aggregate_batch(data_holder, use_list=False):
        # data_holder: list of datapoints (each an indexable sequence of
        # components).  Returns one datapoint whose k-th component batches
        # the k-th components of all inputs.
        size = len(data_holder[0])
        result = []
        for k in range(size):
            if use_list:
                # No stacking: just collect the raw components in a list.
                result.append(
                    [x[k] for x in data_holder])
            else:
                # Choose a target dtype from the first datapoint's component:
                # Python ints/bools -> int32, Python floats -> float32,
                # otherwise use the object's own .dtype (e.g. ndarray).
                dt = data_holder[0][k]
                if type(dt) in list(six.integer_types) + [bool]:
                    tp = 'int32'
                elif type(dt) == float:
                    tp = 'float32'
                else:
                    try:
                        tp = dt.dtype
                    except AttributeError:
                        raise TypeError("Unsupported type to batch: {}".format(type(dt)))
                try:
                    result.append(
                        np.asarray([x[k] for x in data_holder], dtype=tp))
                except Exception as e:  # noqa
                    # NOTE(review): on failure this component is logged and
                    # then dropped from ``result`` — the yielded batch will
                    # have fewer components than the input datapoints.
                    logger.exception("Cannot batch data. Perhaps they are of inconsistent shape?")
                    if isinstance(dt, np.ndarray):
                        s = pprint.pformat([x[k].shape for x in data_holder])
                        logger.error("Shape of all arrays to be batched: " + s)
                    try:
                        # open an ipython shell if possible
                        import IPython as IP; IP.embed()    # noqa
                    except ImportError:
                        pass
        return result
class BatchDataByShape(BatchData):
    """
    Group datapoints of the same shape together to batches.
    It doesn't require input DataFlow to be homogeneous anymore: it can have
    datapoints of different shape, and batches will be formed from those who
    have the same shape.
    Note:
        It is implemented by a dict{shape -> datapoints}.
        Datapoints of uncommon shapes may never be enough to form a batch and
        never get generated.
    """
    def __init__(self, ds, batch_size, idx):
        """
        Args:
            ds (DataFlow): input DataFlow. ``dp[idx]`` has to be an :class:`np.ndarray`.
            batch_size (int): batch size
            idx (int): ``dp[idx].shape`` will be used to group datapoints.
                Other components are assumed to have the same shape.
        """
        super(BatchDataByShape, self).__init__(ds, batch_size, remainder=False)
        self.idx = idx
        self._guard = DataFlowReentrantGuard()

    def reset_state(self):
        super(BatchDataByShape, self).reset_state()
        # One bucket of pending datapoints per distinct shape.
        self.holder = defaultdict(list)

    def __iter__(self):
        with self._guard:
            for datapoint in self.ds:
                shape_key = datapoint[self.idx].shape
                bucket = self.holder[shape_key]
                bucket.append(datapoint)
                if len(bucket) == self.batch_size:
                    yield BatchData._aggregate_batch(bucket)
                    del bucket[:]
class FixedSizeData(ProxyDataFlow):
    """ Generate data from another DataFlow, but with a fixed total count.
    """
    def __init__(self, ds, size, keep_state=True):
        """
        Args:
            ds (DataFlow): input dataflow
            size (int): size
            keep_state (bool): keep the iterator state of ``ds``
                between calls to :meth:`__iter__()`, so that the
                next call will continue the previous iteration over ``ds``,
                instead of reinitializing an iterator.
        Example:
        .. code-block:: none
            ds produces: 1, 2, 3, 4, 5; 1, 2, 3, 4, 5; ...
            FixedSizeData(ds, 3, True): 1, 2, 3; 4, 5, 1; 2, 3, 4; ...
            FixedSizeData(ds, 3, False): 1, 2, 3; 1, 2, 3; ...
            FixedSizeData(ds, 6, False): 1, 2, 3, 4, 5, 1; 1, 2, 3, 4, 5, 1;...
        """
        super(FixedSizeData, self).__init__(ds)
        self._size = int(size)
        self.itr = None
        self._guard = DataFlowReentrantGuard()
        self._keep = keep_state

    def __len__(self):
        return self._size

    def reset_state(self):
        super(FixedSizeData, self).reset_state()
        self.itr = self.ds.__iter__()

    def __iter__(self):
        with self._guard:
            if self.itr is None:
                self.itr = self.ds.__iter__()
            produced = 0
            while True:
                try:
                    dp = next(self.itr)
                except StopIteration:
                    # Underlying dataflow exhausted mid-epoch: restart it
                    # transparently and keep producing.
                    self.itr = self.ds.__iter__()
                    dp = next(self.itr)
                produced += 1
                yield dp
                if produced == self._size:
                    # Optionally forget the iterator so the next epoch
                    # starts over from the beginning of ``ds``.
                    if not self._keep:
                        self.itr = None
                    return
class MapData(ProxyDataFlow):
    """
    Apply a mapper/filter on the datapoints of a DataFlow.
    Note:
        1. Please make sure func doesn't modify its arguments in place,
           unless you're certain it's safe.
        2. If you discard some datapoints, ``len(ds)`` will be incorrect.
    Example:
    .. code-block:: none
        ds = Mnist('train')
        ds = MapData(ds, lambda dp: [dp[0] * 255, dp[1]])
    """
    def __init__(self, ds, func):
        """
        Args:
            ds (DataFlow): input DataFlow
            func (datapoint -> datapoint | None): takes a datapoint and returns a new
                datapoint. Return None to discard this datapoint.
        """
        super(MapData, self).__init__(ds)
        self.func = func

    def __iter__(self):
        for datapoint in self.ds:
            # Shallow-copy the list so the mapper cannot mutate the source.
            mapped = self.func(copy(datapoint))
            if mapped is None:
                continue
            yield mapped
class MapDataComponent(MapData):
    """
    Apply a mapper/filter on a datapoint component.
    Note:
        1. This dataflow itself doesn't modify the datapoints.
           But please make sure func doesn't modify its arguments in place,
           unless you're certain it's safe.
        2. If you discard some datapoints, ``len(ds)`` will be incorrect.
    Example:
    .. code-block:: none
        ds = Mnist('train')
        ds = MapDataComponent(ds, lambda img: img * 255, 0)
    """
    def __init__(self, ds, func, index=0):
        """
        Args:
            ds (DataFlow): input DataFlow which produces either list or dict.
            func (TYPE -> TYPE|None): takes ``dp[index]``, returns a new value for ``dp[index]``.
                return None to discard this datapoint.
            index (int or str): index or key of the component.
        """
        self._index = index
        self._func = func
        super(MapDataComponent, self).__init__(ds, self._mapper)

    def _mapper(self, dp):
        new_value = self._func(dp[self._index])
        if new_value is None:
            # Discard the whole datapoint.
            return None
        # Shallow-copy so the original datapoint is left untouched.
        patched = copy(dp)
        patched[self._index] = new_value
        return patched
class RepeatedData(ProxyDataFlow):
    """ Take data points from another DataFlow and produce them until
    it's exhausted for certain amount of times. i.e.:
    dp1, dp2, .... dpn, dp1, dp2, ....dpn
    """
    def __init__(self, ds, nr):
        """
        Args:
            ds (DataFlow): input DataFlow
            nr (int): number of times to repeat ds.
                Set to -1 to repeat ``ds`` infinite times.
        """
        self.nr = nr
        super(RepeatedData, self).__init__(ds)

    def __len__(self):
        """
        Raises:
            :class:`ValueError` when nr == -1.
        """
        if self.nr == -1:
            raise NotImplementedError("__len__() is unavailable for infinite dataflow")
        return len(self.ds) * self.nr

    def __iter__(self):
        # Finite case: a bounded number of full passes over ``ds``.
        if self.nr != -1:
            for _pass in range(self.nr):
                for datapoint in self.ds:
                    yield datapoint
            return
        # Infinite case: loop over ``ds`` forever.
        while True:
            for datapoint in self.ds:
                yield datapoint
class RepeatedDataPoint(ProxyDataFlow):
    """ Take data points from another DataFlow and produce them a
    certain number of times. i.e.:
    dp1, dp1, ..., dp1, dp2, ..., dp2, ...
    """
    def __init__(self, ds, nr):
        """
        Args:
            ds (DataFlow): input DataFlow
            nr (int): number of times to repeat each datapoint.
        """
        self.nr = int(nr)
        assert self.nr >= 1, self.nr
        super(RepeatedDataPoint, self).__init__(ds)

    def __len__(self):
        return len(self.ds) * self.nr

    def __iter__(self):
        for datapoint in self.ds:
            # Emit the same datapoint ``nr`` times in a row.
            for _repeat in range(self.nr):
                yield datapoint
class RandomChooseData(RNGDataFlow):
    """
    Randomly choose from several DataFlow.
    Stop producing when any of them is exhausted.
    """
    def __init__(self, df_lists):
        """
        Args:
            df_lists (list): a list of DataFlow, or a list of (DataFlow, probability) tuples.
                Probabilities must sum to 1 if used.
        """
        super(RandomChooseData, self).__init__()
        if isinstance(df_lists[0], (tuple, list)):
            # Explicit probabilities: they must form a distribution.
            assert sum([v[1] for v in df_lists]) == 1.0
            self.df_lists = df_lists
        else:
            # No probabilities given: choose uniformly.
            uniform = 1.0 / len(df_lists)
            self.df_lists = [(flow, uniform) for flow in df_lists]

    def reset_state(self):
        super(RandomChooseData, self).reset_state()
        for entry in self.df_lists:
            target = entry[0] if isinstance(entry, tuple) else entry
            target.reset_state()

    def __iter__(self):
        iterators = [pair[0].__iter__() for pair in self.df_lists]
        probs = np.array([pair[1] for pair in self.df_lists])
        try:
            while True:
                chosen = self.rng.choice(iterators, p=probs)
                yield next(chosen)
        except StopIteration:
            # Any exhausted dataflow ends the whole stream.
            return
class RandomMixData(RNGDataFlow):
    """
    Perfectly mix datapoints from several DataFlow using their
    :meth:`__len__()`. Will stop when all DataFlow exhausted.
    """
    def __init__(self, df_lists):
        """
        Args:
            df_lists (list): a list of DataFlow.
                All DataFlow must implement ``__len__()``.
        """
        super(RandomMixData, self).__init__()
        self.df_lists = df_lists
        self.sizes = [len(flow) for flow in self.df_lists]

    def reset_state(self):
        super(RandomMixData, self).reset_state()
        for flow in self.df_lists:
            flow.reset_state()

    def __len__(self):
        return sum(self.sizes)

    def __iter__(self):
        # Assign every global position to its owning dataflow, then shuffle
        # the positions to obtain a perfectly mixed read order.
        bounds = np.cumsum(self.sizes)
        order = np.arange(self.__len__())
        self.rng.shuffle(order)
        owners = np.array([np.searchsorted(bounds, pos, 'right') for pos in order])
        iterators = [flow.__iter__() for flow in self.df_lists]
        assert owners.max() == len(iterators) - 1, "{}!={}".format(owners.max(), len(iterators) - 1)
        for which in owners:
            yield next(iterators[which])
# TODO run till exception
class ConcatData(DataFlow):
    """
    Concatenate several DataFlow.
    Produce datapoints from each DataFlow and go to the next when one
    DataFlow is exhausted.
    """
    def __init__(self, df_lists):
        """
        Args:
            df_lists (list): a list of DataFlow.
        """
        self.df_lists = df_lists

    def reset_state(self):
        for flow in self.df_lists:
            flow.reset_state()

    def __len__(self):
        # Total size is the sum over all constituent dataflows.
        return sum(len(flow) for flow in self.df_lists)

    def __iter__(self):
        # Drain each dataflow fully before moving on to the next one.
        for flow in self.df_lists:
            for datapoint in flow.__iter__():
                yield datapoint
class JoinData(DataFlow):
    """
    Join the components from each DataFlow.
    Example:
    .. code-block:: none
        df1 produces: [c1, c2]
        df2 produces: [c3, c4]
        joined: [c1, c2, c3, c4]
    """
    def __init__(self, df_lists):
        """
        Args:
            df_lists (list): a list of DataFlow.
                When these dataflows have different sizes, JoinData will stop when any
                of them is exhausted.
                The list could contain the same DataFlow instance more than once,
                but note that `__iter__` will then also be called many times.
        """
        self.df_lists = df_lists
        # Best-effort size check; dataflows may not implement __len__().
        try:
            self._size = len(self.df_lists[0])
            for flow in self.df_lists:
                assert len(flow) == self._size, \
                    "All DataFlow must have the same size! {} != {}".format(len(flow), self._size)
        except Exception:
            logger.info("[JoinData] Size check failed for the list of dataflow to be joined!")

    def reset_state(self):
        # A dataflow appearing multiple times is reset only once.
        for flow in set(self.df_lists):
            flow.reset_state()

    def __len__(self):
        """
        Return the minimum size among all.
        """
        return min([len(flow) for flow in self.df_lists])

    def __iter__(self):
        iterators = [flow.__iter__() for flow in self.df_lists]
        try:
            while True:
                joined = []
                for it in iterators:
                    joined.extend(next(it))
                yield joined
        except StopIteration:   # some of them are exhausted
            pass
def SelectComponent(ds, idxs):
    """
    Select / reorder components from datapoints.
    Args:
        ds (DataFlow): input DataFlow.
        idxs (list[int]): a list of component indices.
    Example:
    .. code-block:: none
        original df produces: [c1, c2, c3]
        idxs: [2,1]
        this df: [c3, c2]
    """
    def _select(dp):
        # New datapoint containing only the requested components, in order.
        return [dp[i] for i in idxs]
    return MapData(ds, _select)
class LocallyShuffleData(ProxyDataFlow, RNGDataFlow):
    """ Maintain a pool to buffer datapoints, and shuffle before producing them.
    This can be used as an alternative when a complete random read is too expensive
    or impossible for the data source.
    """
    def __init__(self, ds, buffer_size, nr_reuse=1, shuffle_interval=None):
        """
        Args:
            ds (DataFlow): input DataFlow.
            buffer_size (int): size of the buffer.
            nr_reuse (int): duplicate each datapoints several times into the buffer to improve
                speed, but may hurt your model.
            shuffle_interval (int): shuffle the buffer after this many
                datapoints were produced from the given dataflow. Frequent shuffle on large buffer
                may affect speed, but infrequent shuffle may affect
                randomness. Defaults to buffer_size / 3
        """
        ProxyDataFlow.__init__(self, ds)
        # Bounded pool; maxlen caps how many datapoints are buffered.
        self.q = deque(maxlen=buffer_size)
        if shuffle_interval is None:
            shuffle_interval = int(buffer_size // 3)
        self.shuffle_interval = shuffle_interval
        self.nr_reuse = nr_reuse
        self._guard = DataFlowReentrantGuard()

    def reset_state(self):
        # Both bases need resetting: ProxyDataFlow resets the wrapped ds,
        # RNGDataFlow reseeds self.rng.
        ProxyDataFlow.reset_state(self)
        RNGDataFlow.reset_state(self)
        # NOTE(review): current_cnt is initialized here but never read in
        # this class — presumably vestigial; confirm before removing.
        self.current_cnt = 0

    def __len__(self):
        return len(self.ds) * self.nr_reuse

    def __iter__(self):
        with self._guard:
            for i, dp in enumerate(self.ds):
                # fill queue
                # Periodically shuffle the pool in place.
                if i % self.shuffle_interval == 0:
                    self.rng.shuffle(self.q)
                # Warm-up phase: keep filling the pool (nr_reuse copies per
                # input) without producing anything yet.
                if self.q.maxlen > len(self.q):
                    self.q.extend([dp] * self.nr_reuse)
                    continue
                # Steady state: emit nr_reuse buffered datapoints, replacing
                # each with a copy of the incoming one.
                for _ in range(self.nr_reuse):
                    yield self.q.popleft()
                    self.q.append(dp)
class CacheData(ProxyDataFlow):
    """
    Cache the first pass of a DataFlow completely in memory,
    and produce from the cache thereafter.
    NOTE: The user should not stop the iterator before it has reached the end.
        Otherwise the cache may be incomplete.
    """
    def __init__(self, ds, shuffle=False):
        """
        Args:
            ds (DataFlow): input DataFlow.
            shuffle (bool): whether to shuffle the datapoints before producing them.
        """
        self.shuffle = shuffle
        self._guard = DataFlowReentrantGuard()
        super(CacheData, self).__init__(ds)

    def reset_state(self):
        super(CacheData, self).reset_state()
        if self.shuffle:
            self.rng = get_rng(self)
        self.buffer = []

    def __iter__(self):
        with self._guard:
            if len(self.buffer):
                # Subsequent epochs: serve entirely from the cache.
                if self.shuffle:
                    self.rng.shuffle(self.buffer)
                for cached in self.buffer:
                    yield cached
            else:
                # First epoch: pass datapoints through while filling the
                # cache (yield first, then record).
                for dp in self.ds:
                    yield dp
                    self.buffer.append(dp)
class PrintData(ProxyDataFlow):
    """
    Behave like an identity mapping, but print shape and range of the first few datapoints.
    Example:
        To enable this debugging output, you should place it somewhere in your dataflow like
        .. code-block:: python
            def __iter__():
                ds = SomeDataSource('path/to/lmdb')
                ds = SomeInscrutableMappings(ds)
                ds = PrintData(ds, num=2, max_list=2)
                return ds
            ds = __iter__()
        The output looks like:
        .. code-block:: none
            [0110 09:22:21 @common.py:589] DataFlow Info:
            datapoint 0<2 with 4 components consists of
               0: float with value 0.0816501893251
               1: ndarray:int32 of shape (64,) in range [0, 10]
               2: ndarray:float32 of shape (64, 64) in range [-1.2248, 1.2177]
               3: list of len 50
                  0: ndarray:int32 of shape (64, 64) in range [-128, 80]
                  1: ndarray:float32 of shape (64, 64) in range [0.8400, 0.6845]
                  ...
            datapoint 1<2 with 4 components consists of
               0: float with value 5.88252075399
               1: ndarray:int32 of shape (64,) in range [0, 10]
               2: ndarray:float32 of shape (64, 64) with range [-0.9011, 0.8491]
               3: list of len 50
                  0: ndarray:int32 of shape (64, 64) in range [-70, 50]
                  1: ndarray:float32 of shape (64, 64) in range [0.7400, 0.3545]
                  ...
    """
    def __init__(self, ds, num=1, label=None, name=None, max_depth=3, max_list=3):
        """
        Args:
            ds (DataFlow): input DataFlow.
            num (int): number of dataflow points to print.
            name (str, optional): name to identify this DataFlow.
            max_depth (int, optional): stop output when too deep recursion in sub elements
            max_list (int, optional): stop output when too many sub elements
        """
        super(PrintData, self).__init__(ds)
        self.num = num
        if label:
            # `label` is the deprecated spelling of `name`; keep honoring it.
            log_deprecated("PrintData(label, ...", "Use PrintData(name, ... instead.", "2018-05-01")
            self.name = label
        else:
            self.name = name
        self.cnt = 0  # number of datapoints produced so far (first `num` are printed)
        self.max_depth = max_depth
        self.max_list = max_list
    def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3):
        """
        Gather useful debug information from a datapoint.
        Args:
            entry: the datapoint component
            k (int): index of this component in current datapoint
            depth (int, optional): recursion depth
            max_depth, max_list: same as in :meth:`__init__`.
        Returns:
            string: debug message
        """
        # Recursive summarizer for one component. NOTE: `max_depth` is read
        # inside _elementInfo via closure over this call's argument; only
        # `max_list` is passed down explicitly on recursion.
        class _elementInfo(object):
            def __init__(self, el, pos, depth=0, max_list=3):
                self.shape = ""
                self.type = type(el).__name__
                self.dtype = ""
                self.range = ""
                self.sub_elements = []
                # indentation grows two spaces per nesting level
                self.ident = " " * (depth * 2)
                self.pos = pos
                # every concrete numpy scalar type, flattened into one list
                numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))
                if isinstance(el, (int, float, bool)):
                    self.range = " with value {}".format(el)
                elif type(el) is np.ndarray:
                    self.shape = " of shape {}".format(el.shape)
                    self.dtype = ":{}".format(str(el.dtype))
                    self.range = " in range [{}, {}]".format(el.min(), el.max())
                elif type(el) in numpy_scalar_types:
                    self.range = " with value {}".format(el)
                elif isinstance(el, (list)):
                    self.shape = " of len {}".format(len(el))
                    if depth < max_depth:
                        # summarize at most `max_list` children, then ellipsis
                        for k, subel in enumerate(el):
                            if k < max_list:
                                self.sub_elements.append(_elementInfo(subel, k, depth + 1, max_list))
                            else:
                                self.sub_elements.append(" " * ((depth + 1) * 2) + '...')
                                break
                    else:
                        # too deep: only hint that there are more elements
                        if len(el) > 0:
                            self.sub_elements.append(" " * ((depth + 1) * 2) + ' ...')
            def __str__(self):
                strings = []
                vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)
                strings.append("{}{}: {}{}{}{}".format(*vals))
                for k, el in enumerate(self.sub_elements):
                    strings.append(str(el))
                return "\n".join(strings)
        return str(_elementInfo(entry, k, depth, max_list))
    def _get_msg(self, dp):
        """Build the multi-line debug message for one datapoint `dp`."""
        msg = [u"datapoint %i<%i with %i components consists of" % (self.cnt, self.num, len(dp))]
        for k, entry in enumerate(dp):
            msg.append(self._analyze_input_data(entry, k, max_depth=self.max_depth, max_list=self.max_list))
        return u'\n'.join(msg)
    def __iter__(self):
        for dp in self.ds:
            # it is important to place this here! otherwise it mixes the output of multiple PrintData
            if self.cnt == 0:
                label = ' (%s)' % self.name if self.name is not None else ""
                logger.info(colored("DataFlow Info%s:" % label, 'cyan'))
            if self.cnt < self.num:
                print(self._get_msg(dp))
                self.cnt += 1
            yield dp
    def reset_state(self):
        """Reset the wrapped DataFlow and re-arm the debug printing counter."""
        super(PrintData, self).reset_state()
        self.cnt = 0
| |
import os.path
import itertools as it
import yaml
import re
import markdown
from jinja2 import FileSystemLoader, Environment
from flask import current_app, request, jsonify
from flask_wtf import Form
from flaskext.uploads import UploadSet, IMAGES, AllExcept, EXECUTABLES
from wtforms import TextAreaField, FileField
from wtforms.validators import DataRequired
# Upload set restricted to image files only.
image_uploader = UploadSet('images',
                           extensions=IMAGES)
# Generic upload set: any extension except images and executables.
file_uploader = UploadSet('files', extensions=AllExcept(IMAGES + EXECUTABLES))
class Module(object):
    """Base class for site modules.

    Subclasses override the class attributes below and :meth:`module_pages`.
    Template environment, template globals, and the registered module factory
    are shared class-level state on ``Module`` itself.
    """
    display_name = 'A module'
    admin_menu_tag = '.'
    admin_menu_order = 0
    blueprint = None
    target_dir = '/'
    # Shared Jinja2 environment, created lazily on first render().
    _template_env = None
    # Functions exposed to every template, keyed by function name.
    _global_tmpl_funcs = dict()
    # Hook installed via register_modules(); defaults to a no-op.
    _registered_module_func = staticmethod(lambda :None)
    @staticmethod
    def render(tmpl_name, **tmpl_vars):
        """Render *tmpl_name* with the registered template globals plus *tmpl_vars*."""
        if not Module._template_env:
            # Lazily build the environment from the app's TEMPLATE_DIR config.
            Module._template_env = Environment(loader=FileSystemLoader(current_app.config['TEMPLATE_DIR']))
        tmpl = Module._template_env.get_template(tmpl_name)
        # Explicit tmpl_vars override globals of the same name.
        tmpl_vars_ = dict(Module._global_tmpl_funcs)
        tmpl_vars_.update(tmpl_vars)
        return tmpl.render(**tmpl_vars_)
    @staticmethod
    def module_pages():
        """Pages contributed by this module; subclasses override."""
        return []
    @classmethod
    def build_dest(cls):
        """Output directory for this module under the app's BUILD_DEST."""
        return os.path.join(current_app.config['BUILD_DEST'], cls.target_dir.lstrip('/'))
    @staticmethod
    def add_template_global(the_global):
        """Decorator: expose *the_global* to all templates under its own name."""
        Module._global_tmpl_funcs[the_global.__name__] = the_global
        return the_global
    @staticmethod
    def register_modules(module_func):
        """Decorator: record *module_func* as the module-list factory."""
        Module._registered_module_func = staticmethod(module_func)
        return module_func
class Page(object):
    """A navigation entry: a human-readable name plus the URL it points to."""
    def __init__(self, display_name, url):
        # Both values are stored verbatim; no normalization is applied.
        self.url = url
        self.display_name = display_name
def simple_json_endpoint(pth):
    """GET/POST JSON endpoint for the flat file at *pth*.

    GET returns the current file content as form data; POST validates the
    submitted form and saves it. Responds 403 when the form is invalid,
    500 on I/O failure or for any other request method.
    """
    try:
        if request.method == 'GET':
            form = SimpleContentForm(obj=FlatFile.load(pth))
            return jsonify(form.data)
        elif request.method == 'POST':
            form = SimpleContentForm.from_json(request.json)
            if form.validate():
                FlatFile(form.content.data).save(pth)
                return jsonify({'status': 'success', 'form': form.data})
            else:
                return '', 403
    except IOError:
        return 'I/O error', 500
    # Reached only for request methods other than GET/POST.
    return '', 500
def create_markdown_content(form, markdown_content):
    """Validate *form* and create *markdown_content* on disk.

    Returns a JSON success response including the new slug, or an
    (error message, HTTP status) tuple on failure.
    """
    # Guard clause: reject invalid forms up front.
    if not form.validate():
        return 'Form invalid', 403
    try:
        markdown_content.create()
    except IOError:
        return 'I/O error', 500
    except ContentExistsException:
        return 'Page already exists', 409
    rv = form.data
    rv['slug'] = markdown_content.slug
    return jsonify({'status': 'success', 'form': rv})
def save_markdown_content(form, markdown_content):
    """Validate *form* and save the already-existing *markdown_content*.

    Returns a JSON success response, or an (error message, HTTP status)
    tuple on failure.
    """
    # Guard clause: reject invalid forms up front.
    if not form.validate():
        return 'Form invalid', 403
    try:
        markdown_content.save()
    except IOError:
        return 'I/O error', 500
    except ContentDoesNotExistsException:
        return 'Page does not exists', 409
    return jsonify({'status': 'success', 'form': form.data})
class SimpleContentForm(Form):
    """Single-field form for editing a flat file's raw content."""
    content = TextAreaField('Content', validators=[DataRequired()])
class UploadForm(Form):
    """Single-field form for uploading one file."""
    file= FileField('File', validators=[DataRequired()])
class FlatFile(object):
    """In-memory representation of a flat file's raw content."""
    def __init__(self, filecontent):
        self.content = filecontent
    @staticmethod
    def load(abs_path):
        """Read the file at *abs_path* (resolved via the Flask app) into a FlatFile."""
        with current_app.open_resource(abs_path) as resource:
            data = resource.read()
        return FlatFile(data)
    def save(self, abs_path):
        """Write this object's content to *abs_path*, replacing the file."""
        with open(abs_path, 'w') as out:
            out.write(self.content)
class ContentExistsException(Exception):
    """Raised by create() when the backing file already exists."""
    pass
class ContentDoesNotExistsException(Exception):
    """Raised by save() when the backing file does not exist."""
    pass
class InvalidMetaException(Exception):
    """Raised when a file's YAML front matter cannot be parsed or is empty."""
    pass
class MarkdownContent(object):
    """Content stored on disk as YAML front matter followed by a Markdown body.

    Subclasses must define ``root_path_config`` (a key into the Flask config
    naming the storage directory — presumably; confirm against subclasses)
    and may override ``rel_path``.
    """
    rel_path = '/'
    def __init__(self, slug, markdown, **meta):
        """
        :param slug: URL-safe identifier; also the on-disk file name.
        :param markdown: raw Markdown body (the parameter shadows the
            module-level ``markdown`` package inside this method only).
        :param meta: arbitrary front-matter fields; kept in ``self.meta``
            and also exposed as instance attributes.
        """
        self.slug = slug
        self.content = markdown
        self.meta = meta
        self.__dict__.update(meta)
    @property
    def html(self):
        """Markdown body rendered to HTML."""
        return markdown.markdown(self.content)
    @classmethod
    def path(cls, slug):
        """Absolute path of the file backing *slug*."""
        return os.path.join(current_app.config[cls.root_path_config], slug if slug else '')
    @classmethod
    def load(cls, slug):
        """ Load markdown content from disk.
        :param slug: The slug identifying the content
        :return: The MarkdownContent object
        :raises InvalidMetaException: if the content could not be parsed
        """
        with open(cls.path(slug)) as f:
            meta, content = MarkdownContent.split_matters(f.read())
        return cls(slug, content, **meta)
    @classmethod
    def delete(cls, slug):
        """Remove the backing file of *slug* from disk."""
        os.remove(cls.path(slug))
    @property
    def url(self):
        """Public URL of this content: ``rel_path`` joined with the slug."""
        return '%s/%s' % (self.rel_path, self.slug)
    def create(self):
        """ Save the content represented by this object iff it does not yet exists
        :raises:
            ContentExistsException if the file (as identified by this object's slug) already exists
        """
        if os.path.exists(self.path(self.slug)):
            raise ContentExistsException()
        else:
            self._save()
    def save(self):
        """ Save the content represented by this object iff it already exists
        :raises:
            ContentDoesNotExistsException if the file (as identified by this object's slug) does not exist
        """
        if not os.path.exists(self.path(self.slug)):
            raise ContentDoesNotExistsException()
        self._save()
    def _save(self):
        # Serialize front matter + body and (over)write the backing file.
        out = MarkdownContent.join_matters(self.meta, self.content)
        with open(self.path(self.slug), 'w') as f:
            f.write(out)
    @staticmethod
    def slug(title):
        """Derive a URL-safe slug from *title* (lowercase, hyphen-separated).

        NOTE: instances shadow this staticmethod with their ``slug`` string
        attribute, so call it on the class: ``MarkdownContent.slug(title)``.
        """
        return re.sub('[^a-z0-9 ]', '', title.lower()).replace(' ', '-')
    def __str__(self):
        return '%s\n\n%s' % (self.slug, self.content)
    @staticmethod
    def split_matters(s):
        """Split raw file text into ``(meta dict, markdown body)``.

        The front matter is the run of non-empty leading lines, parsed as
        YAML; the body is everything after the first blank line.
        :raises InvalidMetaException: if the front matter is not valid YAML
            or parses to an empty value.
        """
        lines = s.split('\n')
        frontmatter = '\n'.join(it.takewhile(lambda x: x, lines))
        content = '\n'.join(it.dropwhile(lambda x: x, lines)).strip('\n')
        try:
            # safe_load: front matter comes from on-disk files; never allow
            # arbitrary-object YAML tags. The previous `yaml.load` call was
            # unsafe, and `yaml.parser.ScannerError` does not exist
            # (ScannerError lives in yaml.scanner), so malformed YAML
            # crashed with AttributeError instead of InvalidMetaException.
            meta = yaml.safe_load(frontmatter)
        except yaml.YAMLError:
            raise InvalidMetaException()
        if not meta:
            raise InvalidMetaException()
        return meta, content
    @staticmethod
    def join_matters(meta, content):
        """Inverse of :meth:`split_matters`: serialize meta + body to one string."""
        frontmatter = yaml.safe_dump(meta, default_flow_style=False)
        return '%s\n\n%s' % (frontmatter.strip('\n'), content.strip('\n'))
| |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils.text import capfirst
from wagtail.admin.edit_handlers import FieldPanel, ObjectList, TabbedInterface
from wagtail.contrib.settings.registry import SettingMenuItem
from wagtail.contrib.settings.views import get_setting_edit_handler
from wagtail.core import hooks
from wagtail.core.models import Page, Site
from wagtail.tests.testapp.models import (
FileUploadSetting, IconSetting, PanelSettings, TabbedSettings, TestSetting)
from wagtail.tests.utils import WagtailTestUtils
class TestSettingMenu(TestCase, WagtailTestUtils):
    """Tests for the settings menu entry shown in the Wagtail admin."""
    def login_only_admin(self):
        """ Log in with a user that only has permission to access the admin """
        user = get_user_model().objects.create_user(
            username='test', email='test@email.com', password='password')
        user.user_permissions.add(Permission.objects.get_by_natural_key(
            codename='access_admin', app_label='wagtailadmin', model='admin'))
        self.assertTrue(self.client.login(username='test', password='password'))
        return user
    def test_menu_item_in_admin(self):
        # A fully-privileged user should see the setting's name and edit URL.
        self.login()
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertContains(response, capfirst(TestSetting._meta.verbose_name))
        self.assertContains(response, reverse('wagtailsettings:edit', args=('tests', 'testsetting')))
    def test_menu_item_no_permissions(self):
        # With admin access only, the setting menu entry must be hidden.
        self.login_only_admin()
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertNotContains(response, TestSetting._meta.verbose_name)
        self.assertNotContains(response, reverse('wagtailsettings:edit', args=('tests', 'testsetting')))
    def test_menu_item_icon(self):
        # A custom icon and extra classnames should both appear on the item.
        menu_item = SettingMenuItem(IconSetting, icon='tag', classnames='test-class')
        classnames = set(menu_item.classnames.split(' '))
        self.assertEqual(classnames, {'icon', 'icon-tag', 'test-class'})
class BaseTestSettingView(TestCase, WagtailTestUtils):
    """Shared helpers for requesting the settings edit view.

    Subclasses call :meth:`get`/:meth:`post` with an optional site primary
    key and setting model; URLs are built via :meth:`edit_url`.
    """
    def get(self, site_pk=1, params=None, setting=TestSetting):
        """GET the edit view; *params* become the query string."""
        # `params=None` replaces the original mutable default `params={}`,
        # which is shared across calls and an accident waiting to happen.
        url = self.edit_url(setting=setting, site_pk=site_pk)
        return self.client.get(url, params or {})
    def post(self, site_pk=1, post_data=None, setting=TestSetting):
        """POST *post_data* to the edit view."""
        # Same mutable-default fix as in get().
        url = self.edit_url(setting=setting, site_pk=site_pk)
        return self.client.post(url, post_data or {})
    def edit_url(self, setting, site_pk=1):
        """Build the edit URL from the setting's app label, model name, and site pk."""
        args = [setting._meta.app_label, setting._meta.model_name, site_pk]
        return reverse('wagtailsettings:edit', args=args)
class TestSettingCreateView(BaseTestSettingView):
    """Edit view behaviour when no setting instance exists yet (first save)."""
    def setUp(self):
        self.login()
    def test_get_edit(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        # there should be a menu item highlighted as active
        self.assertContains(response, "menu-active")
    def test_edit_invalid(self):
        # Posting only unknown fields leaves both required fields empty.
        response = self.post(post_data={'foo': 'bar'})
        self.assertContains(response, "The setting could not be saved due to errors.")
        self.assertContains(response, """<p class="error-message"><span>This field is required.</span></p>""",
                            count=2, html=True)
        self.assertContains(response, "This field is required", count=2)
    def test_edit(self):
        # A valid POST creates the setting for the default site and redirects.
        response = self.post(post_data={'title': 'Edited site title',
                                        'email': 'test@example.com'})
        self.assertEqual(response.status_code, 302)
        default_site = Site.objects.get(is_default_site=True)
        setting = TestSetting.objects.get(site=default_site)
        self.assertEqual(setting.title, 'Edited site title')
        self.assertEqual(setting.email, 'test@example.com')
    def test_file_upload_multipart(self):
        response = self.get(setting=FileUploadSetting)
        # Ensure the form supports file uploads
        self.assertContains(response, 'enctype="multipart/form-data"')
class TestSettingEditView(BaseTestSettingView):
    """Edit view behaviour when a setting instance already exists."""
    def setUp(self):
        # Pre-create a TestSetting for the default site, then log in.
        default_site = Site.objects.get(is_default_site=True)
        self.test_setting = TestSetting()
        self.test_setting.title = 'Site title'
        self.test_setting.email = 'initial@example.com'
        self.test_setting.site = default_site
        self.test_setting.save()
        self.login()
    def test_get_edit(self):
        response = self.get()
        self.assertEqual(response.status_code, 200)
        # there should be a menu item highlighted as active
        self.assertContains(response, "menu-active")
    def test_non_existant_model(self):
        # Unknown app/model names should 404 rather than error out.
        response = self.client.get(reverse('wagtailsettings:edit', args=['test', 'foo', 1]))
        self.assertEqual(response.status_code, 404)
    def test_edit_invalid(self):
        response = self.post(post_data={'foo': 'bar'})
        self.assertContains(response, "The setting could not be saved due to errors.")
        self.assertContains(response, """<p class="error-message"><span>This field is required.</span></p>""",
                            count=2, html=True)
        self.assertContains(response, "This field is required", count=2)
    def test_edit(self):
        response = self.post(post_data={'title': 'Edited site title',
                                        'email': 'test@example.com'})
        self.assertEqual(response.status_code, 302)
        default_site = Site.objects.get(is_default_site=True)
        setting = TestSetting.objects.get(site=default_site)
        self.assertEqual(setting.title, 'Edited site title')
        self.assertEqual(setting.email, 'test@example.com')
    def test_get_edit_current_site(self):
        # Without a site pk in the URL, redirect to the default site's URL.
        url = reverse('wagtailsettings:edit', args=('tests', 'testsetting'))
        default_site = Site.objects.get(is_default_site=True)
        response = self.client.get(url)
        self.assertRedirects(response, status_code=302, expected_url='%s%s/' % (url, default_site.pk))
    def test_get_edit_current_site_invalid(self):
        # With no sites at all, fall back to the admin home page.
        Site.objects.all().delete()
        url = reverse('wagtailsettings:edit', args=('tests', 'testsetting'))
        response = self.client.get(url)
        self.assertRedirects(response, status_code=302, expected_url='/admin/')
@override_settings(ALLOWED_HOSTS=['testserver', 'example.com', 'noneoftheabove.example.com'])
class TestMultiSite(BaseTestSettingView):
    """Edit view behaviour when several Sites exist (per-site settings)."""
    def setUp(self):
        # One default site plus a second site on example.com.
        self.default_site = Site.objects.get(is_default_site=True)
        self.other_site = Site.objects.create(hostname='example.com', root_page=Page.objects.get(pk=2))
        self.login()
    def test_redirect_to_default(self):
        """
        Should redirect to the setting for the default site.
        """
        start_url = reverse('wagtailsettings:edit', args=[
            'tests', 'testsetting'])
        dest_url = reverse('wagtailsettings:edit', args=[
            'tests', 'testsetting', self.default_site.pk])
        response = self.client.get(start_url, follow=True)
        self.assertRedirects(response, dest_url, status_code=302, fetch_redirect_response=False)
    def test_redirect_to_current(self):
        """
        Should redirect to the setting for the current site taken from the URL,
        by default
        """
        start_url = reverse('wagtailsettings:edit', args=[
            'tests', 'testsetting'])
        dest_url = reverse('wagtailsettings:edit', args=[
            'tests', 'testsetting', self.other_site.pk])
        response = self.client.get(start_url, follow=True, HTTP_HOST=self.other_site.hostname)
        self.assertRedirects(response, dest_url, status_code=302, fetch_redirect_response=False)
    def test_with_no_current_site(self):
        """
        Redirection should not break if the current request does not correspond to a site
        """
        self.default_site.is_default_site = False
        self.default_site.save()
        start_url = reverse('wagtailsettings:edit', args=[
            'tests', 'testsetting'])
        response = self.client.get(start_url, follow=True, HTTP_HOST="noneoftheabove.example.com")
        self.assertEqual(302, response.redirect_chain[0][1])
    def test_switcher(self):
        """ Check that the switcher form exists in the page """
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'id="settings-site-switch"')
    def test_unknown_site(self):
        """ Check that unknown sites throw a 404 """
        response = self.get(site_pk=3)
        self.assertEqual(response.status_code, 404)
    def test_edit(self):
        """
        Check that editing settings in multi-site mode edits the correct
        setting, and leaves the other ones alone
        """
        TestSetting.objects.create(
            title='default',
            email='default@example.com',
            site=self.default_site)
        TestSetting.objects.create(
            title='other',
            email='other@example.com',
            site=self.other_site)
        response = self.post(site_pk=self.other_site.pk, post_data={
            'title': 'other-new', 'email': 'other-other@example.com'})
        self.assertEqual(response.status_code, 302)
        # Check that the correct setting was updated
        other_setting = TestSetting.for_site(self.other_site)
        self.assertEqual(other_setting.title, 'other-new')
        self.assertEqual(other_setting.email, 'other-other@example.com')
        # Check that the other setting was not updated
        default_setting = TestSetting.for_site(self.default_site)
        self.assertEqual(default_setting.title, 'default')
        self.assertEqual(default_setting.email, 'default@example.com')
class TestAdminPermission(TestCase, WagtailTestUtils):
    """Check that settings register their change permission with Wagtail."""
    def test_registered_permission(self):
        permission = Permission.objects.get_by_natural_key(
            app_label='tests', model='testsetting', codename='change_testsetting')
        # for/else: the else branch fails the test only if no registered
        # hook yielded the permission.
        for fn in hooks.get_hooks('register_permissions'):
            if permission in fn():
                break
        else:
            self.fail('Change permission for tests.TestSetting not registered')
class TestEditHandlers(TestCase):
    """Introspection of the edit handlers generated for setting models."""
    def setUp(self):
        # The handler factory is cached (it exposes cache_clear); reset it
        # so each test builds a fresh handler.
        get_setting_edit_handler.cache_clear()
    def test_default_model_introspection(self):
        # With no panels declared, one FieldPanel per model field is built.
        handler = get_setting_edit_handler(TestSetting)
        self.assertIsInstance(handler, ObjectList)
        self.assertEqual(len(handler.children), 2)
        first = handler.children[0]
        self.assertIsInstance(first, FieldPanel)
        self.assertEqual(first.field_name, 'title')
        second = handler.children[1]
        self.assertIsInstance(second, FieldPanel)
        self.assertEqual(second.field_name, 'email')
    def test_with_custom_panels(self):
        # Declared panels restrict the handler to the listed fields.
        handler = get_setting_edit_handler(PanelSettings)
        self.assertIsInstance(handler, ObjectList)
        self.assertEqual(len(handler.children), 1)
        first = handler.children[0]
        self.assertIsInstance(first, FieldPanel)
        self.assertEqual(first.field_name, 'title')
    def test_with_custom_edit_handler(self):
        # A model-defined edit handler is used as-is.
        handler = get_setting_edit_handler(TabbedSettings)
        self.assertIsInstance(handler, TabbedInterface)
        self.assertEqual(len(handler.children), 2)
| |
from __future__ import absolute_import, division, print_function
from operator import getitem
import numpy as np
from .core import getarray, getarray_nofancy
from ..core import flatten
from ..optimize import cull, fuse, inline_functions
def optimize(dsk, keys, fuse_keys=None, fast_functions=None,
             inline_functions_fast_functions=None, **kwargs):
    """ Optimize dask for array computation
    1. Cull tasks not necessary to evaluate keys
    2. Remove full slicing, e.g. x[:]
    3. Inline fast functions like getitem and np.transpose
    """
    keys = list(flatten(keys))
    # `fast_functions` is a legacy alias that overrides the newer keyword.
    if fast_functions is not None:
        inline_functions_fast_functions = fast_functions
    if inline_functions_fast_functions is None:
        inline_functions_fast_functions = {getarray, getarray_nofancy,
                                           np.transpose}
    culled, dependencies = cull(dsk, keys)
    fused, dependencies = fuse(culled, keys + (fuse_keys or []), dependencies)
    sliced = optimize_slices(fused)
    return inline_functions(sliced, keys, dependencies=dependencies,
                            fast_functions=inline_functions_fast_functions)
def optimize_slices(dsk):
    """ Optimize slices
    1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]
    2. Remove full slices, like x[:] -> x
    See also:
        fuse_slice_dict
    """
    fancy_ind_types = (list, np.ndarray)
    getters = (getarray_nofancy, getarray, getitem)
    dsk = dsk.copy()
    for k, v in dsk.items():
        # Only rewrite tasks of the 3-tuple form (getter, source, index).
        if type(v) is tuple and v[0] in getters and len(v) == 3:
            f, a, a_index = v
            getter = f
            # Walk down chains of nested getter calls, fusing index pairs
            # bottom-up until fusion is not possible.
            while type(a) is tuple and a[0] in getters and len(a) == 3:
                f2, b, b_index = a
                # Mixed tuple/non-tuple index pairs cannot be fused.
                if (type(a_index) is tuple) != (type(b_index) is tuple):
                    break
                if type(a_index) is tuple:
                    indices = b_index + a_index
                    # Length mismatch with newaxis (None) entries is unsafe.
                    if (len(a_index) != len(b_index) and
                            any(i is None for i in indices)):
                        break
                    # getarray_nofancy must never see fancy (list/array) indices.
                    if (f2 is getarray_nofancy and
                            any(isinstance(i, fancy_ind_types) for i in indices)):
                        break
                elif (f2 is getarray_nofancy and
                        (type(a_index) in fancy_ind_types or
                         type(b_index) in fancy_ind_types)):
                    break
                try:
                    c_index = fuse_slice(b_index, a_index)
                    # rely on fact that nested gets never decrease in
                    # strictness e.g. `(getarray, (getitem, ...))` never
                    # happens
                    getter = f2
                except NotImplementedError:
                    break
                a, a_index = b, c_index
            # Emit the fused task; a full slice reduces to the bare source.
            if getter is not getitem:
                dsk[k] = (getter, a, a_index)
            elif (type(a_index) is slice and
                  not a_index.start and
                  a_index.stop is None and
                  a_index.step is None):
                dsk[k] = a
            elif type(a_index) is tuple and all(type(s) is slice and
                                                not s.start and
                                                s.stop is None and
                                                s.step is None
                                                for s in a_index):
                dsk[k] = a
            else:
                dsk[k] = (getitem, a, a_index)
    return dsk
def normalize_slice(s):
    """ Replace Nones in slices with integers
    >>> normalize_slice(slice(None, None, None))
    slice(0, None, 1)
    """
    start = 0 if s.start is None else s.start
    step = 1 if s.step is None else s.step
    stop = s.stop
    # Negative indices are not supported by the fusion machinery.
    if start < 0 or step < 0 or (stop is not None and stop < 0):
        raise NotImplementedError()
    return slice(start, stop, step)
def fuse_slice(a, b):
    """ Fuse stacked slices together
    Fuse a pair of repeated slices into a single slice:
    >>> fuse_slice(slice(1000, 2000), slice(10, 15))
    slice(1010, 1015, None)
    This also works for tuples of slices
    >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),
    ...            (slice(10, 15), [5, 2]))
    (slice(110, 115, None), [150, 120])
    And a variety of other interesting cases
    >>> fuse_slice(slice(1000, 2000), 10)  # integers
    1010
    >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))
    slice(1050, 1100, 10)
    >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3])  # lists
    [1005, 1010, 1015]
    >>> fuse_slice(None, slice(None, None))  # doctest: +SKIP
    None
    """
    # None only works if the second side is a full slice
    if a is None and b == slice(None, None):
        return None
    # Replace None with 0 and one in start and step
    if isinstance(a, slice):
        a = normalize_slice(a)
    if isinstance(b, slice):
        b = normalize_slice(b)
    # slice followed by integer index -> a single integer index
    if isinstance(a, slice) and isinstance(b, int):
        if b < 0:
            raise NotImplementedError()
        return a.start + b * a.step
    # slice followed by slice -> one composed slice
    if isinstance(a, slice) and isinstance(b, slice):
        start = a.start + a.step * b.start
        if b.stop is not None:
            stop = a.start + a.step * b.stop
        else:
            stop = None
        if a.stop is not None:
            if stop is not None:
                stop = min(a.stop, stop)
            else:
                stop = a.stop
        # (a redundant no-op `stop = stop` statement was removed here)
        step = a.step * b.step
        if step == 1:
            step = None
        return slice(start, stop, step)
    # list on the right: fuse elementwise
    if isinstance(b, list):
        return [fuse_slice(a, bb) for bb in b]
    # list on the left with a plain index: Python indexing does the work
    if isinstance(a, list) and isinstance(b, (int, slice)):
        return a[b]
    if isinstance(a, tuple) and not isinstance(b, tuple):
        b = (b,)
    # If given two tuples walk through both, being mindful of uneven sizes
    # and newaxes
    if isinstance(a, tuple) and isinstance(b, tuple):
        if (any(isinstance(item, list) for item in a) and
                any(isinstance(item, list) for item in b)):
            raise NotImplementedError("Can't handle multiple list indexing")
        j = 0
        result = list()
        for i in range(len(a)):
            # axis ceased to exist or we're out of b
            if isinstance(a[i], int) or j == len(b):
                result.append(a[i])
                continue
            while b[j] is None:  # insert any Nones on the rhs
                result.append(None)
                j += 1
            result.append(fuse_slice(a[i], b[j]))  # Common case
            j += 1
        while j < len(b):  # anything leftover on the right?
            result.append(b[j])
            j += 1
        return tuple(result)
    raise NotImplementedError()
| |
import numpy as np
import astropy.constants as const
def calculate_mu(X, Y, Z=None):
    """
    Calculates mean molecular weight
    Assumes:
        Fully ionized gas
        Composition of Hydrogen, Helium, Metals
    Inputs:
        X - Hydrogen mass fraction
        Y - Helium mass fraction
        [Z] - Metals mass fraction
            - if Z omitted, assumes X+Y+Z=1
        Inputs must sum to 1
    Outputs:
        mu - mean molecular weight
    Warnings:
    """
    if Z is None:
        Z = 1 - X - Y
    elif not np.isclose(X + Y + Z, 1):
        # Tolerant comparison: exact float equality rejected legitimate
        # inputs like X=0.70, Y=0.28, Z=0.02 whose binary sum is not
        # exactly 1.0. print() with one argument is identical in Py2/Py3.
        print("Incorrect values for X,Y,Z. Must sum to 1")
        raise Exception('calculate_mu incorrect inputs')
    mu = np.power((X / (1./2)) + (Y / (4./3)) + (Z / 2.), -1.)
    return mu
def calculate_mu_e(X, Y, Z=None):
    """
    Calculates mean molecular weight of electrons
    Assumes:
        Fully ionized gas
        Composition of Hydrogen, Helium, Metals
    Inputs:
        X - Hydrogen mass fraction
        Y - Helium mass fraction
        [Z] - Metals mass fraction
            - if Z omitted, assumes X+Y+Z=1
        Inputs must sum to 1
    Outputs:
        mu_e - mean molecular weight of electrons
    Warnings:
    """
    if Z is None:
        Z = 1 - X - Y
    elif not np.isclose(X + Y + Z, 1):
        # Tolerant comparison instead of exact float equality (see
        # calculate_mu for rationale); single-arg print() is Py2/Py3 safe.
        print("Incorrect values for X,Y,Z. Must sum to 1")
        raise Exception('calculate_mu incorrect inputs')
    mu_e = np.divide(2., (1+X)) #Eq. 4.30, Kippenhan Weigert Weiss 2nd ed.
    return mu_e
def calculate_rad_pressure(T):
    """
    Calculates radiative pressure of photon gas at temperature T
    Assumes:
    Inputs:
        T - temperature [K]
    Outputs:
        P_rad [dyne/cm^2]
    Warnings:
    """
    if T <= 0:
        # print() as a function call: identical output for a single
        # argument under Python 2, and also valid under Python 3.
        print("Temperature must be greater than 0")
        raise Exception('calculate_rad_pressure incorrect inputs')
    # radiation constant a = 4*sigma_sb/c in cgs units
    a = 4 * const.sigma_sb.cgs.value / const.c.cgs.value
    P_rad = a * np.power(T, 4.) / 3
    return P_rad
def calculate_beta(P_total, T):
    """
    Calculates beta, ratio between gas pressure and total pressure
    Assumes:
    Inputs:
        P_total - pressure [dyne cm^-2]
        T - temperature [K]
    Outputs:
        beta (can range between 0 and 1)
    Warnings:
    """
    if P_total <= 0 or T <= 0:
        # Single-argument print() behaves identically under Py2 and Py3.
        print("Inputs must be greater than 0")
        raise Exception('calculate_beta incorrect inputs')
    # beta = P_gas / P_total = (P_total - P_rad) / P_total
    P_rad = calculate_rad_pressure(T)
    beta = np.divide(P_total - P_rad, P_total)
    return beta
def calculate_density(P_gas, T, mu):
    """
    Calculates density using ideal gas law
    Assumes:
        ideal gas law
    Inputs:
        P_gas - pressure [dyne cm^-2]
        T - temperature [K]
        mu - mean molecular weight
    Outputs:
        rho - density [g cm^-3]
    Warnings:
    """
    if (P_gas <= 0 or T <= 0) or mu <= 0:
        # Single-argument print() behaves identically under Py2 and Py3.
        print("Inputs must be greater than 0")
        raise Exception('calculate_density incorrect inputs \n P_gas = ' + \
                        str(P_gas) + "\n T = " + str(T))
    R = const.k_B.cgs.value / const.m_p.cgs.value # ideal gas constant
    rho = np.divide(P_gas * mu, R * T)
    return rho
def calculate_del_rad(m, l, P, T, kappa):
    """
    Calculates temperature gradient of radiative heat transfer
    del = d ln T / d ln P
    Assumes:
        mass, m as independent variable
    Inputs:
        (all local conditions at an enclosed mass, m)
        m     1xN array - mass [g]
        l     1xN array - luminosity [ergs s^-1]
        P     1xN array - total Pressure [dyne cm^-2]
        T     1xN array - Temperature [K]
        kappa 1xN array - opacity [cm^2 g^-1]
    Outputs:
        del_rad 1xN array - radiative pressure gradient
                          = d ln T / d ln P
    Warnings:
        This may not be your actual pressure gradient
        Need to do: min(del_rad, del_ad)
    """
    # Split the expression into a constant prefactor and the local term.
    prefactor = np.divide(3., 64 * np.pi * const.sigma_sb.cgs.value * const.G.cgs.value)
    local_term = np.divide(kappa * l * P, m * np.power(T, 4.))
    return prefactor * local_term
def calculate_del_ad():
    """
    Calculates temperature gradient of radiative heat transfer
    del = d ln T / d ln P
    Assumes:
        mass, m as independent variable
        fixed composition (ionization levels not changing)
    Outputs:
        del_ad 1x1 float - adiabatic pressure gradient
                         = d ln T / d ln P
    Warnings:
        This may not be your actual pressure gradient
        Need to do: min(del_rad, del_ad)
    """
    # Constant adiabatic gradient for the fixed-composition assumption.
    return 0.4
def calculate_del(m, l, P, T, kappa):
    """
    Calculates temperature gradient
    del = d ln T / d ln P
    Assumes:
        mass, m as independent variable
    Inputs:
        (all local conditions at an enclosed mass, m)
        m     1xN array - mass [g]
        l     1xN array - luminosity [ergs s^-1]
        P     1xN array - total Pressure [dyne cm^-2]
        T     1xN array - Temperature [K]
        kappa 1xN array - opacity [cm^2 g^-1]
    Outputs:
        del_actual 1xN array - overall pressure gradient
                             = d ln T / d ln P
    Warnings:
    """
    # The actual gradient is capped elementwise at the adiabatic value.
    return np.minimum(calculate_del_rad(m, l, P, T, kappa), calculate_del_ad())
def solve_problem_5():
    """Work through homework problem 5: density and beta for two compositions.

    Prints the results to stdout (Python 2 print statements); returns None.
    """
    # Case (a): hydrogen-free, helium-dominated composition.
    P_total_a = 10**(16.85)
    T_a = 10**(7.55)
    X_a = 0
    Y_a = .98
    mu_a = calculate_mu(X_a, Y_a)
    P_gas_a = P_total_a - calculate_rad_pressure(T_a)
    beta_a = P_gas_a / P_total_a
    # Case (b): hydrogen-rich composition at lower temperature.
    P_total_b = 10**(16.87)
    T_b = 10**(6.91)
    X_b = .70
    Y_b = .28
    mu_b = calculate_mu(X_b, Y_b)
    P_gas_b = P_total_b - calculate_rad_pressure(T_b)
    beta_b = P_gas_b / P_total_b
    print "5a)"
    rho_a = calculate_density(P_gas_a, T_a, mu_a)
    print "rho_a: ", rho_a, "[g cm^-3]"
    print "beta_a: ", beta_a
    print "5b)"
    rho_b = calculate_density(P_gas_b, T_b, mu_b)
    print "rho_b: ", rho_b, "[g cm^-3]"
    print "beta_b: ", beta_b
    return
# solve_problem_5()
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module provides utilities for validation.
"""
import numbers
import re
import socket
from ryu.lib import ip
def is_valid_mac(mac):
    """Returns True if the given MAC address is valid.
    The given MAC address should be a colon hexadecimal notation string.
    Samples:
    - valid address: aa:bb:cc:dd:ee:ff, 11:22:33:44:55:66
    - invalid address: aa:bb:cc:dd, 11-22-33-44-55-66, etc.
    """
    # Exactly six hex octets separated by colons. The previous separator
    # class [\:\-] also accepted '-' (and even mixed separators), which
    # contradicts the documented contract above.
    return bool(re.match(r'^' + r'\:'.join([r'([0-9a-f]{2})'] * 6)
                         + r'$', mac.lower()))
def is_valid_ip_prefix(prefix, bits):
    """Returns True if *prefix* is a valid IPv4 or IPv6 address prefix.
    *prefix* should be a number between 0 to *bits* length.
    """
    try:
        # Prefix should be a number
        prefix = int(prefix)
    except (ValueError, TypeError):
        # Non-numeric input (e.g. None or a list) previously escaped as an
        # uncaught TypeError; a validator should just report False.
        return False
    # Prefix should be a number between 0 to *bits*
    return 0 <= prefix <= bits
def is_valid_ipv4(ipv4):
    """Returns True if given is a valid ipv4 address.
    Given value should be a dot-decimal notation string.
    Samples:
    - valid address: 10.0.0.1, 192.168.0.1
    - invalid address: 11.0.0, 192:168:0:1, etc.
    """
    # Delegates to ryu.lib.ip for the actual parsing.
    return ip.valid_ipv4(ipv4)
def is_valid_ipv4_prefix(ipv4_prefix):
    """Returns True if *ipv4_prefix* is a valid prefix with mask.
    Samples:
    - valid prefix: 1.1.1.0/32, 244.244.244.1/10
    - invalid prefix: 255.2.2.2/2, 2.2.2/22, etc.
    """
    if not isinstance(ipv4_prefix, str):
        return False
    try:
        # Require exactly one '/' separating address and mask length.
        addr, mask = ipv4_prefix.split('/')
    except ValueError:
        return False
    return is_valid_ipv4(addr) and is_valid_ip_prefix(mask, 32)
def is_valid_ipv6(ipv6):
    """Returns True if given `ipv6` is a valid IPv6 address
    """
    # Delegates to ryu.lib.ip for the actual parsing.
    return ip.valid_ipv6(ipv6)
def is_valid_ipv6_prefix(ipv6_prefix):
    """Returns True if given `ipv6_prefix` is a valid IPv6 prefix."""
    # Validate input type
    if not isinstance(ipv6_prefix, str):
        return False
    try:
        # Require exactly one '/' separating address and mask length.
        addr, mask = ipv6_prefix.split('/')
    except ValueError:
        return False
    return is_valid_ipv6(addr) and is_valid_ip_prefix(mask, 128)
def is_valid_old_asn(asn):
    """Returns True if the given AS number is Two Octet."""
    if not isinstance(asn, numbers.Integral):
        return False
    return 0 <= asn <= 0xffff
def is_valid_asn(asn):
    """Returns True if the given AS number is Two or Four Octet."""
    if not isinstance(asn, numbers.Integral):
        return False
    return 0 <= asn <= 0xffffffff
def is_valid_vpnv4_prefix(prefix):
    """Returns True if given prefix is a string represent vpnv4 prefix.
    Vpnv4 prefix is made up of RD:Ipv4, where RD is represents route
    distinguisher and Ipv4 represents valid dot-decimal ipv4 notation string.
    """
    if not isinstance(prefix, str):
        return False
    # First two colon-separated fields form the RD; the rest is the prefix.
    tokens = prefix.split(':', 2)
    if len(tokens) != 3:
        return False
    route_dist = '%s:%s' % (tokens[0], tokens[1])
    if not is_valid_route_dist(route_dist):
        return False
    return is_valid_ipv4_prefix(tokens[2])
def is_valid_vpnv6_prefix(prefix):
    """Returns True if given prefix is a string represent vpnv6 prefix.
    Vpnv6 prefix is made up of RD:Ipv6, where RD is represents route
    distinguisher and Ipv6 represents valid colon hexadecimal notation string.
    """
    if not isinstance(prefix, str):
        return False
    # First two colon-separated fields form the RD; the rest is the prefix.
    tokens = prefix.split(':', 2)
    if len(tokens) != 3:
        return False
    route_dist = '%s:%s' % (tokens[0], tokens[1])
    if not is_valid_route_dist(route_dist):
        return False
    return is_valid_ipv6_prefix(tokens[2])
def is_valid_med(med):
    """Returns True if value of *med* is valid as per RFC.

    According to RFC, MED is a four-octet non-negative integer and the
    value '((2 ** 32) - 1) = 0xffffffff' denotes an "infinity" metric.
    """
    if not isinstance(med, numbers.Integral):
        return False
    return 0 <= med <= 0xffffffff
def is_valid_mpls_label(label):
    """Validates `label` according to MPLS label rules
    RFC says:
    This 20-bit field.
    A value of 0 represents the "IPv4 Explicit NULL Label".
    A value of 1 represents the "Router Alert Label".
    A value of 2 represents the "IPv6 Explicit NULL Label".
    A value of 3 represents the "Implicit NULL Label".
    Values 4-15 are reserved.
    """
    if not isinstance(label, numbers.Integral):
        return False
    # Reserved labels 4-15 must be rejected.
    if 4 <= label <= 15:
        return False
    # A label is a 20-bit field, so the valid range is 0 .. 2**20 - 1.
    # (The previous check `label > 2 ** 20` was off by one and accepted
    # 2**20, which does not fit in 20 bits.)
    return 0 <= label <= 2 ** 20 - 1
def is_valid_mpls_labels(labels):
    """Returns True if the given value is a list of valid MPLS labels.

    An empty list/tuple is considered valid.
    """
    if not isinstance(labels, (list, tuple)):
        return False
    # Every element must individually be a valid label.
    return all(is_valid_mpls_label(label) for label in labels)
def is_valid_route_dist(route_dist):
    """Validates *route_dist* as string representation of route distinguisher.

    Returns True if *route_dist* is as per our convention of RD, else False.
    Our convention is to represent RD as a string in format:
    *admin_sub_field:assigned_num_field* where *admin_sub_field* may also be
    a valid IPv4 string representation.
    Valid examples: '65000:222', '1.2.3.4:4432'.
    Invalid examples: '1.11.1: 333'
    """
    # TODO(PH): Provide complete implementation.
    # RD syntax matches the RT/SOO community syntax, so reuse that check.
    return is_valid_ext_comm_attr(route_dist)
def is_valid_ext_comm_attr(attr):
    """Validates *attr* as string representation of RT or SOO.

    Returns True if *attr* follows our convention
    *global_admin_part:local_admin_path*, else False.
    """
    if not isinstance(attr, str):
        return False
    # Split on the *last* colon so a dotted/complex admin part stays whole.
    pieces = attr.rsplit(':', 1)
    if len(pieces) != 2:
        return False
    admin, local = pieces
    try:
        if '.' in admin:
            # A dotted admin part must be a valid IPv4 address.
            if not is_valid_ipv4(admin):
                return False
        else:
            int(admin)
        int(local)
    except (ValueError, socket.error):
        return False
    return True
def is_valid_esi(esi):
    """Returns True if the given EVPN Ethernet Segment ID is valid.

    Accepts either an integer within the supported range or a dict
    (structured ESI description).
    """
    if isinstance(esi, dict):
        return True
    # Integer form must fit the supported ESI value range.
    return isinstance(esi, numbers.Integral) and 0 <= esi <= 0xffffffffffffffffff
def is_valid_ethernet_tag_id(etag_id):
    """Returns True if the given EVPN Ethernet Tag ID is valid.

    An Ethernet Tag ID is a 32-bit field, so 0 .. 0xffffffff.
    """
    if not isinstance(etag_id, numbers.Integral):
        return False
    return 0 <= etag_id <= 0xffffffff
def is_valid_vni(vni):
    """Returns True if the given VXLAN Virtual Network Identifier is valid.

    A VNI is a 24-bit field, so 0 .. 0xffffff.
    """
    if not isinstance(vni, numbers.Integral):
        return False
    return 0 <= vni <= 0xffffff
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
    # Python 3: alias the Python 2 string type names used in this module.
    # NOTE(review): string comparison of sys.version works for 3.x but is
    # fragile; sys.version_info would be the robust check.
    basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
    """
    A wrapper over str(), but convert bool values to lower case string
    """
    # The JVM option API expects 'true'/'false', not Python's 'True'/'False'.
    return str(value).lower() if isinstance(value, bool) else str(value)
class DataFrameReader(object):
    """
    Interface used to load a :class:`DataFrame` from external storage systems
    (e.g. file systems, key-value stores, etc). Use :func:`SQLContext.read`
    to access this.
    ::Note: Experimental
    .. versionadded:: 1.4
    """
    def __init__(self, sqlContext):
        # Handle to the JVM-side DataFrameReader; the builder methods below
        # rebind this to the object returned by each Java call.
        self._jreader = sqlContext._ssql_ctx.read()
        self._sqlContext = sqlContext
    def _df(self, jdf):
        # Wrap a Java DataFrame handle in the Python DataFrame class.
        # Imported locally to avoid a circular import at module load time.
        from pyspark.sql.dataframe import DataFrame
        return DataFrame(jdf, self._sqlContext)
    @since(1.4)
    def format(self, source):
        """Specifies the input data source format.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df = sqlContext.read.format('json').load('python/test_support/sql/people.json')
        >>> df.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        self._jreader = self._jreader.format(source)
        return self
    @since(1.4)
    def schema(self, schema):
        """Specifies the input schema.
        Some data sources (e.g. JSON) can infer the input schema automatically from data.
        By specifying the schema here, the underlying data source can skip the schema
        inference step, and thus speed up data loading.
        :param schema: a StructType object
        """
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")
        # Round-trip the schema through its JSON form to build the Java object.
        jschema = self._sqlContext._ssql_ctx.parseDataType(schema.json())
        self._jreader = self._jreader.schema(jschema)
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an input option for the underlying data source.
        """
        # to_str() lower-cases booleans so the JVM sees 'true'/'false'.
        self._jreader = self._jreader.option(key, to_str(value))
        return self
    @since(1.4)
    def options(self, **options):
        """Adds input options for the underlying data source.
        """
        for k in options:
            self._jreader = self._jreader.option(k, to_str(options[k]))
        return self
    @since(1.4)
    def load(self, path=None, format=None, schema=None, **options):
        """Loads data from a data source and returns it as a :class`DataFrame`.
        :param path: optional string for file-system backed data sources.
        :param format: optional string for format of the data source. Default to 'parquet'.
        :param schema: optional :class:`StructType` for the input schema.
        :param options: all other string options
        >>> df = sqlContext.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
        ...     opt2=1, opt3='str')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        >>> df = sqlContext.read.format('json').load(['python/test_support/sql/people.json',
        ...     'python/test_support/sql/people1.json'])
        >>> df.dtypes
        [('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
        """
        if format is not None:
            self.format(format)
        if schema is not None:
            self.schema(schema)
        self.options(**options)
        if path is not None:
            if type(path) == list:
                # Multiple paths: build a java String[] and call the
                # varargs load(String...) overload on the JVM side.
                paths = path
                gateway = self._sqlContext._sc._gateway
                jpaths = gateway.new_array(gateway.jvm.java.lang.String, len(paths))
                for i in range(0, len(paths)):
                    jpaths[i] = paths[i]
                return self._df(self._jreader.load(jpaths))
            else:
                return self._df(self._jreader.load(path))
        else:
            return self._df(self._jreader.load())
    @since(1.4)
    def json(self, path, schema=None):
        """
        Loads a JSON file (one object per line) or an RDD of Strings storing JSON objects
        (one object per record) and returns the result as a :class`DataFrame`.
        If the ``schema`` parameter is not specified, this function goes
        through the input once to determine the input schema.
        :param path: string represents path to the JSON dataset,
                     or RDD of Strings storing JSON objects.
        :param schema: an optional :class:`StructType` for the input schema.
        >>> df1 = sqlContext.read.json('python/test_support/sql/people.json')
        >>> df1.dtypes
        [('age', 'bigint'), ('name', 'string')]
        >>> rdd = sc.textFile('python/test_support/sql/people.json')
        >>> df2 = sqlContext.read.json(rdd)
        >>> df2.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        if schema is not None:
            self.schema(schema)
        if isinstance(path, basestring):
            return self._df(self._jreader.json(path))
        elif isinstance(path, RDD):
            # An RDD of JSON strings: hand the underlying Java RDD over.
            return self._df(self._jreader.json(path._jrdd))
        else:
            raise TypeError("path can be only string or RDD")
    @since(1.4)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        :param tableName: string, name of the table.
        >>> df = sqlContext.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.registerTempTable('tmpTable')
        >>> sqlContext.read.table('tmpTable').dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        return self._df(self._jreader.table(tableName))
    @since(1.4)
    def parquet(self, *paths):
        """Loads a Parquet file, returning the result as a :class:`DataFrame`.
        >>> df = sqlContext.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        # Varargs paths are converted to a Scala Seq for the JVM call.
        return self._df(self._jreader.parquet(_to_seq(self._sqlContext._sc, paths)))
    @since(1.5)
    def orc(self, path):
        """
        Loads an ORC file, returning the result as a :class:`DataFrame`.
        ::Note: Currently ORC support is only available together with
        :class:`HiveContext`.
        >>> df = hiveContext.read.orc('python/test_support/sql/orc_partitioned')
        >>> df.dtypes
        [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
        """
        return self._df(self._jreader.orc(path))
    @since(1.4)
    def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
             predicates=None, properties=None):
        """
        Construct a :class:`DataFrame` representing the database table accessible
        via JDBC URL `url` named `table` and connection `properties`.
        The `column` parameter could be used to partition the table, then it will
        be retrieved in parallel based on the parameters passed to this function.
        The `predicates` parameter gives a list expressions suitable for inclusion
        in WHERE clauses; each one defines one partition of the :class:`DataFrame`.
        ::Note: Don't create too many partitions in parallel on a large cluster;
        otherwise Spark might crash your external database systems.
        :param url: a JDBC URL
        :param table: name of table
        :param column: the column used to partition
        :param lowerBound: the lower bound of partition column
        :param upperBound: the upper bound of the partition column
        :param numPartitions: the number of partitions
        :param predicates: a list of expressions
        :param properties: JDBC database connection arguments, a list of arbitrary string
                           tag/value. Normally at least a "user" and "password" property
                           should be included.
        :return: a DataFrame
        """
        if properties is None:
            properties = dict()
        # Copy the Python dict into a java.util.Properties instance.
        jprop = JavaClass("java.util.Properties", self._sqlContext._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            # Range partitioning on `column`; fall back to the cluster's
            # default parallelism when numPartitions is not given.
            if numPartitions is None:
                numPartitions = self._sqlContext._sc.defaultParallelism
            return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
                                               int(numPartitions), jprop))
        if predicates is not None:
            # One partition per WHERE-clause predicate.
            arr = self._sqlContext._sc._jvm.PythonUtils.toArray(predicates)
            return self._df(self._jreader.jdbc(url, table, arr, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
class DataFrameWriter(object):
    """
    Interface used to write a [[DataFrame]] to external storage systems
    (e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
    to access this.
    ::Note: Experimental
    .. versionadded:: 1.4
    """
    def __init__(self, df):
        self._df = df
        self._sqlContext = df.sql_ctx
        # JVM-side DataFrameWriter; the builder methods below rebind it.
        self._jwrite = df._jdf.write()
    @since(1.4)
    def mode(self, saveMode):
        """Specifies the behavior when data or table already exists.
        Options include:
        * `append`: Append contents of this :class:`DataFrame` to existing data.
        * `overwrite`: Overwrite existing data.
        * `error`: Throw an exception if data already exists.
        * `ignore`: Silently ignore this operation if data already exists.
        >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # At the JVM side, the default value of mode is already set to "error".
        # So, if the given saveMode is None, we will not call JVM-side's mode method.
        if saveMode is not None:
            self._jwrite = self._jwrite.mode(saveMode)
        return self
    @since(1.4)
    def format(self, source):
        """Specifies the underlying output data source.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self._jwrite = self._jwrite.format(source)
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an output option for the underlying data source.
        """
        # NOTE(review): unlike DataFrameReader.option, the value is passed
        # through unchanged (no to_str conversion for booleans).
        self._jwrite = self._jwrite.option(key, value)
        return self
    @since(1.4)
    def options(self, **options):
        """Adds output options for the underlying data source.
        """
        for k in options:
            self._jwrite = self._jwrite.option(k, options[k])
        return self
    @since(1.4)
    def partitionBy(self, *cols):
        """Partitions the output by the given columns on the file system.
        If specified, the output is laid out on the file system similar
        to Hive's partitioning scheme.
        :param cols: name of columns
        >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Accept a single list/tuple argument as well as varargs.
        if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
            cols = cols[0]
        self._jwrite = self._jwrite.partitionBy(_to_seq(self._sqlContext._sc, cols))
        return self
    @since(1.4)
    def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
        """Saves the contents of the :class:`DataFrame` to a data source.
        The data source is specified by the ``format`` and a set of ``options``.
        If ``format`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.
        :param path: the path in a Hadoop supported file system
        :param format: the format used to save
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        :param options: all other string options
        >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        # Without a path the data source decides the destination itself.
        if path is None:
            self._jwrite.save()
        else:
            self._jwrite.save(path)
    @since(1.4)
    def insertInto(self, tableName, overwrite=False):
        """Inserts the content of the :class:`DataFrame` to the specified table.
        It requires that the schema of the class:`DataFrame` is the same as the
        schema of the table.
        Optionally overwriting any existing data.
        """
        self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
    @since(1.4)
    def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
        """Saves the content of the :class:`DataFrame` as the specified table.
        In the case the table already exists, behavior of this function depends on the
        save mode, specified by the `mode` function (default to throwing an exception).
        When `mode` is `Overwrite`, the schema of the [[DataFrame]] does not need to be
        the same as that of the existing table.
        * `append`: Append contents of this :class:`DataFrame` to existing data.
        * `overwrite`: Overwrite existing data.
        * `error`: Throw an exception if data already exists.
        * `ignore`: Silently ignore this operation if data already exists.
        :param name: the table name
        :param format: the format used to save
        :param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
        :param partitionBy: names of partitioning columns
        :param options: all other string options
        """
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        self._jwrite.saveAsTable(name)
    @since(1.4)
    def json(self, path, mode=None):
        """Saves the content of the :class:`DataFrame` in JSON format at the specified path.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)._jwrite.json(path)
    @since(1.4)
    def parquet(self, path, mode=None, partitionBy=None):
        """Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._jwrite.parquet(path)
    # NOTE(review): unlike the other writer methods, orc() carries no
    # @since decorator.
    def orc(self, path, mode=None, partitionBy=None):
        """Saves the content of the :class:`DataFrame` in ORC format at the specified path.
        ::Note: Currently ORC support is only available together with
        :class:`HiveContext`.
        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        >>> orc_df = hiveContext.read.orc('python/test_support/sql/orc_partitioned')
        >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._jwrite.orc(path)
    @since(1.4)
    def jdbc(self, url, table, mode=None, properties=None):
        """Saves the content of the :class:`DataFrame` to a external database table via JDBC.
        .. note:: Don't create too many partitions in parallel on a large cluster;\
        otherwise Spark might crash your external database systems.
        :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
        :param table: Name of the table in the external database.
        :param mode: specifies the behavior of the save operation when data already exists.
            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param properties: JDBC database connection arguments, a list of
                           arbitrary string tag/value. Normally at least a
                           "user" and "password" property should be included.
        """
        if properties is None:
            properties = dict()
        # Copy the Python dict into a java.util.Properties instance.
        jprop = JavaClass("java.util.Properties", self._sqlContext._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        self._jwrite.mode(mode).jdbc(url, table, jprop)
def _test():
    """Run this module's doctests against a local Spark deployment."""
    import doctest
    import os
    import tempfile
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, HiveContext
    import pyspark.sql.readwriter
    # Doctest fixture paths are relative to the Spark source tree.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.readwriter.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    # Names the doctest examples in this module reference.
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['hiveContext'] = HiveContext(sc)
    globs['df'] = globs['sqlContext'].read.parquet('python/test_support/sql/parquet_partitioned')
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.readwriter, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    # Non-zero exit code signals doctest failures to the build.
    if failure_count:
        exit(-1)
# Allow running the doctest suite directly: `python readwriter.py`.
if __name__ == "__main__":
    _test()
| |
"""The compiler that turns topologies into xml network definitions"""
from Core.Item import nodeTypes
from Core.Device import Device
from Core.globals import options, environ, mainWidgets
from PyQt4 import QtCore
import os, re
class Compiler:
    def __init__(self, device_list, filename):
        """
        Create a compile instance with a list of devices and the filename.
        """
        self.warnings = 0
        self.errors = 0
        self.device_list = device_list
        # The xml output lives next to the .gsav save file.
        self.filename = filename.replace(".gsav", ".xml")
        self.output = open(self.filename, "w")
        # GUI log widget; must support append().
        self.log = mainWidgets["log"]
        # Bucket devices by type so each compile_* pass iterates its own kind.
        self.compile_list = {}
        for nodeType in nodeTypes:
            self.compile_list[nodeType] = []
        for device in device_list:
            if isinstance(device, Device):
                self.compile_list[device.device_type].append(device)
    def compile(self):
        """
        Compile the topology into xml.

        Returns the xml filename on success, or "" when errors occurred
        (in which case the partial output file is removed).
        """
        if options["autogen"]:
            self.log.append("Auto-generate IP/MAC Addresses is ON.")
        else:
            self.log.append("Auto-generate IP/MAC Addresses is OFF.")
        if options["autorouting"]:
            self.log.append("Auto-routing is ON.")
        else:
            self.log.append("Auto-routing is OFF.")
        self.output.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        self.output.write("<!DOCTYPE gloader SYSTEM \"" + os.environ["GINI_SHARE"] + "/gloader/gloader.dtd"+"\">\n")
        self.output.write("<gloader>\n\n")
        # Subnets/switches are generated first: later passes read the
        # subnet and mask values these passes propagate.
        if options["autogen"]:
            self.autogen_subnet()
            self.autogen_switch()
        self.compile_subnet()
        self.compile_switch()
        self.switch_pass_mask()
        if options["autogen"]:
            self.autogen_wireless_access_point()
        self.compile_wireless_access_point()
        if options["autogen"]:
            self.autogen_router()
            self.autogen_UML()
            self.autogen_REALM()
            self.autogen_mobile()
        # routing_table_* helpers are defined elsewhere in this class.
        self.routing_table_clear()
        if options["autorouting"]:
            self.routing_table_router()
            #self.routing_table_wireless_access_point()
            self.routing_table_entry()
            self.routing_table_uml()
            #self.routing_table_mobile()
        self.compile_router()
        self.compile_UML()
        self.compile_REALM()
        self.compile_mobile()
        self.compile_OpenFlow_Controller()
        self.output.write("</gloader>\n")
        self.output.close()
        self.log.append("Compile finished with " + str(self.errors) + \
                        " error(s) and " + str(self.warnings) + " warning(s).\n")
        # A failed compile removes the partial xml and returns "".
        if self.errors:
            os.remove(self.filename)
            return ""
        return self.filename
def autogen_subnet(self):
"""
Auto-generate properties for Subnets.
"""
for subnet in self.compile_list["Subnet"]:
subnet.setProperty("mask", "255.255.255.0")
if subnet.getProperty("subnet"):
continue
subnet.setProperty("subnet", "192.168.%d.0" % subnet.getID())
def writeProperty(self, prop, value):
"""
Write a property and value in xml format to file.
"""
self.output.write("\t\t<" + prop + ">")
self.output.write(value)
self.output.write("</" + prop + ">\n")
def generateREALMError(self):
self.errors += 1
self.log.append("Error: there is more than one REALM")
def generateError(self, device, prop, errorType, interface = None):
"""
Generate a compile error.
"""
self.errors += 1
message = "Error: " + device.getName() + "'s " + prop
if not errorType == "missing":
message += " value"
if interface:
message += " of interface " + str(device.getInterfaces().index(interface) + 1)
message += " is " + errorType + "."
self.log.append(message)
def generateGenericError(self, device, message):
"""
Generate a compile error.
"""
self.errors += 1
message = ' '.join(("Error:", device.getName(), message))
self.log.append(message)
def generateConnectionError(self, device, numCons):
"""
Generate a compile error.
"""
self.errors += 1
message = ' '.join(("Error:", device.getName(), "has less than", str(numCons), "connection(s)."))
self.log.append(message)
def generateConnectionWarning(self, device, numCons):
"""
Generate a compile warning.
"""
self.warnings += 1
message = ' '.join(("Warning:", device.getName(), "has less than", str(numCons), "connection(s)."))
self.log.append(message)
def generateGenericWarning(self, device, message):
"""
Generate a compile warning.
"""
self.warnings += 1
message = ' '.join(("Warning:", device.getName(), message))
self.log.append(message)
    def compile_subnet(self):
        """
        Compile all the Subnets.

        Validates each subnet's address and mask, warns on unconnected
        subnets, and propagates the mask to neighbours via pass_mask().
        """
        for subnet in self.compile_list["Subnet"]:
            edges = subnet.edges()
            if len(edges) < 1:
                self.generateConnectionWarning(subnet, 1)
            for prop in ["subnet", "mask"]:
                value = subnet.getProperty(prop)
                if not value:
                    self.generateError(subnet, prop, "empty")
                # validate() is defined elsewhere in this class.
                elif not self.validate(prop, value):
                    self.generateError(subnet, prop, "invalid")
            # pass_mask() (defined elsewhere) pushes the mask to neighbours.
            self.pass_mask(subnet)
    def autogen_router(self):
        """
        Auto-generate properties for Routers.
        """
        for router in self.compile_list["Router"]:
            # i counts every edge (including controller links) and feeds
            # the last byte of the generated MAC address.
            i = 0
            for con in router.edges():
                i += 1
                node = con.getOtherDevice(router)
                # OpenFlow controllers get no network interface here.
                if node.device_type == "OpenFlow_Controller":
                    continue
                node = node.getTarget(router)
                if options["autogen"]:
                    # Keep the first three octets of the interface's subnet;
                    # the host octet is 127 + the router's ID.
                    subnet = str(router.getInterfaceProperty("subnet", node)).rsplit(".", 1)[0]
                    router.setInterfaceProperty("ipv4", "%s.%d" % (subnet, 127 + router.getID()), node)
                    router.setInterfaceProperty("mac", "fe:fd:03:%02x:00:%02x" % (router.getID(), i), node)
    def compile_router(self):
        """
        Compile all the Routers.
        """
        for router in self.compile_list["Router"]:
            self.output.write("<vr name=\"" + router.getName() + "\">\n")
            # At most one OpenFlow controller may be attached per router.
            controllerFound = False
            for con in router.edges():
                node = con.getOtherDevice(router)
                if node.device_type == "OpenFlow_Controller":
                    if controllerFound:
                        # NOTE(review): this returns from compile_router
                        # entirely, skipping the remaining routers and leaving
                        # the current <vr> element unclosed — confirm intended.
                        self.generateGenericError(router, " is connected to multiple OpenFlow controllers")
                        return
                    controllerFound = True
                    self.output.write("\t<controller>" + node.getName() + "</controller>\n")
            edges = router.edges()
            if len(edges) < 2:
                self.generateConnectionWarning(router, 2)
            for con in edges:
                node = con.getOtherDevice(router)
                # Controller links were written above; skip them here.
                if node.device_type == "OpenFlow_Controller":
                    continue
                node = node.getTarget(router)
                self.output.write("\t<netif>\n")
                interface = router.getInterface(node)
                # Map GUI property names to gloader xml element names.
                mapping = {"subnet":"network", "mac":"nic", "ipv4":"ip"}
                self.writeInterface(router, interface, mapping)
                self.output.write("\t</netif>\n")
            self.output.write("</vr>\n\n")
    def autogen_wireless_access_point(self):
        """
        Auto-generate properties for Wireless_access_points.
        """
        for router in self.compile_list["Wireless_access_point"]:
            if options["autogen"]:
                subnet = router.getInterfaceProperty("subnet")
                if not subnet:
                    # 256 - ID counts down from 192.168.255.0 — presumably to
                    # keep clear of the 192.168.<ID>.0 subnets generated
                    # elsewhere; TODO confirm.
                    subnet = "192.168.%d.0" % (256 - router.getID())
                    router.setInterfaceProperty("subnet", subnet)
                subnet = str(subnet).rsplit(".", 1)[0]
                router.setInterfaceProperty("mask", "255.255.255.0")
                router.setInterfaceProperty("ipv4", "%s.%d" % (subnet, router.getID()))
                router.setInterfaceProperty("mac", "fe:fd:01:%02x:00:00" % router.getID())
    def compile_wireless_access_point(self):
        """
        Compile all the Wireless_access_points.
        """
        for router in self.compile_list["Wireless_access_point"]:
            self.output.write("<vwr name=\"" + router.getName() + "\">\n")
            edges = router.edges()
            if len(edges) < 1:
                self.errors += 1
                message = "Error: " + router.getName() + " needs at least one connected Mobile to start."
                self.log.append(message)
            self.output.write("\t<netif_wireless>\n")
            interface = router.getInterface()
            # Map GUI property names to gloader xml element names.
            mapping = {"subnet":"network", "mac":"nic", "ipv4":"ip"}
            self.writeInterface(router, interface, mapping)
            #properties=router.getProperties()
            # Wireless parameters are grouped into xml sections by category.
            p_types={}
            p_types["wireless_card"]=("w_type", "freq", "bandwidth", "Pt", "Pt_c", "Pr_c", "P_idle", "P_sleep", "P_off", "RX", "CS", "CP", "module")
            p_types["antenna"]=("a_type", "ant_h", "ant_g", "ant_l", "JAM")
            p_types["energy"]=("power", "PSM", "energy_amount")
            p_types["mobility"]=("m_type", "ran_max", "ran_min")
            p_types["mac_layer"]=("mac_type", "trans")
            for item in p_types:
                self.output.write("\t<"+item+">\n")
                for p in p_types[item]:
                    self.output.write("\t\t<"+p+">"+router.getProperty(p)+"</"+p+">\n")
                self.output.write("\t</"+item+">\n")
            self.output.write("\t</netif_wireless>\n")
            self.output.write("</vwr>\n\n")
            # Propagate this access point's subnet/mask to every connected
            # device (the Mobiles).
            subnet = router.getInterfaceProperty("subnet")
            mask = router.getInterfaceProperty("mask")
            for edge in edges:
                target = edge.getOtherDevice(router)
                target.setInterfaceProperty("subnet", subnet, router)
                target.setInterfaceProperty("mask", mask, router)
    def writeInterface(self, device, interface, mapping):
        """
        Write an interface to file according to mapping.

        `mapping` maps interface property names to xml element names.
        Python 2 only: relies on dict.has_key/iteritems and QtCore.QString
        dictionary keys.
        """
        if interface.has_key(QtCore.QString("target")):
            self.writeProperty("target", interface[QtCore.QString("target")].getName())
        for prop, eq in mapping.iteritems():
            try:
                value = interface[QtCore.QString(prop)]
            # Bare except: a missing key is reported and aborts this interface.
            except:
                self.generateError(device, prop, "missing", interface)
                return
            if not value:
                self.generateError(device, prop, "empty", interface)
            elif not self.validate(prop, value, interface):
                self.generateError(device, prop, "invalid", interface)
            else:
                self.writeProperty(eq, value)
        # formatRoutes (defined elsewhere) renders the routing-table xml.
        table = self.formatRoutes(interface[QtCore.QString("routing")], device.device_type)
        self.output.write(table)
def autogen_switch(self):
"""
Auto-generate properties for switches.
"""
for switch in self.compile_list["Switch"]:
switch.setProperty("mac", "fe:fd:04:00:00:%02x" % switch.getID())
    def compile_switch(self):
        """
        Compile all the Switches.

        Breadth-first searches the cluster of connected switches to find
        the single Subnet it belongs to; multiple distinct subnets in one
        cluster are a compile error.
        """
        for switch in self.compile_list["Switch"]:
            self.output.write("<vs name=\"" + switch.getName() + "\">\n")
            self.output.write("\t<priority>" + switch.getProperty("Priority") + "</priority>\n")
            self.output.write("\t<mac>" + switch.getProperty("mac") + "</mac>\n")
            subnet = None
            if len(switch.edges()) < 2:
                self.generateConnectionWarning(switch, 2)
            # Only the first neighbouring switch becomes the <target>.
            first = True
            # BFS over switch-to-switch links.
            Q = [switch]
            switch_seen = set([switch])
            while Q:
                t = Q.pop(0)
                for edge in t.edges():
                    node = edge.getOtherDevice(t)
                    if node.device_type == "Subnet":
                        if (subnet is None) or (subnet == node):
                            subnet = node
                        else:
                            # NOTE(review): returns from compile_switch
                            # entirely, leaving the current <vs> unclosed.
                            self.generateError(t, "subnet", "inconsistent due to multiple values (only connect to a single subnet)")
                            return
                    if node.device_type == "Switch":
                        # should look around for a subnet
                        if node not in switch_seen:
                            switch_seen.add(node)
                            Q.append(node)
                        if first:
                            self.output.write("\t<target>" + node.getName() + "</target>\n")
                            first = False
            if subnet is None:
                self.generateError(switch, "subnet", "missing")
                return
            switch.setProperty("subnet", subnet.getProperty("subnet"))
            if switch.getProperty("Hub mode") == "True":
                self.output.write("\t<hub/>\n")
            self.output.write("</vs>\n\n")
            # self.pass_mask(switch)
    def switch_pass_mask(self):
        """Propagate gateway and mask through each subnet-attached switch cluster."""
        for switch in self.compile_list["Switch"]:
            # Only clusters that touch a Subnet propagate anything.
            has_subnet = False
            for edge in switch.edges():
                node = edge.getOtherDevice(switch)
                if node.device_type == "Subnet":
                    has_subnet = True
            if has_subnet:
                # The gateway is the interface of this switch's target device
                # (if any) facing the switch.
                target = switch.getTarget(None)
                gateway = target.getInterface(switch) if target is not None else None
                # BFS across connected switches, assigning the gateway and
                # pushing the mask to each (pass_mask defined elsewhere).
                Q = [switch]
                switch_seen = set([switch])
                while Q:
                    t = Q.pop(0)
                    t.gateway = gateway
                    self.pass_mask(t)
                    for edge in t.edges():
                        node = edge.getOtherDevice(t)
                        if node.device_type == "Switch":
                            # should look around for a subnet
                            if node not in switch_seen:
                                switch_seen.add(node)
                                Q.append(node)
    def autogen_UML(self):
        """
        Auto-generate properties for UMLs.
        """
        for uml in self.compile_list["UML"]:
            for interface in uml.getInterfaces():
                if options["autogen"]:
                    try:
                        subnet = str(uml.getInterfaceProperty("subnet")).rsplit(".", 1)[0]
                    # Bare except: a UML whose subnet cannot be resolved is
                    # silently skipped.
                    except:
                        continue
                    # Host octet is ID+1; MAC's last byte is the ID.
                    uml.setInterfaceProperty("ipv4", "%s.%d" % (subnet, uml.getID()+1))
                    uml.setInterfaceProperty("mac", "fe:fd:02:00:00:%02x" % uml.getID())
    def compile_UML(self):
        """
        Compile all the UMLs.
        """
        for uml in self.compile_list["UML"]:
            self.output.write("<vm name=\"" + uml.getName() + "\">\n")
            self.output.write("\t<filesystem type=\"" + uml.getProperty("filetype") + "\">"
                              + uml.getProperty("filesystem") + "</filesystem>\n")
            interfaces = uml.getInterfaces()
            if len(interfaces) < 1:
                self.generateConnectionWarning(uml, 1)
            for interface in interfaces:
                self.output.write("\t<if>\n")
                # Map GUI property names to gloader xml element names.
                mapping = {"subnet":"network", "mac":"mac", "ipv4":"ip"}
                self.writeInterface(uml, interface, mapping)
                self.output.write("\t</if>\n")
            self.output.write("</vm>\n\n")
    #********************************* REALM
    def autogen_REALM(self):
        """
        Auto-generate properties for REALMs.
        """
        for realm in self.compile_list["REALM"]:
            for interface in realm.getInterfaces():
                if options["autogen"]:
                    try:
                        subnet = str(realm.getInterfaceProperty("subnet")).rsplit(".", 1)[0]
                    # Bare except: a REALM whose subnet cannot be resolved is
                    # silently skipped (same pattern as autogen_UML).
                    except:
                        continue
                    # Offset by the UML count so REALM and UML host octets
                    # do not collide.
                    realm.setInterfaceProperty("ipv4", "%s.%d" % (subnet, realm.getID()+1+len(self.compile_list["UML"])))
                    realm.setInterfaceProperty("mac", "fe:fd:02:00:01:%02x" % realm.getID())
def compile_REALM(self):
"""
Compile all the REALMs.
"""
if len(self.compile_list["REALM"]) > 1:
self.generateREALMError()
for realm in self.compile_list["REALM"]:
self.output.write("<vrm name=\"" + realm.getName() + "\">\n")
self.output.write("\t<filesystem type=\"" + realm.getProperty("filetype") + "\">"
+ realm.getProperty("filesystem") + "</filesystem>\n")
interfaces = realm.getInterfaces()
if len(interfaces) < 1:
self.generateConnectionWarning(realm, 1)
for interface in interfaces:
self.output.write("\t<if>\n")
mapping = {"subnet":"network", "mac":"mac", "ipv4":"ip"}
self.writeInterface(realm, interface, mapping)
self.output.write("\t</if>\n")
self.output.write("</vrm>\n\n")
#******************************FINISH REALM
def autogen_mobile(self):
"""
Auto-generate properties for Mobiles.
"""
for uml in self.compile_list["Mobile"]:
for interface in uml.getInterfaces():
if options["autogen"]:
subnet = str(uml.getInterfaceProperty("subnet")).rsplit(".", 1)[0]
uml.setInterfaceProperty("ipv4", "%s.%d" % (subnet, uml.getID()+1))
uml.setInterfaceProperty("mac", "fe:fd:00:00:00:%02x" % uml.getID())
def compile_mobile(self):
"""
Compile all the Mobiles.
"""
for uml in self.compile_list["Mobile"]:
self.output.write("<vmb name=\"" + uml.getName() + "\">\n")
self.output.write("\t<filesystem type=\"" + uml.getProperty("filetype") + "\">"
+ uml.getProperty("filesystem") + "</filesystem>\n")
interfaces = uml.getInterfaces()
if len(interfaces) < 1:
self.generateConnectionWarning(uml, 1)
for interface in interfaces:
self.output.write("\t<if>\n")
mapping = {"subnet":"network", "mac":"mac", "ipv4":"ip"}
self.writeInterface(uml, interface, mapping)
self.output.write("\t</if>\n")
self.output.write("</vmb>\n\n")
def compile_OpenFlow_Controller(self):
"""
Compile all the OpenFlow controllers.
"""
for controller in self.compile_list["OpenFlow_Controller"]:
self.output.write("<vofc name=\"" + controller.getName() + "\">\n")
routerFound = False
for con in controller.edges():
node = con.getOtherDevice(controller)
if node.device_type == "Router":
self.output.write("\t<router>" + node.getName() + "</router>\n")
routerFound = True
else:
self.generateGenericWarning(controller, "has non-router connection; ignored")
if not routerFound:
self.generateGenericWarning(controller, "has no router connections")
self.output.write("</vofc>\n\n")
def pass_mask(self, node):
"""
Pass the mask between connected devices.
"""
try:
subnet = node.getProperty("subnet")
except:
self.generateError(node, "subnet", "missing")
return
try:
mask = node.getProperty("mask")
except:
self.generateError(node, "mask", "missing")
return
for con in node.edges():
otherDevice = con.getOtherDevice(node)
if otherDevice.device_type in ["Router", "UML", "REALM", "Mobile"]:
target = node
if node.device_type == "Subnet":
target = node.getTarget(otherDevice)
if target is None:
continue
otherDevice.setInterfaceProperty("subnet", subnet, target)
otherDevice.setInterfaceProperty("mask", mask, target)
else:
otherDevice.setProperty("subnet", subnet)
otherDevice.setProperty("mask", mask)
def routing_table_clear(self):
"""
Clear all route tables of interfaceable devices.
"""
for interfaceable in self.compile_list["Router"]:
interfaceable.emptyAdjacentLists()
interfaceable.emptyRouteTable()
for interfaceable in self.compile_list["UML"]:
interfaceable.emptyAdjacentLists()
interfaceable.emptyRouteTable()
for interfaceable in self.compile_list["REALM"]:
interfaceable.emptyAdjacentLists()
interfaceable.emptyRouteTable()
def routing_table_interfaceable(self, devType):
"""
Compute route tables of devices of type devType.
"""
for interfaceable in self.compile_list[devType]:
interfaceable.emptyAdjacentLists()
interfaceable.emptyRouteTable()
self.findAdjacentRouters(interfaceable)
self.findAdjacentSubnets(interfaceable)
def routing_table_uml(self):
"""
Compute route tables of UMLs.
"""
self.routing_table_interfaceable("UML")
for uml in self.compile_list["UML"]:
for subnet in self.compile_list["Subnet"]:
if not uml.hasSubnet(subnet.getProperty("subnet")):
uml.addRoutingEntry(subnet.getProperty("subnet"))
def routing_table_realm(self):
"""
Compute route tables of REALMs.
"""
self.routing_table_interfaceable("REALM")
for uml in self.compile_list["REALM"]:
for subnet in self.compile_list["Subnet"]:
if not uml.hasSubnet(subnet.getProperty("subnet")):
uml.addRoutingEntry(subnet.getProperty("subnet"))
def routing_table_mobile(self):
"""
Compute route tables of Mobiles.
"""
self.routing_table_interfaceable("Mobile")
for uml in self.compile_list["Mobile"]:
for subnet in self.compile_list["Subnet"]:
if not uml.hasSubnet(subnet.getProperty("subnet")):
uml.addRoutingEntry(subnet.getProperty("subnet"))
def routing_table_router(self):
"""
Compute route tables of Routers.
"""
self.routing_table_interfaceable("Router")
def routing_table_wireless_access_point(self):
"""
Compute route tables of Wireless_access_points.
"""
self.routing_table_interfaceable("Wireless_access_point")
def routing_table_entry(self):
"""
Add routing entries for Routers.
"""
for uml in self.compile_list["Router"]:
for subnet in self.compile_list["Subnet"]:
uml.addRoutingEntry(subnet.getProperty("subnet"))
    def findAdjacentRouters(self, device):
        """
        Find all Routers adjacent to device.

        For each of device's interfaces, locate the edge whose far end is
        the interface's target (resolving Subnet nodes to their real
        target), then walk outward from that edge collecting reachable
        routers via visitAdjacentRouters.
        """
        for interface in device.getInterfaces():
            # Find the connection matching this interface.  The loop
            # deliberately relies on `con` keeping its value after `break`.
            for con in device.edges():
                otherDevice = con.getOtherDevice(device)
                if otherDevice.device_type == "Subnet":
                    otherDevice = otherDevice.getTarget(device)
                if interface[QtCore.QString("target")] == otherDevice:
                    break
            # NOTE(review): if no edge matches, `con` is just the last edge
            # iterated (or unbound for an edge-less device) — presumably
            # every interface always has a matching connection; verify.
            visitedNodes = []
            self.visitAdjacentRouters(device, con, device, interface, visitedNodes)
    def visitAdjacentRouters(self, myself, con, device, interface, visitedNodes):
        """
        Depth-first walk that records the routers reachable from `myself`
        through `interface`.

        myself       -- the device whose adjacency list is being built
        con          -- the edge currently being traversed
        device       -- the near end of `con`
        interface    -- the interface of `myself` this walk started from
        visitedNodes -- devices already visited (cycle guard, mutated in place)
        """
        otherDevice = con.getOtherDevice(device)
        if otherDevice in visitedNodes:
            return
        visitedNodes.append(otherDevice)
        if otherDevice.device_type in ["Router", "Wireless_access_point"]:
            # Found a router: record it and stop walking this branch.
            myself.addAdjacentRouter(otherDevice, interface)
        elif otherDevice.device_type in ["UML", "Mobile", "REALM"]:
            # End systems terminate the walk without being recorded.
            pass
        else:
            # Transparent devices (switches, subnets, ...): keep walking
            # through every edge except the one we arrived on.
            for c in otherDevice.edges():
                if con != c:
                    self.visitAdjacentRouters(myself, c, otherDevice, interface, visitedNodes)
def findAdjacentSubnets(self, device):
"""
Find all Subnets adjacent to device.
"""
for con in device.edges():
otherDevice = con.getOtherDevice(device)
if otherDevice.device_type == "Subnet":
device.addAdjacentSubnet(otherDevice.getProperty("subnet"))
elif otherDevice.device_type == "Wireless_access_point":
device.addAdjacentSubnet(otherDevice.getProperty("subnet"))
def formatRoutes(self, routes, devType):
"""
Format the routes in xml.
"""
if devType == "UML" or devType == "REALM":
header = "\t\t<route type=\"net\" "
gateway = "\" gw=\""
footer = "</route>\n"
else:
header = "\t\t<rtentry "
gateway = "\" nexthop=\""
footer = "</rtentry>\n"
# Because of gloader's getVMIFOutLine, we must preserve a specific order of the routes
outstring = ""
outstring2 = ""
for route in routes:
string = ""
string += header
string += "netmask=\"" + route[QtCore.QString("netmask")]
string += gateway
string += route[QtCore.QString("gw")]
string += "\">" + route[QtCore.QString("subnet")]
string += footer
if route[QtCore.QString("gw")]:
outstring2 += string
else:
outstring += string
return outstring + outstring2
def validate(self, prop, value, interface=None):
"""
Validate a property of a device or interface.
"""
if prop == "mac":
return self.valid_mac(value)
elif prop == "ipv4":
return self.valid_ip_subnet(value, interface[QtCore.QString("subnet")], interface[QtCore.QString("mask")])
elif prop == "mask":
return self.valid_mask(value)
else:
return self.valid_ip(value)
def valid_ip(self, ip):
"""
Validate an ip-like address (includes mask).
"""
ip = str(ip)
if re.match(r'^\d+\.\d+\.\d+\.\d+$', ip) == None:
return False
p = re.compile('\d+')
res = p.findall(ip)
# Each chunk should be between 0 and 255 inc
for chunk in res:
if not int(chunk) in range(256):
return False
return True
def valid_mask(self, mask):
"""
Validate a subnet mask.
"""
mask = str(mask)
if mask == "255.255.255.0":
return True
else:
self.warnings += 1
message = "Warning: Using a mask other than 255.255.255.0 is not recommended."
self.log.append(message)
if not self.valid_ip(mask):
return False
# Make sure the chunks match the possible values
chunks = mask.split(".")
for chunk in chunks:
if not int(chunk) in (0, 128, 192, 224, 240, 248, 252, 255):
return False
# The last chunk of a mask cannot be 255
if int(chunks[-1]) == 255:
return False
return True
    def valid_ip_subnet(self, ip, subnet, mask):
        """
        Validate an ip address against its subnet address and netmask.

        Returns True iff ip is well-formed, is not a reserved host address
        (last octet 0 or 255), and agrees with subnet on every octet
        position according to the mask.
        """
        ip = str(ip)
        subnet = str(subnet)
        mask = str(mask)
        if not self.valid_ip(ip):
            return False
        if not self.valid_mask(mask):
            return False
        p=re.compile('\d+')
        ip_chunk=p.findall(ip)
        subnet_chunk=p.findall(subnet)
        mask_chunk=p.findall(mask)
        # Make sure the ip addresses are not reserved
        if ip_chunk[3] == "0" or ip_chunk[3] == "255":
            return False
        # Check each chunk against subnet and mask
        for i in range(len(subnet_chunk)):
            if mask_chunk[i] == "255":
                # Fully-masked octet: must match the subnet exactly.
                if ip_chunk[i] != subnet_chunk[i]:
                    return False
            elif mask_chunk[i] == "0":
                # NOTE(review): an unmasked octet of "0" is rejected for
                # every position, not just the host octet — presumably
                # intentional, but worth confirming.
                if ip_chunk[i] == "0":
                    return False
            else:
                # Partially-masked octet: the host value must fit in the
                # range the mask leaves free.
                mask_value = int(mask_chunk[i])
                ip_value = int(ip_chunk[i])
                # (subnet_value is computed but not used below.)
                subnet_value = int(subnet_chunk[i])
                if i == 3:
                    # Last octet: excludes the all-zeros / broadcast ends.
                    ip_range = 254 - mask_value
                    if not ip_value - 1 in range(ip_range):
                        return False
                else:
                    ip_range = 256 - mask_value
                    if not ip_value in range(ip_range):
                        return False
        return True
def valid_mac(self, mac):
"""
Validate a mac address.
"""
mac = str(mac)
if re.match(r'^[a-f|0-9]{2}:[a-f|0-9]{2}:[a-f|0-9]{2}:[a-f|0-9]{2}:[a-f|0-9]{2}:[a-f|0-9]{2}$', mac) == None:
return False
else:
return True
| |
#!/usr/bin/env python
# This script implements the registration pipeline described in the paper:
#
# Quantification and Analysis of Large Multimodal Clinical Image Studies:
# Application to Stroke, by Sridharan, Dalca et al.
#
# For questions, please contact {rameshvs,adalca}@csail.mit.edu.
import pipebuild as pb
import os
import sys
import subprocess
import datetime
import time
cwd = os.path.dirname(os.path.abspath(__file__))
ATLAS_MODALITY = 't1'
#features_by_modality = {'dwi': ['img', 'roi'], 'flair': ['img', 'wmh_L', 'wmh_R']}
features_by_modality = {'dwi': ['img'], 'flair': ['img']}
CLOBBER_EXISTING_OUTPUTS = False
DATA_ROOT = os.path.join(pb.NFS_ROOT, 'projects/stroke')
ATLAS_BASE = os.path.join(DATA_ROOT, 'work/input/atlases/')
ATLAS_NAME = 'buckner61'
if __name__ == '__main__':
    ########################
    ### Argument parsing ###
    ########################
    USAGE = '%s <subj> <smoothness regularization> <field regularization> <out folder>' % sys.argv[0]
    # Bug fix: this was ``len(sys.argv != 5)`` — comparing the argv list to
    # the integer 5 always yields True, and len(True) raises TypeError, so
    # the script crashed on every launch before doing any work.
    if len(sys.argv) != 5:
        print(USAGE)
        sys.exit(1)
    subj = sys.argv[1]
    # Regularization parameters for ANTS
    regularization = float(sys.argv[2])
    regularization2 = float(sys.argv[3])
    # where the data lives
    data_subfolder = sys.argv[4]
    #############################
    ### Set up atlas and data ###
    #############################
    BASE = os.path.join(DATA_ROOT, 'processed_datasets', data_subfolder)
    ## Atlas
    atlas = pb.Atlas(ATLAS_NAME, ATLAS_BASE)
    stroke_atlas_files = {
        'seg': '_seg.nii.gz',                          # full segmentation
        'wm_region': '_seg_wm_region.nii.gz',          # periventricular area for WMH
        'wmh_prior': '_wmh_prior_100.nii.gz',          # prior from averaging subjs
        'wmh_L': '_wmh_L_average.nii.gz',              # prior in left hemisphere
        'wmh_R': '_wmh_R_average.nii.gz',              # prior in right hemisphere
        'mask': '_fixed_mask_from_seg_binary.nii.gz'}  # brain mask
    # dict.items() works on both Python 2 and 3 (was the Py2-only .iteritems()).
    for (name, file_suffix) in stroke_atlas_files.items():
        atlas.add_file(name, file_suffix)
    ## Subject data
    dataset = pb.Dataset(
        BASE,
        atlas,
        # How are the inputs to the pipeline stored?
        os.path.join(BASE, '{subj}/original/{modality}_1/{subj}_{modality}_{feature}.nii.gz'),
        # How should intermediate files be stored?
        os.path.join(BASE, '{subj}/images/{subj}_{modality}_{feature}{modifiers}.nii.gz'))
    #############################
    ### Registration pipeline ###
    #############################
    t1_modifiers = ''  # keeps track of what's been done so far for filenames
    ###### Fix problems with T1 images caused by use of Analyze format
    original_t1 = dataset.get_file(subj, 't1', 'img', t1_modifiers + '_prep')
    pb.InputOutputShellCommand(
        "Remove header from T1",
        cmdName=os.path.join(cwd, 'strip_header.py'),
        input=dataset.get_original_file(subj, 't1', 'raw'),
        output=original_t1,
        extra_args='t1')
    t1_modifiers += '_prep'
    ###### N4 bias correction of T1.
    pb.N4Command("N4 bias field correction for T1",
                 input=dataset.get_file(subj, 't1', 'img', t1_modifiers),
                 output=dataset.get_file(subj, 't1', 'img', t1_modifiers + '_bcorr'))
    t1_modifiers += '_bcorr'
    ###### Initial rigid registration
    initial_affine_reg = pb.ANTSCommand(
        "Initial affine registration step: atlas->subj T1",
        moving=atlas.get_file('img'),
        fixed=dataset.get_file(subj, 't1', 'img', t1_modifiers),
        metric='MI',
        method='rigid')
    mask_warp = pb.ANTSWarpCommand.make_from_registration(
        "Warp atlas mask into subj space using initial"
        "affine subj T1->atlas registration",
        atlas.get_file('mask'),
        dataset.get_file(subj, 't1', 'img', t1_modifiers),
        [initial_affine_reg],
        ['forward'])
    ###### Intensity matching (can't do histogram equalization)
    # TODO use more robust mean-shift based mode-matching
    pb.PyMatchWMCommand("Match white matter intensity values",
                        inFile=dataset.get_file(subj, 't1', 'img', t1_modifiers),
                        maskFile=mask_warp.outfiles[0],
                        intensity='138',
                        output=dataset.get_file(subj, 't1', 'img', t1_modifiers + '_matchwm'))
    # pb.PyMatchWMCommand("Match white matter intensity values",
    #         alignedInFile=init_warp_args['output'],
    #         maskFile=atlas.get_file('mask'),
    #         inFile=dataset.get_file(subj, 't1', 'img', t1_modifiers),
    #         wmiSrc=atlas.get_file('img'),
    #         output=dataset.get_file(subj, 't1', 'img', t1_modifiers + '_matchwm'))
    t1_modifiers += '_matchwm'
    subject_img = dataset.get_file(subj, 't1', 'img', t1_modifiers)
    ###### Final atlas -> subject registration
    forward_reg_full = pb.ANTSCommand("Forward atlas->subj T1 registration with "
                                      "rough mask & CC. initialize affine w/o doing any more affine steps.",
                                      moving=atlas.get_file('img'),
                                      fixed=subject_img,
                                      metric='CC',
                                      radiusBins=4,
                                      regularization='Gauss[%0.3f,%0.3f]' % (regularization, regularization2),
                                      method='201x201x201',
                                      init=initial_affine_reg.affine)
    ### Warp quantities of interest for visualization and future analysis
    for atlas_feature in ['img', 'mask', 'seg', 'wm_region', 'wmh_L', 'wmh_R']:
        warp_atlas_to_t1 = pb.ANTSWarpCommand.make_from_registration(
            "Warp atlas %s into subject space using full forward"
            "atlas -> t1 registration" % atlas_feature,
            atlas.get_file(atlas_feature),
            subject_img,
            [forward_reg_full],
            ['forward'],
            useNN=(atlas_feature in ['mask', 'seg']))
        if atlas_feature == 'mask':
            atlas_mask_in_t1 = warp_atlas_to_t1.outfiles[0]
    pb.ANTSWarpCommand.make_from_registration(
        "Warp subject into atlas space using"
        "full forward atlas->subj T1 registration",
        subject_img,
        atlas.get_file('img'),
        [forward_reg_full],
        ['inverse'],
        useNN=False)
    ###### Rigid ANTS registration DWI/FLAIR --> T1
    multimodal_t1_registrations = {}  # dict containing the ANTSCommand objects by modality
    for modality in ['flair', 'dwi']:
        if not os.path.exists(dataset.get_original_file(subj, modality, 'img')):
            # Quit if the subject is missing data. TODO support partial script execution
            continue
        for feature in features_by_modality[modality]:
            if feature != 'wmh_LR':
                modifiers = ''
                pb.PyPadCommand("Pad %s %s" % (modality, feature),
                                input=dataset.get_original_file(subj, modality, feature),
                                output=dataset.get_file(subj, modality, feature, modifiers+'_pad'),
                                out_mask=dataset.get_file(subj, modality, feature, modifiers+'_padmask_seg'))
                modifiers = '_pad'
        reg_to_t1_init = pb.ANTSCommand("Rigid intrasubject/multimodal "
                                        "registration of %s to T1: initialize w/o mask" % modality,
                                        moving=dataset.get_file(subj, modality, 'img', modifiers),
                                        fixed=dataset.get_file(subj, 't1', 'img', t1_modifiers),
                                        metric='MI',
                                        method='rigid')
        multimodal_t1_registrations[modality] = pb.ANTSCommand("Rigid intrasubject/multimodal "
                                        "registration of %s to T1: continue with mask" % modality,
                                        moving=dataset.get_file(subj, modality, 'img', modifiers),
                                        fixed=dataset.get_file(subj, 't1', 'img', t1_modifiers),
                                        metric='MI',
                                        method='rigid',
                                        mask=atlas_mask_in_t1,
                                        cont=reg_to_t1_init.affine)
        ### Warp subject stuff into common space for spatial analysis
        for feature in features_by_modality[modality]:
            warp_to_t1 = pb.ANTSWarpCommand.make_from_registration(
                "Warp {} {} into t1 space".format(modality, feature),
                dataset.get_file(subj, modality, feature, modifiers),
                dataset.get_file(subj, 't1', 'img', t1_modifiers),
                [multimodal_t1_registrations[modality]],
                ['forward'])
            warp_to_atlas = pb.ANTSWarpCommand.make_from_registration(
                "Warp {} {} into atlas space".format(modality, feature),
                dataset.get_file(subj, modality, feature, modifiers),
                atlas.get_file('img'),
                [multimodal_t1_registrations[modality], forward_reg_full],
                ['forward', 'inverse'])
        ### Warp atlas stuff into subject space for help with segmentation
        for atlas_feature in ['img', 'mask', 'seg', 'wm_region', 'wmh_prior', 'wmh_L', 'wmh_R']:
            warp_from_atlas = pb.ANTSWarpCommand.make_from_registration(
                "Warp atlas {} into subject {} space".format(atlas_feature, modality),
                moving=atlas.get_file(atlas_feature),
                reference=dataset.get_file(subj, modality, 'img', modifiers),
                reg_sequence=[forward_reg_full, multimodal_t1_registrations[modality]],
                inversion_sequence=['forward', 'inverse'])
    #############################
    # warping between dwi and flair using the indirect through-t1 registrations
    if 'dwi' in multimodal_t1_registrations and 'flair' in multimodal_t1_registrations:
        for feature in features_by_modality['flair']:
            pb.ANTSWarpCommand.make_from_registration(
                "Warp FLAIR {} into DWI using through-T1 reg".format(feature),
                moving=dataset.get_file(subj, 'flair', feature, modifiers),
                reference=dataset.get_file(subj, 'dwi', 'img', modifiers),
                reg_sequence=[multimodal_t1_registrations['flair'], multimodal_t1_registrations['dwi']],
                inversion_sequence=['forward', 'inverse'],
                useNN=(feature != 'img'))  # nearest neighbor for all non-image features
        for feature in features_by_modality['dwi']:
            pb.ANTSWarpCommand.make_from_registration(
                "Warp DWI {} into FLAIR using through-T1 reg".format(feature),
                moving=dataset.get_file(subj, 'dwi', feature, modifiers),
                reference=dataset.get_file(subj, 'flair', 'img', modifiers),
                reg_sequence=[multimodal_t1_registrations['dwi'], multimodal_t1_registrations['flair']],
                inversion_sequence=['forward', 'inverse'],
                useNN=(feature != 'img'))  # nearest neighbor for all non-image features
    # warp segmentations back into atlas space
    if 'flair' in multimodal_t1_registrations:
        warp_wmh_back = pb.ANTSWarpCommand.make_from_registration(
            "Warp WMH back into atlas space",
            dataset.get_file(subj, 'flair', 'wmh', '_CALL_pad-MATLAB_WM_corr'),
            atlas.get_file('img'),
            [multimodal_t1_registrations['flair'], forward_reg_full],
            ['forward', 'inverse'],
            useNN=True)
    # Make sure the output/log folders exist; they may already.
    for path in [os.path.join(BASE, subj, 'images'),
                 os.path.join(BASE, subj, 'images', 'reg'),
                 dataset.get_log_folder(subj)]:
        try:
            os.mkdir(path)
        except OSError:
            # Folder already exists (or is not creatable) — best effort.
            pass
    ### Generate script file and SGE qsub file
    time.sleep(1)  # sleep so that timestamps don't clash, SGE isn't overloaded
    timestamp = datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    out_script = os.path.join(dataset.get_log_folder(subj), 'pipeline.%s.sh' % timestamp)
    pb.Command.generate_code(out_script, clobber_existing_outputs=CLOBBER_EXISTING_OUTPUTS)
    ## Prep for SGE
    out_qsub = out_script + '.qsub'
    os.environ['SGE_LOG_PATH'] = dataset.get_log_folder(subj)
    with open(out_qsub, 'w') as out_qsub_file:
        subprocess.call([pb.QSUB_RUN, '-c', out_script], stdout=out_qsub_file)
    print(out_qsub)
    subprocess.call([pb.QSUB, out_qsub])
| |
# -*- coding: utf-8 -*-
'''
Installation of Bower Packages
==============================
These states manage the installed packages using Bower.
Note that npm, git and bower must be installed for these states to be
available, so bower states should include requisites to pkg.installed states
for the packages which provide npm and git (simply ``npm`` and ``git`` in most
cases), and npm.installed state for the package which provides bower.
Example:
.. code-block:: yaml
npm:
pkg.installed
git:
pkg.installed
bower:
npm.installed
require:
- pkg: npm
- pkg: git
underscore:
bower.installed:
- dir: /path/to/project
- require:
- npm: bower
'''
from __future__ import absolute_import
# Import salt libs
from salt.exceptions import CommandExecutionError, CommandNotFoundError
# Import 3rd-party libs
import salt.ext.six as six
def __virtual__():
    '''
    Expose this state module as 'bower' only when the bower execution
    module was successfully loaded into __salt__.
    '''
    if 'bower.list' in __salt__:
        return 'bower'
    return False
def installed(name,
              dir,
              pkgs=None,
              user=None,
              env=None):
    '''
    Verify that the given package is installed and is at the correct version
    (if specified).

    .. code-block:: yaml

        underscore:
          bower.installed:
            - dir: /path/to/project
            - user: someuser

        jquery#2.0:
          bower.installed:
            - dir: /path/to/project

    name
        The package to install
    dir
        The target directory in which to install the package
    pkgs
        A list of packages to install with a single Bower invocation;
        specifying this argument will ignore the ``name`` argument
    user
        The user to run Bower with
    env
        A list of environment variables to be set prior to execution. The
        format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
        state function.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    # Single-package form: treat ``name`` as a one-element package list.
    if pkgs is not None:
        pkg_list = pkgs
    else:
        pkg_list = [name]
    try:
        installed_pkgs = __salt__['bower.list'](dir=dir, runas=user, env=env)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        return ret
    else:
        # NOTE(review): this rebuild copies the dict without transforming
        # anything — it looks like a leftover from a filtering step.
        installed_pkgs = dict((p, info) for p, info in
                              six.iteritems(installed_pkgs))
    # Partition the requested packages into already-satisfied vs missing.
    pkgs_satisfied = []
    pkgs_to_install = []
    for pkg in pkg_list:
        # Split 'name#version' requests into their two parts.
        pkg_name, _, pkg_ver = pkg.partition('#')
        pkg_name = pkg_name.strip()
        if pkg_name not in installed_pkgs:
            pkgs_to_install.append(pkg)
            continue
        if pkg_name in installed_pkgs:
            installed_pkg = installed_pkgs[pkg_name]
            installed_pkg_ver = installed_pkg.get('pkgMeta').get('version')
            installed_name_ver = '{0}#{1}'.format(
                pkg_name,
                installed_pkg_ver)
            # If given an explicit version check the installed version matches.
            if pkg_ver:
                if installed_pkg_ver != pkg_ver:
                    pkgs_to_install.append(pkg)
                else:
                    pkgs_satisfied.append(installed_name_ver)
                    continue
            else:
                pkgs_satisfied.append(installed_name_ver)
                continue
    # Dry-run mode: report what would change without running bower.
    if __opts__['test']:
        ret['result'] = None
        comment_msg = []
        if pkgs_to_install:
            comment_msg.append(
                'Bower package(s) {0!r} are set to be installed'.format(
                    ', '.join(pkgs_to_install)))
            ret['changes'] = {'old': [], 'new': pkgs_to_install}
        if pkgs_satisfied:
            comment_msg.append(
                'Package(s) {0!r} satisfied by {1}'.format(
                    ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        ret['comment'] = '. '.join(comment_msg)
        return ret
    # Nothing to do: every requested package is already satisfied.
    if not pkgs_to_install:
        ret['result'] = True
        ret['comment'] = ('Package(s) {0!r} satisfied by {1}'.format(
            ', '.join(pkg_list), ', '.join(pkgs_satisfied)))
        return ret
    try:
        cmd_args = {
            'pkg': None,
            'dir': dir,
            'pkgs': None,
            'runas': user,
            'env': env,
        }
        if pkgs is not None:
            cmd_args['pkgs'] = pkgs
        else:
            # NOTE(review): pkg_name is the loop variable from above, i.e.
            # ``name`` stripped of any '#version' suffix — confirm that
            # bower.install still resolves the requested version here.
            cmd_args['pkg'] = pkg_name
        call = __salt__['bower.install'](**cmd_args)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error installing {0!r}: {1}'.format(
            ', '.join(pkg_list), err)
        return ret
    if call:
        ret['result'] = True
        ret['changes'] = {'old': [], 'new': pkgs_to_install}
        ret['comment'] = 'Package(s) {0!r} successfully installed'.format(
            ', '.join(pkgs_to_install))
    else:
        ret['result'] = False
        ret['comment'] = 'Could not install package(s) {0!r}'.format(
            ', '.join(pkg_list))
    return ret
def removed(name, dir, user=None):
    '''
    Ensure that the given Bower package is absent from the project.

    name
        The package to uninstall
    dir
        The target directory of the Bower project
    user
        The user to run Bower with
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    try:
        installed_pkgs = __salt__['bower.list'](dir=dir, runas=user)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error removing {0!r}: {1}'.format(name, err)
        return ret

    # Already absent: nothing to do.
    if name not in installed_pkgs:
        ret['result'] = True
        ret['comment'] = 'Package {0!r} is not installed'.format(name)
        return ret

    # Dry-run mode: only report the pending removal.
    if __opts__['test']:
        ret['comment'] = 'Package {0!r} is set to be removed!'.format(name)
        return ret

    try:
        if __salt__['bower.uninstall'](pkg=name, dir=dir, runas=user):
            ret['result'] = True
            ret['changes'] = {name: 'Removed'}
            ret['comment'] = 'Package {0!r} was successfully removed'.format(
                name)
        else:
            ret['result'] = False
            ret['comment'] = 'Error removing {0!r}'.format(name)
    except (CommandExecutionError, CommandNotFoundError) as err:
        ret['result'] = False
        ret['comment'] = 'Error removing {0!r}: {1}'.format(name, err)
    return ret
def bootstrap(name, user=None):
    '''
    Bootstrap a frontend distribution by running 'bower install' in the
    directory given as the state name.

    user
        The user to run Bower with
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Dry-run mode: only report the pending bootstrap.
    if __opts__['test']:
        ret['comment'] = 'Directory {0!r} is set to be bootstrapped'.format(
            name)
        return ret

    try:
        call = __salt__['bower.install'](pkg=None, dir=name, runas=user)
    except (CommandNotFoundError, CommandExecutionError) as err:
        ret['result'] = False
        ret['comment'] = 'Error bootstrapping {0!r}: {1}'.format(name, err)
        return ret

    # A falsy call result means bower had nothing to install.
    ret['result'] = True
    if call:
        ret['changes'] = {name: 'Bootstrapped'}
        ret['comment'] = 'Directory was successfully bootstrapped'
    else:
        ret['comment'] = 'Directory is already bootstrapped'
    return ret
| |
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from language.python import ast
from .. import intrinsics
from util.monkeypatch import xcollections
from util.python.calling import CallerArgs
from analysis import cpa, lifetimeanalysis
from application.program import Program
class ReadCollector(TypeDispatcher):
	"""Collects every heap slot a code object reads.

	Walks the AST via TypeDispatcher; the union of the 'reads'
	annotations of all heap-touching operations accumulates in
	self.reads.
	"""
	def __init__(self):
		self.reads = set()
	@dispatch(ast.leafTypes, ast.CodeParameters, ast.Local, ast.Existing, ast.Return)
	def visitLeaf(self, node):
		# Leaf nodes perform no heap reads.
		pass
	@dispatch(ast.Load, ast.Store, ast.Allocate, ast.Call, ast.DirectCall)
	def visitOP(self, node):
		# Heap operations: record the slots their annotation says they read.
		self.reads.update(node.annotation.reads.merged)
	@dispatch(ast.Assign, ast.Discard)
	def visitAssign(self, node):
		# Only the expression side of an assignment can touch the heap.
		self(node.expr)
	@dispatch(ast.Suite, ast.Switch, ast.Condition, ast.TypeSwitch, ast.TypeSwitchCase, ast.While)
	def visitOK(self, node):
		node.visitChildren(self)
	def process(self, code):
		# Force traversal even if the code object would normally be skipped.
		code.visitChildrenForced(self)
class ObjectInfo(object):
	"""Aggregated information about one clonable object, keyed by uid.

	The uid is a (path, pythonType, example) triple.  `objects` collects
	the concrete analysis objects merged into this node, `field` maps
	slot names to the child ObjectInfos reachable through them, and
	`result` later holds the resynthesized xtype.
	"""
	def __init__(self, uid):
		self.uid = uid
		self.objects = []
		self.field = xcollections.defaultdict(set)
		self.result = None

	@property
	def path(self):
		path, _, _ = self.uid
		return path

	@property
	def type(self):
		_, pt, _ = self.uid
		return pt

	@property
	def example(self):
		_, _, example = self.uid
		return example
class TreeAnalysis(object):
	"""Builds a tree of ObjectInfo nodes describing the objects reachable
	from a code object's parameters, applying the object-cloning policy.

	Numeric existing objects are merged by type, other concrete existing
	objects stay unique, and heap objects are keyed by their access path.
	"""
	def __init__(self, compiler, reads):
		self.compiler = compiler
		# Heap slots actually read (as gathered by ReadCollector).
		self.reads = reads
		# Lazily creates one ObjectInfo per uid on first access.
		self.objectInfo = xcollections.lazydict(lambda obj: ObjectInfo(obj))
		# ObjectInfos that define their own subtree (path length <= 1).
		self.root = set()
		# Objects on the current traversal stack, used to detect cycles.
		self.current = set()
		# When True, intrinsic slots are always followed, even if unread.
		self.intrinsicFields = True
	def handleFields(self, obj, objectInfo):
		# Recurse into each slot that is either an intrinsic field (when
		# intrinsicFields is enabled) or a non-intrinsic field that is read.
		for slot in obj.slots.itervalues():
			intrinsic = intrinsics.isIntrinsicSlot(slot)
			slotRead = slot in self.reads
			if self.intrinsicFields and intrinsic or slotRead and not intrinsic:
				path = objectInfo.path
				extpath = path + (slot.slotName,)
				objs = self.handleSlot(extpath, slot)
				objectInfo.field[slot.slotName].update(objs)
	def ensureLoaded(self, example):
		# HACK sometimes constant folding neglects this.
		if not hasattr(example, 'type'):
			self.compiler.extractor.ensureLoaded(example)
		t = example.type
		if not hasattr(t, 'typeinfo'):
			self.compiler.extractor.ensureLoaded(t)
	def getAbstractInstance(self, example):
		# The canonical abstract instance standing in for all values of
		# example's type.
		self.ensureLoaded(example)
		return example.type.typeinfo.abstractInstance
	# The policy for object cloning
	def objectUID(self, obj, path):
		"""Return (uid, unique) for obj seen at the given access path.

		uid is a (path, pythonType, example) triple; unique is False only
		for merged numeric objects.
		"""
		# Existing objects should not be cloned.
		xtype = obj.xtype
		pt = xtype.obj.pythonType()
		unique = True
		example = xtype.obj
		if pt in (float, int):
			# Merge all numeric types
			path = (pt,)
			unique = False
			# Get the abstract instance of this type
			example = self.getAbstractInstance(example)
		elif xtype.obj.isConcrete():
			# Keep non-numeric existing types unique
			path = (obj,)
		else:
			pass
		uid = path, pt, example
		return uid, unique
	def handleObject(self, obj, path):
		"""Merge obj into the ObjectInfo for its uid, recursing into its
		fields the first time this obj/uid combination is seen."""
		uid, unique = self.objectUID(obj, path)
		objectInfo = self.objectInfo[uid]
		# Keep track of all objects that define their own subtree
		if len(objectInfo.path) <= 1: self.root.add(objectInfo)
		# Have we already considered this obj/path combination?
		if obj not in objectInfo.objects:
			objectInfo.objects.append(obj)
			# Detect recursive cycles
			assert obj not in self.current, obj
			self.current.add(obj)
			self.handleFields(obj, objectInfo)
			self.current.remove(obj)
		return objectInfo
	def handleSlot(self, path, refs):
		# Map every object referenced by the slot to its ObjectInfo.
		objs = []
		for obj in refs:
			objInfo = self.handleObject(obj, path)
			objs.append(objInfo)
		return objs
	def handleLocal(self, lcl, path):
		refs = lcl.annotation.references.merged
		return self.handleSlot(path, refs)
	def handleParam(self, param, pathname=None):
		# None / do-not-care parameters contribute nothing.
		if param is None: return None
		if param.isDoNotCare(): return None # TODO is this correct?
		if pathname is None: pathname = param
		return self.handleLocal(param, (pathname,))
	def process(self, code):
		"""Analyze one code object; returns CallerArgs pairing each
		parameter with the ObjectInfos it may reference."""
		codeParams = code.codeParameters()
		selfparam = (codeParams.selfparam, self.handleParam(codeParams.selfparam))
		params = []
		# Give the self parameter a special name, so we can
		# easily merge it between shaders
		uniformParam = codeParams.params[0]
		params.append((uniformParam, self.handleParam(uniformParam, 'uniform')))
		for param in codeParams.params[1:]:
			params.append((param, self.handleParam(param)))
		vparam = (codeParams.vparam, self.handleParam(codeParams.vparam))
		kparam = (codeParams.kparam, self.handleParam(codeParams.kparam))
		return CallerArgs(selfparam, params, [], vparam, kparam, None)
	def dumpObjectInfo(self, objectInfo):
		# Debug dump (Python 2 print statements).
		# NOTE(review): objectInfo.prev/next are not set by ObjectInfo
		# itself — presumably attached elsewhere; confirm before relying
		# on this method.
		print objectInfo.uid
		for obj in objectInfo.objects:
			print '\t', obj
		print len(objectInfo.prev), len(objectInfo.next)
		print
	def dump(self):
		for objInfo in self.objectInfo.itervalues():
			self.dumpObjectInfo(objInfo)
		print
		print
from analysis.storegraph import storegraph
from analysis.storegraph import canonicalobjects
from util.graphalgorithim import exclusiongraph
class TreeResynthesis(object):
	"""Rebuilds a fresh store graph and Program from a TreeAnalysis result.

	Each ObjectInfo is translated into a canonical xtype materialized in
	the new store graph; parameter roots are registered so an exclusion
	graph can be derived afterwards.
	"""
	def __init__(self, compiler, analysis):
		self.compiler = compiler
		self.analysis = analysis
		self.canonical = canonicalobjects.CanonicalObjects()
		self.storeGraph = storegraph.StoreGraph(self.compiler.extractor, self.canonical)
		self.shaderprgm = Program()
		self.shaderprgm.storeGraph = self.storeGraph
		self.shaderprgm.entryPoints = []
		# Root slots collected for the exclusion graph.
		self.roots = []
		# Memoizes ObjectInfo -> xtype translations.
		self.cache = {}
	def processObject(self, obj):
		"""Translate one ObjectInfo (and, recursively, its fields) into an
		xtype registered in the new store graph."""
		if obj is None: return None
		if obj not in self.cache:
			example = obj.example
			if obj.example.isAbstract():
				xtype = self.canonical.externalType(example)
			else:
				# A concrete example must belong to exactly one node.
				assert self.count[example] == 1
				xtype = self.canonical.existingType(example)
			# NOTE(review): given the assert above, this branch can only
			# trigger for abstract examples — confirm that is intended.
			if self.count[example] > 1:
				xtype = self.canonical.indexedType(xtype)
			# Cache before recursing so cyclic references resolve.
			self.cache[obj] = xtype
			assert xtype.obj.pythonType() is not list, "lists create non-uniqueness, currently unsupported."
			graphobj = self.storeGraph.regionHint.object(xtype)
			graphobj.rewriteAnnotation(preexisting=True, unique=True, final=True)
			for fieldName, values in obj.field.iteritems():
				graphfield = graphobj.field(fieldName, self.storeGraph.regionHint)
				for child in values:
					childxtype = self.processObject(child)
					graphfield.initializeType(childxtype)
				graphfield.rewriteAnnotation(unique=True)
			obj.result = xtype
			result = xtype
		else:
			result = self.cache[obj]
		return result
	def countInstances(self):
		# Count how many ObjectInfos share each example object.
		count = {}
		for objInfo in self.analysis.objectInfo.itervalues():
			count[objInfo.example] = count.get(objInfo.example, 0)+1
		return count
	def translateObjs(self, objs):
		if objs is None:
			return None
		else:
			return [self.processObject(obj) for obj in objs]
	def translateParam(self, tup):
		# Register a root slot for the parameter and seed its types.
		param, objs = tup
		xtypes = self.translateObjs(objs)
		if xtypes is not None:
			slotName = self.storeGraph.canonical.localName(self.code, param, None)
			slot = self.storeGraph.root(slotName, self.storeGraph.regionHint)
			slot.initializeTypes(xtypes)
			self.roots.append(slot)
		return xtypes
	def process(self, code, args):
		"""Translate the analysis result for one code object and register
		it as an entry point of the new program."""
		self.count = self.countInstances()
		self.code = code
		argobjs = args.map(self.translateParam)
		if False:
			# Debug dump (disabled; Python 2 print statements).
			print "="*60
			print argobjs.selfarg
			for arg in argobjs.args:
				print '\t', arg
			print argobjs.vargs
			print argobjs.kargs
			print
		# Create an entry point
		# The arguments for this entry points are bogus.
		ep = self.shaderprgm.interface.createEntryPoint(code, None, None, None, None, None, None)
		self.shaderprgm.entryPoints.append((ep, argobjs))
	def buildExGraph(self):
		# Exclusion graph over the collected root slots.
		return exclusiongraph.build(self.roots, lambda node: iter(node), lambda node: node.isSlot())
def process(compiler, *codeASTs):
 """Run the full analyze / resynthesize / reanalyze pipeline.

 Returns (program, newcode, exclusion graph, object info).
 """
 with compiler.console.scope('analysis'):
  # Collect all reads, then analyze each code object.
  rc = ReadCollector()
  for code in codeASTs:
   rc.process(code)
  analysis = TreeAnalysis(compiler, rc.reads)
  argsList = [(code, analysis.process(code)) for code in codeASTs]
 with compiler.console.scope('resynthesis'):
  # Rebuild a store graph / program from the analysis results.
  resynthesis = TreeResynthesis(compiler, analysis)
  for code, args in argsList:
   resynthesis.process(code, args)
  exgraph = resynthesis.buildExGraph()
  prgm = resynthesis.shaderprgm
 with compiler.console.scope('reanalysis'):
  cpa.evaluateWithImage(compiler, prgm, 3, firstPass=False, clone=True)
  lifetimeanalysis.evaluate(compiler, prgm)
  # The reanalysis will clone the code and create a new copy
  newcode = [ep.code for ep in prgm.interface.entryPoint]
 return resynthesis.shaderprgm, newcode, exgraph, analysis.objectInfo
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.texttospeech_v1beta1.types import cloud_tts
from .base import TextToSpeechTransport, DEFAULT_CLIENT_INFO
from .grpc import TextToSpeechGrpcTransport
class TextToSpeechGrpcAsyncIOTransport(TextToSpeechTransport):
 """gRPC AsyncIO backend transport for TextToSpeech.
 Service that implements Google Cloud Text-to-Speech API.
 This class defines the same methods as the primary client, so the
 primary client can load the underlying transport implementation
 and call it.
 It sends protocol buffers over the wire using gRPC (which is built on
 top of HTTP/2); the ``grpcio`` package must be installed.
 """
 # Class-level declarations; both are replaced per-instance in __init__.
 _grpc_channel: aio.Channel
 _stubs: Dict[str, Callable] = {}
 @classmethod
 def create_channel(
  cls,
  host: str = "texttospeech.googleapis.com",
  credentials: Optional[ga_credentials.Credentials] = None,
  credentials_file: Optional[str] = None,
  scopes: Optional[Sequence[str]] = None,
  quota_project_id: Optional[str] = None,
  **kwargs,
 ) -> aio.Channel:
  """Create and return a gRPC AsyncIO channel object.
  Args:
   host (Optional[str]): The host for the channel to use.
   credentials (Optional[~.Credentials]): The
    authorization credentials to attach to requests. These
    credentials identify this application to the service. If
    none are specified, the client will attempt to ascertain
    the credentials from the environment.
   credentials_file (Optional[str]): A file with credentials that can
    be loaded with :func:`google.auth.load_credentials_from_file`.
    This argument is ignored if ``channel`` is provided.
   scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
    service. These are only used when credentials are not specified and
    are passed to :func:`google.auth.default`.
   quota_project_id (Optional[str]): An optional project to use for billing
    and quota.
   kwargs (Optional[dict]): Keyword arguments, which are passed to the
    channel creation.
  Returns:
   aio.Channel: A gRPC AsyncIO channel object.
  """
  return grpc_helpers_async.create_channel(
   host,
   credentials=credentials,
   credentials_file=credentials_file,
   quota_project_id=quota_project_id,
   default_scopes=cls.AUTH_SCOPES,
   scopes=scopes,
   default_host=cls.DEFAULT_HOST,
   **kwargs,
  )
 def __init__(
  self,
  *,
  host: str = "texttospeech.googleapis.com",
  credentials: Optional[ga_credentials.Credentials] = None,
  credentials_file: Optional[str] = None,
  scopes: Optional[Sequence[str]] = None,
  channel: Optional[aio.Channel] = None,
  api_mtls_endpoint: Optional[str] = None,
  client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
  ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
  client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
  quota_project_id: Optional[str] = None,
  client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
  always_use_jwt_access: Optional[bool] = False,
 ) -> None:
  """Instantiate the transport.
  Args:
   host (Optional[str]):
    The hostname to connect to.
   credentials (Optional[google.auth.credentials.Credentials]): The
    authorization credentials to attach to requests. These
    credentials identify the application to the service; if none
    are specified, the client will attempt to ascertain the
    credentials from the environment.
    This argument is ignored if ``channel`` is provided.
   credentials_file (Optional[str]): A file with credentials that can
    be loaded with :func:`google.auth.load_credentials_from_file`.
    This argument is ignored if ``channel`` is provided.
   scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
    service. These are only used when credentials are not specified and
    are passed to :func:`google.auth.default`.
   channel (Optional[aio.Channel]): A ``Channel`` instance through
    which to make calls.
   api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
    If provided, it overrides the ``host`` argument and tries to create
    a mutual TLS channel with client SSL credentials from
    ``client_cert_source`` or application default SSL credentials.
   client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
    Deprecated. A callback to provide client SSL certificate bytes and
    private key bytes, both in PEM format. It is ignored if
    ``api_mtls_endpoint`` is None.
   ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
    for the grpc channel. It is ignored if ``channel`` is provided.
   client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
    A callback to provide client certificate bytes and private key bytes,
    both in PEM format. It is used to configure a mutual TLS channel. It is
    ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
   quota_project_id (Optional[str]): An optional project to use for billing
    and quota.
   client_info (google.api_core.gapic_v1.client_info.ClientInfo):
    The client info used to send a user-agent string along with
    API requests. If ``None``, then default info will be used.
    Generally, you only need to set this if you're developing
    your own client library.
   always_use_jwt_access (Optional[bool]): Whether self signed JWT should
    be used for service account credentials.
  Raises:
   google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
    creation failed for any reason.
   google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
    and ``credentials_file`` are passed.
  """
  self._grpc_channel = None
  self._ssl_channel_credentials = ssl_channel_credentials
  self._stubs: Dict[str, Callable] = {}
  if api_mtls_endpoint:
   warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
  if client_cert_source:
   warnings.warn("client_cert_source is deprecated", DeprecationWarning)
  if channel:
   # Ignore credentials if a channel was passed.
   credentials = False
   # If a channel was explicitly provided, set it.
   self._grpc_channel = channel
   self._ssl_channel_credentials = None
  else:
   if api_mtls_endpoint:
    host = api_mtls_endpoint
    # Create SSL credentials with client_cert_source or application
    # default SSL credentials.
    if client_cert_source:
     cert, key = client_cert_source()
     self._ssl_channel_credentials = grpc.ssl_channel_credentials(
      certificate_chain=cert, private_key=key
     )
    else:
     self._ssl_channel_credentials = SslCredentials().ssl_credentials
   else:
    if client_cert_source_for_mtls and not ssl_channel_credentials:
     cert, key = client_cert_source_for_mtls()
     self._ssl_channel_credentials = grpc.ssl_channel_credentials(
      certificate_chain=cert, private_key=key
     )
  # The base transport sets the host, credentials and scopes
  super().__init__(
   host=host,
   credentials=credentials,
   credentials_file=credentials_file,
   scopes=scopes,
   quota_project_id=quota_project_id,
   client_info=client_info,
   always_use_jwt_access=always_use_jwt_access,
  )
  if not self._grpc_channel:
   self._grpc_channel = type(self).create_channel(
    self._host,
    # use the credentials which are saved
    credentials=self._credentials,
    # Set ``credentials_file`` to ``None`` here as
    # the credentials that we saved earlier should be used.
    credentials_file=None,
    scopes=self._scopes,
    ssl_credentials=self._ssl_channel_credentials,
    quota_project_id=quota_project_id,
    options=[
     ("grpc.max_send_message_length", -1),
     ("grpc.max_receive_message_length", -1),
    ],
   )
  # Wrap messages. This must be done after self._grpc_channel exists
  self._prep_wrapped_messages(client_info)
 @property
 def grpc_channel(self) -> aio.Channel:
  """Create the channel designed to connect to this service.
  This property caches on the instance; repeated calls return
  the same channel.
  """
  # Return the channel from cache.
  return self._grpc_channel
 @property
 def list_voices(
  self,
 ) -> Callable[
  [cloud_tts.ListVoicesRequest], Awaitable[cloud_tts.ListVoicesResponse]
 ]:
  r"""Return a callable for the list voices method over gRPC.
  Returns a list of Voice supported for synthesis.
  Returns:
   Callable[[~.ListVoicesRequest],
    Awaitable[~.ListVoicesResponse]]:
   A function that, when called, will call the underlying RPC
   on the server.
  """
  # Generate a "stub function" on-the-fly which will actually make
  # the request.
  # gRPC handles serialization and deserialization, so we just need
  # to pass in the functions for each.
  if "list_voices" not in self._stubs:
   self._stubs["list_voices"] = self.grpc_channel.unary_unary(
    "/google.cloud.texttospeech.v1beta1.TextToSpeech/ListVoices",
    request_serializer=cloud_tts.ListVoicesRequest.serialize,
    response_deserializer=cloud_tts.ListVoicesResponse.deserialize,
   )
  return self._stubs["list_voices"]
 @property
 def synthesize_speech(
  self,
 ) -> Callable[
  [cloud_tts.SynthesizeSpeechRequest],
  Awaitable[cloud_tts.SynthesizeSpeechResponse],
 ]:
  r"""Return a callable for the synthesize speech method over gRPC.
  Synthesizes speech synchronously: receive results
  after all text input has been processed.
  Returns:
   Callable[[~.SynthesizeSpeechRequest],
    Awaitable[~.SynthesizeSpeechResponse]]:
   A function that, when called, will call the underlying RPC
   on the server.
  """
  # Generate a "stub function" on-the-fly which will actually make
  # the request.
  # gRPC handles serialization and deserialization, so we just need
  # to pass in the functions for each.
  if "synthesize_speech" not in self._stubs:
   self._stubs["synthesize_speech"] = self.grpc_channel.unary_unary(
    "/google.cloud.texttospeech.v1beta1.TextToSpeech/SynthesizeSpeech",
    request_serializer=cloud_tts.SynthesizeSpeechRequest.serialize,
    response_deserializer=cloud_tts.SynthesizeSpeechResponse.deserialize,
   )
  return self._stubs["synthesize_speech"]
 def close(self):
  # NOTE(review): on an aio channel this appears to return an awaitable
  # that the caller must await — confirm against grpc.aio.Channel.close.
  return self.grpc_channel.close()
__all__ = ("TextToSpeechGrpcAsyncIOTransport",)
| |
"""
1&1 Cloud Server Module
=======================
The 1&1 SaltStack cloud module allows a 1&1 server to be automatically deployed
and bootstrapped with Salt. It also has functions to create block storages and
ssh keys.
:depends: 1and1 >= 1.2.0
The module requires the 1&1 api_token to be provided. The server should also
be assigned a public LAN, a private LAN, or both along with SSH key pairs.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/oneandone.conf``:
.. code-block:: yaml
my-oneandone-config:
driver: oneandone
# The 1&1 api token
api_token: <your-token>
# SSH private key filename
ssh_private_key: /path/to/private_key
# SSH public key filename
ssh_public_key: /path/to/public_key
.. code-block:: yaml
my-oneandone-profile:
provider: my-oneandone-config
# Either provide fixed_instance_size_id or vcore, cores_per_processor, ram, and hdds.
  # ID (or name) of the fixed instance size desired for the server
fixed_instance_size: S
# Total amount of processors
vcore: 2
# Number of cores per processor
cores_per_processor: 2
# RAM memory size in GB
ram: 4
# Hard disks
hdds:
-
is_main: true
size: 20
-
is_main: false
size: 20
# ID of the appliance image that will be installed on server
appliance_id: <ID>
# ID of the datacenter where the server will be created
datacenter_id: <ID>
# Description of the server
description: My server description
# Password of the server. Password must contain more than 8 characters
# using uppercase letters, numbers and other special symbols.
password: P4$$w0rD
# Power on server after creation - default True
power_on: true
# Firewall policy ID. If it is not provided, the server will assign
# the best firewall policy, creating a new one if necessary.
# If the parameter is sent with a 0 value, the server will be created with all ports blocked.
firewall_policy_id: <ID>
# IP address ID
ip_id: <ID>
# Load balancer ID
load_balancer_id: <ID>
# Monitoring policy ID
monitoring_policy_id: <ID>
Set ``deploy`` to False if Salt should not be installed on the node.
.. code-block:: yaml
my-oneandone-profile:
deploy: False
Create an SSH key
.. code-block:: bash
sudo salt-cloud -f create_ssh_key my-oneandone-config name='SaltTest' description='SaltTestDescription'
Create a block storage
.. code-block:: bash
sudo salt-cloud -f create_block_storage my-oneandone-config name='SaltTest2'
description='SaltTestDescription' size=50 datacenter_id='5091F6D8CBFEF9C26ACE957C652D5D49'
"""
import logging
import os
import pprint
import time
import salt.config as config
import salt.utils.cloud
import salt.utils.files
import salt.utils.stringutils
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudNotFound,
SaltCloudSystemExit,
)
try:
# pylint: disable=no-name-in-module
from oneandone.client import (
OneAndOneService,
Server,
Hdd,
BlockStorage,
SshKey,
)
# pylint: enable=no-name-in-module
HAS_ONEANDONE = True
except ImportError:
HAS_ONEANDONE = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "oneandone"
# Only load in this module if the 1&1 configurations are in place
def __virtual__():
 """
 Decide whether this driver can load.

 Requires both a configured 1&1 provider and the ``1and1`` SDK.
 """
 provider_ok = get_configured_provider() is not False
 deps_ok = get_dependencies() is not False
 if provider_ok and deps_ok:
  return __virtualname__
 return False
def _get_active_provider_name():
 """
 Return the active provider name, unwrapping the loader value if present.
 """
 provider = __active_provider_name__
 try:
  return provider.value()
 except AttributeError:
  # Plain string (older loader behavior) -- use it as-is.
  return provider
def get_configured_provider():
 """
 Return the first configured 1&1 provider instance, or False if none.
 """
 provider_name = _get_active_provider_name() or __virtualname__
 return config.is_provider_configured(__opts__, provider_name, ("api_token",))
def get_dependencies():
 """
 Warn if the required ``1and1`` SDK is not importable.
 """
 deps = {"oneandone": HAS_ONEANDONE}
 return config.check_driver_dependencies(__virtualname__, deps)
def get_conn():
 """
 Build a 1&1 API client from the configured ``api_token``.
 """
 api_token = config.get_cloud_config_value(
  "api_token", get_configured_provider(), __opts__, search_global=False
 )
 return OneAndOneService(api_token=api_token)
def get_size(vm_):
 """
 Return the VM's size object.

 With no ``fixed_instance_size`` configured, default to the "S" flavor.
 Otherwise match the configured value against flavor name or id, and
 raise SaltCloudNotFound when nothing matches.
 """
 requested = config.get_cloud_config_value(
  "fixed_instance_size", vm_, __opts__, default=None, search_global=False
 )
 sizes = avail_sizes()
 if not requested:
  for item in sizes:
   if item["name"] == "S":
    return item
  return None
 for item in sizes:
  if requested in (item["name"], item["id"]):
   return item
 raise SaltCloudNotFound(
  "The specified size, '{}', could not be found.".format(requested)
 )
def get_image(vm_):
 """
 Return the appliance (image) matching the configured ``image`` value.

 Matches against either the appliance id or its name; raises
 SaltCloudNotFound when nothing matches.
 """
 vm_image = config.get_cloud_config_value("image", vm_, __opts__).encode(
  "ascii", "salt-cloud-force-ascii"
 )
 for image in avail_images().values():
  if vm_image and vm_image in (image["id"], image["name"]):
   return image
 raise SaltCloudNotFound(
  "The specified image, '{}', could not be found.".format(vm_image)
 )
def avail_locations(conn=None, call=None):
 """
 List the 1&1 datacenters, each keyed by its country code.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   "The avail_locations function must be called with "
   "-f or --function, or with the --list-locations option"
  )
 if not conn:
  conn = get_conn()
 datacenters = [{dc["country_code"]: dc} for dc in conn.list_datacenters()]
 return {"Locations": datacenters}
def create_block_storage(kwargs=None, call=None):
 """
 Create a block storage volume.

 kwargs -- dict with ``name``/``size`` plus optional ``description``,
 ``datacenter_id`` and ``server_id`` (see ``_get_block_storage``).
 call -- must be "function" (-f); "action" (-a) is rejected.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   # Fixed: message previously named avail_locations (copy/paste error).
   "The create_block_storage function must be called with -f or --function."
  )
 conn = get_conn()
 # Assemble the composite block storage object.
 block_storage = _get_block_storage(kwargs)
 data = conn.create_block_storage(block_storage=block_storage)
 return {"BlockStorage": data}
def _get_block_storage(kwargs):
 """
 Build a ``BlockStorage`` request object from a kwargs dict.

 ``name`` and ``size`` are always set; ``description``, ``datacenter_id``
 and ``server_id`` are attached only when provided (truthy).
 """
 if kwargs is None:
  kwargs = {}
 block_storage = BlockStorage(
  name=kwargs.get("name", None), size=kwargs.get("size", None)
 )
 for attr in ("description", "datacenter_id", "server_id"):
  value = kwargs.get(attr, None)
  if value:
   setattr(block_storage, attr, value)
 return block_storage
def _get_ssh_key(kwargs):
 """
 Build an ``SshKey`` request object from a kwargs dict.
 """
 return SshKey(
  name=kwargs.get("name", None),
  description=kwargs.get("description", None),
  public_key=kwargs.get("public_key", None),
 )
def create_ssh_key(kwargs=None, call=None):
 """
 Create an SSH key.

 kwargs -- dict with ``name``, ``description`` and ``public_key``
 (see ``_get_ssh_key``).
 call -- must be "function" (-f); "action" (-a) is rejected.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   # Fixed: message previously named avail_locations (copy/paste error).
   "The create_ssh_key function must be called with -f or --function."
  )
 conn = get_conn()
 # Assemble the composite SshKey object.
 ssh_key = _get_ssh_key(kwargs)
 data = conn.create_ssh_key(ssh_key=ssh_key)
 return {"SshKey": data}
def avail_images(conn=None, call=None):
 """
 Return the server appliances available on the provider, keyed by name.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   "The avail_images function must be called with "
   "-f or --function, or with the --list-images option"
  )
 if not conn:
  conn = get_conn()
 return {appliance["name"]: appliance for appliance in conn.list_appliances()}
def avail_sizes(call=None):
 """
 Return the fixed server flavors offered by the provider.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   "The avail_sizes function must be called with "
   "-f or --function, or with the --list-sizes option"
  )
 return get_conn().fixed_server_flavors()
def script(vm_):
 """
 Return the deploy script object for the VM.
 """
 minion_yaml = salt.utils.cloud.salt_config_to_yaml(
  salt.utils.cloud.minion_config(__opts__, vm_)
 )
 deploy_script = config.get_cloud_config_value("script", vm_, __opts__)
 return salt.utils.cloud.os_script(deploy_script, vm_, __opts__, minion_yaml)
def list_nodes(conn=None, call=None):
 """
 Return the VMs on the provider as a dict keyed by server name.

 Each entry carries id, image id, size, state and the public/private IPs.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   "The list_nodes function must be called with -f or --function."
  )
 if not conn:
  conn = get_conn()
 ret = {}
 nodes = conn.list_servers()
 for node in nodes:
  # BUG FIX: ``ret`` was re-initialized inside this loop, which discarded
  # every node except the last one returned by the API.
  public_ips = []
  private_ips = []
  size = node.get("hardware").get("fixed_instance_size_id", "Custom size")
  if node.get("private_networks"):
   for private_ip in node["private_networks"]:
    private_ips.append(private_ip)
  if node.get("ips"):
   for public_ip in node["ips"]:
    public_ips.append(public_ip["ip"])
  server = {
   "id": node["id"],
   "image": node["image"]["id"],
   "size": size,
   "state": node["status"]["state"],
   "private_ips": private_ips,
   "public_ips": public_ips,
  }
  ret[node["name"]] = server
 return ret
def list_nodes_full(conn=None, call=None):
 """
 Return every VM on the provider with all of its fields, keyed by name.
 """
 if call == "action":
  raise SaltCloudSystemExit(
   "The list_nodes_full function must be called with -f or --function."
  )
 if not conn:
  conn = get_conn()
 return {node["name"]: node for node in conn.list_servers()}
def list_nodes_select(conn=None, call=None):
 """
 Return the VMs on the provider, restricted to the configured query fields.
 """
 if not conn:
  conn = get_conn()
 full = list_nodes_full(conn, "function")
 return salt.utils.cloud.list_nodes_select(full, __opts__["query.selection"], call)
def show_instance(name, call=None):
 """
 Show provider details for a single instance, refreshing the node cache.
 """
 if call != "action":
  raise SaltCloudSystemExit(
   "The show_instance action must be called with -a or --action."
  )
 node = list_nodes_full()[name]
 __utils__["cloud.cache_node"](node, _get_active_provider_name(), __opts__)
 return node
def _get_server(vm_):
 """
 Construct a ``Server`` request object from the cloud profile config.

 Either ``fixed_instance_size`` or all of ``vcore``,
 ``cores_per_processor``, ``ram`` and ``hdds`` must be present in the
 profile; otherwise SaltCloudConfigError is raised.
 """
 description = config.get_cloud_config_value(
  "description", vm_, __opts__, default=None, search_global=False
 )
 ssh_key = load_public_key(vm_)
 vcore = None
 cores_per_processor = None
 ram = None
 fixed_instance_size_id = None
 if "fixed_instance_size" in vm_:
  fixed_instance_size = get_size(vm_)
  fixed_instance_size_id = fixed_instance_size["id"]
 elif (
  vm_.get("vcore")
  and vm_.get("cores_per_processor")
  and vm_.get("ram")
  and vm_.get("hdds")
 ):
  # Use .get() so a partially specified custom size falls through to the
  # explicit SaltCloudConfigError below instead of raising a KeyError.
  vcore = config.get_cloud_config_value(
   "vcore", vm_, __opts__, default=None, search_global=False
  )
  cores_per_processor = config.get_cloud_config_value(
   "cores_per_processor", vm_, __opts__, default=None, search_global=False
  )
  ram = config.get_cloud_config_value(
   "ram", vm_, __opts__, default=None, search_global=False
  )
 else:
  # Message previously rendered without separating spaces; fixed.
  raise SaltCloudConfigError(
   "'fixed_instance_size' or 'vcore', "
   "'cores_per_processor', 'ram', and 'hdds' "
   "must be provided."
  )
 appliance_id = config.get_cloud_config_value(
  "appliance_id", vm_, __opts__, default=None, search_global=False
 )
 password = config.get_cloud_config_value(
  "password", vm_, __opts__, default=None, search_global=False
 )
 firewall_policy_id = config.get_cloud_config_value(
  "firewall_policy_id", vm_, __opts__, default=None, search_global=False
 )
 ip_id = config.get_cloud_config_value(
  "ip_id", vm_, __opts__, default=None, search_global=False
 )
 load_balancer_id = config.get_cloud_config_value(
  "load_balancer_id", vm_, __opts__, default=None, search_global=False
 )
 monitoring_policy_id = config.get_cloud_config_value(
  "monitoring_policy_id", vm_, __opts__, default=None, search_global=False
 )
 datacenter_id = config.get_cloud_config_value(
  "datacenter_id", vm_, __opts__, default=None, search_global=False
 )
 private_network_id = config.get_cloud_config_value(
  "private_network_id", vm_, __opts__, default=None, search_global=False
 )
 power_on = config.get_cloud_config_value(
  "power_on", vm_, __opts__, default=True, search_global=False
 )
 # NOTE(review): default=True looks odd for a list of key IDs; preserved
 # as-is for backward compatibility -- confirm intent with the API.
 public_key = config.get_cloud_config_value(
  "public_key_ids", vm_, __opts__, default=True, search_global=False
 )
 # Construct server object
 return Server(
  name=vm_["name"],
  description=description,
  fixed_instance_size_id=fixed_instance_size_id,
  vcore=vcore,
  cores_per_processor=cores_per_processor,
  ram=ram,
  appliance_id=appliance_id,
  password=password,
  power_on=power_on,
  firewall_policy_id=firewall_policy_id,
  ip_id=ip_id,
  load_balancer_id=load_balancer_id,
  monitoring_policy_id=monitoring_policy_id,
  datacenter_id=datacenter_id,
  rsa_key=ssh_key,
  private_network_id=private_network_id,
  public_key=public_key,
 )
def _get_hdds(vm_):
 """
 Build the list of ``Hdd`` objects from the profile's ``hdds`` entries.
 """
 hdd_configs = config.get_cloud_config_value(
  "hdds", vm_, __opts__, default=None, search_global=False
 )
 return [Hdd(size=entry["size"], is_main=entry["is_main"]) for entry in hdd_configs]
def create(vm_):
 """
 Create a single VM from a data dict

 Builds the Server (and Hdd list for custom sizes), requests creation,
 waits for the server to power on and get an IP, then bootstraps Salt
 onto it.  Returns the bootstrap result dict, or False on failure.
 """
 try:
  # Check for required profile parameters before sending any API calls.
  if (
   vm_["profile"]
   and config.is_profile_configured(
    __opts__, (_get_active_provider_name() or "oneandone"), vm_["profile"]
   )
   is False
  ):
   return False
 except AttributeError:
  pass
 data = None
 conn = get_conn()
 hdds = []
 # Assemble the composite server object.
 server = _get_server(vm_)
 # Custom-sized servers (no fixed flavor) need explicit disk definitions.
 if not bool(server.specs["hardware"]["fixed_instance_size_id"]):
  # Assemble the hdds object.
  hdds = _get_hdds(vm_)
 __utils__["cloud.fire_event"](
  "event",
  "requesting instance",
  "salt/cloud/{}/requesting".format(vm_["name"]),
  args={"name": vm_["name"]},
  sock_dir=__opts__["sock_dir"],
  transport=__opts__["transport"],
 )
 try:
  data = conn.create_server(server=server, hdds=hdds)
  # Block until the provider reports the server powered on (or fails).
  _wait_for_completion(conn, get_wait_timeout(vm_), data["id"])
 except Exception as exc:  # pylint: disable=W0703
  log.error(
   "Error creating %s on 1and1\n\n"
   "The following exception was thrown by the 1and1 library "
   "when trying to run the initial deployment: \n%s",
   vm_["name"],
   exc,
   exc_info_on_loglevel=logging.DEBUG,
  )
  return False
 vm_["server_id"] = data["id"]
 # The API returns the initial root password only on creation.
 password = data["first_password"]
 def __query_node_data(vm_, data):
  """
  Query node data until node becomes available.
  """
  running = False
  try:
   data = show_instance(vm_["name"], "action")
   if not data:
    return False
   log.debug(
    "Loaded node data for %s:\nname: %s\nstate: %s",
    vm_["name"],
    pprint.pformat(data["name"]),
    data["status"]["state"],
   )
  except Exception as err:  # pylint: disable=broad-except
   log.error(
    "Failed to get nodes list: %s",
    err,
    # Show the trackback if the debug logging level is enabled
    exc_info_on_loglevel=logging.DEBUG,
   )
   # Trigger a failure in the wait for IP function
   return False
  running = data["status"]["state"].lower() == "powered_on"
  if not running:
   # Still not running, trigger another iteration
   return
  vm_["ssh_host"] = data["ips"][0]["ip"]
  return data
 try:
  data = salt.utils.cloud.wait_for_ip(
   __query_node_data,
   update_args=(vm_, data),
   timeout=config.get_cloud_config_value(
    "wait_for_ip_timeout", vm_, __opts__, default=10 * 60
   ),
   interval=config.get_cloud_config_value(
    "wait_for_ip_interval", vm_, __opts__, default=10
   ),
  )
 except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
  try:
   # It might be already up, let's destroy it!
   destroy(vm_["name"])
  except SaltCloudSystemExit:
   pass
  finally:
   # NOTE(review): relies on the exception exposing ``.message`` — a
   # salt exception attribute; confirm it exists on these types.
   raise SaltCloudSystemExit(str(exc.message))
 log.debug("VM is now running")
 log.info("Created Cloud VM %s", vm_)
 log.debug("%s VM creation details:\n%s", vm_, pprint.pformat(data))
 __utils__["cloud.fire_event"](
  "event",
  "created instance",
  "salt/cloud/{}/created".format(vm_["name"]),
  args={
   "name": vm_["name"],
   "profile": vm_["profile"],
   "provider": vm_["driver"],
  },
  sock_dir=__opts__["sock_dir"],
  transport=__opts__["transport"],
 )
 if "ssh_host" in vm_:
  vm_["password"] = password
  vm_["key_filename"] = get_key_filename(vm_)
  ret = __utils__["cloud.bootstrap"](vm_, __opts__)
  ret.update(data)
  return ret
 else:
  raise SaltCloudSystemExit("A valid IP address was not found.")
def destroy(name, call=None):
 """
 Destroy a server by name.

 :param name: name given to the server
 :param call: must be an action (-d/--destroy/-a/--action)
 :return: True once the delete request has been issued

 CLI Example:

 .. code-block:: bash

  salt-cloud -d vm_name
 """
 if call == "function":
  raise SaltCloudSystemExit(
   "The destroy action must be called with -d, --destroy, -a or --action."
  )
 __utils__["cloud.fire_event"](
  "event",
  "destroying instance",
  "salt/cloud/{}/destroying".format(name),
  args={"name": name},
  sock_dir=__opts__["sock_dir"],
  transport=__opts__["transport"],
 )
 conn = get_conn()
 server = get_node(conn, name)
 conn.delete_server(server_id=server["id"])
 __utils__["cloud.fire_event"](
  "event",
  "destroyed instance",
  "salt/cloud/{}/destroyed".format(name),
  args={"name": name},
  sock_dir=__opts__["sock_dir"],
  transport=__opts__["transport"],
 )
 # Drop the cached minion entry if node caching is enabled.
 if __opts__.get("update_cachedir", False) is True:
  __utils__["cloud.delete_minion_cachedir"](
   name, _get_active_provider_name().split(":")[0], __opts__
  )
 return True
def reboot(name, call=None):
 """
 Reboot a server by name.

 :param name: name given to the machine
 :param call: call value in this case is 'action'
 :return: True once the reboot request has been issued

 CLI Example:

 .. code-block:: bash

  salt-cloud -a reboot vm_name
 """
 conn = get_conn()
 server = get_node(conn, name)
 conn.modify_server_status(server_id=server["id"], action="REBOOT")
 return True
def stop(name, call=None):
 """
 Stop a server by name.

 :param name: name given to the machine
 :param call: call value in this case is 'action'
 :return: True once the stop request has been issued

 CLI Example:

 .. code-block:: bash

  salt-cloud -a stop vm_name
 """
 conn = get_conn()
 server = get_node(conn, name)
 conn.stop_server(server_id=server["id"])
 return True
def start(name, call=None):
 """
 Start a server by name.

 :param name: name given to the machine
 :param call: call value in this case is 'action'
 :return: True once the start request has been issued

 CLI Example:

 .. code-block:: bash

  salt-cloud -a start vm_name
 """
 conn = get_conn()
 server = get_node(conn, name)
 conn.start_server(server_id=server["id"])
 return True
def get_node(conn, name):
 """
 Return the server dict whose name matches, or None when absent.
 """
 matches = (
  node for node in conn.list_servers(per_page=1000) if node["name"] == name
 )
 return next(matches, None)
def get_key_filename(vm_):
 """
 Return the expanded path of the configured SSH private key, if any.

 Raises SaltCloudConfigError when the configured file does not exist;
 returns None when no key is configured.
 """
 key_filename = config.get_cloud_config_value(
  "ssh_private_key", vm_, __opts__, search_global=False, default=None
 )
 if key_filename is None:
  return None
 key_filename = os.path.expanduser(key_filename)
 if not os.path.isfile(key_filename):
  raise SaltCloudConfigError(
   "The defined ssh_private_key '{}' does not exist".format(key_filename)
  )
 return key_filename
def load_public_key(vm_):
 """
 Read the configured SSH public key file and return its contents.

 Returns None when no ``ssh_public_key`` is configured; raises
 SaltCloudConfigError when the configured file does not exist.
 Newlines are stripped from the key material.
 """
 public_key_filename = config.get_cloud_config_value(
  "ssh_public_key", vm_, __opts__, search_global=False, default=None
 )
 if public_key_filename is None:
  return None
 public_key_filename = os.path.expanduser(public_key_filename)
 if not os.path.isfile(public_key_filename):
  raise SaltCloudConfigError(
   "The defined ssh_public_key '{}' does not exist".format(
    public_key_filename
   )
  )
 with salt.utils.files.fopen(public_key_filename, "r") as public_key:
  return salt.utils.stringutils.to_unicode(public_key.read().replace("\n", ""))
def get_wait_timeout(vm_):
 """
 Return the provisioning wait timeout in seconds (default: 15 minutes).
 """
 default_timeout = 15 * 60
 return config.get_cloud_config_value(
  "wait_for_timeout", vm_, __opts__, search_global=False, default=default_timeout
 )
def _wait_for_completion(conn, wait_timeout, server_id):
    """
    Poll request status until resource is provisioned.

    Polls every 5 seconds until the server reaches ``powered_on``;
    raises on a ``failed`` state, an unrecognized state, or timeout.
    """
    deadline = time.time() + wait_timeout
    while time.time() < deadline:
        time.sleep(5)
        state = conn.get_server(server_id)["status"]["state"].lower()
        if state == "powered_on":
            return
        if state == "failed":
            raise Exception("Server creation failed for {}".format(server_id))
        # Transitional states simply keep the polling loop going.
        if state not in ("active", "enabled", "deploying", "configuring"):
            raise Exception("Unknown server state {}".format(state))
    raise Exception(
        "Timed out waiting for server create completion for {}".format(server_id)
    )
| |
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils
from rally.common import validation
from rally import consts as rally_consts
from rally import exceptions
from rally.plugins.openstack.cleanup import manager as resource_manager
from rally.plugins.openstack.context.manila import consts
from rally.plugins.openstack.scenarios.manila import utils as manila_utils
from rally.task import context
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CONTEXT_NAME = consts.SHARE_NETWORKS_CONTEXT_NAME
SHARE_NETWORKS_ARG_DESCR = """
This context arg will be used only when context arg "use_share_networks" is
set to True.
If context arg 'share_networks' has values then they will be used else share
networks will be autocreated - one for each tenant network. If networks do not
exist then will be created one share network for each tenant without network
data.
Expected value is dict of lists where tenant Name or ID is key and list of
share_network Names or IDs is value. Example:
.. code-block:: json
"context": {
"manila_share_networks": {
"use_share_networks": true,
"share_networks": {
"tenant_1_name_or_id": ["share_network_1_name_or_id",
"share_network_2_name_or_id"],
"tenant_2_name_or_id": ["share_network_3_name_or_id"]}
}
}
Also, make sure that all 'existing users' in appropriate registered deployment
have share networks if its usage is enabled, else Rally will randomly take
users that does not satisfy criteria.
"""
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name=CONTEXT_NAME, platform="openstack", order=450)
class ShareNetworks(context.Context):
    """This context creates share networks for Manila project."""

    # JSON schema Rally uses to validate this context's configuration.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": rally_consts.JSON_SCHEMA,
        "properties": {
            "use_share_networks": {
                "type": "boolean",
                "description": "specifies whether manila should use share "
                               "networks for share creation or not."},
            "share_networks": {
                "type": "object",
                "description": SHARE_NETWORKS_ARG_DESCR
            },
        },
        "additionalProperties": False
    }

    # Defaults applied when the user omits the corresponding config keys.
    DEFAULT_CONFIG = {
        "use_share_networks": False,
        "share_networks": {},
    }

    def _setup_for_existing_users(self):
        """Validate and register pre-existing share networks per tenant.

        For each tenant listed in config["share_networks"], verifies the
        tenant and each named share network exist, then stores the matching
        share networks in the per-tenant context.
        """
        if (self.config["use_share_networks"] and
                not self.config["share_networks"]):
            msg = _("Usage of share networks was enabled but for deployment "
                    "with existing users share networks also should be "
                    "specified via arg 'share_networks'")
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(), msg=msg)

        for tenant_name_or_id, share_networks in self.config[
                "share_networks"].items():
            # Verify project existence
            for tenant in self.context["tenants"].values():
                if tenant_name_or_id in (tenant["id"], tenant["name"]):
                    tenant_id = tenant["id"]
                    # Pick the first user that belongs to this tenant.
                    existing_user = None
                    for user in self.context["users"]:
                        if user["tenant_id"] == tenant_id:
                            existing_user = user
                            break
                    break
            else:
                # for/else: no tenant matched the configured name/ID.
                msg = _("Provided tenant Name or ID '%s' was not found in "
                        "existing tenants.") % tenant_name_or_id
                raise exceptions.ContextSetupFailure(
                    ctx_name=self.get_name(), msg=msg)

            self.context["tenants"][tenant_id][CONTEXT_NAME] = {}
            self.context["tenants"][tenant_id][CONTEXT_NAME][
                "share_networks"] = []

            # NOTE(review): existing_user stays None when the tenant has no
            # user in self.context["users"]; ManilaScenario is then built
            # with "user": None — confirm upstream guarantees a user exists.
            manila_scenario = manila_utils.ManilaScenario({
                "user": existing_user,
                "config": {
                    "api_versions": self.context["config"].get(
                        "api_versions", [])}
            })
            existing_sns = manila_scenario._list_share_networks(
                detailed=False, search_opts={"project_id": tenant_id})

            for sn_name_or_id in share_networks:
                # Verify share network existence
                for sn in existing_sns:
                    if sn_name_or_id in (sn.id, sn.name):
                        break
                else:
                    msg = _("Specified share network '%(sn)s' does not "
                            "exist for tenant '%(tenant_id)s'") % {
                        "sn": sn_name_or_id, "tenant_id": tenant_id}
                    raise exceptions.ContextSetupFailure(
                        ctx_name=self.get_name(), msg=msg)

                # Set share network for project
                self.context["tenants"][tenant_id][CONTEXT_NAME][
                    "share_networks"].append(sn.to_dict())

    def _setup_for_autocreated_users(self):
        """Create share networks for autocreated users.

        Creates one share network per tenant network, or a single share
        network without network data when the tenant has no networks.
        """
        # Create share network for each network of tenant
        for user, tenant_id in (utils.iterate_per_tenants(
                self.context.get("users", []))):
            networks = self.context["tenants"][tenant_id].get("networks")
            manila_scenario = manila_utils.ManilaScenario({
                "task": self.task,
                "owner_id": self.get_owner_id(),
                "user": user,
                "config": {
                    "api_versions": self.context["config"].get(
                        "api_versions", [])}
            })
            manila_scenario.RESOURCE_NAME_FORMAT = self.RESOURCE_NAME_FORMAT
            self.context["tenants"][tenant_id][CONTEXT_NAME] = {
                "share_networks": []}
            data = {}

            def _setup_share_network(tenant_id, data):
                # Create the share network, record it in the context, then
                # attach any security services registered for this tenant.
                share_network = manila_scenario._create_share_network(
                    **data).to_dict()
                self.context["tenants"][tenant_id][CONTEXT_NAME][
                    "share_networks"].append(share_network)
                for ss in self.context["tenants"][tenant_id].get(
                        consts.SECURITY_SERVICES_CONTEXT_NAME, {}).get(
                        "security_services", []):
                    manila_scenario._add_security_service_to_share_network(
                        share_network["id"], ss["id"])

            if networks:
                for network in networks:
                    # "cidr" marks a nova-network; "subnets" a neutron net.
                    if network.get("cidr"):
                        data["nova_net_id"] = network["id"]
                    elif network.get("subnets"):
                        data["neutron_net_id"] = network["id"]
                        data["neutron_subnet_id"] = network["subnets"][0]
                    else:
                        LOG.warning(_(
                            "Can not determine network service provider. "
                            "Share network will have no data."))
                    _setup_share_network(tenant_id, data)
            else:
                # No tenant networks: one share network without network data.
                _setup_share_network(tenant_id, data)

    @logging.log_task_wrapper(LOG.info, _("Enter context: `%s`")
                              % CONTEXT_NAME)
    def setup(self):
        """Populate per-tenant share networks according to the config."""
        self.context[CONTEXT_NAME] = {}
        if not self.config["use_share_networks"]:
            pass
        elif self.context["config"].get("existing_users"):
            self._setup_for_existing_users()
        else:
            self._setup_for_autocreated_users()

    @logging.log_task_wrapper(LOG.info, _("Exit context: `%s`") % CONTEXT_NAME)
    def cleanup(self):
        """Delete autocreated share networks; leave pre-existing ones alone."""
        if (not self.context["config"].get("existing_users") or
                self.config["use_share_networks"]):
            resource_manager.cleanup(
                names=["manila.share_networks"],
                users=self.context.get("users", []),
                superclass=self.__class__,
                api_versions=self.context["config"].get("api_versions"),
                task_id=self.get_owner_id())
        else:
            # NOTE(vponomaryov): assume that share networks were not created
            # by test run.
            return
| |
# -*- coding: utf-8 -*-
"""
sockjs.tornado.router
~~~~~~~~~~~~~~~~~~~~~
SockJS protocol router implementation.
"""
from tornado import ioloop, version_info
from sockjs.tornado import transports, session, sessioncontainer, static, stats, proto
# Router defaults; overridable per-instance via SockJSRouter(user_settings=...).
DEFAULT_SETTINGS = {
    # Sessions check interval in seconds
    'session_check_interval': 1,
    # Session expiration in seconds
    'disconnect_delay': 5,
    # Heartbeat time in seconds. Do not change this value unless
    # you absolutely sure that new value will work.
    'heartbeat_delay': 25,
    # Enabled protocols
    'disabled_transports': [],
    # SockJS location
    'sockjs_url': 'https://cdn.jsdelivr.net/sockjs/0.3/sockjs.min.js',
    # Max response body size
    'response_limit': 128 * 1024,
    # Enable or disable JSESSIONID cookie handling
    'jsessionid': True,
    # Should sockjs-tornado flush messages immediately or queue then and
    # flush on next ioloop tick
    'immediate_flush': True,
    # Enable or disable Nagle for persistent transports
    'disable_nagle': True,
    # Enable IP checks for polling transports. If enabled, all subsequent
    # polling calls should be from the same IP address.
    'verify_ip': True,
    # list of allowed origins for websocket connections
    # or "*" - accept all websocket connections
    'websocket_allow_origin': "*"
}

# Send handlers mounted once per session URL, regardless of transport.
GLOBAL_HANDLERS = [
    ('xhr_send', transports.XhrSendHandler),
    ('jsonp_send', transports.JSONPSendHandler)
]

# Transport name -> handler class; names can be disabled via
# the 'disabled_transports' setting.
TRANSPORTS = {
    'websocket': transports.WebSocketTransport,
    'xhr': transports.XhrPollingTransport,
    'xhr_streaming': transports.XhrStreamingTransport,
    'jsonp': transports.JSONPTransport,
    'eventsource': transports.EventSourceTransport,
    'htmlfile': transports.HtmlFileTransport
}

# Non-session endpoints mounted directly under the router prefix.
STATIC_HANDLERS = {
    '/chunking_test': static.ChunkingTestHandler,
    '/info': static.InfoHandler,
    '/iframe[0-9-.a-z_]*.html': static.IFrameHandler,
    '/websocket': transports.RawWebSocketTransport,
    '/?': static.GreetingsHandler
}
class SockJSRouter(object):
    """SockJS protocol router"""

    # NOTE(review): user_settings=dict() is a mutable default argument; it is
    # benign here because it is only read (settings.update), never mutated.
    def __init__(self,
                 connection,
                 prefix='',
                 user_settings=dict(),
                 io_loop=None):
        """Constructor.

        `connection`
            SockJSConnection class
        `prefix`
            Connection prefix
        `user_settings`
            Settings dictionary
        `io_loop`
            Optional IOLoop instance
        """
        # TODO: Version check
        if version_info[0] < 2:
            raise Exception('sockjs-tornado requires Tornado 2.0 or higher.')

        # Store connection class
        self._connection = connection

        # Initialize io_loop
        self.io_loop = io_loop or ioloop.IOLoop.instance()

        # Settings: start from defaults, then overlay user overrides.
        self.settings = DEFAULT_SETTINGS.copy()
        if user_settings:
            self.settings.update(user_settings)

        self.websockets_enabled = 'websocket' not in self.settings['disabled_transports']
        self.cookie_needed = self.settings['jsessionid']

        # Sessions
        self._sessions = sessioncontainer.SessionContainer()

        # PeriodicCallback takes milliseconds; setting is in seconds.
        check_interval = self.settings['session_check_interval'] * 1000
        self._sessions_cleanup = ioloop.PeriodicCallback(self._sessions.expire,
                                                         check_interval,
                                                         self.io_loop)
        self._sessions_cleanup.start()

        # Stats
        self.stats = stats.StatsCollector(self.io_loop)

        # Initialize URLs: base matches /<server_id>/<session_id>.
        base = prefix + r'/[^/.]+/(?P<session_id>[^/.]+)'

        # Generate global handler URLs
        self._transport_urls = [('%s/%s$' % (base, p[0]), p[1], dict(server=self))
                                for p in GLOBAL_HANDLERS]

        for k, v in TRANSPORTS.items():
            if k in self.settings['disabled_transports']:
                continue

            # Only version 1 is supported
            self._transport_urls.append(
                (r'%s/%s$' % (base, k),
                 v,
                 dict(server=self))
                )

        # Generate static URLs
        self._transport_urls.extend([('%s%s' % (prefix, k), v, dict(server=self))
                                     for k, v in STATIC_HANDLERS.items()])

    @property
    def urls(self):
        """List of the URLs to be added to the Tornado application"""
        return self._transport_urls

    def apply_routes(self, routes):
        """Feed list of the URLs to the routes list. Returns list"""
        routes.extend(self._transport_urls)
        return routes

    def create_session(self, session_id, register=True):
        """Creates new session object and returns it.

        `request`
            Request that created the session. Will be used to get query string
            parameters and cookies
        `register`
            Should be session registered in a storage. Websockets don't
            need it.
        """
        # TODO: Possible optimization here for settings.get
        s = session.Session(self._connection,
                            self,
                            session_id,
                            self.settings.get('disconnect_delay')
                            )

        if register:
            self._sessions.add(s)

        return s

    def get_session(self, session_id):
        """Get session by session id

        `session_id`
            Session id
        """
        return self._sessions.get(session_id)

    def get_connection_class(self):
        """Return associated connection class"""
        return self._connection

    # Broadcast helper
    def broadcast(self, clients, msg):
        """Optimized `broadcast` implementation. Depending on type of the session, will json-encode
        message once and will call either `send_message` or `send_jsonifed`.

        `clients`
            Clients iterable
        `msg`
            Message to send
        """
        # Message is JSON-encoded at most once, lazily, for sessions that
        # expect pre-encoded payloads; closed sessions are skipped entirely.
        json_msg = None
        count = 0

        for c in clients:
            sess = c.session
            if not sess.is_closed:
                if sess.send_expects_json:
                    if json_msg is None:
                        json_msg = proto.json_encode(msg)
                    sess.send_jsonified(json_msg, False)
                else:
                    sess.send_message(msg, stats=False)

                # Only sessions that actually received the message are counted.
                count += 1

        self.stats.on_pack_sent(count)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functions and classes for tfdbg command-line interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
# Shorthand for building styled CLI text lines.
RL = debugger_cli_common.RichLine

# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
DEFAULT_NDARRAY_DISPLAY_THRESHOLD = 2000

# Terminal color names understood by the tfdbg CLI renderer.
COLOR_BLACK = "black"
COLOR_BLUE = "blue"
COLOR_CYAN = "cyan"
COLOR_GRAY = "gray"
COLOR_GREEN = "green"
COLOR_MAGENTA = "magenta"
COLOR_RED = "red"
COLOR_WHITE = "white"
COLOR_YELLOW = "yellow"
TIME_UNIT_US = "us"
TIME_UNIT_MS = "ms"
TIME_UNIT_S = "s"
TIME_UNITS = [TIME_UNIT_US, TIME_UNIT_MS, TIME_UNIT_S]
def bytes_to_readable_str(num_bytes, include_b=False):
  """Generate a human-readable string representing number of bytes.

  The units B, kB, MB and GB are used.

  Args:
    num_bytes: (`int` or None) Number of bytes.
    include_b: (`bool`) Include the letter B at the end of the unit.

  Returns:
    (`str`) A string representing the number of bytes in a human-readable way,
      including a unit at the end.
  """
  if num_bytes is None:
    return str(num_bytes)

  kilo = 1024
  mega = 1024 ** 2
  giga = 1024 ** 3

  if num_bytes < kilo:
    rendered = "%d" % num_bytes
  elif num_bytes < mega:
    rendered = "%.2fk" % (num_bytes / kilo)
  elif num_bytes < giga:
    rendered = "%.2fM" % (num_bytes / mega)
  else:
    rendered = "%.2fG" % (num_bytes / giga)

  return rendered + "B" if include_b else rendered
def time_to_readable_str(value_us, force_time_unit=None):
"""Convert time value to human-readable string.
Args:
value_us: time value in microseconds.
force_time_unit: force the output to use the specified time unit. Must be
in TIME_UNITS.
Returns:
Human-readable string representation of the time value.
Raises:
ValueError: if force_time_unit value is not in TIME_UNITS.
"""
if not value_us:
return "0"
if force_time_unit:
if force_time_unit not in TIME_UNITS:
raise ValueError("Invalid time unit: %s" % force_time_unit)
order = TIME_UNITS.index(force_time_unit)
time_unit = force_time_unit
return "{:.10g}{}".format(value_us / math.pow(10.0, 3*order), time_unit)
else:
order = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))
time_unit = TIME_UNITS[order]
return "{:.3g}{}".format(value_us / math.pow(10.0, 3*order), time_unit)
def parse_ranges_highlight(ranges_string):
  """Process ranges highlight string.

  Args:
    ranges_string: (str) A string representing a numerical range of a list of
      numerical ranges. See the help info of the -r flag of the print_tensor
      command for more details.

  Returns:
    An instance of tensor_format.HighlightOptions, if range_string is a valid
      representation of a range or a list of ranges. None if ranges_string is
      empty.
  """
  if not ranges_string:
    return None

  ranges = command_parser.parse_ranges(ranges_string)

  def ranges_filter(x):
    # Boolean mask: True wherever x falls inside any [start, end] interval.
    mask = np.zeros(x.shape, dtype=bool)
    for range_start, range_end in ranges:
      mask |= (x >= range_start) & (x <= range_end)
    return mask

  return tensor_format.HighlightOptions(
      ranges_filter, description=ranges_string)
def format_tensor(tensor,
                  tensor_name,
                  np_printoptions,
                  print_all=False,
                  tensor_slicing=None,
                  highlight_options=None):
  """Generate formatted str to represent a tensor or its slices.

  Args:
    tensor: (numpy ndarray) The tensor value.
    tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key.
    np_printoptions: (dict) Numpy tensor formatting options; mutated in place
      to carry the display threshold.
    print_all: (bool) Whether the tensor is to be displayed in its entirety,
      instead of printing ellipses, even if its number of elements exceeds
      the default numpy display threshold.
      (Note: Even if this is set to true, the screen output can still be cut
      off by the UI frontend if it consist of more lines than the frontend
      can handle.)
    tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
      None, no slicing will be performed on the tensor.
    highlight_options: (tensor_format.HighlightOptions) options to highlight
      elements of the tensor. See the doc of tensor_format.format_tensor()
      for more details.

  Returns:
    (str) Formatted str representing the (potentially sliced) tensor.
  """
  if tensor_slicing:
    # Validate the indexing.
    value = command_parser.evaluate_tensor_slice(tensor, tensor_slicing)
    sliced_name = tensor_name + tensor_slicing
  else:
    value, sliced_name = tensor, tensor_name

  # Either show everything or cap at the default ellipsis threshold.
  np_printoptions["threshold"] = (
      value.size if print_all else DEFAULT_NDARRAY_DISPLAY_THRESHOLD)

  return tensor_format.format_tensor(
      value,
      sliced_name,
      include_metadata=True,
      np_printoptions=np_printoptions,
      highlight_options=highlight_options)
def error(msg):
  """Generate a RichTextLines output for error.

  Args:
    msg: (str) The error message.

  Returns:
    (debugger_cli_common.RichTextLines) A representation of the error message
      for screen output.
  """
  line = RL("ERROR: " + msg, COLOR_RED)
  return debugger_cli_common.rich_text_lines_from_rich_line_list([line])
def _get_fetch_name(fetch):
"""Obtain the name or string representation of a fetch.
Args:
fetch: The fetch in question.
Returns:
If the attribute 'name' is available, return the name. Otherwise, return
str(fetch).
"""
return fetch.name if hasattr(fetch, "name") else str(fetch)
def _get_fetch_names(fetches):
"""Get a flattened list of the names in run() call fetches.
Args:
fetches: Fetches of the `Session.run()` call. It maybe a Tensor, an
Operation or a Variable. It may also be nested lists, tuples or
dicts. See doc of `Session.run()` for more details.
Returns:
(list of str) A flattened list of fetch names from `fetches`.
"""
lines = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
lines.extend(_get_fetch_names(fetch))
elif isinstance(fetches, dict):
for key in fetches:
lines.extend(_get_fetch_names(fetches[key]))
else:
# This ought to be a Tensor, an Operation or a Variable, for which the name
# attribute should be available. (Bottom-out condition of the recursion.)
lines.append(_get_fetch_name(fetches))
return lines
def _recommend_command(command, description, indent=2, create_link=False):
  """Generate a RichTextLines object that describes a recommended command.

  Args:
    command: (str) The command to recommend.
    description: (str) A description of what the command does.
    indent: (int) How many spaces to indent in the beginning.
    create_link: (bool) Whether a command link is to be applied to the command
      string.

  Returns:
    (RichTextLines) Formatted text (with font attributes) for recommending the
      command.
  """
  indent_str = " " * indent

  # A linked command carries a clickable menu item in addition to bold.
  font_attr = ([debugger_cli_common.MenuItem("", command), "bold"]
               if create_link else "bold")

  lines = [RL(indent_str) + RL(command, font_attr) + ":",
           indent_str + "  " + description]
  return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def get_tfdbg_logo():
  """Make an ASCII representation of the tfdbg logo.

  Returns:
    (RichTextLines) The logo, one row of ASCII art per line.
  """
  art = [
      "",
      "TTTTTT FFFF DDD  BBBB   GGG ",
      "  TT   F    D  D B   B G    ",
      "  TT   FFF  D  D BBBB  G  GG",
      "  TT   F    D  D B   B G   G",
      "  TT   F    DDD  BBBB   GGG ",
      "",
  ]
  return debugger_cli_common.RichTextLines(art)
# Visual separator used to frame the run-start intro screen.
_HORIZONTAL_BAR = "======================================"


def get_run_start_intro(run_call_count,
                        fetches,
                        feed_dict,
                        tensor_filters,
                        is_callable_runner=False):
  """Generate formatted intro for run-start UI.

  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
      callable.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.

  Returns:
    (RichTextLines) Formatted intro message about the `Session.run()` call.
  """
  fetch_lines = _get_fetch_names(fetches)

  if not feed_dict:
    feed_dict_lines = ["(Empty)"]
  else:
    # Feed keys may be strings or objects with a .name attribute.
    feed_dict_lines = []
    for feed_key in feed_dict:
      if isinstance(feed_key, six.string_types):
        feed_dict_lines.append(feed_key)
      else:
        feed_dict_lines.append(feed_key.name)

  intro_lines = [_HORIZONTAL_BAR]
  if is_callable_runner:
    # NOTE(review): "make_callabe" typo is in the original output string and
    # is preserved here deliberately.
    intro_lines.append("Running a runner returned by Session.make_callabe()")
  else:
    intro_lines.extend([
        "Session.run() call #%d:" % run_call_count,
        "", "Fetch(es):"
    ])
    intro_lines.extend(["  " + line for line in fetch_lines])
    intro_lines.extend(["", "Feed dict(s):"])
    intro_lines.extend(["  " + line for line in feed_dict_lines])
  intro_lines.extend([
      _HORIZONTAL_BAR, "",
      "Select one of the following commands to proceed ---->"
  ])

  out = debugger_cli_common.RichTextLines(intro_lines)

  out.extend(
      _recommend_command(
          "run",
          "Execute the run() call with debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -n",
          "Execute the run() call without debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -t <T>",
          "Execute run() calls (T - 1) times without debugging, then "
          "execute run() once more with debugging and drop back to the CLI"))
  out.extend(
      _recommend_command(
          "run -f <filter_name>",
          "Keep executing run() calls until a dumped tensor passes a given, "
          "registered filter (conditional breakpoint mode)"))

  more_lines = ["    Registered filter(s):"]
  if tensor_filters:
    # NOTE(review): filter_names is collected but never used afterwards;
    # kept as-is to preserve the original code byte-for-byte.
    filter_names = []
    for filter_name in tensor_filters:
      filter_names.append(filter_name)
      command_menu_node = debugger_cli_common.MenuItem(
          "", "run -f %s" % filter_name)
      more_lines.append(RL("        * ") + RL(filter_name, command_menu_node))
  else:
    more_lines.append("        (None)")

  out.extend(
      debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))

  out.extend(
      _recommend_command(
          "invoke_stepper",
          "Use the node-stepper interface, which allows you to interactively "
          "step through nodes involved in the graph run() call and "
          "inspect/modify their values", create_link=True))

  out.append("")
  out.append_rich_line(RL("For more details, see ") +
                       RL("help.", debugger_cli_common.MenuItem("", "help")) +
                       ".")
  out.append("")

  # Make main menu for the run-start intro.
  menu = debugger_cli_common.Menu()
  menu.append(debugger_cli_common.MenuItem("run", "run"))
  menu.append(debugger_cli_common.MenuItem(
      "invoke_stepper", "invoke_stepper"))
  menu.append(debugger_cli_common.MenuItem("exit", "exit"))
  out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu

  return out
def get_run_short_description(run_call_count,
                              fetches,
                              feed_dict,
                              is_callable_runner=False):
  """Get a short description of the run() call.

  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.

  Returns:
    (str) A short description of the run() call, including information about
      the fetche(s) and feed(s).
  """
  if is_callable_runner:
    return "runner from make_callable()"

  parts = ["run #%d: " % run_call_count]

  if isinstance(fetches, (ops.Tensor, ops.Operation, variables.Variable)):
    parts.append("1 fetch (%s); " % _get_fetch_name(fetches))
  else:
    # Could be (nested) list, tuple, dict or namedtuple.
    num_fetches = len(_get_fetch_names(fetches))
    if num_fetches > 1:
      parts.append("%d fetches; " % num_fetches)
    else:
      parts.append("%d fetch; " % num_fetches)

  if not feed_dict:
    parts.append("0 feeds")
  elif len(feed_dict) == 1:
    # Single feed: show its key (string) or name attribute.
    (feed_key,) = feed_dict
    parts.append("1 feed (%s)" % (
        feed_key if isinstance(feed_key, six.string_types) else feed_key.name))
  else:
    parts.append("%d feeds" % len(feed_dict))

  return "".join(parts)
def get_error_intro(tf_error):
  """Generate formatted intro for TensorFlow run-time error.

  Args:
    tf_error: (errors.OpError) TensorFlow run-time error object.

  Returns:
    (RichTextLines) Formatted intro message about the run-time OpError, with
      sample commands for debugging.
  """
  op_name = tf_error.op.name

  intro_lines = [
      "--------------------------------------",
      RL("!!! An error occurred during the run !!!", "blink"),
      "",
      "You may use the following commands to debug:",
  ]

  out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)

  # Each recommendation becomes a clickable command link.
  for command, description in (
      ("ni -a -d -t %s" % op_name,
       "Inspect information about the failing op."),
      ("li -r %s" % op_name,
       "List inputs to the failing op, recursively."),
      ("lt",
       "List all tensors dumped during the failing run() call.")):
    out.extend(_recommend_command(command, description, create_link=True))

  more_lines = [
      "",
      "Op name: " + op_name,
      "Error type: " + str(type(tf_error)),
      "",
      "Details:",
      str(tf_error),
      "",
      "WARNING: Using client GraphDef due to the error, instead of "
      "executor GraphDefs.",
      "--------------------------------------",
      "",
  ]
  out.extend(debugger_cli_common.RichTextLines(more_lines))

  return out
| |
import argparse
import logging
from dvc.command import completion
from dvc.command.base import CmdBase, append_doc_link
logger = logging.getLogger(__name__)
class CmdDataBase(CmdBase):
    """Shared behavior for the data-sync commands (pull/push/fetch)."""

    def log_summary(self, stats):
        """Print a one-line transfer summary, or a default message."""
        from dvc.ui import ui
        from dvc.utils.humanize import get_summary

        summary = get_summary(stats.items())
        ui.write(summary or "Everything is up to date.")
class CmdDataPull(CmdDataBase):
    """`dvc pull`: download tracked data and check it out into the workspace."""

    def log_summary(self, stats):
        from dvc.command.checkout import log_changes

        # Report workspace changes first, then the generic transfer summary.
        log_changes(stats)
        super().log_summary(stats)

    def run(self):
        from dvc.exceptions import CheckoutError, DvcException

        args = self.args
        try:
            stats = self.repo.pull(
                targets=args.targets,
                jobs=args.jobs,
                remote=args.remote,
                all_branches=args.all_branches,
                all_tags=args.all_tags,
                all_commits=args.all_commits,
                with_deps=args.with_deps,
                force=args.force,
                recursive=args.recursive,
                run_cache=args.run_cache,
                glob=args.glob,
            )
            self.log_summary(stats)
        except (CheckoutError, DvcException) as exc:
            # Partial stats may be attached to the exception; report them.
            self.log_summary(getattr(exc, "stats", {}))
            logger.exception("failed to pull data from the cloud")
            return 1
        return 0
class CmdDataPush(CmdDataBase):
    """`dvc push`: upload tracked data to remote storage."""

    def run(self):
        from dvc.exceptions import DvcException

        args = self.args
        try:
            pushed = self.repo.push(
                targets=args.targets,
                jobs=args.jobs,
                remote=args.remote,
                all_branches=args.all_branches,
                all_tags=args.all_tags,
                all_commits=args.all_commits,
                with_deps=args.with_deps,
                recursive=args.recursive,
                run_cache=args.run_cache,
                glob=args.glob,
            )
            self.log_summary({"pushed": pushed})
        except DvcException:
            logger.exception("failed to push data to the cloud")
            return 1
        return 0
class CmdDataFetch(CmdDataBase):
    """`dvc fetch`: download tracked data into the local cache only."""

    def run(self):
        from dvc.exceptions import DvcException

        args = self.args
        try:
            fetched = self.repo.fetch(
                targets=args.targets,
                jobs=args.jobs,
                remote=args.remote,
                all_branches=args.all_branches,
                all_tags=args.all_tags,
                all_commits=args.all_commits,
                with_deps=args.with_deps,
                recursive=args.recursive,
                run_cache=args.run_cache,
            )
            self.log_summary({"fetched": fetched})
        except DvcException:
            logger.exception("failed to fetch data from the cloud")
            return 1
        return 0
def shared_parent_parser():
    """Build the argparse parent parser shared by pull/push/fetch/status."""
    from dvc.cli import get_parent_parser

    # Parent parser used in pull/push/status
    parent_parser = argparse.ArgumentParser(
        add_help=False, parents=[get_parent_parser()]
    )
    parent_parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        metavar="<number>",
        help=(
            "Number of jobs to run simultaneously. "
            "The default value is 4 * cpu_count(). "
            "For SSH remotes, the default is 4. "
        ),
    )
    targets_arg = parent_parser.add_argument(
        "targets",
        nargs="*",
        help=(
            "Limit command scope to these tracked files/directories, "
            ".dvc files, or stage names."
        ),
    )
    # Enable shell completion of DVC-tracked paths for the positional targets.
    targets_arg.complete = completion.DVC_FILE
    return parent_parser
def add_parser(subparsers, _parent_parser):
    """Register the pull, push, fetch and status subcommands."""
    from dvc.command.status import CmdDataStatus

    # Pull
    PULL_HELP = "Download tracked files or directories from remote storage."
    pull_parser = subparsers.add_parser(
        "pull",
        parents=[shared_parent_parser()],
        description=append_doc_link(PULL_HELP, "pull"),
        help=PULL_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    pull_parser.add_argument(
        "-r", "--remote", help="Remote storage to pull from", metavar="<name>"
    )
    pull_parser.add_argument(
        "-a",
        "--all-branches",
        action="store_true",
        default=False,
        help="Fetch cache for all branches.",
    )
    pull_parser.add_argument(
        "-T",
        "--all-tags",
        action="store_true",
        default=False,
        help="Fetch cache for all tags.",
    )
    pull_parser.add_argument(
        "-A",
        "--all-commits",
        action="store_true",
        default=False,
        help="Fetch cache for all commits.",
    )
    pull_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        default=False,
        help="Do not prompt when removing working directory files.",
    )
    pull_parser.add_argument(
        "-d",
        "--with-deps",
        action="store_true",
        default=False,
        help="Fetch cache for all dependencies of the specified target.",
    )
    pull_parser.add_argument(
        "-R",
        "--recursive",
        action="store_true",
        default=False,
        help="Pull cache for subdirectories of the specified directory.",
    )
    pull_parser.add_argument(
        "--run-cache",
        action="store_true",
        default=False,
        help="Fetch run history for all stages.",
    )
    pull_parser.add_argument(
        "--glob",
        action="store_true",
        default=False,
        help="Pull cache for targets matching shell-style wildcards.",
    )
    pull_parser.set_defaults(func=CmdDataPull)

    # Push
    PUSH_HELP = "Upload tracked files or directories to remote storage."
    push_parser = subparsers.add_parser(
        "push",
        parents=[shared_parent_parser()],
        description=append_doc_link(PUSH_HELP, "push"),
        help=PUSH_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    push_parser.add_argument(
        "-r", "--remote", help="Remote storage to push to", metavar="<name>"
    )
    push_parser.add_argument(
        "-a",
        "--all-branches",
        action="store_true",
        default=False,
        help="Push cache for all branches.",
    )
    push_parser.add_argument(
        "-T",
        "--all-tags",
        action="store_true",
        default=False,
        help="Push cache for all tags.",
    )
    push_parser.add_argument(
        "-A",
        "--all-commits",
        action="store_true",
        default=False,
        help="Push cache for all commits.",
    )
    push_parser.add_argument(
        "-d",
        "--with-deps",
        action="store_true",
        default=False,
        help="Push cache for all dependencies of the specified target.",
    )
    push_parser.add_argument(
        "-R",
        "--recursive",
        action="store_true",
        default=False,
        help="Push cache for subdirectories of specified directory.",
    )
    push_parser.add_argument(
        "--run-cache",
        action="store_true",
        default=False,
        help="Push run history for all stages.",
    )
    push_parser.add_argument(
        "--glob",
        action="store_true",
        default=False,
        help="Allows targets containing shell-style wildcards.",
    )
    push_parser.set_defaults(func=CmdDataPush)

    # Fetch
    # NOTE: fetch has no --force/--glob flags; it only fills the local cache.
    FETCH_HELP = (
        "Download files or directories from remote storage to the cache."
    )
    fetch_parser = subparsers.add_parser(
        "fetch",
        parents=[shared_parent_parser()],
        description=append_doc_link(FETCH_HELP, "fetch"),
        help=FETCH_HELP,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    fetch_parser.add_argument(
        "-r", "--remote", help="Remote storage to fetch from", metavar="<name>"
    )
    fetch_parser.add_argument(
        "-a",
        "--all-branches",
        action="store_true",
        default=False,
        help="Fetch cache for all branches.",
    )
    fetch_parser.add_argument(
        "-T",
        "--all-tags",
        action="store_true",
        default=False,
        help="Fetch cache for all tags.",
    )
    fetch_parser.add_argument(
        "-A",
        "--all-commits",
        action="store_true",
        default=False,
        help="Fetch cache for all commits.",
    )
    fetch_parser.add_argument(
        "-d",
        "--with-deps",
        action="store_true",
        default=False,
        help="Fetch cache for all dependencies of the " "specified target.",
    )
    fetch_parser.add_argument(
        "-R",
        "--recursive",
        action="store_true",
        default=False,
        help="Fetch cache for subdirectories of specified directory.",
    )
    fetch_parser.add_argument(
        "--run-cache",
        action="store_true",
        default=False,
        help="Fetch run history for all stages.",
    )
    fetch_parser.set_defaults(func=CmdDataFetch)

    # Status
    STATUS_HELP = (
        "Show changed stages, compare local cache and a remote storage."
    )
    # conflict_handler="resolve" lets status re-define flags inherited from
    # the shared parent parser (e.g. -q from the base parser).
    status_parser = subparsers.add_parser(
        "status",
        parents=[shared_parent_parser()],
        description=append_doc_link(STATUS_HELP, "status"),
        help=STATUS_HELP,
        conflict_handler="resolve",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    status_parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        default=False,
        help=(
            "Suppresses all output."
            " Exit with 0 if pipelines are up to date, otherwise 1."
        ),
    )
    status_parser.add_argument(
        "-c",
        "--cloud",
        action="store_true",
        default=False,
        help="Show status of a local cache compared to a remote repository.",
    )
    status_parser.add_argument(
        "-r",
        "--remote",
        help="Remote storage to compare local cache to",
        metavar="<name>",
    )
    status_parser.add_argument(
        "-a",
        "--all-branches",
        action="store_true",
        default=False,
        help="Show status of a local cache compared to a remote repository "
        "for all branches.",
    )
    status_parser.add_argument(
        "-T",
        "--all-tags",
        action="store_true",
        default=False,
        help="Show status of a local cache compared to a remote repository "
        "for all tags.",
    )
    status_parser.add_argument(
        "-A",
        "--all-commits",
        action="store_true",
        default=False,
        help="Show status of a local cache compared to a remote repository "
        "for all commits.",
    )
    status_parser.add_argument(
        "-d",
        "--with-deps",
        action="store_true",
        default=False,
        help="Show status for all dependencies of the specified target.",
    )
    status_parser.add_argument(
        "-R",
        "--recursive",
        action="store_true",
        default=False,
        help="Show status of all stages in the specified directory.",
    )
    status_parser.add_argument(
        "--json",
        "--show-json",
        action="store_true",
        default=False,
        help="Show status in JSON format.",
    )
    status_parser.set_defaults(func=CmdDataStatus)
| |
'''Multi Ojective EI Example'''
import shutil
from tempfile import mkdtemp
from numpy import sin, cos, pi
from openmdao.examples.expected_improvement.spiral_component import SpiralComponent
from openmdao.lib.components.api import MetaModel, ParetoFilter, \
MultiObjExpectedImprovement
from openmdao.lib.doegenerators.api import OptLatinHypercube
from openmdao.lib.drivers.adaptivesampledriver import AdaptiveSampleDriver
from openmdao.lib.drivers.api import Genetic, FixedPointIterator
from openmdao.lib.surrogatemodels.api import KrigingSurrogate
from openmdao.main.api import Assembly
class Analysis(Assembly):
    '''Top level assembly for the Multi Objective EI Example.

    Adaptively samples a spiral test function: a Kriging metamodel is
    trained on the samples, and a genetic optimizer maximizes the
    multi-objective expected improvement (MOEI) to pick each new point.
    '''

    def __init__(self):
        super(Analysis, self).__init__()
        # Scratch directory for files generated during execution;
        # removed by cleanup().
        self._tdir = mkdtemp()

    def configure(self):
        # Outer fixed-point iteration drives the adaptive sampling loop.
        driver = self.add('driver', FixedPointIterator())
        adapt = self.add('adapt', AdaptiveSampleDriver())
        MOEI_opt = self.add('MOEI_opt', Genetic())
        self.add('spiral', SpiralComponent())
        kwargs = {'params': ("x", "y"),
                  'responses': ('f1_xy', 'f2_xy')}
        meta = self.add('meta', MetaModel(**kwargs))
        meta.default_surrogate = KrigingSurrogate()
        self.add('pareto', ParetoFilter(**kwargs))
        self.add('MOEI', MultiObjExpectedImprovement())

        # initial training DOE
        adapt.DOEgenerator = OptLatinHypercube(num_samples=25)
        adapt.add_parameter('spiral.x')
        adapt.add_parameter('spiral.y')
        adapt.add_response('spiral.f1_xy')
        adapt.add_response('spiral.f2_xy')

        # pass training data from sampler to metamodel and pareto filter
        self.connect('adapt.all_case_inputs.spiral.x', ['meta.params.x',
                                                        'pareto.params.x'])
        self.connect('adapt.all_case_inputs.spiral.y', ['meta.params.y',
                                                        'pareto.params.y'])
        self.connect('adapt.all_case_outputs.spiral.f1_xy', ['meta.responses.f1_xy',
                                                             'pareto.responses.f1_xy'])
        self.connect('adapt.all_case_outputs.spiral.f2_xy', ['meta.responses.f2_xy',
                                                             'pareto.responses.f2_xy'])

        # connect meta and pareto to ei
        self.connect('[meta.f1_xy, meta.f2_xy]', 'MOEI.current')
        self.connect('pareto.pareto_outputs', 'MOEI.target')

        # MOEI optimization to find next point
        MOEI_opt.opt_type = "maximize"
        MOEI_opt.population_size = 100
        MOEI_opt.generations = 10
        # MOEI_opt.selection_method = "tournament"
        MOEI_opt.add_parameter("meta.x", low=0.75, high=5. * pi)
        MOEI_opt.add_parameter("meta.y", low=0.75, high=5. * pi)
        MOEI_opt.add_objective("MOEI.PI")

        # Iterative sampling process: the optimizer's chosen point is fed
        # back into the adaptive sampler until the two agree (fixed point).
        driver.add_parameter('adapt.adaptive_inputs.spiral.x[0]')
        driver.add_parameter('adapt.adaptive_inputs.spiral.y[0]')
        driver.add_constraint('adapt.adaptive_inputs.spiral.x[0] = meta.x')
        driver.add_constraint('adapt.adaptive_inputs.spiral.y[0] = meta.y')
        driver.max_iterations = 30

        # Iteration hierarchy
        driver.workflow.add(['adapt', 'pareto', 'MOEI_opt'])
        adapt.workflow.add(['spiral'])
        MOEI_opt.workflow.add(['meta', 'MOEI'])

        # FPI supports stop conditions: quit early once the expected
        # improvement becomes negligible.
        driver.add_stop_condition('MOEI.PI <= .0001')

    def cleanup(self):
        """cleans up any files left in the temp directory from execution"""
        shutil.rmtree(self._tdir, ignore_errors=True)
if __name__ == "__main__":  # pragma: no cover
    import sys

    # Optional CLI flags: --seed=<int>, --backend=<matplotlib backend>,
    # --figname=<name>. NOTE: figname is parsed but never used below.
    seed = None
    backend = None
    figname = None
    for arg in sys.argv[1:]:
        if arg.startswith('--seed='):
            import random
            seed = int(arg.split('=')[1])
            random.seed(seed)
        if arg.startswith('--backend='):
            backend = arg.split('=')[1]
        if arg.startswith('--figname='):
            figname = arg.split('=')[1]
    import matplotlib
    if backend is not None:
        matplotlib.use(backend)
    elif sys.platform == 'win32':
        matplotlib.use('WxAgg')
    from matplotlib import pyplot as plt
    from matplotlib.pylab import get_cmap
    from numpy import meshgrid, array, arange

    # create the analysis
    analysis = Analysis()

    # run the analysis
    analysis.run()

    # plot the samples points, along with the data from the function
    def f1(x, y):
        # First true objective of the spiral problem.
        return cos(x) / x + sin(y) / y

    def f2(x, y):
        # Second true objective of the spiral problem.
        return sin(x) / x + cos(y) / y

    X_range = arange(0.75, 5. * pi, 0.5)
    Y_range = arange(0.75, 5. * pi, 0.5)

    X, Y = meshgrid(X_range, Y_range)
    Z1, Z2 = f1(X, Y), f2(X, Y)

    # Figure 1: contours of the two true objectives over the domain.
    plt.figure()
    plt.subplot(121)
    plt.contour(X, Y, Z1, 50)
    plt.axis([0.75, 5 * pi, 0.75, 5 * pi])
    plt.subplot(122)
    plt.contour(X, Y, Z2, 50)
    cb = plt.colorbar(shrink=.6)
    plt.axis([0.75, 5 * pi, 0.75, 5 * pi])

    # Figure 2: mean predictions of the trained metamodel on the same grid.
    plt.figure()
    Z1_pred = []
    Z2_pred = []
    for x_row, y_row in zip(X, Y):
        row1 = []
        row2 = []
        for x, y in zip(x_row, y_row):
            analysis.meta.x = x
            analysis.meta.y = y
            analysis.meta.execute()
            row1.append(analysis.meta.f1_xy.mu)
            row2.append(analysis.meta.f2_xy.mu)
        Z1_pred.append(row1)
        Z2_pred.append(row2)
    Z1_pred = array(Z1_pred)
    Z2_pred = array(Z2_pred)

    # plot the initial training data
    data_train = {}
    data_train['meta.y'] = analysis.adapt.DOE_inputs.spiral.y
    data_train['meta.x'] = analysis.adapt.DOE_inputs.spiral.x
    data_train['meta.f1_xy'] = analysis.adapt.DOE_outputs.spiral.f1_xy
    data_train['meta.f2_xy'] = analysis.adapt.DOE_outputs.spiral.f2_xy
    plt.scatter(data_train['meta.x'],
                data_train['meta.y'], s=30, c='#572E07', zorder=10)

    # Adaptively-added points follow the initial DOE in the all_case_*
    # arrays, so slice them off after the first n_train entries.
    n_train = len(data_train['meta.y'])
    data_EI = {}
    data_EI['meta.y'] = analysis.adapt.all_case_inputs.spiral.y[n_train:]
    data_EI['meta.x'] = analysis.adapt.all_case_inputs.spiral.x[n_train:]
    data_EI['meta.f1_xy'] = analysis.adapt.all_case_outputs.spiral.f1_xy[n_train:]
    data_EI['meta.f2_xy'] = analysis.adapt.all_case_outputs.spiral.f2_xy[n_train:]

    # Color the adaptive samples by iteration order.
    count = len(data_EI['meta.x'])
    colors = arange(0, count) / float(count)
    color_map = get_cmap('spring')

    f1_train = [case for case in data_train['meta.f1_xy']]
    f2_train = [case for case in data_train['meta.f2_xy']]
    f1_iter = [case for case in data_EI['meta.f1_xy']]
    f2_iter = [case for case in data_EI['meta.f2_xy']]

    plt.subplot(121)
    plt.contour(X, Y, Z1_pred, 50)
    plt.scatter(data_train['meta.x'],
                data_train['meta.y'], s=30, c='#572E07', zorder=10)
    plt.scatter(data_EI['meta.x'], data_EI['meta.y'],
                s=30,
                c=colors,
                zorder=11,
                cmap=color_map)
    plt.axis([0.75, 5 * pi, 0.75, 5 * pi])
    plt.subplot(122)
    plt.contour(X, Y, Z2_pred, 50)
    cb = plt.colorbar(shrink=.6)
    plt.scatter(data_train['meta.x'],
                data_train['meta.y'], s=30, c='#572E07', zorder=10)
    plt.scatter(data_EI['meta.x'], data_EI['meta.y'],
                s=30,
                c=colors,
                zorder=11,
                cmap=color_map)
    plt.axis([0.75, 5 * pi, 0.75, 5 * pi])

    # Figure 3: objective space — true grid values, training data, and
    # the EI-selected iterates.
    plt.figure()
    plt.scatter(Z1, Z2)
    plt.scatter(f1_train, f2_train, s=30, c='#572E07', zorder=10)
    plt.scatter(f1_iter, f2_iter, s=30, c=colors, zorder=11, cmap=color_map)
    plt.show()
    analysis.cleanup()
| |
"""
Flask routing
"""
from flask import Flask, request, session, send_from_directory, render_template
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__, static_path="/")
app.wsgi_app = ProxyFix(app.wsgi_app)
import api
import json
import mimetypes
import os.path
from datetime import datetime
from api.common import WebSuccess, WebError
from api.annotations import api_wrapper, require_login, require_teacher, require_admin, check_csrf
from api.annotations import block_before_competition, block_after_competition
from api.annotations import log_action
log = api.logger.use(__name__)

# Session-cookie settings applied to the Flask app by config_app().
session_cookie_domain = "127.0.0.1"
session_cookie_path = "/"
session_cookie_name = "flask"
# Secret key for signing session cookies. NOTE(review): the empty default
# is not production-safe; it is expected to be overridden before
# config_app() runs — confirm deployment sets it.
secret_key = ""
def guess_mimetype(resource_path):
    """
    Guesses the mimetype of a given resource.

    Args:
        resource_path: the path to a given resource.
    Returns:
        The mimetype string, falling back to the generic binary type
        when the extension is unrecognized.
    """
    guessed, _encoding = mimetypes.guess_type(resource_path)
    return guessed if guessed is not None else "application/octet-stream"
@app.route('/api/autogen/serve/<path>')
@require_login
def serve_autogen_hook(path):
    """
    Serve an autogenerated problem resource to a logged-in user.

    Query args:
        pid: problem id the resource belongs to; the team must have
            unlocked this problem.
        static: "true" to serve from the problem's static instance
            directory instead of the team-specific instance.
    """
    pid = request.args.get("pid", None)
    static = request.args.get("static", "false") == "true"

    tid = api.user.get_team()["tid"]
    if pid not in api.problem.get_unlocked_pids(tid):
        return WebError("You have not unlocked this problem!")

    instance_number = api.autogen.get_instance_number(pid, tid)
    if static:
        instance_path = api.autogen.get_static_instance_path(pid, public=True)
    else:
        instance_path = api.autogen.get_instance_path(pid, instance_number, public=True)

    mime = guess_mimetype(path)
    if mime == 'text/html':
        # HTML is sent with mimetype=None so the framework determines the
        # content type itself; everything else uses the guessed mimetype.
        return send_from_directory(instance_path, path, mimetype=None, as_attachment=False, attachment_filename=None)
    else:
        return send_from_directory(instance_path, path, mimetype=mime)
def config_app(*args, **kwargs):
    """
    Return the app object configured correctly.
    This needed to be done for gunicorn.
    """
    app.secret_key = secret_key
    # Apply the module-level session cookie settings in one shot.
    app.config.update(
        SESSION_COOKIE_DOMAIN=session_cookie_domain,
        SESSION_COOKIE_PATH=session_cookie_path,
        SESSION_COOKIE_NAME=session_cookie_name,
    )

    api.logger.setup_logs({"verbose": 2})
    return app
@app.after_request
def after_request(response):
    """
    Post-process every outgoing response.

    Adds CORS and no-cache headers, mirrors the session CSRF token into a
    'token' cookie for logged-in users, and forces a JSON mimetype on all
    endpoints except autogen resource downloads.
    """
    response.headers.add('Access-Control-Allow-Methods', 'GET, POST')
    response.headers.add('Access-Control-Allow-Credentials', 'true')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type, *')
    response.headers.add('Cache-Control', 'no-cache')
    response.headers.add('Cache-Control', 'no-store')
    if api.auth.is_logged_in():
        if 'token' in session:
            response.set_cookie('token', session['token'])
        else:
            # First response after login: mint a CSRF token and store it in
            # both the session and a cookie readable by the frontend.
            csrf_token = api.common.token()
            session['token'] = csrf_token
            response.set_cookie('token', csrf_token)

    # JB: This is a hack. We need a better solution
    if not request.path.startswith("/api/autogen/serve/"):
        # Bug fix: was 'appication/json' (typo) — not a valid mimetype, so
        # API responses went out with a broken Content-Type.
        response.mimetype = 'application/json'
    return response
@app.route('/api/user/shell', methods=['GET'])
@api_wrapper
def get_shell_account_hook():
    """Return the shell account credentials for the user's team."""
    return WebSuccess(data=api.team.get_shell_account())


@app.route('/api/user/create', methods=['POST'])
@api_wrapper
def create_user_hook():
    """Register a new user from the posted form and log them in."""
    new_uid = api.user.create_user_request(api.common.flat_multi(request.form))
    # Storing the uid in the session logs the new user in immediately.
    session['uid'] = new_uid
    return WebSuccess("User '{}' registered successfully!".format(request.form["username"]))


@app.route('/api/user/update_password', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
def update_password_hook():
    """Change the logged-in user's password; the current password is verified."""
    api.user.update_password_request(api.common.flat_multi(request.form), check_current=True)
    return WebSuccess("Your password has been successfully updated!")
@app.route('/api/user/disable_account', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
def disable_account_hook():
    """Disable the logged-in user's account; the current password is verified."""
    api.user.disable_account_request(api.common.flat_multi(request.form), check_current=True)
    # Bug fix: the user-facing message read "Your have successfully ...".
    return WebSuccess("You have successfully disabled your account!")
@app.route('/api/user/reset_password', methods=['GET'])
@api_wrapper
def reset_password_hook():
    """Email a password-reset link to the address registered for a username."""
    username = request.args.get("username", None)

    api.utilities.request_password_reset(username)
    return WebSuccess("A password reset link has been sent to the email address provided during registration.")


@app.route('/api/user/confirm_password_reset', methods=['POST'])
@api_wrapper
def confirm_password_reset_hook():
    """Complete a password reset using the emailed reset token."""
    password = request.form.get("new-password")
    confirm = request.form.get("new-password-confirmation")
    token = request.form.get("reset-token")

    api.utilities.reset_password(token, password, confirm)
    return WebSuccess("Your password has been reset")


@app.route('/api/user/login', methods=['POST'])
@api_wrapper
def login_hook():
    """Authenticate a user from form-supplied credentials."""
    username = request.form.get('username')
    password = request.form.get('password')
    api.auth.login(username, password)
    return WebSuccess(message="Successfully logged in as " + username, data={'teacher': api.user.is_teacher()})


@app.route('/api/user/logout', methods=['GET'])
@api_wrapper
def logout_hook():
    """End the current session, if one exists."""
    if api.auth.is_logged_in():
        api.auth.logout()
        return WebSuccess("Successfully logged out.")
    else:
        return WebError("You do not appear to be logged in.")
@app.route('/api/user/status', methods=['GET'])
@api_wrapper
def status_hook():
    """Return session and competition status flags for the current visitor."""
    status = {
        "logged_in": api.auth.is_logged_in(),
        "admin": api.auth.is_admin(),
        "teacher": api.auth.is_logged_in() and api.user.is_teacher(),
        "enable_teachers": api.config.enable_teachers,
        "enable_feedback": api.config.enable_feedback,
        "shell": api.config.enable_shell,
        "enable_captcha": api.config.enable_captcha,
        "competition_active": api.utilities.check_competition_active(),
        "username": api.user.get_user()['username'] if api.auth.is_logged_in() else ""
    }

    return WebSuccess(data=status)


@app.route('/api/team', methods=['GET'])
@api_wrapper
@require_login
def team_information_hook():
    """Return information about the logged-in user's team."""
    return WebSuccess(data=api.team.get_team_information())


@app.route('/api/team/score', methods=['GET'])
@api_wrapper
@require_login
def get_team_score_hook():
    """Return the logged-in team's current score."""
    score = api.stats.get_score(tid=api.user.get_user()['tid'])
    if score is not None:
        return WebSuccess(data={'score': score})
    return WebError("There was an error retrieving your score.")


@app.route('/api/stats/team/solved_problems', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_team_solved_problems_hook():
    """Return problems grouped by category plus per-member stats for a team."""
    tid = request.args.get("tid", None)
    stats = {
        "problems": api.stats.get_problems_by_category(),
        "members": api.stats.get_team_member_stats(tid)
    }

    return WebSuccess(data=stats)
@app.route('/api/stats/team/score_progression', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_team_score_progression():
    """
    Return the logged-in team's score progression over time.

    Optional query parameter 'category' restricts the progression to a
    single problem category.
    """
    # Bug fix: this is a GET endpoint, so parameters arrive in the query
    # string (request.args); request.form is empty for GET requests, which
    # made 'category' silently always None.
    category = request.args.get("category", None)
    tid = api.user.get_team()["tid"]
    return WebSuccess(data=[api.stats.get_score_progression(tid=tid, category=category)])
@app.route('/api/admin/getallproblems', methods=['GET'])
@api_wrapper
@require_admin
def get_all_problems_hook():
    """Admin: return every problem in the database."""
    problems = api.problem.get_all_problems()
    if problems is None:
        return WebError("There was an error querying problems from the database.")
    return WebSuccess(data=problems)
@app.route('/api/admin/getallusers', methods=['GET'])
@api_wrapper
@require_admin
def get_all_users_hook():
    """Admin: return every user in the database."""
    users = api.user.get_all_users()
    if users is None:
        # Fixed the error message ("error query users"), matching the
        # wording of the sibling getallproblems endpoint.
        return WebError("There was an error querying users from the database.")
    return WebSuccess(data=users)
@app.route('/api/problems', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_unlocked_problems_hook():
    """Return the problems currently unlocked for the user's team."""
    return WebSuccess(data=api.problem.get_unlocked_problems(api.user.get_user()['tid']))
@app.route('/api/problems/solved', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_solved_problems_hook():
    """Return the problems the user's team has already solved."""
    # Bug fix: the solved-problem list is payload, not a status message.
    # It was passed positionally (the message slot — see the
    # WebSuccess(message=...) usage in login_hook); every sibling endpoint
    # passes results as data=.
    return WebSuccess(data=api.problem.get_solved_problems(api.user.get_user()['tid']))
@app.route('/api/problems/submit', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
@block_after_competition(WebError("The competition is over!"))
def submit_key_hook():
    """Grade a submitted flag for a problem and report the result."""
    user_account = api.user.get_user()
    tid = user_account['tid']
    uid = user_account['uid']

    pid = request.form.get('pid', '')
    key = request.form.get('key', '')
    # Submitter's IP address is recorded alongside the submission.
    ip = request.remote_addr

    result = api.problem.submit_key(tid, pid, key, uid, ip)

    if result['correct']:
        return WebSuccess(result['message'], result['points'])
    else:
        return WebError(result['message'], {'code': 'wrong'})


@app.route('/api/problems/<path:pid>', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
@block_after_competition(WebError("The competition is over!"))
def get_single_problem_hook(pid):
    """Return details for a single problem, in the context of the user's team."""
    problem_info = api.problem.get_problem(pid, tid=api.user.get_user()['tid'])
    return WebSuccess(data=problem_info)
@app.route('/api/problems/feedback', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def problem_feedback_hook():
    """
    Record a user's feedback for a problem.

    Expects form fields 'pid' and 'feedback' (JSON-encoded).
    """
    raw_feedback = request.form.get("feedback", None)
    pid = request.form.get("pid", None)

    # Bug fix: validate before parsing. Previously json.loads() ran first
    # with an empty-string default, so a missing 'feedback' field raised
    # ValueError instead of reaching the error message below.
    if raw_feedback is None or pid is None:
        return WebError("Please supply a pid and feedback.")

    feedback = json.loads(raw_feedback)
    if feedback is None:
        # A literal JSON "null" payload is treated the same as missing.
        return WebError("Please supply a pid and feedback.")

    api.problem_feedback.add_problem_feedback(pid, api.auth.get_uid(), feedback)
    return WebSuccess("Your feedback has been accepted.")
@app.route('/api/problems/feedback/reviewed', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def problem_reviews_hook():
    """Return the pids that already have reviewed feedback."""
    return WebSuccess(data=api.problem_feedback.get_reviewed_pids())


@app.route("/api/problems/hint", methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def request_problem_hint_hook():
    """Record that a team viewed a hint for a problem.

    The nested no-op function exists only so the @log_action decorator
    records the (pid, source) pair; no hint content is returned.
    """

    @log_action
    def hint(pid, source):
        return None

    source = request.args.get("source")
    pid = request.args.get("pid")

    if pid is None:
        return WebError("Please supply a pid.")
    if source is None:
        return WebError("You have to supply the source of the hint.")

    tid = api.user.get_team()["tid"]
    if pid not in api.problem.get_unlocked_pids(tid):
        return WebError("Your team hasn't unlocked this problem yet!")

    hint(pid, source)
    return WebSuccess("Hint noted.")


@app.route('/api/group/list')
@api_wrapper
@require_login
def get_group_list_hook():
    """Return the groups the logged-in user's team belongs to."""
    return WebSuccess(data=api.team.get_groups())
@app.route('/api/group', methods=['GET'])
@api_wrapper
@require_login
def get_group_hook():
    """
    Return information about a group the logged-in user belongs to.

    Expects query parameters 'group-name' and 'group-owner'.
    """
    # Bug fix: this is a GET endpoint, so parameters arrive in the query
    # string (request.args), not a form body (request.form).
    name = request.args.get("group-name")
    owner = request.args.get("group-owner")

    owner_uid = api.user.get_user(name=owner)["uid"]
    if not api.group.is_member_of_group(name=name, owner_uid=owner_uid):
        return WebError("You are not a member of this group.")

    # Reuse the already-read name instead of re-reading the request.
    return WebSuccess(data=api.group.get_group(name=name, owner_uid=owner_uid))
@app.route('/api/group/member_information', methods=['GET'])
@api_wrapper
def get_memeber_information_hook(gid=None):
    """Return member information for a group the caller owns.

    NOTE(review): the function name misspells "member", and the gid
    parameter is dead — the route supplies no arguments and it is
    immediately overwritten from the query string.
    """
    gid = request.args.get("gid")
    if not api.group.is_owner_of_group(gid):
        return WebError("You do not own that group!")

    return WebSuccess(data=api.group.get_member_information(gid=gid))


@app.route('/api/group/score', methods=['GET'])
@api_wrapper
@require_teacher
def get_group_score_hook():  # JB: Fix this
    """Teacher: return the score listing for an owned group.

    NOTE(review): is_owner_of_group is called here with gid=<group name>,
    while get_memeber_information_hook passes an actual gid — one of the
    two usages is likely wrong; confirm against api.group.
    """
    name = request.args.get("group-name")
    if not api.group.is_owner_of_group(gid=name):
        return WebError("You do not own that group!")

    # TODO: Investigate!
    score = api.stats.get_group_scores(name=name)

    if score is None:
        return WebError("There was an error retrieving your score.")

    return WebSuccess(data={'score': score})


@app.route('/api/group/create', methods=['POST'])
@api_wrapper
@check_csrf
@require_teacher
def create_group_hook():
    """Teacher: create a new group from the posted form."""
    gid = api.group.create_group_request(api.common.flat_multi(request.form))
    return WebSuccess("Successfully created group", gid)


@app.route('/api/group/join', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
def join_group_hook():
    """Join the logged-in user's team to a group."""
    api.group.join_group_request(api.common.flat_multi(request.form))
    return WebSuccess("Successfully joined group")


@app.route('/api/group/leave', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
def leave_group_hook():
    """Remove the logged-in user's team from a group."""
    api.group.leave_group_request(api.common.flat_multi(request.form))
    return WebSuccess("Successfully left group")


@app.route('/api/group/delete', methods=['POST'])
@api_wrapper
@check_csrf
@require_teacher
def delete_group_hook():
    """Teacher: delete a group."""
    api.group.delete_group_request(api.common.flat_multi(request.form))
    return WebSuccess("Successfully deleted group")


@app.route('/api/achievements', methods=['GET'])
@require_login
@api_wrapper
def get_achievements_hook():
    """Return the achievements earned by the user's team."""
    tid = api.user.get_team()["tid"]
    achievements = api.achievement.get_earned_achievements_display(tid=tid)
    for achievement in achievements:
        achievement["timestamp"] = None  # JB: Hack to temporarily fix achievements timestamp problem
    return WebSuccess(data=achievements)
@app.route('/api/stats/scoreboard', methods=['GET'])
@api_wrapper
@block_before_competition(WebError("The competition has not begun yet!"))
def get_scoreboard_hook():
    """
    Return the public scoreboard plus, for logged-in users, one
    scoreboard per group their team belongs to.
    """
    scoreboards = {
        'public': api.stats.get_all_team_scores(),
        'groups': [],
    }
    if api.auth.is_logged_in():
        scoreboards['groups'] = [
            {
                'gid': group['gid'],
                'name': group['name'],
                'scoreboard': api.stats.get_group_scores(gid=group['gid'])
            }
            for group in api.team.get_groups()
        ]
    return WebSuccess(data=scoreboards)
@app.route('/api/stats/top_teams/score_progression', methods=['GET'])
@api_wrapper
def get_top_teams_score_progressions_hook():
    """Return score progressions for the top teams (public, no login)."""
    return WebSuccess(data=api.stats.get_top_teams_score_progressions())
@app.route('/api/time', methods=['GET'])
@api_wrapper
def get_time():
    """Return the current server time as an integer Unix timestamp."""
    # Local import: the module only imports `datetime` from the package.
    from datetime import timezone

    # Bug fix: datetime.utcnow() is naive, so .timestamp() interprets it
    # in the server's *local* timezone and returns an epoch skewed by the
    # UTC offset. An aware UTC datetime yields the true epoch.
    return WebSuccess(data=int(datetime.now(timezone.utc).timestamp()))
| |
# -*- coding: utf-8 -*-
"""
PyHusky
~~~~~~
Python Role Based Permissions Library
:copyright: (c) 2016 by Clivern (hello@clivern.com).
:license: MIT, see LICENSE for more details.
"""
import pymysql.cursors
#from .exceptions import PyHuskyError
import datetime
# print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
"""
get_user_roles
mysql> describe ph_roles;
+--------------+------------------+------+-----+---------------------+----------------+
| Field | Type | Null | Key | Default | Extra |
+--------------+------------------+------+-----+---------------------+----------------+
| id | int(10) unsigned | NO | PRI | NULL | auto_increment |
| name | varchar(255) | NO | UNI | NULL | |
| display_name | varchar(255) | YES | | NULL | |
| description | varchar(255) | YES | | NULL | |
| created_at | timestamp | NO | | 0000-00-00 00:00:00 | |
| updated_at | timestamp | NO | | 0000-00-00 00:00:00 | |
| enabled | tinyint(1) | NO | | 0 | |
+--------------+------------------+------+-----+---------------------+----------------+
7 rows in set (0.00 sec)
SELECT ph_roles.* FROM ph_roles join ph_role_user on ph_role_user.role_id = ph_roles.id WHERE ph_role_user.user_id = {user_id}
SELECT ph_permissions.* FROM ph_permissions join ph_permission_user on ph_permission_user.permission_id = ph_permissions.id WHERE ph_permission_user.user_id = {user_id}
SELECT ph_permissions.* FROM ph_permissions join ph_permission_role on ph_permission_role.permission_id = ph_permissions.id WHERE ph_permission_role.role_id IN ({roles})
INSERT INTO ph_roles (name, display_name, description, created_at, updated_at, enabled) VALUES ();
mysql> describe ph_role_user;
+---------+------------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+---------+------------------+------+-----+---------+-------+
| user_id | int(10) unsigned | NO | PRI | NULL | |
| role_id | int(10) unsigned | NO | PRI | NULL | |
+---------+------------------+------+-----+---------+-------+
DELETE FROM ph_role_user WHERE role_id={role_id} AND user_id={user_id}
2 rows in set (0.00 sec)
mysql> select users.name, programs.name from linker
-> join users on users.id = linker.user_id
-> join programs on programs.id = linker.program_id;
SELECT * FROM ph_role_user WHERE user_id=%s AND role_id=%s
SELECT ph_roles.id, ph_roles.display_name from ph_roles join ph_role_user on ph_role_user.role_id = ph_roles.id WHERE ph_roles.name = {role_name} AND ph_role_user.user_id = {user_id}
mysql> describe ph_permissions;
+--------------+------------------+------+-----+---------------------+----------------+
| Field | Type | Null | Key | Default | Extra |
+--------------+------------------+------+-----+---------------------+----------------+
| id | int(10) unsigned | NO | PRI | NULL | auto_increment |
| name | varchar(255) | NO | UNI | NULL | |
| display_name | varchar(255) | YES | | NULL | |
| description | varchar(255) | YES | | NULL | |
| created_at | timestamp | NO | | 0000-00-00 00:00:00 | |
| updated_at | timestamp | NO | | 0000-00-00 00:00:00 | |
| enabled | tinyint(1) | NO | | 0 | |
+--------------+------------------+------+-----+---------------------+----------------+
7 rows in set (0.01 sec)
mysql> describe ph_permission_user;
+---------------+------------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+---------------+------------------+------+-----+---------+-------+
| permission_id | int(10) unsigned | NO | PRI | NULL | |
| user_id | int(10) unsigned | NO | PRI | NULL | |
+---------------+------------------+------+-----+---------+-------+
2 rows in set (0.00 sec)
DELETE FROM ph_permission_user WHERE permission_id={permission_id} AND user_id={user_id}
mysql> describe ph_permission_role;
+---------------+------------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+---------------+------------------+------+-----+---------+-------+
| permission_id | int(10) unsigned | NO | PRI | NULL | |
| role_id | int(10) unsigned | NO | PRI | NULL | |
+---------------+------------------+------+-----+---------+-------+
2 rows in set (0.00 sec)
SELECT ph_permission_role.role_id FROM ph_permission_role join ph_permissions ON ph_permissions.id=ph_permission_role.permission_id WHERE ph_permissions.id = {permission_id}
SELECT ph_permission_role.role_id FROM ph_permission_role join ph_permissions ON ph_permissions.id=ph_permission_role.permission_id WHERE ph_permissions.name = {permission_name}
name
"""
"""
INSERT INTO ph_rules (name, display_name, description, created_at, updated_at, enabled) VALUES (value1, value2, value3,...)
DELETE FROM ph_rules WHERE name=value;
UPDATE ph_rules SET display_name=value, description=value WHERE name=value;
SELECT * FROM ph_rules WHERE name=value
"""
class MySQLModel(object):
    """MySQL Model Module

    Builds the SQL used for role/permission checks. Connection details
    and table names come from the class-level defaults below and can be
    overridden through the constructor.
    """

    # Default database connection settings.
    _db = {
        'host': 'localhost',
        'username': 'root',
        'password': 'root',
        'database': 'pyhusky'
    }

    # Default table names; 'prefix' is prepended to each table name when
    # queries are assembled. users_table/users_table_id are placeholders
    # (False = not configured).
    _tables = {
        'prefix': 'ph_',
        'users_table': False,
        'users_table_id': False,
        'roles_table': 'roles',
        'permissions_table': 'permissions',
        'permission_role_table': 'permission_role',
        'role_user_table': 'role_user',
        'permission_user_table': 'permission_user'
    }
def __init__(self, db={}, tables={}):
"""Set Database Configs and Tables
Args:
db: A list of database configs
tables: A list of tables configs
"""
for key in db:
self._db[key] = db[key]
for key in tables:
self._tables[key] = tables[key]
self._connect()
    def has_role(self, user_id, role_name, role_id=False):
        """Build the query checking whether user_id holds a role.

        The role is looked up by role_id when given, otherwise by
        role_name.

        NOTE(review): the query is assembled but never executed or
        returned, so this method currently always returns None. Values
        are interpolated with str.format (SQL-injection risk) and
        role_name is not quoted — presumably execution and escaping were
        meant to go through the pymysql connection from _connect();
        confirm.
        """
        if role_id != False:
            query = "SELECT * FROM {role_user_table} WHERE user_id={user_id} AND role_id={role_id}".format(
                role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
                user_id=user_id,
                role_id=role_id
            )
        elif role_name != False:
            query = "SELECT {roles_table}.id, {roles_table}.display_name FROM {roles_table} JOIN {role_user_table} ON {role_user_table}.role_id = {roles_table}.id WHERE {roles_table}.name = {role_name} AND {role_user_table}.user_id = {user_id}".format(
                role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
                roles_table=self._tables['prefix'] + self._tables['roles_table'],
                user_id=user_id,
                role_name=role_name
            )
        else:
            # NOTE(review): PyHuskyError's import is commented out at the
            # top of this module, so this raise would itself fail with
            # NameError; re-enable the import.
            raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:has_role'")

    def role_enabled(self, role_name, role_id=False):
        """Build the query checking that a role exists and is enabled.

        NOTE(review): the query is built but never executed or returned.
        """
        if role_id != False:
            query = "SELECT * FROM {roles_table} WHERE id={role_id} AND enabled=1".format(
                roles_table=self._tables['prefix'] + self._tables['roles_table'],
                role_id=role_id
            )
        elif role_name != False:
            query = "SELECT * FROM {roles_table} WHERE name={role_name} AND enabled=1".format(
                roles_table=self._tables['prefix'] + self._tables['roles_table'],
                role_name=role_name
            )
        else:
            raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:role_enabled'")
    def has_permission(self, user_id, permission_name, permission_id=False):
        """Build the queries checking whether user_id holds a permission,
        either directly or via one of the user's roles.

        NOTE(review): `roles_ids` is referenced but never assigned in this
        method, so both branches raise NameError at the `if roles_ids`
        test — query_2 was presumably meant to be executed first to
        collect the role ids. None of the queries are executed or
        returned.
        """
        if permission_id != False:
            query_1 = "SELECT * FROM {permission_user_table} WHERE user_id={user_id} AND permission_id={permission_id}".format(
                permission_user_table=self._tables['prefix'] + self._tables['permission_user_table'],
                user_id=user_id,
                permission_id=permission_id
            )
            query_2 = "SELECT {permission_role_table}.role_id FROM {permission_role_table} JOIN {permissions_table} ON {permissions_table}.id={permission_role_table}.permission_id WHERE {permissions_table}.id = {permission_id}".format(
                permission_role_table=self._tables['prefix'] + self._tables['permission_role_table'],
                permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
                permission_id=permission_id
            )
            if roles_ids != '':
                query_3 = "SELECT * FROM {role_user_table} WHERE user_id={user_id} AND role_id IN ({roles_ids})".format(
                    role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
                    user_id=user_id,
                    roles_ids=roles_ids
                )
        elif permission_name != False:
            query_1 = "SELECT {permissions_table}.id, {permissions_table}.display_name FROM {permissions_table} JOIN {permission_user_table} ON {permission_user_table}.permission_id = {permissions_table}.id WHERE {permissions_table}.name = {permission_name} AND {permission_user_table}.user_id = {user_id}".format(
                permission_user_table=self._tables['prefix'] + self._tables['permission_user_table'],
                permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
                user_id=user_id,
                permission_name=permission_name
            )
            query_2 = "SELECT {permission_role_table}.role_id FROM {permission_role_table} JOIN {permissions_table} ON {permissions_table}.id={permission_role_table}.permission_id WHERE {permissions_table}.name = {permission_name}".format(
                permission_role_table=self._tables['prefix'] + self._tables['permission_role_table'],
                permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
                permission_name=permission_name
            )
            if roles_ids != '':
                query_3 = "SELECT * FROM {role_user_table} WHERE user_id={user_id} AND role_id IN ({roles_ids})".format(
                    role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
                    user_id=user_id,
                    roles_ids=roles_ids
                )
        else:
            raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:has_permission'")

    def permission_enabled(self, permission_name, permission_id=False):
        """Build the query checking that a permission exists and is enabled.

        NOTE(review): the query is built but never executed or returned.
        """
        if permission_id != False:
            query = "SELECT * FROM {permissions_table} WHERE id={permission_id} AND enabled=1".format(
                permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
                permission_id=permission_id
            )
        elif permission_name != False:
            query = "SELECT * FROM {permissions_table} WHERE name={permission_name} AND enabled=1".format(
                permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
                permission_name=permission_name
            )
        else:
            raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:permission_enabled'")
def get_user_roles(self, user_id):
query="SELECT {roles_table}.* FROM {roles_table} JOIN {role_user_table} ON {role_user_table}.role_id = {roles_table}.id WHERE {role_user_table}.user_id = {user_id}".format(
roles_table=self._tables['prefix'] + self._tables['roles_table'],
role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
user_id=user_id
)
def get_user_permissions(self, user_id):
query_1="SELECT {roles_table}.* FROM {roles_table} JOIN {role_user_table} ON {role_user_table}.role_id = {roles_table}.id WHERE {role_user_table}.user_id = {user_id}".format(
roles_table=self._tables['prefix'] + self._tables['roles_table'],
role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
user_id=user_id
)
query_2="SELECT {permissions_table}.* FROM {permissions_table} JOIN {permission_user_table} ON {permission_user_table}.permission_id = {permissions_table}.id WHERE {permission_user_table}.user_id = {user_id}".format(
permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
permission_user_table=self._tables['prefix'] + self._tables['permission_user_table'],
user_id=user_id
)
if roles_ids != '':
query_3="SELECT {permissions_table}.* FROM {permissions_table} JOIN {permission_role_table} ON {permission_role_table}.permission_id = {permissions_table}.id WHERE {permission_role_table}.role_id IN ({roles_ids})".format(
permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
permission_role_table=self._tables['prefix'] + self._tables['permission_role_table'],
roles_ids=roles_ids
)
def get_role(self, role_name, role_id=False):
if role_id != False:
query="SELECT * FROM {roles_table} WHERE id={role_id}".format(
roles_table=self._tables['prefix'] + self._tables['roles_table'],
role_id=role_id
)
elif role_name != False:
query="SELECT * FROM {roles_table} WHERE name={role_name}".format(
roles_table=self._tables['prefix'] + self._tables['roles_table'],
role_name=role_name
)
else:
raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:get_role'")
def get_roles(self):
query="SELECT * FROM {roles_table}".format(
roles_table=self._tables['prefix'] + self._tables['roles_table']
)
def get_permission(self, permission_name, permission_id=False):
    """Build the query that fetches a single permission.

    Args:
        permission_name: Permission name (used when ``permission_id`` is
            not supplied)
        permission_id: Permission ID (takes precedence over the name)

    Raises:
        PyHuskyError: if neither a name nor an id is supplied.

    NOTE(review): values are interpolated straight into the SQL string,
    which is vulnerable to SQL injection -- parameterized queries should
    be used instead.
    """
    if permission_id != False:
        query="SELECT * FROM {permissions_table} WHERE id={permission_id}".format(
            permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
            permission_id=permission_id
        )
    elif permission_name != False:
        # Bug fix: the name is a string literal and must be quoted in SQL;
        # the unquoted form produced a syntactically invalid query.
        query="SELECT * FROM {permissions_table} WHERE name='{permission_name}'".format(
            permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
            permission_name=permission_name
        )
    else:
        raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:get_permission'")
def get_permissions(self):
    """Build the query that selects every permission row."""
    permissions_table = self._tables['prefix'] + self._tables['permissions_table']
    query = "SELECT * FROM {permissions_table}".format(permissions_table=permissions_table)
def add_role(self, role):
    """Build the INSERT query for a single role.

    Args:
        role: dict with at least ``name``; ``display_name``,
            ``description``, ``created_at``, ``updated_at`` and ``enabled``
            are optional and fall back to '' / current timestamp / 1.

    Raises:
        PyHuskyError: if ``role`` has no ``name`` key.

    NOTE(review): the query is built but never executed, and values are
    interpolated directly into the SQL string (injection-prone).
    """
    # Bug fix: the original check was inverted (``if 'name' in role``),
    # which raised for every *valid* role and left the assignment after
    # the raise unreachable.
    if 'name' not in role:
        raise PyHuskyError("Error! Role name is required 'PyHusky_Model:add_role'")
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    role_data = {
        'name': role['name'],
        'display_name': role.get('display_name', ''),
        'description': role.get('description', ''),
        'created_at': role.get('created_at', now),
        'updated_at': role.get('updated_at', now),
        'enabled': role.get('enabled', 1),
    }
    query="INSERT INTO {roles_table} (name, display_name, description, created_at, updated_at, enabled) VALUES ('{name}', '{display_name}', '{description}', '{created_at}', '{updated_at}', '{enabled}')".format(
        roles_table=self._tables['prefix'] + self._tables['roles_table'],
        name=role_data['name'],
        display_name=role_data['display_name'],
        description=role_data['description'],
        created_at=role_data['created_at'],
        updated_at=role_data['updated_at'],
        enabled=role_data['enabled']
    )
def add_roles(self, roles):
    """Build a bulk INSERT query for several roles.

    Args:
        roles: iterable of role dicts; each needs at least ``name`` (see
            ``add_role`` for the optional keys and their defaults).

    Raises:
        PyHuskyError: if any role dict has no ``name`` key.

    NOTE(review): the query is built but never executed, and values are
    interpolated directly into the SQL string (injection-prone).
    """
    query="INSERT INTO {roles_table} (name, display_name, description, created_at, updated_at, enabled) VALUES ".format(
        roles_table=self._tables['prefix'] + self._tables['roles_table']
    )
    value_tuples = []
    for role in roles:
        # Bug fix: the original check was inverted (``if 'name' in role``),
        # which raised for every valid role.
        if 'name' not in role:
            raise PyHuskyError("Error! Roles name is required 'PyHusky_Model:add_roles'")
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        role_data = {
            'name': role['name'],
            'display_name': role.get('display_name', ''),
            'description': role.get('description', ''),
            'created_at': role.get('created_at', now),
            'updated_at': role.get('updated_at', now),
            'enabled': role.get('enabled', 1),
        }
        value_tuples.append("('{name}', '{display_name}', '{description}', '{created_at}', '{updated_at}', '{enabled}')".format(
            name=role_data['name'],
            display_name=role_data['display_name'],
            description=role_data['description'],
            created_at=role_data['created_at'],
            updated_at=role_data['updated_at'],
            enabled=role_data['enabled']
        ))
    # Bug fix: the original appended a trailing comma after every value
    # tuple and never removed the last one, producing invalid SQL.
    query += ",".join(value_tuples)
def add_permission(self, permission):
    """Build the INSERT query for a single permission.

    Args:
        permission: dict with at least ``name``; ``display_name``,
            ``description``, ``created_at``, ``updated_at`` and ``enabled``
            are optional and fall back to '' / current timestamp / 1.

    Raises:
        PyHuskyError: if ``permission`` has no ``name`` key.

    NOTE(review): the query is built but never executed, and values are
    interpolated directly into the SQL string (injection-prone).
    """
    # Bug fix: the original check was inverted (``if 'name' in permission``),
    # which raised for every *valid* permission and left the assignment
    # after the raise unreachable.
    if 'name' not in permission:
        raise PyHuskyError("Error! Permission name is required 'PyHusky_Model:add_permission'")
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    permission_data = {
        'name': permission['name'],
        'display_name': permission.get('display_name', ''),
        'description': permission.get('description', ''),
        'created_at': permission.get('created_at', now),
        'updated_at': permission.get('updated_at', now),
        'enabled': permission.get('enabled', 1),
    }
    query="INSERT INTO {permissions_table} (name, display_name, description, created_at, updated_at, enabled) VALUES ('{name}', '{display_name}', '{description}', '{created_at}', '{updated_at}', '{enabled}')".format(
        permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
        name=permission_data['name'],
        display_name=permission_data['display_name'],
        description=permission_data['description'],
        created_at=permission_data['created_at'],
        updated_at=permission_data['updated_at'],
        enabled=permission_data['enabled']
    )
def add_permissions(self, permissions):
    """Build a bulk INSERT query for several permissions.

    Args:
        permissions: iterable of permission dicts; each needs at least
            ``name`` (see ``add_permission`` for the optional keys).

    Raises:
        PyHuskyError: if any permission dict has no ``name`` key.

    NOTE(review): the query is built but never executed, and values are
    interpolated directly into the SQL string (injection-prone).
    """
    query="INSERT INTO {permissions_table} (name, display_name, description, created_at, updated_at, enabled) VALUES ".format(
        permissions_table=self._tables['prefix'] + self._tables['permissions_table']
    )
    value_tuples = []
    for permission in permissions:
        # Bug fix: the original check was inverted (``if 'name' in
        # permission``), raising for every valid permission, and its
        # message said "Roles" (copy-paste from add_roles).
        if 'name' not in permission:
            raise PyHuskyError("Error! Permission name is required 'PyHusky_Model:add_permissions'")
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        permission_data = {
            'name': permission['name'],
            'display_name': permission.get('display_name', ''),
            'description': permission.get('description', ''),
            'created_at': permission.get('created_at', now),
            'updated_at': permission.get('updated_at', now),
            'enabled': permission.get('enabled', 1),
        }
        value_tuples.append("('{name}', '{display_name}', '{description}', '{created_at}', '{updated_at}', '{enabled}')".format(
            name=permission_data['name'],
            display_name=permission_data['display_name'],
            description=permission_data['description'],
            created_at=permission_data['created_at'],
            updated_at=permission_data['updated_at'],
            enabled=permission_data['enabled']
        ))
    # Bug fix: the original left a trailing comma after the last value
    # tuple, producing invalid SQL.
    query += ",".join(value_tuples)
def update_role(self, role, where):
    """Update a role row matching ``where`` (not implemented yet)."""
    pass
def update_permission(self, role, where):
    """Update a permission row matching ``where`` (not implemented yet)."""
    pass
def update_user_roles(self, user_id, roles_data):
    """Replace a user's role assignments (not implemented yet)."""
    pass
def update_user_permissions(self, user_id, permissions_data):
    """Replace a user's permission assignments (not implemented yet)."""
    pass
def remove_user_role(self, user_id, role_name, role_id=False):
    """Remove a User Role

    Args:
        user_id: User ID
        role_name: Role Name
        role_id: Role ID

    Raises:
        PyHuskyError: if neither a role name nor a role id is supplied.

    NOTE(review): queries are built but never executed here; values are
    interpolated directly into the SQL (injection-prone).
    """
    if role_name != False:
        # Bug fix: the name is a string literal and must be quoted in SQL.
        query_1="SELECT * FROM {roles_table} WHERE name='{role_name}'".format(
            roles_table=self._tables['prefix'] + self._tables['roles_table'],
            role_name=role_name
        )
        # NOTE(review): ``role_id`` below is still the method argument
        # (default False); presumably it should be the id fetched with
        # ``query_1`` once query execution is implemented -- verify.
        query_2="DELETE FROM {role_user_table} WHERE role_id={role_id} AND user_id={user_id}".format(
            role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
            role_id=role_id,
            user_id=user_id
        )
    elif role_id != False:
        # Bug fix: this branch deleted from the permission_user table with
        # an undefined ``permission_id`` (copy-paste from
        # remove_user_permission), raising NameError.  Delete the
        # role/user link instead.
        query="DELETE FROM {role_user_table} WHERE role_id={role_id} AND user_id={user_id}".format(
            role_user_table=self._tables['prefix'] + self._tables['role_user_table'],
            role_id=role_id,
            user_id=user_id
        )
    else:
        raise PyHuskyError("Error! Role Name or Role ID Required 'PyHusky_Model:remove_user_role'")
def remove_user_permission(self, user_id, permission_name, permission_id=False):
    """Remove a User Permission

    Args:
        user_id: User ID
        permission_name: Permission Name
        permission_id: Permission ID

    Raises:
        PyHuskyError: if neither a permission name nor an id is supplied.

    NOTE(review): queries are built but never executed here; values are
    interpolated directly into the SQL (injection-prone).
    """
    if permission_name != False:
        # Bug fix: the name is a string literal and must be quoted in SQL.
        query_1="SELECT * FROM {permissions_table} WHERE name='{permission_name}'".format(
            permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
            permission_name=permission_name
        )
        # NOTE(review): ``permission_id`` below is still the method
        # argument (default False); presumably it should be the id fetched
        # with ``query_1`` once query execution is implemented -- verify.
        query_2="DELETE FROM {permission_user_table} WHERE permission_id={permission_id} AND user_id={user_id}".format(
            permission_user_table=self._tables['prefix'] + self._tables['permission_user_table'],
            permission_id=permission_id,
            user_id=user_id
        )
    elif permission_id != False:
        query="DELETE FROM {permission_user_table} WHERE permission_id={permission_id} AND user_id={user_id}".format(
            permission_user_table=self._tables['prefix'] + self._tables['permission_user_table'],
            permission_id=permission_id,
            user_id=user_id
        )
    else:
        # Bug fix: the message said "Role Name or Role ID" (copy-paste
        # from remove_user_role).
        raise PyHuskyError("Error! Permission Name or Permission ID Required 'PyHusky_Model:remove_user_permission'")
def delete_role(self, role_name, role_id=False):
    """Delete a Role

    Args:
        role_name: Role Name
        role_id: Role ID (takes precedence over ``role_name``)

    Raises:
        PyHuskyError: if neither a role name nor a role id is supplied.

    NOTE(review): the query is built but never executed here; values are
    interpolated directly into the SQL (injection-prone).
    """
    if role_id != False:
        query="DELETE FROM {roles_table} WHERE id={role_id}".format(
            roles_table=self._tables['prefix'] + self._tables['roles_table'],
            role_id=role_id
        )
    elif role_name != False:
        # Bug fix: the name is a string literal and must be quoted in SQL.
        query="DELETE FROM {roles_table} WHERE name='{role_name}'".format(
            roles_table=self._tables['prefix'] + self._tables['roles_table'],
            role_name=role_name
        )
    else:
        raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:delete_role'")
def delete_permission(self, permission_name, permission_id=False):
    """Delete a Permission

    Args:
        permission_name: Permission Name
        permission_id: Permission ID (takes precedence over the name)

    Raises:
        PyHuskyError: if neither a name nor an id is supplied.

    NOTE(review): the query is built but never executed here; values are
    interpolated directly into the SQL (injection-prone).
    """
    if permission_id != False:
        query="DELETE FROM {permissions_table} WHERE id={permission_id}".format(
            permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
            permission_id=permission_id
        )
    elif permission_name != False:
        # Bug fix: the name is a string literal and must be quoted in SQL.
        query="DELETE FROM {permissions_table} WHERE name='{permission_name}'".format(
            permissions_table=self._tables['prefix'] + self._tables['permissions_table'],
            permission_name=permission_name
        )
    else:
        raise PyHuskyError("Error! Invalid Method Parameters Submitted 'PyHusky_Model:delete_permission'")
def _table_exists(self, table_name):
    """Check if Tables Exist

    Args:
        table_name: a table name to check

    Returns:
        bool: True if a table matching ``table_name`` exists.

    NOTE(review): ``table_name`` is concatenated into the SQL directly;
    it must not come from untrusted input.
    """
    with self._connection.cursor() as cursor:
        cursor.execute("SHOW TABLES LIKE '" + table_name +"';")
        self._connection.commit()
        for row in cursor:
            # DictCursor rows map column name -> value; the single column
            # returned by SHOW TABLES holds the matching table name.
            return table_name in row.values()
    # Bug fix: the original fell off the loop and implicitly returned
    # None when no table matched; return an explicit boolean instead.
    return False
def _connect(self):
    """Connect to Database

    Opens a pymysql connection using the credentials in ``self._db`` and
    stores it on ``self._connection`` (utf8mb4, DictCursor rows).

    Raises:
        PyHuskyError: if the connection cannot be established.
    """
    try:
        self._connection = pymysql.connect(host=self._db['host'], user=self._db['username'], password=self._db['password'], db=self._db['database'], charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    except Exception as e:
        # Bug fix: typo in the message ("Cann't"); also chain the original
        # exception so the root cause is not lost.
        raise PyHuskyError("Error! Can't Connect to Database '%s'" % self._db['database']) from e
def close(self):
    """Close Database Connection"""
    # Delegates to pymysql's connection.close(); the model is unusable
    # afterwards until _connect() is called again.
    self._connection.close()
def _query(self, query):
    """Run MySQL Query

    Args:
        query: MySQL query to execute
    """
    # Execute inside a cursor context manager and commit immediately so
    # write queries are persisted; results are not fetched or returned.
    with self._connection.cursor() as cursor:
        cursor.execute(query)
        self._connection.commit()
class SQLLiteModel(object):
    """SQLLite Model Module"""
    # NOTE(review): placeholder -- no SQLite support is implemented yet.
    # The class name misspells "SQLite", but renaming would break callers.
    pass
class PostgreSQLModel(object):
    """PostgreSQL Model Module"""
    # NOTE(review): placeholder -- no PostgreSQL support is implemented yet.
    pass
| |
from functools import partial
from inspect import signature
from itertools import product
from itertools import chain
from itertools import permutations
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import det_curve
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import max_error
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_percentage_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import mean_poisson_deviance
from sklearn.metrics import mean_gamma_deviance
from sklearn.metrics import median_absolute_error
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import mean_pinball_loss
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import zero_one_loss
from sklearn.metrics import ndcg_score
from sklearn.metrics import dcg_score
from sklearn.metrics import top_k_accuracy_score
from sklearn.metrics._base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
# Regression metrics, keyed by the display name used in the generic tests.
# Values are callables with the (y_true, y_pred, **kwargs) metric signature.
REGRESSION_METRICS = {
    "max_error": max_error,
    "mean_absolute_error": mean_absolute_error,
    "mean_squared_error": mean_squared_error,
    "mean_pinball_loss": mean_pinball_loss,
    "median_absolute_error": median_absolute_error,
    "mean_absolute_percentage_error": mean_absolute_percentage_error,
    "explained_variance_score": explained_variance_score,
    "r2_score": partial(r2_score, multioutput='variance_weighted'),
    "mean_normal_deviance": partial(mean_tweedie_deviance, power=0),
    "mean_poisson_deviance": mean_poisson_deviance,
    "mean_gamma_deviance": mean_gamma_deviance,
    "mean_compound_poisson_deviance":
        partial(mean_tweedie_deviance, power=1.4),
}
# Classification metrics that compare ground-truth labels with predicted
# labels (as opposed to scores); keyed by the display name used in tests.
CLASSIFICATION_METRICS = {
    "accuracy_score": accuracy_score,
    "balanced_accuracy_score": balanced_accuracy_score,
    "adjusted_balanced_accuracy_score": partial(balanced_accuracy_score,
                                                adjusted=True),
    "unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    # `confusion_matrix` returns absolute values and hence behaves unnormalized
    # . Naming it with an unnormalized_ prefix is necessary for this module to
    # skip sample_weight scaling checks which will fail for unnormalized
    # metrics.
    "unnormalized_confusion_matrix": confusion_matrix,
    # Row-normalized confusion matrix (each row sums to 1).
    "normalized_confusion_matrix": lambda *args, **kwargs: (
        confusion_matrix(*args, **kwargs).astype('float') / confusion_matrix(
            *args, **kwargs).sum(axis=1)[:, np.newaxis]
    ),
    "unnormalized_multilabel_confusion_matrix": multilabel_confusion_matrix,
    "unnormalized_multilabel_confusion_matrix_sample":
        partial(multilabel_confusion_matrix, samplewise=True),
    "hamming_loss": hamming_loss,
    "zero_one_loss": zero_one_loss,
    "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
    # These are needed to test averaging
    "jaccard_score": jaccard_score,
    "precision_score": precision_score,
    "recall_score": recall_score,
    "f1_score": f1_score,
    "f2_score": partial(fbeta_score, beta=2),
    "f0.5_score": partial(fbeta_score, beta=0.5),
    "matthews_corrcoef_score": matthews_corrcoef,
    "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
    "weighted_f1_score": partial(f1_score, average="weighted"),
    "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
    "weighted_precision_score": partial(precision_score, average="weighted"),
    "weighted_recall_score": partial(recall_score, average="weighted"),
    "weighted_jaccard_score": partial(jaccard_score, average="weighted"),
    "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
    "micro_f1_score": partial(f1_score, average="micro"),
    "micro_f2_score": partial(fbeta_score, average="micro", beta=2),
    "micro_precision_score": partial(precision_score, average="micro"),
    "micro_recall_score": partial(recall_score, average="micro"),
    "micro_jaccard_score": partial(jaccard_score, average="micro"),
    "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
    "macro_f1_score": partial(f1_score, average="macro"),
    "macro_f2_score": partial(fbeta_score, average="macro", beta=2),
    "macro_precision_score": partial(precision_score, average="macro"),
    "macro_recall_score": partial(recall_score, average="macro"),
    "macro_jaccard_score": partial(jaccard_score, average="macro"),
    "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
    "samples_f1_score": partial(f1_score, average="samples"),
    "samples_f2_score": partial(fbeta_score, average="samples", beta=2),
    "samples_precision_score": partial(precision_score, average="samples"),
    "samples_recall_score": partial(recall_score, average="samples"),
    "samples_jaccard_score": partial(jaccard_score, average="samples"),
    "cohen_kappa_score": cohen_kappa_score,
}
def precision_recall_curve_padded_thresholds(*args, **kwargs):
    """
    The lengths of the precision/recall arrays and the threshold array
    returned by :func:`sklearn.metrics.precision_recall_curve` differ by
    one, which would make a plain ``np.array([...])`` of the triple an
    object array and break ``assert_array_equal``.

    As a workaround, pad the threshold array with NaN values so all three
    rows have the same length, and return them stacked as one 2d array.
    """
    precision, recall, thresholds = precision_recall_curve(*args, **kwargs)
    n_missing = len(precision) - len(thresholds)
    padded_thresholds = np.pad(thresholds.astype(np.float64),
                               pad_width=(0, n_missing),
                               mode='constant',
                               constant_values=[np.nan])
    return np.array([precision, recall, padded_thresholds])
# Curve metrics return arrays (x, y, thresholds) rather than a scalar score.
CURVE_METRICS = {
    "roc_curve": roc_curve,
    "precision_recall_curve": precision_recall_curve_padded_thresholds,
    "det_curve": det_curve,
}
# Metrics that compare ground truth with a continuous score (estimated
# probabilities or decision-function output) instead of hard predictions.
THRESHOLDED_METRICS = {
    "coverage_error": coverage_error,
    "label_ranking_loss": label_ranking_loss,
    "log_loss": log_loss,
    "unnormalized_log_loss": partial(log_loss, normalize=False),
    "hinge_loss": hinge_loss,
    "brier_score_loss": brier_score_loss,
    "roc_auc_score": roc_auc_score,  # default: average="macro"
    "weighted_roc_auc": partial(roc_auc_score, average="weighted"),
    "samples_roc_auc": partial(roc_auc_score, average="samples"),
    "micro_roc_auc": partial(roc_auc_score, average="micro"),
    "ovr_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovr'),
    "weighted_ovr_roc_auc": partial(roc_auc_score, average="weighted",
                                    multi_class='ovr'),
    "ovo_roc_auc": partial(roc_auc_score, average="macro", multi_class='ovo'),
    "weighted_ovo_roc_auc": partial(roc_auc_score, average="weighted",
                                    multi_class='ovo'),
    "partial_roc_auc": partial(roc_auc_score, max_fpr=0.5),
    "average_precision_score":
        average_precision_score,  # default: average="macro"
    "weighted_average_precision_score":
        partial(average_precision_score, average="weighted"),
    "samples_average_precision_score":
        partial(average_precision_score, average="samples"),
    "micro_average_precision_score":
        partial(average_precision_score, average="micro"),
    "label_ranking_average_precision_score":
        label_ranking_average_precision_score,
    "ndcg_score": ndcg_score,
    "dcg_score": dcg_score,
    "top_k_accuracy_score": top_k_accuracy_score
}
# Union of every metric dictionary; the generic invariance tests iterate it.
ALL_METRICS = {
    **THRESHOLDED_METRICS,
    **CLASSIFICATION_METRICS,
    **REGRESSION_METRICS,
    **CURVE_METRICS,
}
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
# (they are only meaningful on multilabel-indicator / ranking targets).
METRIC_UNDEFINED_BINARY = {
    "samples_f0.5_score",
    "samples_f1_score",
    "samples_f2_score",
    "samples_precision_score",
    "samples_recall_score",
    "samples_jaccard_score",
    "coverage_error",
    "unnormalized_multilabel_confusion_matrix_sample",
    "label_ranking_loss",
    "label_ranking_average_precision_score",
    "dcg_score",
    "ndcg_score"
}
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = {
    "brier_score_loss",
    "micro_roc_auc",
    "samples_roc_auc",
    "partial_roc_auc",
    "roc_auc_score",
    "weighted_roc_auc",
    "average_precision_score",
    "weighted_average_precision_score",
    "micro_average_precision_score",
    "samples_average_precision_score",
    "jaccard_score",
    # with default average='binary', multiclass is prohibited
    "precision_score",
    "recall_score",
    "f1_score",
    "f2_score",
    "f0.5_score",
    # curves
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
}
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = METRIC_UNDEFINED_BINARY.union(
    METRIC_UNDEFINED_MULTICLASS)
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = {
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    "jaccard_score"
}
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = {
    "roc_auc_score", "average_precision_score", "partial_roc_auc",
}
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = {
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "brier_score_loss",
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    "jaccard_score",
    "average_precision_score",
    "weighted_average_precision_score",
    "micro_average_precision_score",
    "samples_average_precision_score",
}
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = {
    "unnormalized_confusion_matrix",
    "normalized_confusion_matrix",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    "jaccard_score",
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",
    "weighted_jaccard_score",
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "micro_jaccard_score",
    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
    "macro_jaccard_score",
    "unnormalized_multilabel_confusion_matrix",
    "unnormalized_multilabel_confusion_matrix_sample",
    "cohen_kappa_score",
}
# Metrics with a "normalize" option (mean vs. sum of per-sample scores)
METRICS_WITH_NORMALIZE_OPTION = {
    "accuracy_score",
    "top_k_accuracy_score",
    "zero_one_loss",
}
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = {
    "log_loss",
    "unnormalized_log_loss",
    "roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
    "micro_roc_auc", "partial_roc_auc",
    "average_precision_score", "weighted_average_precision_score",
    "samples_average_precision_score", "micro_average_precision_score",
    "coverage_error", "label_ranking_loss",
    "ndcg_score",
    "dcg_score",
    "label_ranking_average_precision_score",
}
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = {
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "zero_one_loss", "unnormalized_zero_one_loss",
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",
    "weighted_jaccard_score",
    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
    "macro_jaccard_score",
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "micro_jaccard_score",
    "unnormalized_multilabel_confusion_matrix",
    "samples_f0.5_score", "samples_f1_score", "samples_f2_score",
    "samples_precision_score", "samples_recall_score",
    "samples_jaccard_score",
}
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = {
    "mean_absolute_error", "median_absolute_error", "mean_squared_error",
    "r2_score", "explained_variance_score", "mean_absolute_percentage_error",
    "mean_pinball_loss"
}
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = {
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "zero_one_loss", "unnormalized_zero_one_loss",
    "micro_jaccard_score", "macro_jaccard_score",
    "jaccard_score",
    "samples_jaccard_score",
    "f1_score", "micro_f1_score", "macro_f1_score",
    "weighted_recall_score",
    # P = R = F = accuracy in multiclass case
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
    "median_absolute_error", "max_error",
    # Pinball loss is only symmetric for alpha=0.5 which is the default.
    "mean_pinball_loss",
    "cohen_kappa_score", "mean_normal_deviance"
}
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = {
    "balanced_accuracy_score",
    "adjusted_balanced_accuracy_score",
    "explained_variance_score",
    "r2_score",
    "unnormalized_confusion_matrix",
    "normalized_confusion_matrix",
    "roc_curve",
    "precision_recall_curve",
    "det_curve",
    "precision_score", "recall_score", "f2_score", "f0.5_score",
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_jaccard_score",
    "unnormalized_multilabel_confusion_matrix",
    "macro_f0.5_score", "macro_f2_score", "macro_precision_score",
    "macro_recall_score", "log_loss", "hinge_loss",
    "mean_gamma_deviance", "mean_poisson_deviance",
    "mean_compound_poisson_deviance", "mean_absolute_percentage_error"
}
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = {
    "median_absolute_error",
    "max_error",
    "ovo_roc_auc",
    "weighted_ovo_roc_auc"
}
# Deviance-based metrics are only defined for strictly positive targets.
METRICS_REQUIRE_POSITIVE_Y = {
    "mean_poisson_deviance",
    "mean_gamma_deviance",
    "mean_compound_poisson_deviance",
}
def _require_positive_targets(y1, y2):
"""Make targets strictly positive"""
offset = abs(min(y1.min(), y2.min())) + 1
y1 += offset
y2 += offset
return y1, y2
def test_symmetry_consistency():
    # Every metric must be covered by exactly one of the symmetry /
    # thresholded / undefined groupings -- we shouldn't forget any.
    covered = (SYMMETRIC_METRICS | NOT_SYMMETRIC_METRICS |
               set(THRESHOLDED_METRICS) | METRIC_UNDEFINED_BINARY_MULTICLASS)
    assert covered == set(ALL_METRICS)
    # ... and no metric may be listed as both symmetric and not symmetric.
    assert (SYMMETRIC_METRICS & NOT_SYMMETRIC_METRICS) == set()
@pytest.mark.parametrize("name", sorted(SYMMETRIC_METRICS))
def test_symmetric_metric(name):
    # A symmetric metric must score (y_true, y_pred) and (y_pred, y_true)
    # identically.
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y_true, y_pred = _require_positive_targets(y_true, y_pred)
    y_true_bin = rng.randint(0, 2, size=(20, 25))
    y_pred_bin = rng.randint(0, 2, size=(20, 25))
    metric = ALL_METRICS[name]
    if name not in METRIC_UNDEFINED_BINARY:
        assert_allclose(metric(y_true, y_pred),
                        metric(y_pred, y_true),
                        err_msg="%s is not symmetric" % name)
    elif name in MULTILABELS_METRICS:
        assert_allclose(metric(y_true_bin, y_pred_bin),
                        metric(y_pred_bin, y_true_bin),
                        err_msg="%s is not symmetric" % name)
    else:
        assert False, "This case is currently unhandled"
@pytest.mark.parametrize("name", sorted(NOT_SYMMETRIC_METRICS))
def test_not_symmetric_metric(name):
    # Test the symmetry of score and loss functions
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(20, ))
    y_pred = random_state.randint(0, 2, size=(20, ))
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y_true, y_pred = _require_positive_targets(y_true, y_pred)
    metric = ALL_METRICS[name]
    # use context manager to supply custom error message
    with pytest.raises(AssertionError):
        assert_array_equal(metric(y_true, y_pred), metric(y_pred, y_true))
        # Only reached when the two calls agree, i.e. the metric behaved
        # symmetrically: the ValueError is not an AssertionError, so it
        # escapes pytest.raises and fails the test with this message.
        raise ValueError("%s seems to be symmetric" % name)
@pytest.mark.parametrize(
    'name',
    sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_sample_order_invariance(name):
    # Shuffling the samples (keeping each y_true/y_pred pair aligned) must
    # not change the metric value.
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y_true, y_pred = _require_positive_targets(y_true, y_pred)
    shuffled_true, shuffled_pred = shuffle(y_true, y_pred, random_state=0)
    with ignore_warnings():
        metric = ALL_METRICS[name]
        assert_allclose(metric(y_true, y_pred),
                        metric(shuffled_true, shuffled_pred),
                        err_msg="%s is not sample order invariant" % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
    # Shuffling the samples must not change any metric value, for
    # multilabel-indicator and multioutput-continuous inputs alike.
    random_state = check_random_state(0)
    # Generate some data
    y_true = random_state.randint(0, 2, size=(20, 25))
    y_pred = random_state.randint(0, 2, size=(20, 25))
    y_score = random_state.normal(size=y_true.shape)
    y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
                                                              y_pred,
                                                              y_score,
                                                              random_state=0)
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        assert_allclose(metric(y_true, y_pred),
                        metric(y_true_shuffle, y_pred_shuffle),
                        err_msg="%s is not sample order invariant" % name)
    for name in THRESHOLDED_MULTILABEL_METRICS:
        metric = ALL_METRICS[name]
        assert_allclose(metric(y_true, y_score),
                        metric(y_true_shuffle, y_score_shuffle),
                        err_msg="%s is not sample order invariant" % name)
    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        assert_allclose(metric(y_true, y_score),
                        metric(y_true_shuffle, y_score_shuffle),
                        err_msg="%s is not sample order invariant" % name)
        # Regression metrics are additionally checked on the integer
        # predictions, not just the continuous scores.
        assert_allclose(metric(y_true, y_pred),
                        metric(y_true_shuffle, y_pred_shuffle),
                        err_msg="%s is not sample order invariant" % name)
@pytest.mark.parametrize(
    'name',
    sorted(set(ALL_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_format_invariance_with_1d_vectors(name):
    # A metric must give the same result whether targets are supplied as
    # lists, 1d arrays or column vectors, in any mixed combination.  Row
    # vectors, by contrast, must be rejected (or are out of scope here
    # because they would be interpreted as multilabel/multioutput data).
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))
    if name in METRICS_REQUIRE_POSITIVE_Y:
        y1, y2 = _require_positive_targets(y1, y2)
    # Build the alternative representations of the same targets.
    y1_list = list(y1)
    y2_list = list(y2)
    y1_1d, y2_1d = np.array(y1), np.array(y2)
    assert_array_equal(y1_1d.ndim, 1)
    assert_array_equal(y2_1d.ndim, 1)
    y1_column = np.reshape(y1_1d, (-1, 1))
    y2_column = np.reshape(y2_1d, (-1, 1))
    y1_row = np.reshape(y1_1d, (1, -1))
    y2_row = np.reshape(y2_1d, (1, -1))
    with ignore_warnings():
        metric = ALL_METRICS[name]
        # Reference value, computed on the original representation.
        measure = metric(y1, y2)
        assert_allclose(metric(y1_list, y2_list), measure,
                        err_msg="%s is not representation invariant with list"
                                "" % name)
        assert_allclose(metric(y1_1d, y2_1d), measure,
                        err_msg="%s is not representation invariant with "
                                "np-array-1d" % name)
        assert_allclose(metric(y1_column, y2_column), measure,
                        err_msg="%s is not representation invariant with "
                                "np-array-column" % name)
        # Mix format support
        assert_allclose(metric(y1_1d, y2_list), measure,
                        err_msg="%s is not representation invariant with mix "
                                "np-array-1d and list" % name)
        assert_allclose(metric(y1_list, y2_1d), measure,
                        err_msg="%s is not representation invariant with mix "
                                "np-array-1d and list" % name)
        assert_allclose(metric(y1_1d, y2_column), measure,
                        err_msg="%s is not representation invariant with mix "
                                "np-array-1d and np-array-column" % name)
        assert_allclose(metric(y1_column, y2_1d), measure,
                        err_msg="%s is not representation invariant with mix "
                                "np-array-1d and np-array-column" % name)
        assert_allclose(metric(y1_list, y2_column), measure,
                        err_msg="%s is not representation invariant with mix "
                                "list and np-array-column" % name)
        assert_allclose(metric(y1_column, y2_list), measure,
                        err_msg="%s is not representation invariant with mix "
                                "list and np-array-column" % name)
        # These mix representations aren't allowed
        with pytest.raises(ValueError):
            metric(y1_1d, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_1d)
        with pytest.raises(ValueError):
            metric(y1_list, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_list)
        with pytest.raises(ValueError):
            metric(y1_column, y2_row)
        with pytest.raises(ValueError):
            metric(y1_row, y2_column)
        # NB: We do not test for y1_row, y2_row as these may be
        # interpreted as multilabel or multioutput data.
        if (name not in (MULTIOUTPUT_METRICS | THRESHOLDED_MULTILABEL_METRICS |
                         MULTILABELS_METRICS)):
            with pytest.raises(ValueError):
                metric(y1_row, y2_row)
@pytest.mark.parametrize(
    'name',
    sorted(set(CLASSIFICATION_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_classification_invariance_string_vs_numbers_labels(name):
    # Ensure that classification metrics with string labels are invariant
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))
    # Map 0 -> "eggs", 1 -> "spam" to obtain the string-labelled targets.
    y1_str = np.array(["eggs", "spam"])[y1]
    y2_str = np.array(["eggs", "spam"])[y2]
    pos_label_str = "spam"
    labels_str = ["eggs", "spam"]
    with ignore_warnings():
        metric = CLASSIFICATION_METRICS[name]
        # Reference score on integer labels.
        measure_with_number = metric(y1, y2)
        # Ugly, but handle case with a pos_label and label
        metric_str = metric
        if name in METRICS_WITH_POS_LABEL:
            metric_str = partial(metric_str, pos_label=pos_label_str)
        measure_with_str = metric_str(y1_str, y2_str)
        assert_array_equal(measure_with_number, measure_with_str,
                           err_msg="{0} failed string vs number invariance "
                                   "test".format(name))
        # Also check object-dtype string arrays (a distinct numpy code path).
        measure_with_strobj = metric_str(y1_str.astype('O'),
                                         y2_str.astype('O'))
        assert_array_equal(measure_with_number, measure_with_strobj,
                           err_msg="{0} failed string object vs number "
                                   "invariance test".format(name))
        # For metrics accepting an explicit ``labels`` list, repeat both
        # checks with string labels passed through that parameter too.
        if name in METRICS_WITH_LABELS:
            metric_str = partial(metric_str, labels=labels_str)
            measure_with_str = metric_str(y1_str, y2_str)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))
            measure_with_strobj = metric_str(y1_str.astype('O'),
                                             y2_str.astype('O'))
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))
@pytest.mark.parametrize('name', THRESHOLDED_METRICS)
def test_thresholded_invariance_string_vs_numbers_labels(name):
    # Ensure that thresholded metrics with string labels are invariant
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))
    # Only y_true gets string labels; y2 stays a continuous/score-like input.
    y1_str = np.array(["eggs", "spam"])[y1]
    pos_label_str = "spam"
    with ignore_warnings():
        metric = THRESHOLDED_METRICS[name]
        if name not in METRIC_UNDEFINED_BINARY:
            # Ugly, but handle case with a pos_label and label
            metric_str = metric
            if name in METRICS_WITH_POS_LABEL:
                metric_str = partial(metric_str, pos_label=pos_label_str)
            measure_with_number = metric(y1, y2)
            measure_with_str = metric_str(y1_str, y2)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))
            # Object-dtype string labels exercise a separate numpy path.
            measure_with_strobj = metric_str(y1_str.astype('O'), y2)
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string object vs number "
                                       "invariance test".format(name))
        else:
            # TODO those metrics doesn't support string label yet
            with pytest.raises(ValueError):
                metric(y1_str, y2)
            with pytest.raises(ValueError):
                metric(y1_str.astype('O'), y2)
# Pairs of (y_true, y_pred/y_score) in which the second vector contains
# non-finite values; every metric is expected to reject these inputs.
invalids_nan_inf = [
    ([0, 1], [np.inf, np.inf]),
    ([0, 1], [np.nan, np.nan]),
    ([0, 1], [np.nan, np.inf]),
    ([0, 1], [np.inf, 1]),
    ([0, 1], [np.nan, 1]),
]
@pytest.mark.parametrize(
    'metric',
    chain(THRESHOLDED_METRICS.values(), REGRESSION_METRICS.values())
)
@pytest.mark.parametrize("y_true, y_score", invalids_nan_inf)
def test_regression_thresholded_inf_nan_input(metric, y_true, y_score):
    # Thresholded and regression metrics must reject non-finite inputs
    # with an explicit ValueError rather than producing a bogus score.
    with pytest.raises(ValueError, match="contains NaN, infinity"):
        metric(y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
@pytest.mark.parametrize(
    "y_true, y_score",
    invalids_nan_inf +
    # Add an additional case for classification only
    # non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/6809
    [([np.nan, 1, 2], [1, 2, 3])]  # type: ignore
)
def test_classification_inf_nan_input(metric, y_true, y_score):
    """check that classification metrics raise a message mentioning the
    occurrence of non-finite values in the target vectors."""
    err_msg = "Input contains NaN, infinity or a value too large"
    with pytest.raises(ValueError, match=err_msg):
        metric(y_true, y_score)
@pytest.mark.parametrize('metric', CLASSIFICATION_METRICS.values())
def test_classification_binary_continuous_input(metric):
    """check that classification metrics raise a message of mixed type data
    with continuous/binary target vectors."""
    y_true = ['a', 'b', 'a']
    y_score = [0.1, 0.2, 0.3]
    expected_message = (
        "Classification metrics can't handle a mix of binary and continuous "
        "targets"
    )
    with pytest.raises(ValueError, match=expected_message):
        metric(y_true, y_score)
@ignore_warnings
def check_single_sample(name):
    # Non-regression test: scores should work with a single sample.
    # This is important for leave-one-out cross validation.
    # Score functions tested are those that formerly called np.squeeze,
    # which turns an array of size 1 into a 0-d array (!).
    metric = ALL_METRICS[name]
    values = [1, 2] if name in METRICS_REQUIRE_POSITIVE_Y else [0, 1]
    # assert that no exception is thrown
    for true_val, pred_val in product(values, repeat=2):
        metric([true_val], [pred_val])
@ignore_warnings
def check_single_sample_multioutput(name):
    # Multioutput/multilabel metrics must accept a single two-column sample.
    metric = ALL_METRICS[name]
    for true_row in product([0, 1], repeat=2):
        for pred_row in product([0, 1], repeat=2):
            metric(np.array([true_row]), np.array([pred_row]))
@pytest.mark.parametrize(
    'name',
    sorted(
        set(ALL_METRICS)
        # Those metrics are not always defined with one sample
        # or in multiclass classification
        - METRIC_UNDEFINED_BINARY_MULTICLASS - set(THRESHOLDED_METRICS)))
def test_single_sample(name):
    # See check_single_sample for the rationale (leave-one-out CV support).
    check_single_sample(name)
@pytest.mark.parametrize('name',
                         sorted(MULTIOUTPUT_METRICS | MULTILABELS_METRICS))
def test_single_sample_multioutput(name):
    # Single-sample support for the 2d (multioutput/multilabel) metrics.
    check_single_sample_multioutput(name)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_number_of_output_differ(name):
    # A mismatch in the number of outputs between y_true and y_pred
    # must raise a ValueError.
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0], [1, 0], [0, 0]])
    with pytest.raises(ValueError):
        ALL_METRICS[name](y_true, y_pred)
@pytest.mark.parametrize('name', sorted(MULTIOUTPUT_METRICS))
def test_multioutput_regression_invariance_to_dimension_shuffling(name):
    # test invariance to dimension shuffling
    rng = check_random_state(0)
    y_true = rng.uniform(0, 2, size=(20, 5))
    y_pred = rng.uniform(0, 2, size=(20, 5))
    metric = ALL_METRICS[name]
    expected = metric(y_true, y_pred)
    for _ in range(3):
        perm = rng.permutation(y_true.shape[1])
        shuffled_score = metric(y_true[:, perm], y_pred[:, perm])
        assert_allclose(shuffled_score,
                        expected,
                        err_msg="%s is not dimension shuffling invariant" % (
                            name))
@ignore_warnings
def test_multilabel_representation_invariance():
    # Multilabel metrics must score identically whether the indicator
    # matrix is dense, sparse, a list of arrays or a list of lists.
    # Generate some data
    n_classes = 4
    n_samples = 50
    _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=0, n_samples=n_samples,
                                           allow_unlabeled=True)
    _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=1, n_samples=n_samples,
                                           allow_unlabeled=True)
    # To make sure at least one empty label is present
    y1 = np.vstack([y1, [[0] * n_classes]])
    y2 = np.vstack([y2, [[0] * n_classes]])
    # Alternative representations of the same indicator matrices.
    y1_sparse_indicator = sp.coo_matrix(y1)
    y2_sparse_indicator = sp.coo_matrix(y2)
    y1_list_array_indicator = list(y1)
    y2_list_array_indicator = list(y2)
    y1_list_list_indicator = [list(a) for a in y1_list_array_indicator]
    y2_list_list_indicator = [list(a) for a in y2_list_array_indicator]
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        # XXX cruel hack to work with partial functions
        if isinstance(metric, partial):
            metric.__module__ = 'tmp'
            metric.__name__ = name
        # Reference score on the dense ndarray representation.
        measure = metric(y1, y2)
        # Check representation invariance
        assert_allclose(metric(y1_sparse_indicator, y2_sparse_indicator),
                        measure,
                        err_msg="%s failed representation invariance between "
                                "dense and sparse indicator formats." % name)
        assert_almost_equal(metric(y1_list_list_indicator,
                                   y2_list_list_indicator),
                            measure,
                            err_msg="%s failed representation invariance  "
                                    "between dense array and list of list "
                                    "indicator formats." % name)
        assert_almost_equal(metric(y1_list_array_indicator,
                                   y2_list_array_indicator),
                            measure,
                            err_msg="%s failed representation invariance  "
                                    "between dense and list of array "
                                    "indicator formats." % name)
@pytest.mark.parametrize('name', sorted(MULTILABELS_METRICS))
def test_raise_value_error_multilabel_sequences(name):
    # make sure the multilabel-sequence format raises ValueError
    metric = ALL_METRICS[name]
    multilabel_sequences = (
        [[1], [2], [0, 1]],
        [(), (2), (0, 1)],
        [[]],
        [()],
        np.array([[], [1, 2]], dtype='object'),
    )
    for sequence in multilabel_sequences:
        with pytest.raises(ValueError):
            metric(sequence, sequence)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_binary_classification(name):
    # Test in the binary case
    n_classes = 2
    n_samples = 20
    rng = check_random_state(0)
    y_true = rng.randint(0, n_classes, size=(n_samples, ))
    y_pred = rng.randint(0, n_classes, size=(n_samples, ))
    y_score = rng.normal(size=y_true.shape)
    metric = ALL_METRICS[name]
    pred = y_score if name in THRESHOLDED_METRICS else y_pred
    normalized = metric(y_true, pred, normalize=True)
    not_normalized = metric(y_true, pred, normalize=False)
    # The normalized score must be strictly positive for this data.
    assert_array_less(-1.0 * normalized, 0,
                      err_msg="We failed to test correctly the normalize "
                              "option")
    # normalize=True must equal normalize=False divided by the sample count.
    assert_allclose(normalized, not_normalized / n_samples,
                    err_msg=f"Failed with {name}")
@pytest.mark.parametrize('name', sorted(METRICS_WITH_NORMALIZE_OPTION))
def test_normalize_option_multiclass_classification(name):
    # Test in the multiclass case
    n_classes = 4
    n_samples = 20
    rng = check_random_state(0)
    y_true = rng.randint(0, n_classes, size=(n_samples, ))
    y_pred = rng.randint(0, n_classes, size=(n_samples, ))
    y_score = rng.uniform(size=(n_samples, n_classes))
    metric = ALL_METRICS[name]
    pred = y_score if name in THRESHOLDED_METRICS else y_pred
    normalized = metric(y_true, pred, normalize=True)
    not_normalized = metric(y_true, pred, normalize=False)
    # The normalized score must be strictly positive for this data.
    assert_array_less(-1.0 * normalized, 0,
                      err_msg="We failed to test correctly the normalize "
                              "option")
    # normalize=True must equal normalize=False divided by the sample count.
    assert_allclose(normalized, not_normalized / n_samples,
                    err_msg=f"Failed with {name}")
@pytest.mark.parametrize('name', sorted(
    METRICS_WITH_NORMALIZE_OPTION.intersection(MULTILABELS_METRICS)
))
def test_normalize_option_multilabel_classification(name):
    # Test in the multilabel case
    n_classes = 4
    n_samples = 100
    random_state = check_random_state(0)
    # for both random_state 0 and 1, y_true and y_pred has at least one
    # unlabelled entry
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=0,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=1,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)
    y_score = random_state.uniform(size=y_true.shape)
    # To make sure at least one empty label is present.  NB: the previous
    # ``y_true += [0] * n_classes`` broadcast an element-wise addition of
    # zeros over the ndarray — a silent no-op, not a row append.  Zero out
    # one existing row instead, which keeps n_samples (and y_score's shape)
    # unchanged.
    y_true[0, :] = 0
    y_pred[0, :] = 0
    metrics = ALL_METRICS[name]
    pred = y_score if name in THRESHOLDED_METRICS else y_pred
    measure_normalized = metrics(y_true, pred, normalize=True)
    measure_not_normalized = metrics(y_true, pred, normalize=False)
    # The normalized score must be strictly positive for this data.
    assert_array_less(-1.0 * measure_normalized, 0,
                      err_msg="We failed to test correctly the normalize "
                              "option")
    # normalize=True must equal normalize=False divided by the sample count.
    assert_allclose(measure_normalized, measure_not_normalized / n_samples,
                    err_msg=f"Failed with {name}")
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
                     is_multilabel):
    """Check that every ``average=`` option of *metric* agrees with the
    equivalent computation done by hand on the binarized targets."""
    n_samples, n_classes = y_true_binarize.shape
    # No averaging: one score per label, equal to the binary score computed
    # on that label's indicator column.
    label_measure = metric(y_true, y_pred, average=None)
    assert_allclose(label_measure,
                    [metric(y_true_binarize[:, i], y_pred_binarize[:, i])
                     for i in range(n_classes)])
    # Micro measure: score of the flattened (raveled) indicator matrices.
    micro_measure = metric(y_true, y_pred, average="micro")
    assert_allclose(micro_measure,
                    metric(y_true_binarize.ravel(), y_pred_binarize.ravel()))
    # Macro measure: unweighted mean of the per-label scores.
    macro_measure = metric(y_true, y_pred, average="macro")
    assert_allclose(macro_measure, np.mean(label_measure))
    # Weighted measure: per-label scores weighted by label support.
    weights = np.sum(y_true_binarize, axis=0, dtype=int)
    if np.sum(weights) != 0:
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_allclose(weighted_measure,
                        np.average(label_measure, weights=weights))
    else:
        # With zero total support the weighted average is defined as 0.
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_allclose(weighted_measure, 0)
    # Sample measure (multilabel only): mean of the per-sample scores.
    if is_multilabel:
        sample_measure = metric(y_true, y_pred, average="samples")
        assert_allclose(sample_measure,
                        np.mean([metric(y_true_binarize[i], y_pred_binarize[i])
                                 for i in range(n_samples)]))
    # Unknown averaging strings must be rejected.
    with pytest.raises(ValueError):
        metric(y_true, y_pred, average="unknown")
    with pytest.raises(ValueError):
        metric(y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
                    y_score):
    # Dispatch the averaging checks for metric `name`, using hard
    # predictions or continuous scores depending on the metric family.
    is_multilabel = type_of_target(y_true).startswith("multilabel")
    metric = ALL_METRICS[name]
    if name in METRICS_WITH_AVERAGING:
        _check_averaging(metric, y_true, y_pred, y_true_binarize,
                         y_pred_binarize, is_multilabel)
        return
    if name in THRESHOLDED_METRICS_WITH_AVERAGING:
        _check_averaging(metric, y_true, y_score, y_true_binarize,
                         y_score, is_multilabel)
        return
    raise ValueError("Metric is not recorded as having an average option")
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multiclass(name):
    # Averaging options on a multiclass problem; targets are binarized
    # with a LabelBinarizer fitted on y_true.
    n_samples, n_classes = 50, 3
    rng = check_random_state(0)
    y_true = rng.randint(0, n_classes, size=(n_samples, ))
    y_pred = rng.randint(0, n_classes, size=(n_samples, ))
    y_score = rng.uniform(size=(n_samples, n_classes))
    binarizer = LabelBinarizer().fit(y_true)
    check_averaging(name, y_true, binarizer.transform(y_true),
                    y_pred, binarizer.transform(y_pred), y_score)
@pytest.mark.parametrize(
    'name',
    sorted(METRICS_WITH_AVERAGING | THRESHOLDED_METRICS_WITH_AVERAGING))
def test_averaging_multilabel(name):
    # Averaging options on a multilabel-indicator problem.  Indicator data
    # is already binary, so it doubles as its own binarized form.
    n_samples, n_classes = 40, 5
    _, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                          random_state=5, n_samples=n_samples,
                                          allow_unlabeled=False)
    y_true, y_pred = y[:20], y[20:]
    y_score = check_random_state(0).normal(size=(20, n_classes))
    check_averaging(name, y_true, y_true, y_pred, y_pred, y_score)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_zeroes(name):
    # Degenerate multilabel case: no label is ever set.
    y_true = np.zeros((20, 3))
    y_pred = np.zeros((20, 3))
    y_score = np.zeros((20, 3))
    check_averaging(name, y_true, y_true, y_pred, y_pred, y_score)
def test_averaging_binary_multilabel_all_zeroes():
    # Test _average_binary_score for weight.sum() == 0
    y_true = np.zeros((20, 3))
    y_pred = np.zeros((20, 3))

    def binary_metric(y_true, y_score, average="macro"):
        # Wrap precision_score through the binary-averaging helper.
        return _average_binary_score(precision_score, y_true, y_score,
                                     average)

    _check_averaging(binary_metric, y_true, y_pred, y_true, y_pred,
                     is_multilabel=True)
@pytest.mark.parametrize('name', sorted(METRICS_WITH_AVERAGING))
def test_averaging_multilabel_all_ones(name):
    # Degenerate multilabel case: every label is always set.
    y_true = np.ones((20, 3))
    y_pred = np.ones((20, 3))
    y_score = np.ones((20, 3))
    check_averaging(name, y_true, y_true, y_pred, y_pred, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
    """Battery of consistency checks for the ``sample_weight`` parameter
    of *metric* on the targets ``(y1, y2)``."""
    rng = np.random.RandomState(0)
    # Strictly positive integer weights so they can double as repeat counts.
    sample_weight = rng.randint(1, 10, size=len(y1))
    # top_k_accuracy_score always lead to a perfect score for k > 1 in the
    # binary case
    metric = partial(metric, k=1) if name == "top_k_accuracy_score" else metric
    # check that unit weights gives the same score as no weight
    unweighted_score = metric(y1, y2, sample_weight=None)
    assert_allclose(
        unweighted_score,
        metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
        err_msg="For %s sample_weight=None is not equivalent to "
                "sample_weight=ones" % name)
    # check that the weighted and unweighted scores are unequal
    weighted_score = metric(y1, y2, sample_weight=sample_weight)
    # use context manager to supply custom error message
    with pytest.raises(AssertionError):
        assert_allclose(unweighted_score, weighted_score)
        # Unreachable when the scores differ as expected; only runs (and
        # fails the surrounding raises-check) if they are almost equal.
        raise ValueError("Unweighted and weighted scores are unexpectedly "
                         "almost equal (%s) and (%s) "
                         "for %s" % (unweighted_score, weighted_score, name))
    # check that sample_weight can be a list
    weighted_score_list = metric(y1, y2,
                                 sample_weight=sample_weight.tolist())
    assert_allclose(
        weighted_score, weighted_score_list,
        err_msg=("Weighted scores for array and list "
                 "sample_weight input are not equal (%s != %s) for %s") % (
                     weighted_score, weighted_score_list, name))
    # check that integer weights is the same as repeated samples
    repeat_weighted_score = metric(
        np.repeat(y1, sample_weight, axis=0),
        np.repeat(y2, sample_weight, axis=0), sample_weight=None)
    assert_allclose(
        weighted_score, repeat_weighted_score,
        err_msg="Weighting %s is not equal to repeating samples" % name)
    # check that ignoring a fraction of the samples is equivalent to setting
    # the corresponding weights to zero
    sample_weight_subset = sample_weight[1::2]
    sample_weight_zeroed = np.copy(sample_weight)
    sample_weight_zeroed[::2] = 0
    y1_subset = y1[1::2]
    y2_subset = y2[1::2]
    weighted_score_subset = metric(y1_subset, y2_subset,
                                   sample_weight=sample_weight_subset)
    weighted_score_zeroed = metric(y1, y2,
                                   sample_weight=sample_weight_zeroed)
    assert_allclose(
        weighted_score_subset, weighted_score_zeroed,
        err_msg=("Zeroing weights does not give the same result as "
                 "removing the corresponding samples (%s != %s) for %s" %
                 (weighted_score_zeroed, weighted_score_subset, name)))
    # Unnormalized metrics scale linearly with the weights, so the scaling
    # invariance only applies to the normalized ones.
    if not name.startswith('unnormalized'):
        # check that the score is invariant under scaling of the weights by a
        # common factor
        for scaling in [2, 0.3]:
            assert_allclose(
                weighted_score,
                metric(y1, y2, sample_weight=sample_weight * scaling),
                err_msg="%s sample_weight is not invariant "
                        "under scaling" % name)
    # Check that if number of samples in y_true and sample_weight are not
    # equal, meaningful error is raised.
    error_message = (r"Found input variables with inconsistent numbers of "
                     r"samples: \[{}, {}, {}\]".format(
                         _num_samples(y1), _num_samples(y2),
                         _num_samples(sample_weight) * 2))
    with pytest.raises(ValueError, match=error_message):
        metric(y1, y2, sample_weight=np.hstack([sample_weight,
                                                sample_weight]))
@pytest.mark.parametrize(
    'name',
    sorted(
        set(ALL_METRICS).intersection(set(REGRESSION_METRICS)) -
        METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_regression_sample_weight_invariance(name):
    # regression
    n_samples = 50
    rng = check_random_state(0)
    y_true = rng.random_sample(size=(n_samples,))
    y_pred = rng.random_sample(size=(n_samples,))
    check_sample_weight_invariance(name, ALL_METRICS[name], y_true, y_pred)
@pytest.mark.parametrize(
    'name',
    sorted(
        set(ALL_METRICS) - set(REGRESSION_METRICS) -
        METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY))
def test_binary_sample_weight_invariance(name):
    # binary
    n_samples = 50
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(n_samples, ))
    y_pred = rng.randint(0, 2, size=(n_samples, ))
    y_score = rng.random_sample(size=(n_samples,))
    # Thresholded metrics consume continuous scores, others hard labels.
    second_input = y_score if name in THRESHOLDED_METRICS else y_pred
    check_sample_weight_invariance(name, ALL_METRICS[name], y_true,
                                   second_input)
@pytest.mark.parametrize(
    'name',
    sorted(
        set(ALL_METRICS) - set(REGRESSION_METRICS) -
        METRICS_WITHOUT_SAMPLE_WEIGHT - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_multiclass_sample_weight_invariance(name):
    # multiclass
    n_samples = 50
    rng = check_random_state(0)
    y_true = rng.randint(0, 5, size=(n_samples, ))
    y_pred = rng.randint(0, 5, size=(n_samples, ))
    y_score = rng.random_sample(size=(n_samples, 5))
    metric = ALL_METRICS[name]
    if name in THRESHOLDED_METRICS:
        # softmax
        exp_scores = np.exp(-y_score)
        y_score_norm = exp_scores / exp_scores.sum(axis=-1).reshape(-1, 1)
        check_sample_weight_invariance(name, metric, y_true, y_score_norm)
    else:
        check_sample_weight_invariance(name, metric, y_true, y_pred)
@pytest.mark.parametrize(
    'name',
    sorted((MULTILABELS_METRICS | THRESHOLDED_MULTILABEL_METRICS
            | MULTIOUTPUT_METRICS) - METRICS_WITHOUT_SAMPLE_WEIGHT))
def test_multilabel_sample_weight_invariance(name):
    # multilabel indicator
    rng = check_random_state(0)
    _, ya = make_multilabel_classification(n_features=1, n_classes=10,
                                           random_state=0, n_samples=50,
                                           allow_unlabeled=False)
    _, yb = make_multilabel_classification(n_features=1, n_classes=10,
                                           random_state=1, n_samples=50,
                                           allow_unlabeled=False)
    y_true = np.vstack([ya, yb])
    y_pred = np.vstack([ya, ya])
    y_score = rng.randint(1, 4, size=y_true.shape)
    # Thresholded metrics consume continuous scores, others hard labels.
    second_input = y_score if name in THRESHOLDED_METRICS else y_pred
    check_sample_weight_invariance(name, ALL_METRICS[name], y_true,
                                   second_input)
@ignore_warnings
def test_no_averaging_labels():
    # test labels argument when not using averaging
    # in multi-class and multi-label cases
    y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
    y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
    y_true_multiclass = np.array([0, 1, 2])
    y_pred_multiclass = np.array([0, 2, 3])
    labels = np.array([3, 0, 1, 2])
    _, inverse_labels = np.unique(labels, return_inverse=True)
    problems = [(y_true_multiclass, y_pred_multiclass),
                (y_true_multilabel, y_pred_multilabel)]
    for name in METRICS_WITH_AVERAGING:
        for y_true, y_pred in problems:
            # Skip indicator targets for metrics without multilabel support.
            if y_pred.ndim > 1 and name not in MULTILABELS_METRICS:
                continue
            metric = ALL_METRICS[name]
            score_labels = metric(y_true, y_pred, labels=labels, average=None)
            score = metric(y_true, y_pred, average=None)
            # Reordering via ``labels`` must just permute the output.
            assert_array_equal(score_labels, score[inverse_labels])
@pytest.mark.parametrize(
    'name',
    sorted(MULTILABELS_METRICS - {"unnormalized_multilabel_confusion_matrix"}))
def test_multilabel_label_permutations_invariance(name):
    # Permuting the label columns must not change the aggregated score.
    rng = check_random_state(0)
    n_samples, n_classes = 20, 4
    y_true = rng.randint(0, 2, size=(n_samples, n_classes))
    y_score = rng.randint(0, 2, size=(n_samples, n_classes))
    metric = ALL_METRICS[name]
    expected = metric(y_true, y_score)
    for perm in permutations(range(n_classes), n_classes):
        permuted_score = metric(y_true[:, perm], y_score[:, perm])
        assert_almost_equal(expected, permuted_score)
@pytest.mark.parametrize(
    'name', sorted(THRESHOLDED_MULTILABEL_METRICS | MULTIOUTPUT_METRICS))
def test_thresholded_multilabel_multioutput_permutations_invariance(name):
    # Permuting the output columns of both targets must not change the score.
    random_state = check_random_state(0)
    n_samples, n_classes = 20, 4
    y_true = random_state.randint(0, 2, size=(n_samples, n_classes))
    y_score = random_state.normal(size=y_true.shape)
    # Makes sure all samples have at least one label. This works around errors
    # when running metrics where average="sample"
    y_true[y_true.sum(1) == 4, 0] = 0
    y_true[y_true.sum(1) == 0, 0] = 1
    metric = ALL_METRICS[name]
    score = metric(y_true, y_score)
    for perm in permutations(range(n_classes), n_classes):
        y_score_perm = y_score[:, perm]
        y_true_perm = y_true[:, perm]
        current_score = metric(y_true_perm, y_score_perm)
        if metric == mean_absolute_percentage_error:
            assert np.isfinite(current_score)
            assert current_score > 1e6
            # Here we are not comparing the values in case of MAPE because
            # whenever y_true value is exactly zero, the MAPE value doesn't
            # signify anything. Thus, in this case we are just expecting
            # very large finite value.
        else:
            assert_almost_equal(score, current_score)
@pytest.mark.parametrize(
    'name',
    sorted(set(THRESHOLDED_METRICS) - METRIC_UNDEFINED_BINARY_MULTICLASS))
def test_thresholded_metric_permutation_invariance(name):
    # Relabelling the classes (and permuting the score columns to match)
    # must leave thresholded metrics unchanged.
    n_samples, n_classes = 100, 3
    rng = check_random_state(0)
    raw_scores = rng.rand(n_samples, n_classes)
    # Normalize rows to valid probability distributions (softmax of -x).
    exp_scores = np.exp(-raw_scores)
    y_score = exp_scores / exp_scores.sum(axis=-1).reshape(-1, 1)
    y_true = rng.randint(0, n_classes, size=n_samples)
    metric = ALL_METRICS[name]
    expected = metric(y_true, y_score)
    for perm in permutations(range(n_classes), n_classes):
        inverse_perm = np.zeros(n_classes, dtype=int)
        inverse_perm[list(perm)] = np.arange(n_classes)
        permuted_score = metric(np.take(perm, y_true),
                                y_score[:, inverse_perm])
        assert_almost_equal(expected, permuted_score)
@pytest.mark.parametrize("metric_name", CLASSIFICATION_METRICS)
def test_metrics_consistent_type_error(metric_name):
    # check that an understable message is raised when the type between y_true
    # and y_pred mismatch
    rng = np.random.RandomState(42)
    y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=object)
    y2 = rng.randint(0, 2, size=y1.size)
    expected_message = "Labels in y_true and y_pred should be of the same type."
    with pytest.raises(TypeError, match=expected_message):
        CLASSIFICATION_METRICS[metric_name](y1, y2)
@pytest.mark.parametrize(
    "metric, y_pred_threshold",
    [
        (average_precision_score, True),
        (brier_score_loss, True),
        (f1_score, False),
        (partial(fbeta_score, beta=1), False),
        (jaccard_score, False),
        (precision_recall_curve, True),
        (precision_score, False),
        (recall_score, False),
        (roc_curve, True),
    ],
)
@pytest.mark.parametrize("dtype_y_str", [str, object])
def test_metrics_pos_label_error_str(metric, y_pred_threshold, dtype_y_str):
    # check that the error message if `pos_label` is not specified and the
    # targets is made of strings.
    rng = np.random.RandomState(42)
    y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str)
    y2 = rng.randint(0, 2, size=y1.size)
    # Metrics taking hard predictions get string predictions as well;
    # thresholded metrics keep continuous scores for y2.
    if not y_pred_threshold:
        y2 = np.array(["spam", "eggs"], dtype=dtype_y_str)[y2]
    err_msg_pos_label_None = (
        "y_true takes value in {'eggs', 'spam'} and pos_label is not "
        "specified: either make y_true take value in {0, 1} or {-1, 1} or "
        "pass pos_label explicit"
    )
    err_msg_pos_label_1 = (
        r"pos_label=1 is not a valid label. It should be one of "
        r"\['eggs', 'spam'\]"
    )
    # Metrics whose signature defaults pos_label to 1 raise the
    # "invalid label" message; those defaulting to None raise the
    # "pos_label not specified" message.
    pos_label_default = signature(metric).parameters["pos_label"].default
    err_msg = (
        err_msg_pos_label_1
        if pos_label_default == 1
        else err_msg_pos_label_None
    )
    with pytest.raises(ValueError, match=err_msg):
        metric(y1, y2)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
    # Map the policy check's "project_id" target to the Cinder tenant
    # attribute carried on volume objects.
    policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
    # Row action linking to the instance launch wizard with this volume
    # preselected as the boot source.
    name = "launch_volume"
    verbose_name = _("Launch as Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)

    def get_link_url(self, datum):
        # Encode the volume id as the wizard's boot-source query string.
        source_id = "%s:vol" % self.table.get_object_id(datum)
        query = urlencode({"source_type": "volume_id",
                           "source_id": source_id})
        return "?".join([reverse(self.url), query])

    def allowed(self, request, volume=None):
        # Only bootable volumes that are not currently attached may be
        # launched as instances.
        if getattr(volume, 'bootable', '') != 'true':
            return False
        return volume.status == "available"
class LaunchVolumeNG(LaunchVolume):
    """Angular ("next generation") variant of LaunchVolume.

    Instead of following a server-side link it wires ng-click handlers
    onto the button so the client-side launch-instance wizard opens.
    """
    name = "launch_volume_ng"
    verbose_name = _("Launch as Instance")
    url = "horizon:project:volumes:index"
    classes = ("btn-launch", )
    ajax = False

    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        # Fixed: super() must name this class, not its parent.
        # ``super(LaunchVolume, self)`` skipped LaunchVolume in the MRO,
        # which would silently bypass any __init__ later added to it.
        super(LaunchVolumeNG, self).__init__(attrs, **kwargs)

    def get_link_url(self, datum):
        # Attach the Angular controller/handler attributes and return an
        # inert href; the wizard is opened entirely client side.
        url = reverse(self.url)
        vol_id = "%s:vol" % self.table.get_object_id(datum)
        ngclick = "modal.openLaunchInstanceWizard(" \
            "{successUrl: '%s', volumeId: '%s'})" \
            % (url, vol_id.split(":vol")[0])
        self.attrs.update({
            "ng-controller": "LaunchInstanceModalController as modal",
            "ng-click": ngclick
        })
        return "javascript:void(0);"
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
    # Batch action deleting volumes through the Cinder API.
    policy_rules = (("volume", "volume:delete"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Volume",
            u"Delete Volumes",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Volume",
            u"Scheduled deletion of Volumes",
            count
        )

    def delete(self, request, obj_id):
        cinder.volume_delete(request, obj_id)

    def allowed(self, request, volume=None):
        # Table-level checks pass no datum: allow and defer to the
        # per-row evaluation below.
        if volume is None:
            return True
        # Deletion is only valid from a deletable status and when no
        # snapshot depends on the volume.
        return (volume.status in DELETABLE_STATES and
                not getattr(volume, 'has_snapshot', False))
class CreateVolume(tables.LinkAction):
    """Table action opening the volume-creation form.

    Disables itself (and annotates its label) when the tenant's volume
    count or gigabyte quota is exhausted.
    """
    name = "create"
    verbose_name = _("Create Volume")
    url = "horizon:project:volumes:volumes:create"
    classes = ("ajax-modal", "btn-create")
    icon = "plus"
    policy_rules = (("volume", "volume:create"),)
    ajax = True

    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(CreateVolume, self).__init__(attrs, **kwargs)

    def allowed(self, request, volume=None):
        # Consistency/robustness fix: CreateSnapshot guards this same API
        # call; a failed limits lookup should not break table rendering.
        try:
            limits = api.cinder.tenant_absolute_limits(request)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve tenant limits.'))
            limits = {}
        gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
                        - limits.get('totalGigabytesUsed', 0))
        volumes_available = (limits.get('maxTotalVolumes', float("inf"))
                             - limits.get('totalVolumesUsed', 0))
        if gb_available <= 0 or volumes_available <= 0:
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
                self.verbose_name = string_concat(self.verbose_name, ' ',
                                                  _("(Quota exceeded)"))
        else:
            # Re-enable the button once quota is available again.
            self.verbose_name = _("Create Volume")
            classes = [c for c in self.classes if c != "disabled"]
            self.classes = classes
        return True

    def single(self, table, request, object_id=None):
        # AJAX endpoint: re-evaluate quota state, then re-render the button.
        self.allowed(request, None)
        return HttpResponse(self.render())
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the volume extend (grow) form."""
    name = "extend"
    verbose_name = _("Extend Volume")
    url = "horizon:project:volumes:volumes:extend"
    classes = ("ajax-modal", "btn-extend")
    policy_rules = (("volume", "volume:extend"),)

    def allowed(self, request, volume=None):
        # Only unattached ("available") volumes may be extended here.
        return volume.status == "available"
class EditAttachments(tables.LinkAction):
    """Row action linking to the manage-attachments view."""
    name = "attachments"
    verbose_name = _("Manage Attachments")
    url = "horizon:project:volumes:volumes:attach"
    classes = ("ajax-modal",)
    icon = "pencil"

    def allowed(self, request, volume=None):
        if not volume:
            return False
        project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
        target = {"project_id": project_id}
        can_attach = policy.check(
            (("compute", "compute:attach_volume"),), request, target)
        can_detach = policy.check(
            (("compute", "compute:detach_volume"),), request, target)
        if can_attach or can_detach:
            return volume.status in ("available", "in-use")
        return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the snapshot-creation form.

    Disables itself when the tenant's snapshot quota is exhausted.
    """
    name = "snapshots"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:volumes:volumes:create_snapshot"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("volume", "volume:create_snapshot"),)

    def allowed(self, request, volume=None):
        try:
            limits = api.cinder.tenant_absolute_limits(request)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve tenant limits.'))
            limits = {}
        remaining = (limits.get('maxTotalSnapshots', float("inf"))
                     - limits.get('totalSnapshotsUsed', 0))
        if remaining <= 0 and "disabled" not in self.classes:
            self.classes = list(self.classes) + ['disabled']
            self.verbose_name = string_concat(self.verbose_name, ' ',
                                              _("(Quota exceeded)"))
        return volume.status in ("available", "in-use")
class CreateTransfer(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the create-volume-transfer form."""
    name = "create_transfer"
    verbose_name = _("Create Transfer")
    url = "horizon:project:volumes:volumes:create_transfer"
    classes = ("ajax-modal",)
    policy_rules = (("volume", "volume:create_transfer"),)

    def allowed(self, request, volume=None):
        # Only unattached volumes can be transferred.
        return volume.status == "available"
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the create-backup form."""
    name = "backups"
    verbose_name = _("Create Backup")
    url = "horizon:project:volumes:volumes:create_backup"
    classes = ("ajax-modal",)
    policy_rules = (("volume", "backup:create"),)

    def allowed(self, request, volume=None):
        # Requires the backup service to be available, plus an unattached volume.
        return (cinder.volume_backup_supported(request) and
                volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the upload-volume-to-image form."""
    name = "upload_to_image"
    verbose_name = _("Upload to Image")
    url = "horizon:project:volumes:volumes:upload_to_image"
    classes = ("ajax-modal",)
    icon = "cloud-upload"
    policy_rules = (("volume", "volume:upload_to_image"),)

    def allowed(self, request, volume=None):
        # The user must have image-service access, and the volume must not
        # be mid-operation.
        can_use_image_service = \
            request.user.has_perm('openstack.services.image')
        return (volume.status in ("available", "in-use") and
                can_use_image_service)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the volume update (name/description) form."""
    name = "edit"
    verbose_name = _("Edit Volume")
    url = "horizon:project:volumes:volumes:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:update"),)

    def allowed(self, request, volume=None):
        return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the change-volume-type form."""
    name = "retype"
    verbose_name = _("Change Volume Type")
    url = "horizon:project:volumes:volumes:retype"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:retype"),)

    def allowed(self, request, volume=None):
        return volume.status in ("available", "in-use")
class AcceptTransfer(tables.LinkAction):
    """Table-level action linking to the accept-volume-transfer form."""
    name = "accept_transfer"
    verbose_name = _("Accept Transfer")
    url = "horizon:project:volumes:volumes:accept_transfer"
    classes = ("ajax-modal",)
    icon = "exchange"
    policy_rules = (("volume", "volume:accept_transfer"),)
    ajax = True

    def single(self, table, request, object_id=None):
        # AJAX endpoint that re-renders this action's button.
        return HttpResponse(self.render())
class DeleteTransfer(VolumePolicyTargetMixin, tables.Action):
    # This class inherits from tables.Action instead of the more obvious
    # tables.DeleteAction due to the confirmation message. When the delete
    # is successful, DeleteAction automatically appends the name of the
    # volume to the message, e.g. "Deleted volume transfer 'volume'". But
    # we are deleting the volume *transfer*, whose name is different.
    name = "delete_transfer"
    verbose_name = _("Cancel Transfer")
    policy_rules = (("volume", "volume:delete_transfer"),)
    classes = ('btn-danger',)
    help_text = _("This action cannot be undone.")

    def allowed(self, request, volume):
        # Truthy only when a transfer is actually pending on this volume.
        return (volume.status == "awaiting-transfer" and
                getattr(volume, 'transfer', None))

    def single(self, table, request, volume_id):
        volume = table.get_object_by_id(volume_id)
        try:
            cinder.transfer_delete(request, volume.transfer.id)
            transfer_name = volume.transfer.name
            if transfer_name:
                msg = _('Successfully deleted volume transfer "%s"'
                        ) % transfer_name
            else:
                msg = _("Successfully deleted volume transfer")
            messages.success(request, msg)
        except Exception:
            exceptions.handle(request, _("Unable to delete volume transfer."))
class UpdateRow(tables.Row):
    """Table row that refreshes itself over AJAX while in a transient state."""
    ajax = True

    def get_data(self, request, volume_id):
        # Fetch the latest volume state for the row refresh.
        volume = cinder.volume_get(request, volume_id)
        return volume
def get_size(volume):
    """Render the volume size as a translated "<n>GiB" string."""
    size_format = _("%sGiB")
    return size_format % volume.size
def get_attachment_name(request, attachment):
    """Return HTML linking to the attached instance, or escaped plain text
    if the instance detail URL cannot be reversed.
    """
    server_id = attachment.get("server_id", None)
    if attachment.get("instance"):
        name = attachment["instance"].name
    else:
        # Fall back to asking nova for the server name.
        try:
            name = api.nova.server_get(request, server_id).name
        except Exception:
            name = None
            exceptions.handle(request, _("Unable to retrieve "
                                         "attachment information."))
    try:
        url = reverse("horizon:project:instances:detail", args=(server_id,))
        return '<a href="%s">%s</a>' % (url, html.escape(name))
    except NoReverseMatch:
        return html.escape(name)
class AttachmentColumn(tables.Column):
    """Customized column class.

    Renders a volume's attachments as "Attached to <instance> on <dev>"
    entries, joined with commas.
    """
    def get_raw_data(self, volume):
        request = self.table.request
        template = _('Attached to %(instance)s on %(dev)s')
        rendered = []
        # Filter out "empty" attachments which the client returns...
        for att in volume.attachments:
            if not att:
                continue
            # When a volume is attached it may return the server_id
            # without the server name...
            rendered.append(template % {
                "instance": get_attachment_name(request, att),
                "dev": html.escape(att.get("device", "")),
            })
        return safestring.mark_safe(", ".join(rendered))
def get_volume_type(volume):
    """Return the volume's type, mapping the literal string "None" to None."""
    volume_type = volume.volume_type
    return None if volume_type == "None" else volume_type
def get_encrypted_value(volume):
    """Map a volume's ``encrypted`` flag to a display string.

    Missing/None -> "-", exactly False -> "No", anything else -> "Yes".
    """
    encrypted = getattr(volume, 'encrypted', None)
    if encrypted is None:
        return _("-")
    if encrypted is False:
        return _("No")
    return _("Yes")
class VolumesTableBase(tables.DataTable):
    """Base volumes table: shared status choices and common columns."""
    # NOTE(review): per horizon table convention, True/False appear to mark
    # terminal success/failure and None an in-progress state -- confirm.
    STATUS_CHOICES = (
        ("in-use", True),
        ("available", True),
        ("creating", None),
        ("error", False),
        ("error_extending", False),
        ("maintenance", False),
    )
    # Translatable display labels for each raw status value.
    STATUS_DISPLAY_CHOICES = (
        ("available", pgettext_lazy("Current status of a Volume",
                                    u"Available")),
        ("in-use", pgettext_lazy("Current status of a Volume", u"In-use")),
        ("error", pgettext_lazy("Current status of a Volume", u"Error")),
        ("creating", pgettext_lazy("Current status of a Volume",
                                   u"Creating")),
        ("error_extending", pgettext_lazy("Current status of a Volume",
                                          u"Error Extending")),
        ("extending", pgettext_lazy("Current status of a Volume",
                                    u"Extending")),
        ("attaching", pgettext_lazy("Current status of a Volume",
                                    u"Attaching")),
        ("detaching", pgettext_lazy("Current status of a Volume",
                                    u"Detaching")),
        ("deleting", pgettext_lazy("Current status of a Volume",
                                   u"Deleting")),
        ("error_deleting", pgettext_lazy("Current status of a Volume",
                                         u"Error deleting")),
        ("backing-up", pgettext_lazy("Current status of a Volume",
                                     u"Backing Up")),
        ("restoring-backup", pgettext_lazy("Current status of a Volume",
                                           u"Restoring Backup")),
        ("error_restoring", pgettext_lazy("Current status of a Volume",
                                          u"Error Restoring")),
        ("maintenance", pgettext_lazy("Current status of a Volume",
                                      u"Maintenance")),
    )
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    description = tables.Column("description",
                                verbose_name=_("Description"),
                                truncate=40)
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)

    def get_object_display(self, obj):
        # Volumes are identified by name in user-facing messages.
        return obj.name
class VolumesFilterAction(tables.FilterAction):
    """Client-side filter box for the volumes table."""
    def filter(self, table, volumes, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()
        # Fix: volume.name may be None for unnamed volumes, which would
        # raise AttributeError on .lower(); treat it as an empty string.
        return [volume for volume in volumes
                if q in (volume.name or '').lower()]
class VolumesTable(VolumesTableBase):
    """Main project volumes table, wiring columns to row/table actions."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    volume_type = tables.Column(get_volume_type,
                                verbose_name=_("Type"))
    attachments = AttachmentColumn("attachments",
                                   verbose_name=_("Attached To"))
    availability_zone = tables.Column("availability_zone",
                                      verbose_name=_("Availability Zone"))
    bootable = tables.Column('is_bootable',
                             verbose_name=_("Bootable"),
                             filters=(filters.yesno, filters.capfirst))
    encryption = tables.Column(get_encrypted_value,
                               verbose_name=_("Encrypted"),
                               link="horizon:project:volumes:"
                                    "volumes:encryption_detail")

    class Meta(object):
        name = "volumes"
        verbose_name = _("Volumes")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (CreateVolume, AcceptTransfer, DeleteVolume,
                         VolumesFilterAction)
        # Which launch action(s) appear is driven by settings toggles;
        # the Angular ("NG") launcher defaults to enabled.
        launch_actions = ()
        if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False):
            launch_actions = (LaunchVolume,) + launch_actions
        if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True):
            launch_actions = (LaunchVolumeNG,) + launch_actions
        row_actions = ((EditVolume, ExtendVolume,) +
                       launch_actions +
                       (EditAttachments, CreateSnapshot, CreateBackup,
                        RetypeVolume, UploadToImage, CreateTransfer,
                        DeleteTransfer, DeleteVolume))
class DetachVolume(tables.BatchAction):
    """Batch action that detaches volumes from their instances."""
    name = "detach"
    classes = ('btn-danger', 'btn-detach')
    policy_rules = (("compute", "compute:detach_volume"),)
    help_text = _("The data will remain in the volume and another instance"
                  " will be able to access the data if you attach"
                  " this volume to it.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(u"Detach Volume", u"Detach Volumes", count)

    # This action is asynchronous.
    @staticmethod
    def action_past(count):
        return ungettext_lazy(u"Detaching Volume", u"Detaching Volumes", count)

    def action(self, request, obj_id):
        attachment = self.table.get_object_by_id(obj_id)
        server_id = attachment.get('server_id', None)
        api.nova.instance_volume_detach(request, server_id, obj_id)

    def get_success_url(self, request):
        return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
    """Customized column class that does complex processing on the attachments
    for a volume instance.
    """
    def get_raw_data(self, attachment):
        request = self.table.request
        # get_attachment_name returns an HTML link; mark it safe for display.
        return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
    """Table of a volume's attachments, keyed by attachment id."""
    instance = AttachedInstanceColumn(get_attachment_name,
                                      verbose_name=_("Instance"))
    device = tables.Column("device",
                           verbose_name=_("Device"))

    def get_object_id(self, obj):
        return obj['id']

    def get_object_display(self, attachment):
        instance_name = get_attachment_name(self.request, attachment)
        display_vals = {"volume_name": attachment['volume_name'],
                        "instance_name": html.strip_tags(instance_name)}
        return _("Volume %(volume_name)s on instance %(instance_name)s"
                 ) % display_vals

    def get_object_by_id(self, obj_id):
        for candidate in self.data:
            if self.get_object_id(candidate) == obj_id:
                return candidate
        raise ValueError('No match found for the id "%s".' % obj_id)

    class Meta(object):
        name = "attachments"
        verbose_name = _("Attachments")
        table_actions = (DetachVolume,)
        row_actions = (DetachVolume,)
| |
"""Higher level child and data watching API's.
:Maintainer: Ben Bangert <ben@groovie.org>
:Status: Production
.. note::
:ref:`DataWatch` and :ref:`ChildrenWatch` may only handle a single
function, attempts to associate a single instance with multiple functions
will result in an exception being thrown.
"""
import logging
import time
import warnings
from functools import partial, wraps
from kazoo.retry import KazooRetry
from kazoo.exceptions import (
ConnectionClosedError,
NoNodeError,
KazooException
)
from kazoo.protocol.states import KazooState
# Module-level logger.
log = logging.getLogger(__name__)
# Unique sentinel object; not referenced in this chunk -- presumably used
# by callers to signal that a watch should stop (TODO confirm).
_STOP_WATCHING = object()
def _ignore_closed(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ConnectionClosedError:
pass
return wrapper
class DataWatch(object):
    """Watches a node for data updates and calls the specified
    function each time it changes

    The function will also be called the very first time its
    registered to get the data.

    Returning `False` from the registered function will disable future
    data change calls. If the client connection is closed (using the
    close command), the DataWatch will no longer get updates.

    If the function supplied takes three arguments, then the third one
    will be a :class:`~kazoo.protocol.states.WatchedEvent`. It will
    only be set if the change to the data occurs as a result of the
    server notifying the watch that there has been a change. Events
    like reconnection or the first call will not include an event.

    If the node does not exist, then the function will be called with
    ``None`` for all values.

    .. tip::

        Because :class:`DataWatch` can watch nodes that don't exist, it
        can be used alternatively as a higher-level Exists watcher that
        survives reconnections and session loss.

    Example with client:

    .. code-block:: python

        @client.DataWatch('/path/to/watch')
        def my_func(data, stat):
            print("Data is %s" % data)
            print("Version is %s" % stat.version)

        # Above function is called immediately and prints

        # Or if you want the event object
        @client.DataWatch('/path/to/watch')
        def my_func(data, stat, event):
            print("Data is %s" % data)
            print("Version is %s" % stat.version)
            print("Event is %s" % event)

    .. versionchanged:: 1.2
        DataWatch now ignores additional arguments that were previously
        passed to it and warns that they are no longer respected.

    """
    def __init__(self, client, path, func=None, send_path=False,
                 *args, **kwargs):
        """Create a data watcher for a path

        :param client: A zookeeper client.
        :type client: :class:`~kazoo.client.KazooClient`
        :param path: The path to watch for data changes on.
        :type path: str
        :param func: Function to call initially and every time the
                     node changes. `func` will be called with a
                     tuple, the value of the node and a
                     :class:`~kazoo.client.ZnodeStat` instance.
        :type func: callable
        :type send_path: bool
        :param send_path: Whether the function should be passed the
                          node path which children has been changed
                          or None upon initialization (see class
                          documentation)
        """
        self._client = client
        self._path = path
        self._func = func
        self._send_path = send_path
        self._stopped = False
        # Serializes _get_data runs; both the ZK watcher and the session
        # listener can trigger a fetch.
        self._run_lock = client.handler.lock_object()
        self._version = None
        # Retry forever so the watch survives connection hiccups.
        self._retry = KazooRetry(max_tries=None,
                                 sleep_func=client.handler.sleep_func)
        self._include_event = None
        self._ever_called = False
        self._used = False
        if args or kwargs:
            warnings.warn('Passing additional arguments to DataWatch is'
                          ' deprecated. ignore_missing_node is now assumed '
                          ' to be True by default, and the event will be '
                          ' sent if the function can handle receiving it',
                          DeprecationWarning, stacklevel=2)
        # Register our session listener if we're going to resume
        # across session losses
        if func is not None:
            self._used = True
            self._client.add_listener(self._session_watcher)
            self._get_data()

    def __call__(self, func):
        """Callable version for use as a decorator

        :param func: Function to call initially and every time the
                     data changes. `func` will be called with a
                     tuple, the value of the node and a
                     :class:`~kazoo.client.ZnodeStat` instance.
        :type func: callable
        """
        if self._used:
            raise KazooException(
                "A function has already been associated with this "
                "DataWatch instance.")
        self._func = func
        self._used = True
        self._client.add_listener(self._session_watcher)
        self._get_data()
        return func

    def _log_func_exception(self, data, stat, event=None):
        # Invoke the user callback, logging (and re-raising) anything it
        # throws; returning False unsubscribes the watch.
        try:
            # For backwards compatibility, don't send event to the
            # callback unless the send_event is set in constructor
            if not self._ever_called:
                self._ever_called = True
            try:
                if self._send_path:
                    result = self._func(data, stat, event, self._path)
                else:
                    result = self._func(data, stat, event)
            except TypeError:
                # Callback doesn't accept the event argument; retry with
                # the shorter signature.
                if self._send_path:
                    result = self._func(data, stat, self._path)
                else:
                    result = self._func(data, stat)
            if result is False:
                self._stopped = True
                self._client.remove_listener(self._session_watcher)
        except Exception as exc:
            log.exception(exc)
            raise

    @_ignore_closed
    def _get_data(self, event=None):
        # Ensure this runs one at a time, possible because the session
        # watcher may trigger a run
        with self._run_lock:
            if self._stopped:
                return
            initial_version = self._version
            try:
                data, stat = self._retry(self._client.get,
                                         self._path, self._watcher)
            except NoNodeError:
                data = None
                # This will set 'stat' to None if the node does not yet
                # exist.
                stat = self._retry(self._client.exists, self._path,
                                   self._watcher)
                if stat:
                    # Node appeared between get() and exists(); re-fetch
                    # on a fresh thread.
                    self._client.handler.spawn(self._get_data)
                    return
            # No node data, clear out version
            if stat is None:
                self._version = None
            else:
                self._version = stat.mzxid
            # Call our function if its the first time ever, or if the
            # version has changed
            if initial_version != self._version or not self._ever_called:
                self._log_func_exception(data, stat, event)

    def _watcher(self, event):
        # ZooKeeper watch callback: refresh, forwarding the event.
        self._get_data(event=event)

    def _set_watch(self, state):
        # NOTE(review): _watch_established is not read anywhere in this
        # class -- looks vestigial; confirm before relying on it.
        with self._run_lock:
            self._watch_established = state

    def _session_watcher(self, state):
        # On reconnect, refresh the data on a separate thread.
        if state == KazooState.CONNECTED:
            self._client.handler.spawn(self._get_data)
class ChildrenWatch(object):
    """Watches a node for children updates and calls the specified
    function each time it changes

    The function will also be called the very first time its
    registered to get children.

    Returning `False` from the registered function will disable future
    children change calls. If the client connection is closed (using
    the close command), the ChildrenWatch will no longer get updates.

    If send_event=True in __init__, then the function will always be
    called with second parameter, ``event``. Upon initial call or when
    recovering a lost session the ``event`` is always ``None``.
    Otherwise it's a :class:`~kazoo.protocol.states.WatchedEvent`
    instance.

    If send_path=True in __init__, then the function will send path of
    node, which children has been changed. It will be second parameter if
    send_event=False and third if send_event=True

    Example with client:

    .. code-block:: python

        @client.ChildrenWatch('/path/to/watch')
        def my_func(children):
            print "Children are %s" % children

        # Above function is called immediately and prints children

    """
    def __init__(self, client, path, func=None,
                 allow_session_lost=True, send_event=False,
                 send_path=False):
        """Create a children watcher for a path

        :param client: A zookeeper client.
        :type client: :class:`~kazoo.client.KazooClient`
        :param path: The path to watch for children on.
        :type path: str
        :param func: Function to call initially and every time the
                     children change. `func` will be called with a
                     single argument, the list of children.
        :type func: callable
        :param allow_session_lost: Whether the watch should be
                                   re-registered if the zookeeper
                                   session is lost.
        :type allow_session_lost: bool
        :type send_event: bool
        :param send_event: Whether the function should be passed the
                           event sent by ZooKeeper or None upon
                           initialization (see class documentation)
        :type send_path: bool
        :param send_path: Whether the function should be passed the
                          node path whose children have been changed
                          or None upon initialization (see class
                          documentation)

        The path must already exist for the children watcher to
        run.
        """
        self._client = client
        self._path = path
        self._func = func
        self._send_event = send_event
        self._send_path = send_path
        self._stopped = False
        self._watch_established = False
        self._allow_session_lost = allow_session_lost
        # Serializes _get_children runs; both the ZK watcher and the
        # session listener can trigger a fetch.
        self._run_lock = client.handler.lock_object()
        self._prior_children = None
        self._used = False
        # Register our session listener if we're going to resume
        # across session losses
        if func is not None:
            self._used = True
            if allow_session_lost:
                self._client.add_listener(self._session_watcher)
            self._get_children()

    def __call__(self, func):
        """Callable version for use as a decorator

        :param func: Function to call initially and every time the
                     children change. `func` will be called with a
                     single argument, the list of children.
        :type func: callable
        """
        if self._used:
            raise KazooException(
                "A function has already been associated with this "
                "ChildrenWatch instance.")
        self._func = func
        self._used = True
        if self._allow_session_lost:
            self._client.add_listener(self._session_watcher)
        self._get_children()
        return func

    @_ignore_closed
    def _get_children(self, event=None):
        with self._run_lock:  # Ensure this runs one at a time
            if self._stopped:
                return
            try:
                children = self._client.retry(self._client.get_children,
                                              self._path, self._watcher)
            except NoNodeError:
                # Unlike DataWatch, a missing node permanently stops
                # this watch.
                self._stopped = True
                return
            if not self._watch_established:
                self._watch_established = True
                # When re-establishing the watch after a session event,
                # skip the callback if nothing actually changed.
                if self._prior_children is not None and \
                   self._prior_children == children:
                    return
            self._prior_children = children
            try:
                if self._send_event:
                    if self._send_path:
                        result = self._func(children, event, self._path)
                    else:
                        result = self._func(children, event)
                else:
                    if self._send_path:
                        result = self._func(children, self._path)
                    else:
                        result = self._func(children)
                if result is False:
                    # Returning False unsubscribes the watch.
                    self._stopped = True
            except Exception as exc:
                log.exception(exc)
                raise

    def _watcher(self, event):
        # ZooKeeper watch callback: refresh, forwarding the event.
        self._get_children(event)

    def _session_watcher(self, state):
        # Re-arm the watch once the session reconnects.
        if state in (KazooState.LOST, KazooState.SUSPENDED):
            self._watch_established = False
        elif (state == KazooState.CONNECTED and
              not self._watch_established and not self._stopped):
            self._client.handler.spawn(self._get_children)
class PatientChildrenWatch(object):
    """Patient Children Watch that returns values after the children
    of a node don't change for a period of time

    A separate watcher for the children of a node, that ignores
    changes within a boundary time and sets the result only when the
    boundary time has elapsed with no children changes.

    Example::

        watcher = PatientChildrenWatch(client, '/some/path',
                                       time_boundary=5)
        async_object = watcher.start()

        # Blocks until the children have not changed for time boundary
        # (5 in this case) seconds, returns children list and an
        # async_result that will be set if the children change in the
        # future
        children, child_async = async_object.get()

    .. note::

        This Watch is different from :class:`DataWatch` and
        :class:`ChildrenWatch` as it only returns once, does not take
        a function that is called, and provides an
        :class:`~kazoo.interfaces.IAsyncResult` object that can be
        checked to see if the children have changed later.

    """
    def __init__(self, client, path, time_boundary=30):
        """
        :param client: A zookeeper client.
        :param path: Path whose children are watched.
        :param time_boundary: Seconds of quiescence required before the
                              result is published.
        """
        self.client = client
        self.path = path
        self.children = []
        self.time_boundary = time_boundary
        self.children_changed = client.handler.event_object()

    def start(self):
        """Begin the watching process asynchronously

        :returns: An :class:`~kazoo.interfaces.IAsyncResult` instance
                  that will be set when no change has occurred to the
                  children for time boundary seconds.
        """
        self.asy = asy = self.client.handler.async_result()
        self.client.handler.spawn(self._inner_start)
        return asy

    def _inner_start(self):
        # Poll until a full time_boundary passes with no child change,
        # then publish (children, armed async_result) on self.asy.
        try:
            while True:
                async_result = self.client.handler.async_result()
                self.children = self.client.retry(
                    self.client.get_children, self.path,
                    partial(self._children_watcher, async_result))
                self.client.handler.sleep_func(self.time_boundary)
                if self.children_changed.is_set():
                    self.children_changed.clear()
                else:
                    break
            self.asy.set((self.children, async_result))
        except Exception as exc:
            self.asy.set_exception(exc)

    def _children_watcher(self, async_result, event):
        # Fix: the parameter was previously named `async`, which became a
        # reserved keyword in Python 3.7 and made this module a
        # SyntaxError on modern interpreters. It is only ever passed
        # positionally (via partial above), so renaming is safe.
        self.children_changed.set()
        async_result.set(time.time())
| |
# autograder.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# imports from python standard library
import grading
import imp
import optparse
import os
import re
import sys
import projectParams
# register arguments and set default values
def readCommand(argv):
    """Parse autograder command-line options from argv and return the
    optparse options object (parsed positional args are discarded).
    """
    parser = optparse.OptionParser(description = 'Run public tests on student code')
    parser.set_defaults(generateSolutions=False, edxOutput=False, muteOutput=False, printTestCase=False)
    parser.add_option('--test-directory',
                      dest = 'testRoot',
                      default = 'test_cases',
                      help = 'Root test directory which contains subdirectories corresponding to each question')
    parser.add_option('--student-code',
                      dest = 'studentCode',
                      default = projectParams.STUDENT_CODE_DEFAULT,
                      help = 'comma separated list of student code files')
    parser.add_option('--code-directory',
                      dest = 'codeRoot',
                      default = "",
                      help = 'Root directory containing the student and testClass code')
    parser.add_option('--test-case-code',
                      dest = 'testCaseCode',
                      default = projectParams.PROJECT_TEST_CLASSES,
                      help = 'class containing testClass classes for this project')
    parser.add_option('--generate-solutions',
                      dest = 'generateSolutions',
                      action = 'store_true',
                      help = 'Write solutions generated to .solution file')
    parser.add_option('--edx-output',
                      dest = 'edxOutput',
                      action = 'store_true',
                      help = 'Generate edX output files')
    parser.add_option('--mute',
                      dest = 'muteOutput',
                      action = 'store_true',
                      help = 'Mute output from executing tests')
    parser.add_option('--print-tests', '-p',
                      dest = 'printTestCase',
                      action = 'store_true',
                      help = 'Print each test case before running them.')
    parser.add_option('--test', '-t',
                      dest = 'runTest',
                      default = None,
                      help = 'Run one particular test.  Relative to test root.')
    parser.add_option('--question', '-q',
                      dest = 'gradeQuestion',
                      default = None,
                      help = 'Grade one particular question.')
    (options, args) = parser.parse_args(argv)
    return options
# confirm we should author solution files
def confirmGenerate():
print 'WARNING: this action will overwrite any solution files.'
print 'Are you sure you want to proceed? (yes/no)'
while True:
ans = sys.stdin.readline().strip()
if ans == 'yes':
break
elif ans == 'no':
sys.exit(0)
else:
print 'please answer either "yes" or "no"'
# TODO: Fix this so that it tracebacks work correctly
# Looking at source of the traceback module, presuming it works
# the same as the intepreters, it uses co_filename. This is,
# however, a readonly attribute.
def setModuleName(module, filename):
    """Best-effort: tag a module's functions and classes with __file__."""
    functionType = type(confirmGenerate)
    classType = type(optparse.Option)
    for memberName in dir(module):
        member = getattr(module, memberName)
        if hasattr(member, '__file__'):
            continue
        if type(member) in (functionType, classType):
            setattr(member, '__file__', filename)
        # TODO: assign member __file__'s?
#from cStringIO import StringIO
def loadModuleString(moduleSource):
# Below broken, imp doesn't believe its being passed a file:
# ValueError: load_module arg#2 should be a file or None
#
#f = StringIO(moduleCodeDict[k])
#tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE))
tmp = imp.new_module(k)
exec moduleCodeDict[k] in tmp.__dict__
setModuleName(tmp, k)
return tmp
import py_compile
def loadModuleFile(moduleName, filePath):
    """Import the python file at filePath as a module named moduleName."""
    with open(filePath, 'r') as f:
        return imp.load_module(moduleName, f, "%s.py" % moduleName, (".py", "r", imp.PY_SOURCE))
def readFile(path, root=""):
    "Read file from disk at specified path and return as string"
    full_path = os.path.join(root, path)
    with open(full_path, 'r') as handle:
        return handle.read()
#######################################################################
# Error Hint Map
#######################################################################
# TODO: use these
ERROR_HINT_MAP = {
'q1': {
"<type 'exceptions.IndexError'>": """
We noticed that your project threw an IndexError on q1.
While many things may cause this, it may have been from
assuming a certain number of successors from a state space
or assuming a certain number of actions available from a given
state. Try making your code more general (no hardcoded indices)
and submit again!
"""
},
'q3': {
"<type 'exceptions.AttributeError'>": """
We noticed that your project threw an AttributeError on q3.
While many things may cause this, it may have been from assuming
a certain size or structure to the state space. For example, if you have
a line of code assuming that the state is (x, y) and we run your code
on a state space with (x, y, z), this error could be thrown. Try
making your code more general and submit again!
"""
}
}
import pprint
def splitStrings(d):
    """Copy dict d, dropping dunder-prefixed keys and splitting any
    string value containing a newline into a list of lines.
    """
    result = dict(d)
    for key in d:
        if key[0:2] == "__":
            del result[key]
            continue
        if "\n" in result[key]:
            result[key] = result[key].split("\n")
    return result
def printTest(testDict, solutionDict):
pp = pprint.PrettyPrinter(indent=4)
print "Test case:"
for line in testDict["__raw_lines__"]:
print " |", line
print "Solution:"
for line in solutionDict["__raw_lines__"]:
print " |", line
def runTest(testName, moduleDict, printTestCase=False):
    """Run a single test case; testName is a path relative to the test root."""
    import testParser
    import testClasses
    # Expose the supplied modules as globals of this module so test
    # classes can reference them by name.
    for module in moduleDict:
        setattr(sys.modules[__name__], module, moduleDict[module])
    testDict = testParser.TestParser(testName + ".test").parse()
    solutionDict = testParser.TestParser(testName + ".solution").parse()
    # NOTE(review): projectTestClasses is expected to be injected by the
    # setattr loop above (i.e. a key of moduleDict) -- confirm with callers.
    testClass = getattr(projectTestClasses, testDict['class'])
    testCase = testClass(testDict)
    if printTestCase:
        printTest(testDict, solutionDict)
    # This is a fragile hack to create a stub grades object
    grades = grading.Grades(projectParams.PROJECT_NAME, [(None,0)])
    testCase.execute(grades, moduleDict, solutionDict)
# evaluate student code
def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP, edxOutput=False, muteOutput=False,
            printTestCase=False, questionToGrade=None):
    """Grade the project: discover questions under testRoot, run every
    enabled test case (or write solution files when generateSolutions is
    set), and return the total points earned."""
    # imports of testbench code. note that the testClasses import must follow
    # the import of student code due to dependencies
    import testParser
    import testClasses
    for module in moduleDict:
        # Expose each student module as a global of this module so that
        # test code can reference it by name.
        setattr(sys.modules[__name__], module, moduleDict[module])
    problemDict = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
    # iterate through and run tests
    if 'order' in problemDict:
        test_subdirs = problemDict['order'].split()
    else:
        test_subdirs = sorted(os.listdir(testRoot))
    questions = []
    questionDicts = {}
    for q in test_subdirs:
        subdir_path = os.path.join(testRoot, q)
        # Skip non-directories and hidden entries.
        if not os.path.isdir(subdir_path) or q[0] == '.':
            continue
        # When grading a single question, skip all the others.
        if questionToGrade != None and q != questionToGrade:
            continue
        # create a question object
        questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
        questionClass = getattr(testClasses, questionDict['class'])
        question = questionClass(questionDict)
        questionDicts[q] = questionDict
        # load test cases into question
        # Test files: anything not starting with '#', '~' or '.' and ending
        # in '.test'; the basename (without extension) is the test name.
        tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t), os.listdir(subdir_path))
        tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
        for t in sorted(tests):
            test_file = os.path.join(subdir_path, '%s.test' % t)
            solution_file = os.path.join(subdir_path, '%s.solution' % t)
            testDict = testParser.TestParser(test_file).parse()
            if testDict.get("disabled", "false").lower() == "true":
                continue
            testClass = getattr(projectTestClasses, testDict['class'])
            testCase = testClass(testDict)
            def makefun(testCase, solution_file):
                # Inner function binds testCase/solution_file per iteration,
                # avoiding Python's late-binding closure pitfall.
                if generateSolutions:
                    # write solution file to disk
                    return lambda grades: testCase.writeSolution(moduleDict, solution_file)
                else:
                    # read in solution dictionary and pass as an argument
                    testDict = testParser.TestParser(test_file).parse()
                    solutionDict = testParser.TestParser(solution_file).parse()
                    if printTestCase:
                        return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
                    else:
                        return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
            question.addTestCase(testCase, makefun(testCase, solution_file))
        # Note extra function is necessary for scoping reasons
        def makefun(question):
            return lambda grades: question.execute(grades)
        setattr(sys.modules[__name__], q, makefun(question))
        questions.append((q, question.getMaxPoints()))
    grades = grading.Grades(projectParams.PROJECT_NAME, questions, edxOutput=edxOutput, muteOutput=muteOutput)
    for q in questionDicts:
        # Register prerequisites declared via the 'depends' key.
        for prereq in questionDicts[q].get('depends', '').split():
            grades.addPrereq(q, prereq)
    grades.grade(sys.modules[__name__])
    return grades.points
if __name__ == '__main__':
    # Entry point: parse command-line options, optionally confirm before
    # overwriting solution files, load the student code and test-class
    # modules, then either run one test or grade the whole project.
    options = readCommand(sys.argv)
    if options.generateSolutions:
        confirmGenerate()
    codePaths = options.studentCode.split(',')
    # moduleCodeDict = {}
    # for cp in codePaths:
    # moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
    # moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
    # moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
    # moduleDict = loadModuleDict(moduleCodeDict)
    moduleDict = {}
    for cp in codePaths:
        # Module name is the file's basename without the .py extension.
        moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
        moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))
    moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
    moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))
    if options.runTest != None:
        runTest(options.runTest, moduleDict, printTestCase=options.printTestCase)
    else:
        evaluate(options.generateSolutions, options.testRoot, moduleDict,
            edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
            questionToGrade=options.gradeQuestion)
| |
""" Test functions for fftpack.basic module
"""
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
from numpy.random import rand
import numpy as np
from concurrent import futures
import os
import scipy.fftpack
import numpy.fft
try:
import scipy.fft as scipy_fft
has_scipy_fft = True
except ImportError:
scipy_fft = {}
has_scipy_fft = False
from .common import Benchmark
try:
import pyfftw.interfaces.numpy_fft as pyfftw_fft
import pyfftw
pyfftw.interfaces.cache.enable()
has_pyfftw = True
except ImportError:
pyfftw_fft = {}
has_pyfftw = False
class PyfftwBackend:
    """uarray backend dispatching scipy.fft calls to pyfftw."""
    __ua_domain__ = 'numpy.scipy.fft'

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # pyfftw's numpy_fft interface has no overwrite_x argument.
        kwargs.pop('overwrite_x', None)
        fn = getattr(pyfftw_fft, method.__name__, None)
        if fn is None:
            return NotImplemented
        return fn(*args, **kwargs)
def random(size):
    """Return an array of U[0, 1) samples with shape ``size``."""
    return rand(*tuple(size))
def direct_dft(x):
    """Naive O(n^2) DFT, used as a reference implementation."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    base = -arange(n)*(2j*pi/n)  # per-sample phase increments
    for k in range(n):
        out[k] = dot(exp(k*base), x)
    return out
def direct_idft(x):
    """Naive O(n^2) inverse DFT, used as a reference implementation."""
    x = asarray(x)
    n = len(x)
    out = zeros(n, dtype=cdouble)
    base = arange(n)*(2j*pi/n)  # per-sample phase increments
    for k in range(n):
        out[k] = dot(exp(k*base), x)/n
    return out
def get_module(mod_name):
    """Map a module name string to the imported FFT module.

    Raises NotImplementedError for 'scipy.fft' when it is unavailable,
    and KeyError for unknown names.
    """
    if mod_name == 'scipy.fft' and not has_scipy_fft:
        raise NotImplementedError
    return {
        'scipy.fftpack': scipy.fftpack,
        'scipy.fft': scipy_fft,
        'numpy.fft': numpy.fft
    }[mod_name]
class Fft(Benchmark):
    """Benchmark 1-D forward/inverse FFT across modules and dtypes."""
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Complex input: independent random real and imaginary parts.
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble) + random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)
        mod = get_module(module)
        self.fft = mod.fft
        self.ifft = mod.ifft

    def time_fft(self, size, cmplx, module):
        self.fft(self.x)

    def time_ifft(self, size, cmplx, module):
        self.ifft(self.x)
class NextFastLen(Benchmark):
    """Benchmark scipy.fft.next_fast_len, both cached and uncached."""
    params = [
        [12, 13,             # small ones
         1021, 1024,         # 2 ** 10 and a prime
         16381, 16384,       # 2 ** 14 and a prime
         262139, 262144,     # 2 ** 17 and a prime
         999983, 1048576,    # 2 ** 20 and a prime
         ],
    ]
    param_names = ['size']

    def setup(self, size):
        if not has_scipy_fft:
            raise NotImplementedError

    def time_next_fast_len(self, size):
        # Bypass the lru_cache wrapper to time the raw computation.
        scipy_fft.next_fast_len.__wrapped__(size)

    def time_next_fast_len_cached(self, size):
        scipy_fft.next_fast_len(size)
class RFft(Benchmark):
    """Benchmark real-input FFT and its inverse."""
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'module']

    def setup(self, size, module):
        self.x = random([size]).astype(double)
        mod = get_module(module)
        self.rfft = mod.rfft
        self.irfft = mod.irfft
        # Precompute the forward transform for the inverse benchmark.
        self.y = self.rfft(self.x)

    def time_rfft(self, size, module):
        self.rfft(self.x)

    def time_irfft(self, size, module):
        self.irfft(self.y)
class RealTransforms1D(Benchmark):
    """Benchmark 1-D DCT/DST of types I-IV for scipy.fftpack and scipy.fft."""
    params = [
        [75, 100, 135, 256, 313, 512, 675, 1024, 2025, 2048],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']
    def setup(self, size, type, module):
        module = get_module(module)
        self.dct = getattr(module, 'dct')
        self.dst = getattr(module, 'dst')
        # Map the roman-numeral parameter to the integer type argument.
        self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        if self.type == 1:
            size += 1
        self.x = random([size]).astype(double)
        if self.type == 1:
            # DST-I input is two samples shorter than the DCT-I input so
            # that its logical size stays smooth as well.
            self.x_dst = self.x[:-2].copy()
    def time_dct(self, size, type, module):
        self.dct(self.x, self.type)
    def time_dst(self, size, type, module):
        # DST-I uses the shortened copy prepared in setup.
        x = self.x if self.type != 1 else self.x_dst
        self.dst(x, self.type)
class Fftn(Benchmark):
    """Benchmark N-D FFT for real and complex inputs."""
    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Sizes are encoded as "ROWSxCOLS" strings.
        shape = [int(s) for s in size.split("x")]
        if cmplx == 'cmplx':
            self.x = random(shape).astype(cdouble) + random(shape).astype(cdouble)*1j
        else:
            self.x = random(shape).astype(double)
        self.fftn = getattr(get_module(module), 'fftn')

    def time_fftn(self, size, cmplx, module):
        self.fftn(self.x)
class RealTransformsND(Benchmark):
    """Benchmark N-D DCT/DST of types I-IV for scipy.fftpack and scipy.fft."""
    params = [
        ['75x75', '100x100', '135x135', '313x363', '1000x100', '256x256'],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']
    def setup(self, size, type, module):
        self.dctn = getattr(get_module(module), 'dctn')
        self.dstn = getattr(get_module(module), 'dstn')
        # Map the roman-numeral parameter to the integer type argument.
        self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        size = list(map(int, size.split('x')))
        if self.type == 1:
            # Generator is fine here: random() unpacks it via rand(*size).
            size = (s + 1 for s in size)
        self.x = random(size).astype(double)
        if self.type == 1:
            # DST-I input is trimmed by two along each axis (the N-D
            # analogue of the 1-D [:-2] trim).
            self.x_dst = self.x[:-2,:-2].copy()
    def time_dctn(self, size, type, module):
        self.dctn(self.x, self.type)
    def time_dstn(self, size, type, module):
        # DST-I uses the trimmed copy prepared in setup.
        x = self.x if self.type != 1 else self.x_dst
        self.dstn(x, self.type)
class FftBackends(Benchmark):
    """Benchmark 1-D FFT through scipy.fft's pluggable backend system.

    Compares the default pocketfft backend, pyfftw, NumPy (via the debug
    backend) and direct calls into scipy.fft._pocketfft.
    """
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    param_names = ['size', 'type', 'backend']
    def setup(self, size, cmplx, backend):
        import scipy.fft
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)
        self.fft = scipy.fft.fft
        self.ifft = scipy.fft.ifft
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            # Bypass the backend dispatch machinery entirely.
            import scipy.fft._pocketfft
            self.fft = scipy.fft._pocketfft.fft
            self.ifft = scipy.fft._pocketfft.ifft
    # Timer parameters renamed from the misleading `module` to `backend`
    # to match param_names; ASV passes parameters positionally, so this
    # is a name-only change.
    def time_fft(self, size, cmplx, backend):
        self.fft(self.x)
    def time_ifft(self, size, cmplx, backend):
        self.ifft(self.x)
class FftnBackends(Benchmark):
    """Benchmark N-D FFT through scipy.fft's pluggable backend system.

    Compares the default pocketfft backend, pyfftw, NumPy (via the debug
    backend) and direct calls into scipy.fft._pocketfft.
    """
    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    param_names = ['size', 'type', 'backend']
    def setup(self, size, cmplx, backend):
        import scipy.fft
        # Sizes are encoded as "ROWSxCOLS" strings.
        size = list(map(int, size.split("x")))
        if cmplx == 'cmplx':
            self.x = random(size).astype(double)+random(size).astype(double)*1j
        else:
            self.x = random(size).astype(double)
        self.fftn = scipy.fft.fftn
        self.ifftn = scipy.fft.ifftn
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            # Bypass the backend dispatch machinery entirely.
            import scipy.fft._pocketfft
            self.fftn = scipy.fft._pocketfft.fftn
            self.ifftn = scipy.fft._pocketfft.ifftn
    # Timer parameters renamed from the misleading `module` to `backend`
    # to match param_names; ASV passes parameters positionally, so this
    # is a name-only change.
    def time_fft(self, size, cmplx, backend):
        self.fftn(self.x)
    def time_ifft(self, size, cmplx, backend):
        self.ifftn(self.x)
class FftThreading(Benchmark):
    """Benchmark batched FFTs parallelised either via scipy.fft's
    ``workers=`` argument or via an explicit thread pool."""
    params = [
        ['100x100', '1000x100', '256x256', '512x512'],
        [1, 8, 32, 100],
        ['workers', 'threading']
    ]
    param_names = ['size', 'num_transforms', 'method']
    def setup(self, size, num_transforms, method):
        if not has_scipy_fft:
            raise NotImplementedError
        # Sizes are encoded as "ROWSxCOLS" strings.
        size = list(map(int, size.split("x")))
        # One complex array per transform in the batch.
        self.xs = [(random(size)+1j*random(size)).astype(np.complex128)
                   for _ in range(num_transforms)]
        if method == 'threading':
            # Pool is only needed for the 'threading' method; one worker
            # per CPU.
            self.pool = futures.ThreadPoolExecutor(os.cpu_count())
    def map_thread(self, func):
        # Submit one transform per array and block until all complete.
        f = []
        for x in self.xs:
            f.append(self.pool.submit(func, x))
        futures.wait(f)
    def time_fft(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fft)
        else:
            # workers=-1 lets scipy parallelise each transform internally.
            for x in self.xs:
                scipy_fft.fft(x, workers=-1)
    def time_fftn(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fftn)
        else:
            for x in self.xs:
                scipy_fft.fftn(x, workers=-1)
| |
# -*- coding: utf-8 -*-
"""OAuth utility functions."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import logging
from datetime import datetime
from allauth.socialaccount.models import SocialAccount
from builtins import object
from django.conf import settings
from oauthlib.oauth2.rfc6749.errors import InvalidClientIdError
from requests.exceptions import RequestException
from requests_oauthlib import OAuth2Session
log = logging.getLogger(__name__)
class Service(object):

    """
    Service mapping for local accounts.

    :param user: User to use in token lookup and session creation
    :param account: :py:class:`SocialAccount` instance for user
    """

    adapter = None
    url_pattern = None
    default_user_avatar_url = settings.OAUTH_AVATAR_USER_DEFAULT_URL
    default_org_avatar_url = settings.OAUTH_AVATAR_ORG_DEFAULT_URL

    def __init__(self, user, account):
        self.session = None
        self.user = user
        self.account = account

    @classmethod
    def for_user(cls, user):
        """Return list of instances if user has an account for the provider."""
        try:
            accounts = SocialAccount.objects.filter(
                user=user,
                provider=cls.adapter.provider_id,
            )
            return [cls(user=user, account=account) for account in accounts]
        except SocialAccount.DoesNotExist:
            return []

    def get_adapter(self):
        return self.adapter

    @property
    def provider_id(self):
        return self.get_adapter().provider_id

    def get_session(self):
        # Lazily create the OAuth session on first use.
        if self.session is None:
            self.create_session()
        return self.session

    def create_session(self):
        """
        Create OAuth session for user.

        This configures the OAuth session based on the :py:class:`SocialToken`
        attributes. If there is an ``expires_at``, treat the session as an auto
        renewing token. Some providers expire tokens after as little as 2 hours.
        """
        token = self.account.socialtoken_set.first()
        if token is None:
            # No token stored for this account; leave self.session as None.
            return None

        token_config = {
            'access_token': token.token,
            'token_type': 'bearer',
        }
        if token.expires_at is not None:
            # Remaining lifetime in seconds, used for auto-refresh.
            token_expires = (token.expires_at - datetime.now()).total_seconds()
            token_config.update({
                'refresh_token': token.token_secret,
                'expires_in': token_expires,
            })

        self.session = OAuth2Session(
            client_id=token.app.client_id,
            token=token_config,
            auto_refresh_kwargs={
                'client_id': token.app.client_id,
                'client_secret': token.app.secret,
            },
            auto_refresh_url=self.get_adapter().access_token_url,
            token_updater=self.token_updater(token),
        )
        return self.session or None

    def token_updater(self, token):
        """
        Update token given data from OAuth response.

        Expect the following response into the closure::

            {
                u'token_type': u'bearer',
                u'scopes': u'webhook repository team account',
                u'refresh_token': u'...',
                u'access_token': u'...',
                u'expires_in': 3600,
                u'expires_at': 1449218652.558185
            }
        """

        def _updater(data):
            token.token = data['access_token']
            token.expires_at = datetime.fromtimestamp(data['expires_at'])
            token.save()
            log.info('Updated token %s:', token)

        return _updater

    def paginate(self, url, **kwargs):
        """
        Recursively combine results from service's pagination.

        :param url: start url to get the data from.
        :type url: unicode
        :param kwargs: optional parameters passed to .get() method
        :type kwargs: dict
        """
        # Initialized so the exception handler can tell whether a response
        # was ever obtained; previously a failure in .get() left `resp`
        # unbound and the handler raised NameError instead of logging.
        resp = None
        try:
            resp = self.get_session().get(url, data=kwargs)
            next_url = self.get_next_url_to_paginate(resp)
            results = self.get_paginated_results(resp)
            if next_url:
                # NOTE(review): kwargs are not forwarded to subsequent
                # pages; next_url is assumed to carry any parameters.
                results.extend(self.paginate(next_url))
            return results
        # Catch specific exception related to OAuth
        except InvalidClientIdError:
            log.warning('access_token or refresh_token failed: %s', url)
            raise Exception('You should reconnect your account')
        # Catch exceptions with request or deserializing JSON
        except (RequestException, ValueError):
            # Response data should always be JSON, still try to log if not
            # though
            debug_data = None
            if resp is not None:
                try:
                    debug_data = resp.json()
                except ValueError:
                    debug_data = resp.content
            log.debug(
                'Paginate failed at %s with response: %s',
                url,
                debug_data,
            )
            return []

    def sync(self):
        raise NotImplementedError

    def create_repository(self, fields, privacy=None, organization=None):
        raise NotImplementedError

    def create_organization(self, fields):
        raise NotImplementedError

    def get_next_url_to_paginate(self, response):
        """
        Return the next url to feed the `paginate` method.

        :param response: response from where to get the `next_url` attribute
        :type response: requests.Response
        """
        raise NotImplementedError

    def get_paginated_results(self, response):
        """
        Return the results for the current response/page.

        :param response: response from where to get the results.
        :type response: requests.Response
        """
        raise NotImplementedError

    def setup_webhook(self, project):
        raise NotImplementedError

    def update_webhook(self, project, integration):
        raise NotImplementedError

    @classmethod
    def is_project_service(cls, project):
        """
        Determine if this is the service the project is using.

        .. note::
            This should be deprecated in favor of attaching the
            :py:class:`RemoteRepository` to the project instance. This is a
            slight improvement on the legacy check for webhooks
        """
        # TODO Replace this check by keying project to remote repos
        return (
            cls.url_pattern is not None and
            cls.url_pattern.search(project.repo) is not None)
| |
"""
@author: Diogo Silva
Various functions to build graphs.
"""
import numpy as np
from numba import jit, cuda, int32, float32
from MyML.graph.mst import memSet, compute_cuda_grid_dim
from MyML.helper.scan import scan_gpu as ex_prefix_sum_gpu,\
exprefixsumNumbaSingle as ex_prefix_sum_cpu,\
exprefixsumNumba as ex_prefix_sum_cpu2
@jit
def binaryOriginVertexSearch(key, dest, fe, od):
    """
    Binary-search for the origin vertex of an edge.

    Inputs:
        key     : edge id
        dest    : destination array where the i-th element
                  is the ID of the destination vertex of the
                  i-th edge
        fe      : first_edge array
        od      : outdegree array

    Returns the vertex v whose edge range [fe[v], fe[v] + od[v] - 1]
    contains key, or -1 if no such vertex exists.
    """
    imin = 0
    imax = fe.size
    while imin < imax:
        # Floor division: plain '/' is float division in Python 3 and
        # would make fe[imid] an invalid (float) index.
        imid = (imax + imin) // 2
        imid_fe = fe[imid]
        # key is before
        if key < imid_fe:
            imax = imid
        # key is after
        elif key > imid_fe + od[imid] - 1:
            imin = imid + 1
        # key is between first edge of imid and next first edge
        else:
            return imid
    return -1
# @jit(["int32(int32[:], int32[:], int32[:], int32[:], int32[:], int32[:], int32[:], int32[:], int32[:])",
# "int32(int32[:], float32[:], int32[:], int32[:], int32[:], int32[:], int32[:], int32[:], float32[:])"], nopython=True)
@jit(nopython=True)
def getGraphFromEdges_seq(dest, weight, fe, od, edges, nod, nfe, ndest, nweight):
    """Build a new undirected CSR-style graph (nod, nfe, ndest, nweight)
    containing only the given edge IDs, writing each edge in both
    directions.  Output arrays are filled in place; returns 0 on success
    or -1 if an edge's origin vertex cannot be found."""
    # first build the outDegree to get the first_edge
    for e in range(edges.size):
        edge = edges[e]
        o_v = dest[edge] # destination
        # Origin vertex is recovered by binary search over first_edge.
        i_v = binaryOriginVertexSearch(edge, dest, fe, od)
        if i_v == -1:
            return -1
        # Each kept edge contributes one outgoing edge at both endpoints.
        nod[o_v] += 1
        nod[i_v] += 1
    # get first edge from outDegree
    ex_prefix_sum_cpu2(nod, nfe, init = 0)
    #get copy of newFirstEdge to serve as pointers for the newDest
    # Manual element copy: kept nopython-friendly.
    top_edge = np.empty(nfe.size, dtype = np.int32)
    for i in range(nfe.size):
        top_edge[i] = nfe[i]
    #top_edge = nfe.copy()
    # go through all the mst edges again and write the new edges in the new arrays
    for e in range(edges.size):
        edge = edges[e]
        o_v = dest[edge] # destination vertex
        i_v = binaryOriginVertexSearch(edge, dest, fe, od)
        if i_v == -1:
            return -1
        # Next free slot in each endpoint's adjacency segment.
        i_ptr = top_edge[i_v]
        o_ptr = top_edge[o_v]
        ndest[i_ptr] = o_v
        ndest[o_ptr] = i_v
        edge_w = weight[edge]
        nweight[i_ptr] = edge_w
        nweight[o_ptr] = edge_w
        top_edge[i_v] += 1
        top_edge[o_v] += 1
    return 0
def getGraphFromEdges_gpu(dest, weight, fe, od, edges, n_edges = None,
                          MAX_TPB = 512, stream = None):
    """
    All input (except MAX_TPB and stream) are device arrays.
    edges      : array with the IDs of the edges that will be part of the new graph
    n_edges    : array of 1 element with the number of valid edges in the edges array;
                 if n_edges < size of edges, the last elements of the edges array are
                 not considered

    Returns the new graph as device arrays (ndest, nweight, nfe, nod).
    """

    # check if number of valid edges was received
    if n_edges is None:
        edges_size = edges.size
        n_edges = cuda.to_device(np.array([edges_size], dtype = np.int32))
    else:
        # NOTE(review): relies on DeviceNDArray.getitem to copy a single
        # element back to the host -- confirm against the numba version in use.
        edges_size = int(n_edges.getitem(0))

    # check if a stream was received, if not create one
    if stream is None:
        myStream = cuda.stream()
    else:
        myStream = stream

    # Each kept edge is stored twice (once per direction).
    new_n_edges = edges_size * 2

    # allocate memory for new graph
    ndest = cuda.device_array(new_n_edges, dtype = dest.dtype,
                              stream = myStream)
    nweight = cuda.device_array(new_n_edges, dtype = weight.dtype,
                                stream = myStream)
    nfe = cuda.device_array_like(fe, stream = myStream)
    nod = cuda.device_array_like(od, stream = myStream)

    # fill new outdegree with zeros
    vertexGrid = compute_cuda_grid_dim(nod.size, MAX_TPB)
    memSet[vertexGrid, MAX_TPB, myStream](nod, 0)

    # count all edges of new array and who they belong to
    edgeGrid = compute_cuda_grid_dim(edges_size, MAX_TPB)
    countEdges[edgeGrid, MAX_TPB, myStream](edges, n_edges, dest, fe, od, nod)

    # get new first_edge array from new outdegree
    # (exclusive prefix sum over the outdegree, computed in place on nfe)
    nfe.copy_to_device(nod, stream=myStream)
    ex_prefix_sum_gpu(nfe, MAX_TPB = MAX_TPB, stream = myStream)

    # copy new first_edge to top_edge to serve as pointer in adding edges
    top_edge = cuda.device_array_like(nfe, stream = myStream)
    top_edge.copy_to_device(nfe, stream = myStream)

    addEdges[edgeGrid, MAX_TPB, myStream](edges, n_edges, dest, weight, fe, od,
                                          top_edge, ndest, nweight)

    del top_edge
    #del dest, weight, fe, od
    return ndest, nweight, nfe, nod
@cuda.jit
def countEdges(edges, n_edges, dest, fe, od, nod):
    """CUDA kernel: for every valid edge, atomically increment the new
    outdegree of both its endpoints.  One thread per edge slot."""
    # n_edges_sm = cuda.shared.array(0, dtype = int32)
    edge = cuda.grid(1)
    # if edge == 0:
    #     n_edges_sm[0] = n_edges[0]
    # if edge >= n_edges_sm[0]:
    # Grid is rounded up to a block multiple; drop out-of-range threads.
    if edge >= n_edges[0]:
        return
    key = edges[edge]
    # if edge is -1 it was marked for removal
    if key == -1:
        return
    o_v = dest[key]
    # Origin vertex recovered via binary search over first_edge.
    i_v = binaryOriginVertexSearch_CUDA(key, dest, fe, od)

    # increment edges on origin and destination vertices
    cuda.atomic.add(nod, i_v, 1)
    cuda.atomic.add(nod, o_v, 1)
@cuda.jit
def addEdges(edges, n_edges, dest, weight, fe, od, top_edge, ndest, nweight):
    """CUDA kernel: write every valid edge into the new graph in both
    directions, using top_edge as per-vertex atomic insertion pointers.
    One thread per edge slot."""
    edge = cuda.grid(1)
    # Grid is rounded up to a block multiple; without this guard (present
    # in countEdges but previously missing here) out-of-range threads
    # read past the end of the edges array.
    if edge >= n_edges[0]:
        return
    key = edges[edge]
    # if edge is -1 it was marked for removal
    if key == -1:
        return
    o_v = dest[key]
    i_v = binaryOriginVertexSearch_CUDA(key, dest, fe, od)

    # get and increment pointers for each vertex
    i_ptr = cuda.atomic.add(top_edge, i_v, 1)
    o_ptr = cuda.atomic.add(top_edge, o_v, 1)

    # add edges to destination array
    ndest[i_ptr] = o_v
    ndest[o_ptr] = i_v

    # add weight to edges
    edge_w = weight[key]
    nweight[i_ptr] = edge_w
    nweight[o_ptr] = edge_w
@cuda.jit(device=True)
def binaryOriginVertexSearch_CUDA(key, dest, fe, od):
    """
    Device-side binary search for the origin vertex of edge `key`:
    returns the vertex v with fe[v] <= key <= fe[v] + od[v] - 1,
    or -1 if no such vertex exists.

    TODO: test separately
    """
    imin = 0
    imax = fe.size
    while imin < imax:
        # Floor division: plain '/' follows Python 3 true-division
        # semantics and would yield a non-integer index.
        imid = (imax + imin) // 2
        imid_fe = fe[imid]
        # key is before
        if key < imid_fe:
            imax = imid
        # key is after
        elif key > imid_fe + od[imid] - 1:
            imin = imid + 1
        # key is between first edge of imid and next first edge
        else:
            return imid
    return -1
| |
import ctypes
from pytibrv.api import *
from pytibrv.status import *
from pytibrv.msg import *
import unittest
class MsgTest(unittest.TestCase):
    """Exercise the tibrvMsg_* C-binding wrappers: creation, copying,
    subjects, scalar get/update (with truncation), date-times and arrays."""

    @classmethod
    def setUpClass(cls):
        # The TIBRV machinery must be opened once for the whole suite.
        status = tibrv_Open()
        assert TIBRV_OK == status, tibrvStatus_GetText(status)

    @classmethod
    def tearDownClass(cls):
        tibrv_Close()

    def test_new(self):
        """A freshly created message stringifies to '{}'."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, sz = tibrvMsg_ConvertToString(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual("{}", sz)

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

    def test_copy(self):
        """A copied message stringifies identically to its source."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_UpdateString(msg, 'A', 'TEST')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, msg2 = tibrvMsg_CreateCopy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, sz = tibrvMsg_ConvertToString(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, sz2 = tibrvMsg_ConvertToString(msg2)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(sz, sz2)

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvMsg_Destroy(msg2)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

    def test_invalid(self):
        """Operations on a destroyed message return TIBRV_INVALID_MSG."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        # construct by invalid msg id, which just destroyed
        status = tibrvMsg_SetSendSubject(msg, 'TEST')
        self.assertEqual(TIBRV_INVALID_MSG, status, tibrvStatus_GetText(status))

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_INVALID_MSG, status, tibrvStatus_GetText(status))

        # assign random msg id, ex: 12345
        # DONT TRY IT, SEGMENT FAULT
        #
        #status = tibrvMsg_Destroy(12345)

    def test_subject(self):
        """Send and reply subjects round-trip through set/get."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_SetSendSubject(msg, 'TEST')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, subj = tibrvMsg_GetSendSubject(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual('TEST', subj)

        status = tibrvMsg_SetReplySubject(msg, 'TEST2')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, subj = tibrvMsg_GetReplySubject(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual('TEST2', subj)

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

    def test_get(self):
        """Scalar fields round-trip; oversized values truncate to the
        field width (e.g. 0xFFFF stored as I8 reads back as -1)."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_UpdateI8(msg, 'I8', 0xFFFF)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetI8(msg, 'I8')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(-1, n)

        status = tibrvMsg_UpdateU8(msg, 'U8', 0xFFFF)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetU8(msg, 'U8')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(0x00FF, n)

        status = tibrvMsg_UpdateI16(msg, 'I16', 0xFFFFFFFE)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetI16(msg, 'I16')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(-2, n)

        status = tibrvMsg_UpdateU16(msg, 'U16', 0xFFFFFFFE)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetU16(msg, 'U16')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(0x00FFFE, n)

        status = tibrvMsg_UpdateI32(msg, 'I32', 0x0000FFFFFFFFFFFD)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetI32(msg, 'I32')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(-3, n)

        status = tibrvMsg_UpdateU32(msg, 'U32', 0x0000FFFFFFFFFFFD)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetU32(msg, 'U32')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(0x00FFFFFFFD, n)

        status = tibrvMsg_UpdateI64(msg, 'I64', 0xfffffffffffffffc)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetI64(msg, 'I64')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(-4, n)

        status = tibrvMsg_UpdateU64(msg, 'U64', 0xFFFFFFFFFFFFFFFC)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, n = tibrvMsg_GetU64(msg, 'U64')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(0x00FFFFFFFFFFFFFFFC, n)

        status = tibrvMsg_UpdateString(msg, 'STR', 'TEST')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, sz = tibrvMsg_GetString(msg, 'STR')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual('TEST', sz)

        # Nested message fields round-trip by string representation.
        status, msg2 = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvMsg_UpdateString(msg2, 'DATA', 'TEST')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvMsg_UpdateMsg(msg, 'MSG', msg2)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, mm = tibrvMsg_GetMsg(msg, 'MSG')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, sz = tibrvMsg_ConvertToString(msg2)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, sz2 = tibrvMsg_ConvertToString(mm)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(sz, sz2)

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvMsg_Destroy(msg2)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

    def test_datetime(self):
        """Date-time fields: add/update with None and round-trip values."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_AddDateTime(msg, 'DT', None)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status = tibrvMsg_UpdateDateTime(msg, 'DT', None)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, ret = tibrvMsg_GetDateTime(msg, 'DT')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert ret is not None

        status, dt = tibrvMsg_GetCurrentTime()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertTrue(type(dt) is tibrvMsgDateTime)
        print(dt)

        status = tibrvMsg_UpdateDateTime(msg, 'DT', dt)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, dt2 = tibrvMsg_GetDateTime(msg, 'DT')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertTrue(type(dt2) is tibrvMsgDateTime)
        self.assertEqual(dt, dt2)

        dt3 = tibrvMsgDateTime()
        status = tibrvMsg_UpdateDateTime(msg, 'DT3', dt3)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        status, dt4 = tibrvMsg_GetDateTime(msg, 'DT3')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(dt3, dt4)

        status = tibrvMsg_Destroy(msg)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

    def test_array(self):
        """Array fields of every numeric width round-trip unchanged
        (F32 compared after single-precision conversion)."""
        status, msg = tibrvMsg_Create()
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))

        # I8
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateI8Array(msg, 'I8', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetI8Array(msg, 'I8')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # U8
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateU8Array(msg, 'U8', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetU8Array(msg, 'U8')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # I16
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateI16Array(msg, 'I16', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetI16Array(msg, 'I16')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # U16
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateU16Array(msg, 'U16', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetU16Array(msg, 'U16')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # I32
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateI32Array(msg, 'I32', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetI32Array(msg, 'I32')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # U32
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateU32Array(msg, 'U32', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetU32Array(msg, 'U32')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # I64
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateI64Array(msg, 'I64', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        # Fixed copy-paste bug: previously read back via GetI32Array.
        status, ret = tibrvMsg_GetI64Array(msg, 'I64')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # U64
        data = [1,2,3,4,5]
        status = tibrvMsg_UpdateU64Array(msg, 'U64', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetU64Array(msg, 'U64')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret

        # F32
        data = [1.1,2.2,3.3,4.4,5.5]
        status = tibrvMsg_UpdateF32Array(msg, 'F32', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetF32Array(msg, 'F32')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        for x in range(len(data)):
            f = ctypes.c_float(data[x]).value # convert to F32
            assert f == ret[x]

        # F64
        data = [1.1,2.2,3.3,4.4,5.5]
        status = tibrvMsg_UpdateF64Array(msg, 'F64', data)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, ret = tibrvMsg_GetF64Array(msg, 'F64')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        assert data == ret
if __name__ == "__main__":
    # Run the message tests directly with verbose output.
    unittest.main(verbosity=2)
| |
import pytest
from pycountry import countries
from schwifty import IBAN
from schwifty.exceptions import SchwiftyException
# Known-good IBANs (one or more per supported country); formatted in the
# canonical 4-character grouping so test_parse_iban can round-trip them.
valid = [
    "AL47 2121 1009 0000 0002 3569 8741",  # Albania
    "AD12 0001 2030 2003 5910 0100",  # Andorra
    "AT61 1904 3002 3457 3201",  # Austria
    "AZ21 NABZ 0000 0000 1370 1000 1944",  # Republic of Azerbaijan
    "BH67 BMAG 0000 1299 1234 56",  # Bahrain (Kingdom of)
    "BE68 5390 0754 7034",  # Belgium
    "BA39 1290 0794 0102 8494",  # Bosnia and Herzegovina
    "BR97 0036 0305 0000 1000 9795 493P 1",  # Brazil
    "BR18 0000 0000 1414 5512 3924 100C 2",  # Brazil
    "BG80 BNBG 9661 1020 3456 78",  # Bulgaria
    "CR05 0152 0200 1026 2840 66",  # Costa Rica
    "HR12 1001 0051 8630 0016 0",  # Croatia
    "CY17 0020 0128 0000 0012 0052 7600",  # Cyprus
    "CZ65 0800 0000 1920 0014 5399",  # Czech Republic
    "CZ94 5500 0000 0010 1103 8930",  # Czech Republic
    "DK50 0040 0440 1162 43",  # Denmark
    "FO62 6460 0001 6316 34",  # Faroe Islands
    "GL89 6471 0001 0002 06",  # Greenland
    "DO28 BAGR 0000 0001 2124 5361 1324",  # Dominican Republic
    "EE38 2200 2210 2014 5685",  # Estonia
    "FI21 1234 5600 0007 85",  # Finland
    "FR14 2004 1010 0505 0001 3M02 606",  # France
    "GE29 NB00 0000 0101 9049 17",  # Georgia
    "DE89 3704 0044 0532 0130 00",  # Germany
    "GI75 NWBK 0000 0000 7099 453",  # Gibraltar
    "GR16 0110 1250 0000 0001 2300 695",  # Greece
    "GT82 TRAJ 0102 0000 0012 1002 9690",  # Guatemala
    "HU42 1177 3016 1111 1018 0000 0000",  # Hungary
    "IS14 0159 2600 7654 5510 7303 39",  # Iceland
    "IE29 AIBK 9311 5212 3456 78",  # Ireland
    "IL62 0108 0000 0009 9999 999",  # Israel
    "IT60 X054 2811 1010 0000 0123 456",  # Italy
    "JO94 CBJO 0010 0000 0000 0131 0003 02",  # Jordan
    "KZ86 125K ZT50 0410 0100",  # Kazakhstan
    "XK05 1212 0123 4567 8906",  # Republic of Kosovo
    "KW81 CBKU 0000 0000 0000 1234 5601 01",  # Kuwait
    "LV80 BANK 0000 4351 9500 1",  # Latvia
    "LB62 0999 0000 0001 0019 0122 9114",  # Lebanon
    "LI21 0881 0000 2324 013A A",  # Liechtenstein (Principality of)
    "LT12 1000 0111 0100 1000",  # Lithuania
    "LU28 0019 4006 4475 0000",  # Luxembourg
    "MK07 2501 2000 0058 984",  # Macedonia, Former Yugoslav Republic of
    "MT84 MALT 0110 0001 2345 MTLC AST0 01S",  # Malta
    "MR13 0002 0001 0100 0012 3456 753",  # Mauritania
    "MU17 BOMM 0101 1010 3030 0200 000M UR",  # Mauritius
    "MD24 AG00 0225 1000 1310 4168",  # Moldova
    "MC58 1122 2000 0101 2345 6789 030",  # Monaco
    "ME25 5050 0001 2345 6789 51",  # Montenegro
    "NL91 ABNA 0417 1643 00",  # The Netherlands
    "NO93 8601 1117 947",  # Norway
    "PK36 SCBL 0000 0011 2345 6702",  # Pakistan
    "PS92 PALS 0000 0000 0400 1234 5670 2",  # Palestine, State of
    "PL61 1090 1014 0000 0712 1981 2874",  # Poland
    "PT50 0002 0123 1234 5678 9015 4",  # Portugal
    "QA58 DOHB 0000 1234 5678 90AB CDEF G",  # Qatar
    "RO49 AAAA 1B31 0075 9384 0000",  # Romania
    # 'LC62 HEMM 0001 0001 0012 0012 0002 3015', # Saint Lucia
    "SM86 U032 2509 8000 0000 0270 100",  # San Marino
    "ST68 0001 0001 0051 8453 1011 2",  # Sao Tome And Principe
    "SA03 8000 0000 6080 1016 7519",  # Saudi Arabia
    "RS35 2600 0560 1001 6113 79",  # Serbia
    # 'SC25 SSCB1101 0000 0000 0000 1497 USD', # Seychelles
    "SK31 1200 0000 1987 4263 7541",  # Slovak Republic
    "SI56 1910 0000 0123 438",  # Slovenia
    "ES91 2100 0418 4502 0005 1332",  # Spain
    "SE45 5000 0000 0583 9825 7466",  # Sweden
    "CH93 0076 2011 6238 5295 7",  # Switzerland
    "TL38 0080 0123 4567 8910 157",  # Timor-Leste
    "TN59 1000 6035 1835 9847 8831",  # Tunisia
    "TR33 0006 1005 1978 6457 8413 26",  # Turkey
    "UA21 3996 2200 0002 6007 2335 6600 1",  # Ukraine
    "AE07 0331 2345 6789 0123 456",  # United Arab Emirates
    "GB29 NWBK 6016 1331 9268 19",  # United Kingdom
    "VG96 VPVG 0000 0123 4567 8901",  # Virgin Islands, British
    "BY13 NBRB 3600 9000 0000 2Z00 AB00",  # Republic of Belarus
    "SV62 CENR 0000 0000 0000 0070 0025",  # El Salvador
    "FO62 6460 0001 6316 34",  # Faroe Islands
    "GL89 6471 0001 0002 06",  # Greenland
    "IQ98 NBIQ 8501 2345 6789 012",  # Iraq
]
# Malformed IBANs, each with the defect named; every one must make
# IBAN() raise (or validate() raise under allow_invalid=True).
invalid = [
    "DE89 3704 0044 0532 0130",  # Too short
    "DE89 3704 0044 0532 0130 0000",  # Too long
    "GB96 BARC 2020 1530 0934 591",  # Too long
    "XX89 3704 0044 0532 0130 00",  # Wrong country-code
    "DE99 3704 0044 0532 0130 00",  # Wrong check digits
    "DEAA 3704 0044 0532 0130 00",  # Wrong format (check digits)
    "GB2L ABBY 0901 2857 2017 07",  # Wrong format (check digits)
    "DE89 AA04 0044 0532 0130 00",  # Wrong format (country specific)
    "GB12 BARC 2020 1530 093A 59",  # Wrong account format (country specific)
    "GB01 BARC 2071 4583 6083 87",  # Wrong checksum digits
    "GB00 HLFX 1101 6111 4553 65",  # Wrong checksum digits
    "GB94 BARC 2020 1530 0934 59",  # Wrong checksum digits
]
@pytest.mark.parametrize("number", valid)
def test_parse_iban(number):
    """Every known-good IBAN parses (with BBAN validation) and round-trips."""
    parsed = IBAN(number, validate_bban=True)
    assert parsed.formatted == number
@pytest.mark.parametrize("number", invalid)
def test_parse_iban_allow_invalid(number):
    """allow_invalid defers checking; validate() must still raise."""
    lenient = IBAN(number, allow_invalid=True)
    with pytest.raises(SchwiftyException):
        lenient.validate()
@pytest.mark.parametrize("number", invalid)
def test_invalid_iban(number):
    """Constructing a malformed IBAN eagerly raises right away."""
    with pytest.raises(SchwiftyException):
        IBAN(number)
def test_iban_properties():
    """All derived attributes of a known German IBAN are exposed correctly."""
    iban = IBAN("DE42430609677000534100")
    expected = {
        "bank_code": "43060967",
        "branch_code": "",
        "account_code": "7000534100",
        "country_code": "DE",
        "bic": "GENODEM1GLS",
        "formatted": "DE42 4306 0967 7000 5341 00",
        "length": 22,
        "country": countries.get(alpha_2="DE"),
    }
    for attribute, value in expected.items():
        assert getattr(iban, attribute) == value
@pytest.mark.parametrize(
    "components,compact",
    [
        (("DE", "43060967", "7000534100"), "DE42430609677000534100"),
        (("DE", "51230800", "2622196545"), "DE61512308002622196545"),
        (("DE", "20690500", "9027378"), "DE37206905000009027378"),
        (("DE", "75090900", "7408418"), "DE04750909000007408418"),
        (("IT", "0538703601", "000000198036"), "IT18T0538703601000000198036"),
        (("IT", "0538703601", "000000198060"), "IT57V0538703601000000198060"),
        (("IT", "0538703601", "000000198072"), "IT40Z0538703601000000198072"),
        (("IT", "0538742530", "000000802006"), "IT29P0538742530000000802006"),
        (("IT", "0306940101", "100100003599"), "IT94I0306940101100100003599"),
        (("IT", "0335901600", "100000131525"), "IT63M0335901600100000131525"),
        (("IT", "03359", "100000131525", "01600"), "IT63M0335901600100000131525"),
        (("GB", "NWBK", "31926819", "601613"), "GB29NWBK60161331926819"),
        (("GB", "NWBK", "31926819"), "GB66NWBK00000031926819"),
        (("GB", "NWBK601613", "31926819"), "GB29NWBK60161331926819"),
        (("BE", "050", "123"), "BE66050000012343"),
        (("BE", "050", "123456"), "BE45050012345689"),
        (("BE", "539", "0075470"), "BE68539007547034"),
    ],
)
def test_generate_iban(components, compact):
    """IBAN.generate pads, assembles and check-digits the expected IBAN."""
    generated = IBAN.generate(*components)
    assert generated.compact == compact
@pytest.mark.parametrize(
    "components",
    [
        ("DE", "012345678", "7000123456"),
        ("DE", "51230800", "01234567891"),
        ("GB", "NWBK", "31926819", "1234567"),
    ],
)
def test_generate_iban_invalid(components):
    """Over-long bank/account/branch components make generate() raise."""
    with pytest.raises(SchwiftyException):
        IBAN.generate(*components)
def test_magic_methods():
    """IBAN instances compare, hash and stringify like their compact form."""
    compact = "DE42430609677000534100"
    other = "ES9121000418450200051332"
    iban = IBAN(compact)
    assert iban == compact
    assert iban == IBAN(compact)
    assert iban != IBAN(other)
    assert iban < IBAN(other)
    assert str(iban) == compact
    assert hash(iban) == hash(compact)
    assert repr(iban) == "<IBAN=DE42430609677000534100>"
@pytest.mark.parametrize(
    "iban,bic",
    [
        ("AT483200000012345864", "RLNWATWWXXX"),
        ("AT930100000000123145", "BUNDATWWXXX"),
        ("BE71096123456769", "GKCCBEBB"),
        ("CZ5508000000001234567899", "GIBACZPX"),
        ("DE37206905000009027378", "GENODEF1S11"),
        ("ES7921000813610123456789", "CAIXESBB"),
        ("FI1410093000123458", "NDEAFIHH"),
        ("HR1723600001101234565", "ZABAHR2X"),
        ("LV97HABA0012345678910", "HABALV22XXX"),
        ("PL50860000020000000000093122", "POLUPLPRXXX"),
        ("SI56192001234567892", "SZKBSI2XXXX"),
        ("NL02ABNA0123456789", "ABNANL2A"),
    ],
)
def test_bic_from_iban(iban, bic):
    """The bundled bank registry resolves each IBAN to its known BIC."""
    derived = IBAN(iban).bic
    assert derived.compact == bic
def test_unknown_bic_from_iban():
    """An IBAN whose bank code is absent from the registry yields no BIC."""
    unknown = IBAN("SI72000001234567892")
    assert unknown.bic is None
def test_be_generated_iban_valid():
    """A generated Belgian IBAN passes full validation including the BBAN."""
    generated = IBAN.generate("BE", bank_code="050", account_code="123456")
    assert generated.validate(validate_bban=True)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of the objects and is also configurable via SparkContext's C{batchSize} parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    """Sentinel values written in place of a real (non-negative) frame
    length to signal out-of-band events to the stream reader."""
    END_OF_DATA_SECTION = -1
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5
    START_ARROW_STREAM = -6
class PythonEvalType(object):
    """Identifies how the JVM side expects Python code to be evaluated
    (plain RDD function vs. the SQL UDF flavours)."""
    NON_UDF = 0
    SQL_BATCHED_UDF = 1
    SQL_PANDAS_UDF = 2
    SQL_PANDAS_GROUPED_UDF = 3
class Serializer(object):
    """Abstract base class for PySpark serializers.

    Subclasses implement dump_stream/load_stream.  Two serializers compare
    equal when output produced by one can be deserialized by the other.
    """
    def dump_stream(self, iterator, stream):
        """Serialize an iterator of objects to the output stream."""
        raise NotImplementedError
    def load_stream(self, stream):
        """Return an iterator of deserialized objects from the input stream."""
        raise NotImplementedError
    def _load_stream_without_unbatching(self, stream):
        """Return an iterator of deserialized batches (iterables) of objects.

        Non-batching serializers simply wrap each object in a one-element
        list so downstream code can treat everything as batches.
        """
        return map(lambda obj: [obj], self.load_stream(stream))
    # Equality contract: same concrete class and same configuration.  This
    # covers the simple cases; subclasses override __eq__ as appropriate.
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "{0}()".format(self.__class__.__name__)
    def __hash__(self):
        return hash(str(self))
class FramedSerializer(Serializer):
    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where C{length} is a 32-bit integer and data is C{length} bytes.
    """
    def __init__(self):
        # On Python 2.6, we can't write bytearrays to streams, so we need to convert them
        # to strings first. Check if the version number is that old.
        self._only_write_strings = sys.version_info[0:2] <= (2, 6)
    def dump_stream(self, iterator, stream):
        # One length-prefixed frame per object.
        for obj in iterator:
            self._write_with_length(obj, stream)
    def load_stream(self, stream):
        # Generator: yields frames until the reader signals end of data.
        while True:
            try:
                yield self._read_with_length(stream)
            except EOFError:
                return
    def _write_with_length(self, obj, stream):
        # Serialize, then emit the 32-bit big-endian length followed by the payload.
        serialized = self.dumps(obj)
        if serialized is None:
            raise ValueError("serialized value should not be None")
        if len(serialized) > (1 << 31):
            # Length is written as a signed 32-bit int, so 2 GiB is the hard cap.
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(serialized), stream)
        if self._only_write_strings:
            stream.write(str(serialized))
        else:
            stream.write(serialized)
    def _read_with_length(self, stream):
        # Negative lengths are SpecialLengths sentinels, not real frame sizes.
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        obj = stream.read(length)
        if len(obj) < length:
            # Truncated frame: the stream ended mid-payload.
            raise EOFError
        return self.loads(obj)
    def dumps(self, obj):
        """
        Serialize an object into a byte array.
        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError
    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class ArrowSerializer(FramedSerializer):
    """
    Serializes bytes as Arrow data with the Arrow file format.
    """
    def dumps(self, batch):
        # Write a single pyarrow.RecordBatch as an Arrow *file* (not stream)
        # into an in-memory buffer and return the raw bytes.
        import pyarrow as pa
        import io
        sink = io.BytesIO()
        writer = pa.RecordBatchFileWriter(sink, batch.schema)
        writer.write_batch(batch)
        writer.close()
        return sink.getvalue()
    def loads(self, obj):
        # Inverse of dumps: parse the Arrow file bytes back into a Table.
        import pyarrow as pa
        reader = pa.RecordBatchFileReader(pa.BufferReader(obj))
        return reader.read_all()
    def __repr__(self):
        return "ArrowSerializer"
def _create_batch(series):
    """Build a pyarrow.RecordBatch from pandas Series data.

    ``series`` may be a single Series, a (Series, arrow_type) pair, or a
    list/tuple of either; columns are named "_0", "_1", ...
    """
    import pyarrow as pa
    # Make input conform to [(series1, type1), (series2, type2), ...]
    if not isinstance(series, (list, tuple)) or \
            (len(series) == 2 and isinstance(series[1], pa.DataType)):
        series = [series]
    series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
    # If a nullable integer series has been promoted to floating point with NaNs, need to cast
    # NOTE: this is not necessary with Arrow >= 0.7
    def cast_series(s, t):
        if t is None or s.dtype == t.to_pandas_dtype():
            return s
        else:
            # Fill NaNs before the cast; the null mask passed below preserves them.
            return s.fillna(0).astype(t.to_pandas_dtype(), copy=False)
    arrs = [pa.Array.from_pandas(cast_series(s, t), mask=s.isnull(), type=t) for s, t in series]
    return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
    """
    Serializes Pandas.Series as Arrow data with Arrow streaming format.
    """
    def dump_stream(self, iterator, stream):
        """
        Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
        a list of series accompanied by an optional pyarrow type to coerce the data to.
        """
        import pyarrow as pa
        writer = None
        try:
            for series in iterator:
                batch = _create_batch(series)
                if writer is None:
                    # Announce the Arrow stream to the JVM reader before the
                    # first batch, then reuse one writer for all batches.
                    write_int(SpecialLengths.START_ARROW_STREAM, stream)
                    writer = pa.RecordBatchStreamWriter(stream, batch.schema)
                writer.write_batch(batch)
        finally:
            # Close even on error so the Arrow stream footer is flushed.
            if writer is not None:
                writer.close()
    def load_stream(self, stream):
        """
        Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
        """
        import pyarrow as pa
        reader = pa.open_stream(stream)
        for batch in reader:
            # One yielded item per batch: a list with one pandas.Series per column.
            table = pa.Table.from_batches([batch])
            yield [c.to_pandas() for c in table.itercolumns()]
    def __repr__(self):
        return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
    """
    Serializes a stream of objects in batches by calling its wrapped
    Serializer with streams (lists) of objects.
    """
    UNLIMITED_BATCH_SIZE = -1
    UNKNOWN_BATCH_SIZE = 0
    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        self.serializer = serializer
        self.batchSize = batchSize
    def _batched(self, iterator):
        size = self.batchSize
        if size == self.UNLIMITED_BATCH_SIZE:
            # Everything goes into one batch.
            yield list(iterator)
        elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
            # Sliceable sequence (Python 2 only): slice directly instead of
            # copying element by element.
            for start in xrange(0, len(iterator), size):
                yield iterator[start: start + size]
        else:
            # Plain iterator: accumulate until a batch fills up.
            batch = []
            for obj in iterator:
                batch.append(obj)
                if len(batch) == size:
                    yield batch
                    batch = []
            if batch:
                yield batch
    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)
    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
    """
    Serializes a stream of list of pairs, split the list of values
    which contain more than a certain number of objects to make them
    have similar sizes.
    """
    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)
    def _batched(self, iterator):
        # Each (key, values) pair is re-emitted as several pairs whose value
        # lists hold at most batchSize elements.
        n = self.batchSize
        for key, values in iterator:
            for i in range(0, len(values), n):
                yield key, values[i:i + n]
    def load_stream(self, stream):
        # No unbatching on load: pairs are read back exactly as written.
        return self.serializer.load_stream(stream)
    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
    """
    Choose the size of batch automatically based on the size of object
    """
    def __init__(self, serializer, bestSize=1 << 16):
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        # Target serialized size (bytes) per batch; batch count adapts toward it.
        self.bestSize = bestSize
    def dump_stream(self, iterator, stream):
        # Start with batches of 1 object and grow/shrink the batch size so the
        # serialized frames stay near bestSize bytes.
        batch, best = 1, self.bestSize
        iterator = iter(iterator)
        while True:
            vs = list(itertools.islice(iterator, batch))
            if not vs:
                break
            bytes = self.serializer.dumps(vs)
            write_int(len(bytes), stream)
            stream.write(bytes)
            size = len(bytes)
            if size < best:
                # Frame too small: double the batch.
                batch *= 2
            elif size > best * 10 and batch > 1:
                # Frame way too big: halve the batch (never below 1).
                batch //= 2
    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
    we additionally need to do the cartesian within each pair of batches.
    """
    def __init__(self, key_ser, val_ser):
        # Serializers used for the key-side and value-side streams respectively.
        self.key_ser = key_ser
        self.val_ser = val_ser
    def _load_stream_without_unbatching(self, stream):
        # Batches of keys and values are interleaved in the stream and read in
        # lockstep; each pair of batches yields their cross product.
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield product(key_batch, val_batch)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % \
               (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip,
    we additionally need to do the zip within each pair of batches.
    """
    def __init__(self, key_ser, val_ser):
        # Serializers used for the key-side and value-side streams respectively.
        self.key_ser = key_ser
        self.val_ser = val_ser
    def _load_stream_without_unbatching(self, stream):
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
            # instead of lists. We need to convert them to lists if needed.
            key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
            val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
            if len(key_batch) != len(val_batch):
                # Mismatched batch sizes means the two streams are out of sync.
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield zip(key_batch, val_batch)
    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    """Pass-through serializer: frames raw bytes without transforming them."""
    def loads(self, obj):
        return obj
    def dumps(self, obj):
        return obj
# Hook namedtuple, make it picklable
# Cache of reconstructed namedtuple classes, keyed by (name, fields), so
# repeated unpickling reuses one class object per distinct shape.
__cls = {}
def _restore(name, fields, value):
    """ Restore an object of namedtuple"""
    k = (name, fields)
    cls = __cls.get(k)
    if cls is None:
        # First time this shape is seen: recreate the namedtuple class.
        cls = collections.namedtuple(name, fields)
        __cls[k] = cls
    return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return
    global _old_namedtuple  # or it will put in closure
    global _old_namedtuple_kwdefaults  # or it will put in closure too
    def _copy_func(f):
        # Shallow copy of a function object, so the original namedtuple
        # survives after collections.namedtuple.__code__ is replaced below.
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)
    def _kwdefaults(f):
        # __kwdefaults__ contains the default values of keyword-only arguments which are
        # introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
        # are as below:
        #
        # - Does not exist in Python 2.
        # - Returns None in <= Python 3.5.x.
        # - Returns a dictionary containing the default values to the keys from Python 3.6.x
        #   (See https://bugs.python.org/issue25628).
        kargs = getattr(f, "__kwdefaults__", None)
        if kargs is None:
            return {}
        else:
            return kargs
    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
    def namedtuple(*args, **kwargs):
        # Wrapper that delegates to the original namedtuple and then patches
        # the resulting class with a picklable __reduce__.
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)
    # replace namedtuple with new one
    # The wrapper's code object is installed on the original function, so its
    # globals must also carry the names the wrapper's code refers to.
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1
    # hack the cls already generated by namedtuple
    # those created in other module can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o)  # hack inplace
# Apply the hijack at import time so every namedtuple created afterwards
# (and any already defined in __main__) is picklable.
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
    """
    Serializes objects using Python's pickle serializer:
    http://docs.python.org/2/library/pickle.html
    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """
    def dumps(self, obj):
        # `protocol` is 2 on Python 2 and 3 on Python 3 (set at module import).
        return pickle.dumps(obj, protocol)
    if sys.version >= '3':
        # On Python 3, decode Python-2 pickles' str data as bytes by default.
        def loads(self, obj, encoding="bytes"):
            return pickle.loads(obj, encoding=encoding)
    else:
        # Python 2 pickle.loads takes no encoding argument.
        def loads(self, obj, encoding=None):
            return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
    """PickleSerializer variant that can also serialize functions, lambdas
    and other objects plain pickle rejects, via cloudpickle."""
    def dumps(self, obj):
        # Protocol 2 keeps the output readable by Python 2 workers.
        return cloudpickle.dumps(obj, 2)
class MarshalSerializer(FramedSerializer):
    """
    Serializes objects using Python's Marshal serializer:
    http://docs.python.org/2/library/marshal.html
    This serializer is faster than PickleSerializer but supports fewer datatypes.
    """
    def dumps(self, obj):
        return marshal.dumps(obj)
    def loads(self, obj):
        return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
    """
    Choose marshal or pickle as serialization protocol automatically.

    Each frame is tagged with a one-byte prefix: b'M' for marshal, b'P' for
    pickle.  Marshal is tried first because it is faster; the first value it
    cannot handle permanently switches this serializer to pickle.
    """
    def __init__(self):
        FramedSerializer.__init__(self)
        # None while marshal is still being tried; b'P' once pickle-only.
        self._type = None
    def dumps(self, obj):
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)
    def loads(self, obj):
        # Slice (rather than index) the tag byte: on Python 3, obj[0] is an
        # int and would never compare equal to b'M'/b'P'; obj[:1] yields the
        # same one-character value on Python 2 and bytes on Python 3.
        _type = obj[:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
    """
    Compress the serialized data
    """
    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        # Inner serializer whose output is zlib-compressed per frame.
        self.serializer = serializer
    def dumps(self, obj):
        # Level 1: fastest compression; throughput matters more than ratio here.
        return zlib.compress(self.serializer.dumps(obj), 1)
    def loads(self, obj):
        return self.serializer.loads(zlib.decompress(obj))
    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
    """
    Deserializes streams written by String.getBytes.
    """
    def __init__(self, use_unicode=True):
        # When False, raw bytes are returned instead of decoded text.
        self.use_unicode = use_unicode
    def loads(self, stream):
        # Frames are length-prefixed; negative lengths are sentinels.
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        s = stream.read(length)
        return s.decode("utf-8") if self.use_unicode else s
    def load_stream(self, stream):
        try:
            while True:
                yield self.loads(stream)
        except struct.error:
            # Stream ended mid-length-prefix.
            return
        except EOFError:
            return
    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer from *stream*."""
    raw = stream.read(8)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!q", raw)
    return value
def write_long(value, stream):
    """Write *value* to *stream* as a big-endian signed 64-bit integer."""
    packed = struct.pack("!q", value)
    stream.write(packed)
def pack_long(value):
    """Return *value* packed as a big-endian signed 64-bit integer (8 bytes)."""
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer from *stream*."""
    raw = stream.read(4)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!i", raw)
    return value
def write_int(value, stream):
    """Write *value* to *stream* as a big-endian signed 32-bit integer."""
    packed = struct.pack("!i", value)
    stream.write(packed)
def write_with_length(obj, stream):
    """Write *obj* (a bytes-like payload) to *stream*, prefixed by its
    length as a big-endian signed 32-bit integer."""
    write_int(len(obj), stream)
    stream.write(obj)
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring; they require a
    # working SparkContext, so this is only exercised in a Spark environment.
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        exit(-1)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""Tests for tensorflow_model_server."""
import atexit
import json
import os
import shlex
import socket
import subprocess
import sys
import time
import urllib2
# This is a placeholder for a Google-internal import.
import grpc
from grpc.beta import implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.interfaces.face import face
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.platform import flags
from tensorflow.python.saved_model import signature_constants
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import get_model_status_pb2
from tensorflow_serving.apis import inference_pb2
from tensorflow_serving.apis import model_service_pb2_grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import regression_pb2
FLAGS = flags.FLAGS
# Deadlines for RPC/REST calls and bounds for waiting on server startup
# (values in seconds unless the name says otherwise).
RPC_TIMEOUT = 5.0
HTTP_REST_TIMEOUT_MS = 5000
CHANNEL_WAIT_TIMEOUT = 5.0
WAIT_FOR_SERVER_READY_INT_SECS = 60
def PickUnusedPort():
  """Returns a currently-unused TCP port chosen by the OS."""
  # Binding to port 0 makes the kernel assign a free ephemeral port;
  # closing the socket releases it for the server under test to claim.
  sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
  sock.bind(('', 0))
  unused_port = sock.getsockname()[1]
  sock.close()
  return unused_port
def WaitForServerReady(port):
  """Waits for a server on the localhost to become ready."""
  # Poll once per second, up to WAIT_FOR_SERVER_READY_INT_SECS times.
  for _ in range(0, WAIT_FOR_SERVER_READY_INT_SECS):
    time.sleep(1)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'intentionally_missing_model'
    try:
      # Send empty request to missing model
      channel = implementations.insecure_channel('localhost', port)
      stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
      stub.Predict(request, RPC_TIMEOUT)
    except face.AbortionError as error:
      # Missing model error will have details containing 'Servable'
      # (i.e. the server is up and answered); other errors mean keep waiting.
      if 'Servable' in error.details:
        print 'Server is ready'
        break
def CallREST(name, url, req):
  """Returns HTTP response body from a REST API call."""
  # `name` is only used for logging; `req` is JSON-encoded as the POST body.
  print 'Sending {} request to {} with data:\n{}'.format(name, url, req)
  resp = urllib2.urlopen(urllib2.Request(url, data=json.dumps(req)))
  resp_data = resp.read()
  print 'Received response:\n{}'.format(resp_data)
  resp.close()
  return resp_data
class TensorflowModelServerTest(tf.test.TestCase):
"""This class defines integration test cases for tensorflow_model_server."""
  def __TestSrcDirPath(self, relative_path=''):
    """Returns an absolute path under the bazel TEST_SRCDIR serving tree."""
    return os.path.join(os.environ['TEST_SRCDIR'],
                        'tf_serving/tensorflow_serving', relative_path)
  def __BuildModelConfigFile(self):
    """Write a config file to disk for use in tests.
    Substitutes placeholder for test directory with test directory path
    in the configuration template file and writes it out to another file
    used by the test.
    """
    with open(self._GetGoodModelConfigTemplate(), 'r') as template_file:
      # Replace both model-path placeholders with concrete test paths.
      config = template_file.read().replace('${TEST_HALF_PLUS_TWO_DIR}',
                                            self._GetSavedModelBundlePath())
    config = config.replace('${TEST_HALF_PLUS_THREE_DIR}',
                            self._GetSavedModelHalfPlusThreePath())
    with open(self._GetGoodModelConfigFile(), 'w') as config_file:
      config_file.write(config)
  def setUp(self):
    """Sets up integration test parameters."""
    self.binary_dir = self.__TestSrcDirPath('model_servers')
    self.testdata_dir = self.__TestSrcDirPath('servables/tensorflow/testdata')
    self.temp_dir = tf.test.get_temp_dir()
    # Handle to the spawned tensorflow_model_server process, if any.
    self.server_proc = None
    # Materialize the model config file consumed by config-file tests.
    self.__BuildModelConfigFile()
  def tearDown(self):
    """Deletes created configuration file."""
    os.remove(self._GetGoodModelConfigFile())
  def TerminateProcs(self):
    """Terminate all processes."""
    print 'Terminating all processes...'
    if self.server_proc is not None:
      self.server_proc.terminate()
  def RunServer(self,
                port,
                model_name,
                model_path,
                batching_parameters_file='',
                grpc_channel_arguments='',
                wait_for_server_ready=True,
                rest_api_port=None):
    """Run tensorflow_model_server using test config.

    Starts the server binary as a subprocess serving a single model and
    returns its gRPC address as 'localhost:<port>'.
    """
    print 'Starting test server...'
    command = os.path.join(self.binary_dir, 'tensorflow_model_server')
    command += ' --port=' + str(port)
    command += ' --model_name=' + model_name
    command += ' --model_base_path=' + model_path
    if batching_parameters_file:
      # A batching file implies batching should be switched on.
      command += ' --enable_batching'
      command += ' --batching_parameters_file=' + batching_parameters_file
    if grpc_channel_arguments:
      command += ' --grpc_channel_arguments=' + grpc_channel_arguments
    if rest_api_port:
      command += ' --rest_api_port=' + str(rest_api_port)
      command += ' --rest_api_timeout_in_ms=' + str(HTTP_REST_TIMEOUT_MS)
    print command
    self.server_proc = subprocess.Popen(shlex.split(command))
    print 'Server started'
    if wait_for_server_ready:
      WaitForServerReady(port)
    return 'localhost:' + str(port)
  def RunServerWithModelConfigFile(self,
                                   port,
                                   model_config_file,
                                   pipe=None,
                                   wait_for_server_ready=True):
    """Run tensorflow_model_server using test config.

    Like RunServer, but the served models come from a model config file;
    `pipe` optionally captures the subprocess's stderr.
    """
    print 'Starting test server...'
    command = os.path.join(self.binary_dir, 'tensorflow_model_server')
    command += ' --port=' + str(port)
    command += ' --model_config_file=' + model_config_file
    print command
    self.server_proc = subprocess.Popen(shlex.split(command), stderr=pipe)
    print 'Server started'
    if wait_for_server_ready:
      WaitForServerReady(port)
    return 'localhost:' + str(port)
  def VerifyPredictRequest(self,
                           model_server_address,
                           expected_output,
                           expected_version,
                           model_name='default',
                           specify_output=True,
                           signature_name=
                           signature_constants.
                           DEFAULT_SERVING_SIGNATURE_DEF_KEY):
    """Send PredictionService.Predict request and verify output.

    Args:
      model_server_address: 'host:port' string of the running server.
      expected_output: Expected scalar value of output tensor 'y'.
      expected_version: Expected model version in the response ModelSpec.
      model_name: Model name to put in the request.
      specify_output: If True, explicitly request only output 'y'.
      signature_name: Expected signature name in the response ModelSpec.
    """
    print 'Sending Predict request...'
    # Prepare request: a single float input x = 2.0 with shape [1].
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs['x'].dtype = types_pb2.DT_FLOAT
    request.inputs['x'].float_val.append(2.0)
    dim = request.inputs['x'].tensor_shape.dim.add()
    dim.size = 1
    if specify_output:
      request.output_filter.append('y')
    # Send request over an insecure gRPC (beta API) channel.
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Predict(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response: exactly one float 'y' with the expected value.
    self.assertTrue('y' in result.outputs)
    self.assertIs(types_pb2.DT_FLOAT, result.outputs['y'].dtype)
    self.assertEquals(1, len(result.outputs['y'].float_val))
    self.assertEquals(expected_output, result.outputs['y'].float_val[0])
    self._VerifyModelSpec(result.model_spec, request.model_spec.name,
                          signature_name, expected_version)
def _GetSavedModelBundlePath(self):
"""Returns a path to a model in SavedModel format."""
return os.path.join(os.environ['TEST_SRCDIR'], 'tf_serving/external/org_tensorflow/tensorflow/',
'cc/saved_model/testdata/half_plus_two')
def _GetModelVersion(self, model_path):
"""Returns version of SavedModel/SessionBundle in given path.
This method assumes there is exactly one directory with an 'int' valued
directory name under `model_path`.
Args:
model_path: A string representing path to the SavedModel/SessionBundle.
Returns:
version of SavedModel/SessionBundle in given path.
"""
return int(os.listdir(model_path)[0])
def _GetSavedModelHalfPlusThreePath(self):
"""Returns a path to a half_plus_three model in SavedModel format."""
return os.path.join(self.testdata_dir, 'saved_model_half_plus_three')
def _GetSessionBundlePath(self):
"""Returns a path to a model in SessionBundle format."""
return os.path.join(self.testdata_dir, 'half_plus_two')
def _GetGoodModelConfigTemplate(self):
"""Returns a path to a working configuration file template."""
return os.path.join(self.testdata_dir, 'good_model_config.txt')
def _GetGoodModelConfigFile(self):
"""Returns a path to a working configuration file."""
return os.path.join(self.temp_dir, 'good_model_config.conf')
def _GetBadModelConfigFile(self):
"""Returns a path to a improperly formatted configuration file."""
return os.path.join(self.testdata_dir, 'bad_model_config.txt')
def _GetBatchingParametersFile(self):
"""Returns a path to a batching configuration file."""
return os.path.join(self.testdata_dir, 'batching_config.txt')
def _VerifyModelSpec(self,
actual_model_spec,
exp_model_name,
exp_signature_name,
exp_version):
"""Verifies model_spec matches expected model name, signature, version.
Args:
actual_model_spec: An instance of ModelSpec proto.
exp_model_name: A string that represents expected model name.
exp_signature_name: A string that represents expected signature.
exp_version: An integer that represents expected version.
Returns:
None.
"""
self.assertEquals(actual_model_spec.name, exp_model_name)
self.assertEquals(actual_model_spec.signature_name, exp_signature_name)
self.assertEquals(actual_model_spec.version.value, exp_version)
  def testGetModelStatus(self):
    """Test ModelService.GetModelStatus implementation."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path)
    print 'Sending GetModelStatus request...'
    # Send request over a (non-beta) grpc channel.
    request = get_model_status_pb2.GetModelStatusRequest()
    request.model_spec.name = 'default'
    channel = grpc.insecure_channel(model_server_address)
    stub = model_service_pb2_grpc.ModelServiceStub(channel)
    result = stub.GetModelStatus(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response: one loaded version (123) with no error.
    # NOTE(review): assumes the checked-in export's version dir is 123 --
    # confirm against testdata if this starts failing.
    self.assertEquals(1, len(result.model_version_status))
    self.assertEquals(123, result.model_version_status[0].version)
    # OK error code (0) indicates no error occurred
    self.assertEquals(0, result.model_version_status[0].status.error_code)
  def testClassify(self):
    """Test PredictionService.Classify implementation."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path)
    print 'Sending Classify request...'
    # Prepare request: one tf.Example with feature x = 2.0 against the
    # 'classify_x_to_y' signature.
    request = classification_pb2.ClassificationRequest()
    request.model_spec.name = 'default'
    request.model_spec.signature_name = 'classify_x_to_y'
    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])
    # Send request over an insecure gRPC (beta API) channel.
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Classify(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response: a single class whose score is x/2 + 2 = 3.0.
    self.assertEquals(1, len(result.result.classifications))
    self.assertEquals(1, len(result.result.classifications[0].classes))
    expected_output = 3.0
    self.assertEquals(expected_output,
                      result.result.classifications[0].classes[0].score)
    self._VerifyModelSpec(result.model_spec, request.model_spec.name,
                          request.model_spec.signature_name,
                          self._GetModelVersion(model_path))
  def testRegress(self):
    """Test PredictionService.Regress implementation."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path)
    print 'Sending Regress request...'
    # Prepare request: one tf.Example with feature x = 2.0 against the
    # 'regress_x_to_y' signature.
    request = regression_pb2.RegressionRequest()
    request.model_spec.name = 'default'
    request.model_spec.signature_name = 'regress_x_to_y'
    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])
    # Send request over an insecure gRPC (beta API) channel.
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Regress(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response: a single regression value x/2 + 2 = 3.0.
    self.assertEquals(1, len(result.result.regressions))
    expected_output = 3.0
    self.assertEquals(expected_output, result.result.regressions[0].value)
    self._VerifyModelSpec(result.model_spec, request.model_spec.name,
                          request.model_spec.signature_name,
                          self._GetModelVersion(model_path))
def testMultiInference(self):
"""Test PredictionService.MultiInference implementation."""
model_path = self._GetSavedModelBundlePath()
enable_batching = False
atexit.register(self.TerminateProcs)
model_server_address = self.RunServer(PickUnusedPort(), 'default',
model_path,
enable_batching)
print 'Sending MultiInference request...'
# Prepare request
request = inference_pb2.MultiInferenceRequest()
request.tasks.add().model_spec.name = 'default'
request.tasks[0].model_spec.signature_name = 'regress_x_to_y'
request.tasks[0].method_name = 'tensorflow/serving/regress'
request.tasks.add().model_spec.name = 'default'
request.tasks[1].model_spec.signature_name = 'classify_x_to_y'
request.tasks[1].method_name = 'tensorflow/serving/classify'
example = request.input.example_list.examples.add()
example.features.feature['x'].float_list.value.extend([2.0])
# Send request
host, port = model_server_address.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
result = stub.MultiInference(request, RPC_TIMEOUT) # 5 secs timeout
# Verify response
self.assertEquals(2, len(result.results))
expected_output = 3.0
self.assertEquals(expected_output,
result.results[0].regression_result.regressions[0].value)
self.assertEquals(expected_output, result.results[
1].classification_result.classifications[0].classes[0].score)
for i in xrange(2):
self._VerifyModelSpec(result.results[i].model_spec,
request.tasks[i].model_spec.name,
request.tasks[i].model_spec.signature_name,
self._GetModelVersion(model_path))
  def _TestPredict(self,
                   model_path,
                   batching_parameters_file='',
                   signature_name=
                   signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
    """Helper method to test prediction.

    Args:
      model_path: Path to the model on disk.
      batching_parameters_file: Batching parameters file to use (if left empty,
        batching is not enabled).
      signature_name: Signature name to expect in the PredictResponse.
    """
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path, batching_parameters_file)
    expected_version = self._GetModelVersion(model_path)
    # Exercise both request shapes: with an explicit output filter and
    # without one.
    self.VerifyPredictRequest(model_server_address, expected_output=3.0,
                              expected_version=expected_version,
                              signature_name=signature_name)
    self.VerifyPredictRequest(
        model_server_address, expected_output=3.0, specify_output=False,
        expected_version=expected_version, signature_name=signature_name)
  def testPredictBatching(self):
    """Test PredictionService.Predict implementation with SessionBundle."""
    # Same flow as testPredictSavedModel, but with server-side batching on.
    self._TestPredict(
        self._GetSessionBundlePath(),
        batching_parameters_file=self._GetBatchingParametersFile())
  def testPredictSavedModel(self):
    """Test PredictionService.Predict implementation with SavedModel."""
    self._TestPredict(self._GetSavedModelBundlePath())
  def testPredictUpconvertedSavedModel(self):
    """Test PredictionService.Predict implementation.

    Using a SessionBundle converted to a SavedModel.
    """
    # The server up-converts the legacy SessionBundle export on load.
    self._TestPredict(self._GetSessionBundlePath())
def _TestBadModel(self):
"""Helper method to test against a bad model export."""
atexit.register(self.TerminateProcs)
# Both SessionBundle and SavedModel use the same bad model path, but in the
# case of SavedModel, the export will get up-converted to a SavedModel.
# As the bad model will prevent the server from becoming ready, we set the
# wait_for_server_ready param to False to avoid blocking/timing out.
model_path = os.path.join(self.testdata_dir, 'bad_half_plus_two'),
model_server_address = self.RunServer(PickUnusedPort(), 'default',
model_path,
wait_for_server_ready=False)
with self.assertRaises(face.AbortionError) as error:
self.VerifyPredictRequest(
model_server_address, expected_output=3.0,
expected_version=self._GetModelVersion(model_path),
signature_name='')
self.assertIs(beta_interfaces.StatusCode.FAILED_PRECONDITION,
error.exception.code)
  def _TestBadModelUpconvertedSavedModel(self):
    """Test Predict against a bad upconverted SavedModel model export."""
    # Thin alias kept for readability at call sites; the shared helper
    # covers both export formats.
    self._TestBadModel()
  def testGoodModelConfig(self):
    """Test server configuration from file works with valid configuration."""
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServerWithModelConfigFile(
        PickUnusedPort(), self._GetGoodModelConfigFile())
    # The config file serves two models; verify Predict against each, both
    # with and without an explicit output filter.
    self.VerifyPredictRequest(
        model_server_address, model_name='half_plus_two', expected_output=3.0,
        expected_version=self._GetModelVersion(self._GetSavedModelBundlePath()))
    self.VerifyPredictRequest(
        model_server_address, model_name='half_plus_two',
        expected_output=3.0, specify_output=False,
        expected_version=self._GetModelVersion(self._GetSavedModelBundlePath()))
    self.VerifyPredictRequest(
        model_server_address, model_name='half_plus_three', expected_output=4.0,
        expected_version=self._GetModelVersion(
            self._GetSavedModelHalfPlusThreePath()))
    self.VerifyPredictRequest(
        model_server_address, model_name='half_plus_three', expected_output=4.0,
        specify_output=False,
        expected_version=self._GetModelVersion(
            self._GetSavedModelHalfPlusThreePath()))
  def testBadModelConfig(self):
    """Test server model configuration from file fails for invalid file."""
    atexit.register(self.TerminateProcs)
    # Capture stderr so we can assert on the server's parse-failure message.
    self.RunServerWithModelConfigFile(
        PickUnusedPort(),
        self._GetBadModelConfigFile(),
        pipe=subprocess.PIPE,
        wait_for_server_ready=False)
    error_message = (
        'Invalid protobuf file: \'%s\'') % self._GetBadModelConfigFile()
    self.assertNotEqual(self.server_proc.stderr, None)
    # find() returns -1 when absent, so > -1 means the message was logged.
    self.assertGreater(self.server_proc.stderr.read().find(error_message), -1)
  def testGoodGrpcChannelArgs(self):
    """Test server starts with grpc_channel_arguments specified."""
    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(
        PickUnusedPort(),
        'default',
        self._GetSavedModelBundlePath(),
        grpc_channel_arguments=
        'grpc.max_connection_age_ms=2000,grpc.lb_policy_name=grpclb')
    # NOTE(review): the served model is half_plus_two, but the expected
    # version is read from the half_plus_three path -- presumably both
    # exports carry the same version number; confirm against testdata.
    self.VerifyPredictRequest(
        model_server_address,
        expected_output=3.0,
        specify_output=False,
        expected_version=self._GetModelVersion(
            self._GetSavedModelHalfPlusThreePath()))
  def testClassifyREST(self):
    """Test Classify implementation over REST API."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    rest_api_port = PickUnusedPort()
    model_server_address = self.RunServer(
        PickUnusedPort(), 'default', model_path, rest_api_port=rest_api_port)
    # Prepare request against the REST classify endpoint.
    url = 'http://{}:{}/v1/models/default:classify'.format(
        model_server_address.split(':')[0], rest_api_port)
    json_req = {'signature_name': 'classify_x_to_y', 'examples': [{'x': 2.0}]}
    # Send request
    resp_data = None
    try:
      resp_data = CallREST('Classify', url, json_req)
    except Exception as e:  # pylint: disable=broad-except
      self.fail('Request failed with error: {}'.format(e))
    # Verify response: one example, one (label, score) pair with score 3.0.
    self.assertEquals(json.loads(resp_data), {'results': [[['', 3.0]]]})
  def testRegressREST(self):
    """Test Regress implementation over REST API."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    rest_api_port = PickUnusedPort()
    model_server_address = self.RunServer(
        PickUnusedPort(), 'default', model_path, rest_api_port=rest_api_port)
    # Prepare request against the REST regress endpoint.
    url = 'http://{}:{}/v1/models/default:regress'.format(
        model_server_address.split(':')[0], rest_api_port)
    json_req = {'signature_name': 'regress_x_to_y', 'examples': [{'x': 2.0}]}
    # Send request
    resp_data = None
    try:
      resp_data = CallREST('Regress', url, json_req)
    except Exception as e:  # pylint: disable=broad-except
      self.fail('Request failed with error: {}'.format(e))
    # Verify response: x/2 + 2 = 3.0 for the single example.
    self.assertEquals(json.loads(resp_data), {'results': [3.0]})
  def testPredictREST(self):
    """Test Predict implementation over REST API."""
    model_path = self._GetSavedModelBundlePath()
    atexit.register(self.TerminateProcs)
    rest_api_port = PickUnusedPort()
    model_server_address = self.RunServer(
        PickUnusedPort(), 'default', model_path, rest_api_port=rest_api_port)
    # Prepare request against the REST predict endpoint with a batch of three.
    url = 'http://{}:{}/v1/models/default:predict'.format(
        model_server_address.split(':')[0], rest_api_port)
    json_req = {'instances': [2.0, 3.0, 4.0]}
    # Send request
    resp_data = None
    try:
      resp_data = CallREST('Predict', url, json_req)
    except Exception as e:  # pylint: disable=broad-except
      self.fail('Request failed with error: {}'.format(e))
    # Verify response: each prediction is x/2 + 2.
    self.assertEquals(json.loads(resp_data), {'predictions': [3.0, 3.5, 4.0]})
# Run the integration tests through TensorFlow's test runner.
if __name__ == '__main__':
  tf.test.main()
| |
"""This module defines TemplateExporter, a highly configurable converter
that uses Jinja2 to export notebook files into different formats.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, absolute_import
# Stdlib imports
import os
# other libs/dependencies are imported at runtime
# to move ImportErrors to runtime when the requirement is actually needed
# IPython imports
from IPython.utils.traitlets import MetaHasTraits, Unicode, List, Dict, Any
from IPython.utils.importstring import import_item
from IPython.utils import py3compat, text
from IPython.nbformat.current import docstring_nbformat_mod
from IPython.nbconvert import filters
from .exporter import Exporter
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#Jinja2 extensions to load.
JINJA_EXTENSIONS = ['jinja2.ext.loopcontrols']

# Filters registered into every Jinja environment created by
# TemplateExporter._init_filters(). Values are either plain callables or
# configurable filter classes (constructed lazily in register_filter).
default_filters = {
        'indent': text.indent,
        'markdown2html': filters.markdown2html,
        'ansi2html': filters.ansi2html,
        'filter_data_type': filters.DataTypeFilter,
        'get_lines': filters.get_lines,
        'highlight2html': filters.Highlight2HTML,
        'highlight2latex': filters.Highlight2Latex,
        'ipython2python': filters.ipython2python,
        'posix_path': filters.posix_path,
        'markdown2latex': filters.markdown2latex,
        'markdown2rst': filters.markdown2rst,
        'comment_lines': filters.comment_lines,
        'strip_ansi': filters.strip_ansi,
        'strip_dollars': filters.strip_dollars,
        'strip_files_prefix': filters.strip_files_prefix,
        'html2text' : filters.html2text,
        'add_anchor': filters.add_anchor,
        'ansi2latex': filters.ansi2latex,
        'wrap_text': filters.wrap_text,
        'escape_latex': filters.escape_latex,
        'citation2latex': filters.citation2latex,
        'path2url': filters.path2url,
        'add_prompts': filters.add_prompts,
        'ascii_only': filters.ascii_only,
}
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
class TemplateExporter(Exporter):
    """
    Exports notebooks into other file formats. Uses Jinja 2 templating engine
    to output new formats.  Inherit from this class if you are creating a new
    template type along with new filters/preprocessors.  If the filters/
    preprocessors provided by default suffice, there is no need to inherit from
    this class.  Instead, override the template_file and file_extension
    traits via a config file.

    {filters}
    """

    # finish the docstring
    __doc__ = __doc__.format(filters = '- '+'\n - '.join(default_filters.keys()))

    # Trait change handlers below keep `template` in sync with the
    # configured file/path; the actual Jinja template object is loaded
    # lazily by _load_template().
    template_file = Unicode(u'default',
            config=True,
            help="Name of the template file to use")
    def _template_file_changed(self, name, old, new):
        # 'default' is a placeholder resolved to the subclass-provided
        # default_template; any other value is taken literally.
        if new == 'default':
            self.template_file = self.default_template
        else:
            self.template_file = new
        # Invalidate the cached template so the next load re-resolves it.
        self.template = None
        self._load_template()

    # Per-subclass fallback used when template_file is 'default'.
    default_template = Unicode(u'')
    # Cached jinja2.Template (or None until loaded).
    template = Any()
    # jinja2.Environment; created in _init_environment().
    environment = Any()

    template_path = List(['.'], config=True)
    def _template_path_changed(self, name, old, new):
        self._load_template()

    default_template_path = Unicode(
        os.path.join("..", "templates"),
        help="Path where the template files are located.")

    template_skeleton_path = Unicode(
        os.path.join("..", "templates", "skeleton"),
        help="Path where the template skeleton files are located.")

    #Jinja block definitions
    # Empty string means "keep Jinja's default delimiter"; subclasses (e.g.
    # LaTeX) override these to avoid clashes with the output syntax.
    jinja_comment_block_start = Unicode("", config=True)
    jinja_comment_block_end = Unicode("", config=True)
    jinja_variable_block_start = Unicode("", config=True)
    jinja_variable_block_end = Unicode("", config=True)
    jinja_logic_block_start = Unicode("", config=True)
    jinja_logic_block_end = Unicode("", config=True)

    #Extension that the template files use.
    template_extension = Unicode(".tpl", config=True)

    filters = Dict(config=True,
        help="""Dictionary of filters, by name and namespace, to add to the Jinja
        environment.""")

    raw_mimetypes = List(config=True,
        help="""formats of raw cells to be included in this Exporter's output."""
    )
    def _raw_mimetypes_default(self):
        return [self.output_mimetype, '']


    def __init__(self, config=None, extra_loaders=None, **kw):
        """
        Public constructor

        Parameters
        ----------
        config : config
            User configuration instance.
        extra_loaders : list[of Jinja Loaders]
            ordered list of Jinja loader to find templates. Will be tried in order
            before the default FileSystem ones.
        template : str (optional, kw arg)
            Template to use when exporting.
        """
        super(TemplateExporter, self).__init__(config=config, **kw)

        #Init
        # Order matters: the template name must be resolved before the
        # environment exists, and filters need the environment.
        self._init_template()
        self._init_environment(extra_loaders=extra_loaders)
        self._init_filters()


    def _load_template(self):
        """Load the Jinja template object from the template file

        This is a no-op if the template attribute is already defined,
        or the Jinja environment is not setup yet.

        This is triggered by various trait changes that would change the template.
        """
        from jinja2 import TemplateNotFound

        if self.template is not None:
            return
        # called too early, do nothing
        if self.environment is None:
            return
        # Try different template names during conversion.  First try to load the
        # template by name with extension added, then try loading the template
        # as if the name is explicitly specified, then try the name as a
        # 'flavor', and lastly just try to load the template by module name.
        try_names = []
        if self.template_file:
            try_names.extend([
                self.template_file + self.template_extension,
                self.template_file,
            ])
        for try_name in try_names:
            self.log.debug("Attempting to load template %s", try_name)
            try:
                self.template = self.environment.get_template(try_name)
            except (TemplateNotFound, IOError):
                # Expected miss: fall through to the next candidate name.
                pass
            except Exception as e:
                self.log.warn("Unexpected exception loading template: %s", try_name, exc_info=True)
            else:
                self.log.info("Loaded template %s", try_name)
                break

    @docstring_nbformat_mod
    def from_notebook_node(self, nb, resources=None, **kw):
        """
        Convert a notebook from a notebook node instance.

        Parameters
        ----------
        nb : :class:`~{nbformat_mod}.nbbase.NotebookNode`
          Notebook node
        resources : dict
          Additional resources that can be accessed read/write by
          preprocessors and filters.
        """
        nb_copy, resources = super(TemplateExporter, self).from_notebook_node(nb, resources, **kw)
        resources.setdefault('raw_mimetypes', self.raw_mimetypes)

        self._load_template()

        if self.template is not None:
            output = self.template.render(nb=nb_copy, resources=resources)
        else:
            raise IOError('template file "%s" could not be found' % self.template_file)
        return output, resources


    def register_filter(self, name, jinja_filter):
        """
        Register a filter.
        A filter is a function that accepts and acts on one string.
        The filters are accesible within the Jinja templating engine.

        Parameters
        ----------
        name : str
            name to give the filter in the Jinja engine
        filter : filter
        """
        if jinja_filter is None:
            raise TypeError('filter')
        isclass = isinstance(jinja_filter, type)
        constructed = not isclass

        #Handle filter's registration based on it's type
        if constructed and isinstance(jinja_filter, py3compat.string_types):
            #filter is a string, import the namespace and recursively call
            #this register_filter method
            filter_cls = import_item(jinja_filter)
            return self.register_filter(name, filter_cls)

        if constructed and hasattr(jinja_filter, '__call__'):
            #filter is a function, no need to construct it.
            self.environment.filters[name] = jinja_filter
            return jinja_filter

        elif isclass and isinstance(jinja_filter, MetaHasTraits):
            #filter is configurable.  Make sure to pass in new default for
            #the enabled flag if one was specified.
            filter_instance = jinja_filter(parent=self)
            self.register_filter(name, filter_instance )

        elif isclass:
            #filter is not configurable, construct it
            filter_instance = jinja_filter()
            self.register_filter(name, filter_instance)

        else:
            #filter is an instance of something without a __call__
            #attribute.
            raise TypeError('filter')


    def _init_template(self):
        """
        Make sure a template name is specified.  If one isn't specified, try to
        build one from the information we know.
        """
        # Firing the change handler resolves 'default' and triggers a load.
        self._template_file_changed('template_file', self.template_file, self.template_file)


    def _init_environment(self, extra_loaders=None):
        """
        Create the Jinja templating environment.
        """
        from jinja2 import Environment, ChoiceLoader, FileSystemLoader

        here = os.path.dirname(os.path.realpath(__file__))
        loaders = []
        if extra_loaders:
            loaders.extend(extra_loaders)

        # NOTE(review): extend() mutates the template_path trait list in
        # place (and fires _template_path_changed); presumably intentional
        # here, but worth confirming against traitlets copy semantics.
        paths = self.template_path
        paths.extend([os.path.join(here, self.default_template_path),
                      os.path.join(here, self.template_skeleton_path)])
        loaders.append(FileSystemLoader(paths))

        self.environment = Environment(
            loader= ChoiceLoader(loaders),
            extensions=JINJA_EXTENSIONS
            )

        #Set special Jinja2 syntax that will not conflict with latex.
        if self.jinja_logic_block_start:
            self.environment.block_start_string = self.jinja_logic_block_start
        if self.jinja_logic_block_end:
            self.environment.block_end_string = self.jinja_logic_block_end
        if self.jinja_variable_block_start:
            self.environment.variable_start_string = self.jinja_variable_block_start
        if self.jinja_variable_block_end:
            self.environment.variable_end_string = self.jinja_variable_block_end
        if self.jinja_comment_block_start:
            self.environment.comment_start_string = self.jinja_comment_block_start
        if self.jinja_comment_block_end:
            self.environment.comment_end_string = self.jinja_comment_block_end


    def _init_filters(self):
        """
        Register all of the filters required for the exporter.
        """
        #Add default filters to the Jinja2 environment
        for key, value in default_filters.items():
            self.register_filter(key, value)

        #Load user filters.  Overwrite existing filters if need be.
        if self.filters:
            for key, user_filter in self.filters.items():
                self.register_filter(key, user_filter)
| |
import json
import unittest
import shutil
import tempfile
import os
import random
import pandas as pd
import pytest
import numpy as np
import ray
from ray.tune import (run, Trainable, sample_from, Analysis,
ExperimentAnalysis, grid_search)
from ray.tune.utils.mock import MyTrainableClass
from ray.tune.utils.serialization import TuneFunctionEncoder
class ExperimentAnalysisInMemorySuite(unittest.TestCase):
    """Tests ExperimentAnalysis construction and trial-comparison metrics."""

    @classmethod
    def setUpClass(cls):
        ray.init(local_mode=False, num_cpus=1)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def setUp(self):
        class MockTrainable(Trainable):
            # Deterministic per-trial score sequences; trial id selects the
            # row, training iteration indexes into it.
            scores_dict = {
                0: [5, 4, 4, 4, 4, 4, 4, 4, 0],
                1: [4, 3, 3, 3, 3, 3, 3, 3, 1],
                2: [2, 1, 1, 1, 1, 1, 1, 1, 8],
                3: [9, 7, 7, 7, 7, 7, 7, 7, 6],
                4: [7, 5, 5, 5, 5, 5, 5, 5, 3]
            }

            def setup(self, config):
                self.id = config["id"]
                self.idx = 0

            def step(self):
                # Emit the next canned score each training iteration.
                val = self.scores_dict[self.id][self.idx]
                self.idx += 1
                return {"score": val}

            def save_checkpoint(self, checkpoint_dir):
                pass

            def load_checkpoint(self, checkpoint_path):
                pass

        self.MockTrainable = MockTrainable
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def testInitLegacy(self):
        """Should still work if checkpoints are not json strings"""
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        # Legacy format: checkpoint entries are raw dicts, not JSON strings.
        checkpoint_data = {
            "checkpoints": [{
                "trainable_name": "MockTrainable",
                "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
            }]
        }
        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        # Trials cannot be reconstructed from the mock logdir.
        self.assertTrue(experiment_analysis.trials is None)

    def testInit(self):
        experiment_checkpoint_path = os.path.join(self.test_dir,
                                                  "experiment_state.json")
        # Current format: each checkpoint entry is itself a JSON string
        # encoded with TuneFunctionEncoder.
        checkpoint_data = {
            "checkpoints": [
                json.dumps(
                    {
                        "trainable_name": "MockTrainable",
                        "logdir": "/mock/test/MockTrainable_0_id=3_2020-07-12"
                    },
                    cls=TuneFunctionEncoder)
            ]
        }
        with open(experiment_checkpoint_path, "w") as f:
            f.write(json.dumps(checkpoint_data))

        experiment_analysis = ExperimentAnalysis(experiment_checkpoint_path)
        self.assertEqual(len(experiment_analysis._checkpoints), 1)
        self.assertTrue(experiment_analysis.trials is None)

    def testInitException(self):
        # Nonexistent checkpoint file must raise rather than silently
        # produce an empty analysis.
        experiment_checkpoint_path = os.path.join(self.test_dir, "mock.json")
        with pytest.raises(ValueError):
            ExperimentAnalysis(experiment_checkpoint_path)

    def testCompareTrials(self):
        scores = np.asarray(list(self.MockTrainable.scores_dict.values()))
        # Column-major flatten interleaves trials per iteration; dropping the
        # first 5 entries removes iteration 0 of all 5 trials, so
        # scores_last covers iterations >= 1 only.
        scores_all = scores.flatten("F")
        scores_last = scores_all[5:]

        ea = run(
            self.MockTrainable,
            name="analysis_exp",
            local_dir=self.test_dir,
            stop={"training_iteration": len(scores[0])},
            num_samples=1,
            config={"id": grid_search(list(range(5)))})

        # Query every supported scope/mode combination once.
        max_all = ea.get_best_trial("score", "max",
                                    "all").metric_analysis["score"]["max"]
        min_all = ea.get_best_trial("score", "min",
                                    "all").metric_analysis["score"]["min"]
        max_last = ea.get_best_trial("score", "max",
                                     "last").metric_analysis["score"]["last"]
        max_avg = ea.get_best_trial("score", "max",
                                    "avg").metric_analysis["score"]["avg"]
        min_avg = ea.get_best_trial("score", "min",
                                    "avg").metric_analysis["score"]["avg"]
        max_avg_5 = ea.get_best_trial(
            "score", "max",
            "last-5-avg").metric_analysis["score"]["last-5-avg"]
        min_avg_5 = ea.get_best_trial(
            "score", "min",
            "last-5-avg").metric_analysis["score"]["last-5-avg"]
        max_avg_10 = ea.get_best_trial(
            "score", "max",
            "last-10-avg").metric_analysis["score"]["last-10-avg"]
        min_avg_10 = ea.get_best_trial(
            "score", "min",
            "last-10-avg").metric_analysis["score"]["last-10-avg"]

        # Cross-check Tune's aggregates against direct numpy computation.
        self.assertEqual(max_all, max(scores_all))
        self.assertEqual(min_all, min(scores_all))
        self.assertEqual(max_last, max(scores_last))
        self.assertNotEqual(max_last, max(scores_all))
        self.assertAlmostEqual(max_avg, max(np.mean(scores, axis=1)))
        self.assertAlmostEqual(min_avg, min(np.mean(scores, axis=1)))

        self.assertAlmostEqual(max_avg_5, max(np.mean(scores[:, -5:], axis=1)))
        self.assertAlmostEqual(min_avg_5, min(np.mean(scores[:, -5:], axis=1)))

        self.assertAlmostEqual(max_avg_10, max(
            np.mean(scores[:, -10:], axis=1)))
        self.assertAlmostEqual(min_avg_10, min(
            np.mean(scores[:, -10:], axis=1)))
class AnalysisSuite(unittest.TestCase):
    """Tests Analysis over on-disk results from two completed experiments."""

    @classmethod
    def setUpClass(cls):
        ray.init(local_mode=True, include_dashboard=False)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def setUp(self):
        self.test_dir = tempfile.mkdtemp()
        self.num_samples = 10
        self.metric = "episode_reward_mean"
        # Two experiments so Analysis has to aggregate across subdirectories.
        self.run_test_exp(test_name="analysis_exp1")
        self.run_test_exp(test_name="analysis_exp2")

    def run_test_exp(self, test_name=None):
        # Runs a short experiment with randomized width/height config values.
        run(MyTrainableClass,
            name=test_name,
            local_dir=self.test_dir,
            stop={"training_iteration": 1},
            num_samples=self.num_samples,
            config={
                "width": sample_from(
                    lambda spec: 10 + int(90 * random.random())),
                "height": sample_from(lambda spec: int(100 * random.random())),
            })

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def testDataframe(self):
        analysis = Analysis(self.test_dir)
        df = analysis.dataframe(self.metric, mode="max")
        self.assertTrue(isinstance(df, pd.DataFrame))
        # One row per trial across both experiments.
        self.assertEqual(df.shape[0], self.num_samples * 2)

    def testBestLogdir(self):
        analysis = Analysis(self.test_dir)
        logdir = analysis.get_best_logdir(self.metric, mode="max")
        self.assertTrue(logdir.startswith(self.test_dir))
        logdir2 = analysis.get_best_logdir(self.metric, mode="min")
        self.assertTrue(logdir2.startswith(self.test_dir))
        # Best and worst trials should differ with randomized configs.
        self.assertNotEqual(logdir, logdir2)

    def testBestConfigIsLogdir(self):
        analysis = Analysis(self.test_dir)
        for metric, mode in [(self.metric, "min"), (self.metric, "max")]:
            logdir = analysis.get_best_logdir(metric, mode=mode)
            best_config = analysis.get_best_config(metric, mode=mode)
            # get_best_config must agree with the config stored for the
            # best logdir.
            self.assertEqual(analysis.get_all_configs()[logdir], best_config)
# Delegate to pytest so this file runs the same way as the rest of the suite.
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| |
"""
Telnet OOB (Out of band communication)
OOB protocols allow for asynchronous communication between Evennia and
compliant telnet clients. The "text" type of send command will always
be sent "in-band", appearing in the client's main text output. OOB
commands, by contrast, can have many forms and it is up to the client
how and if they are handled. Examples of OOB instructions could be to
instruct the client to play sounds or to update a graphical health
bar.
> Note that in Evennia's Web client, all send commands are "OOB
commands", (including the "text" one), there is no equivalence to
MSDP/GMCP for the webclient since it doesn't need it.
This implements the following telnet OOB communication protocols:
- MSDP (Mud Server Data Protocol), as per
http://tintin.sourceforge.net/msdp/
- GMCP (Generic Mud Communication Protocol) as per
http://www.ironrealms.com/rapture/manual/files/FeatGMCP-txt.html#Generic_MUD_Communication_Protocol%28GMCP%29
Following the lead of KaVir's protocol snippet, we first check whether the
client supports MSDP and, if not, fall back to GMCP with an MSDP
header where applicable.
"""
from builtins import object
import re
import json
from evennia.utils.utils import to_str
# MSDP-relevant telnet cmd/opt-codes
# (one-character strings; the MSDP telnet option code is 69)
MSDP = chr(69)
MSDP_VAR = chr(1)  # ^A
MSDP_VAL = chr(2)  # ^B
MSDP_TABLE_OPEN = chr(3)  # ^C
MSDP_TABLE_CLOSE = chr(4)  # ^D
MSDP_ARRAY_OPEN = chr(5)  # ^E
MSDP_ARRAY_CLOSE = chr(6)  # ^F

# GMCP telnet option code (201)
GMCP = chr(201)

# General Telnet codes: Interpret-As-Command, Subnegotiation Begin / End
IAC = chr(255)
SB = chr(250)
SE = chr(240)
def force_str(inp):
    """Shorthand: coerce `inp` into a string via Evennia's `to_str`."""
    forced = to_str(inp, force_string=True)
    return forced
# pre-compiled regexes
# returns 2-tuple (cmdname, table body):
# matches VAR name VAL TABLE_OPEN ... TABLE_CLOSE
msdp_regex_table = re.compile(r"%s\s*(\w*?)\s*%s\s*%s(.*?)%s"
                              % (MSDP_VAR, MSDP_VAL,
                                 MSDP_TABLE_OPEN,
                                 MSDP_TABLE_CLOSE))
# returns 2-tuple (cmdname, array body):
# matches VAR name VAL ARRAY_OPEN ... ARRAY_CLOSE
msdp_regex_array = re.compile(r"%s\s*(\w*?)\s*%s\s*%s(.*?)%s"
                              % (MSDP_VAR, MSDP_VAL,
                                 MSDP_ARRAY_OPEN,
                                 MSDP_ARRAY_CLOSE))
# splitters on the raw VAR / VAL separator characters
msdp_regex_var = re.compile(r"%s" % MSDP_VAR)
msdp_regex_val = re.compile(r"%s" % MSDP_VAL)

# Maps Evennia OOB command names to the GMCP command names sent to clients;
# commands not listed here are sent out as "Custom.Cmd" (see TelnetOOB.data_out).
EVENNIA_TO_GMCP = {"client_options": "Core.Supports.Get",
                   "get_inputfuncs": "Core.Commands.Get",
                   "get_value": "Char.Value.Get",
                   "repeat": "Char.Repeat.Update",
                   "monitor": "Char.Monitor.Update"}
# MSDP/GMCP communication handler
class TelnetOOB(object):
    """
    Implements the MSDP and GMCP protocols on top of an active telnet
    Protocol instance.

    Note: dict iteration below uses ``.items()`` rather than the
    Python-2-only ``.iteritems()`` so the module works under both
    Python 2 and 3 (the file already targets py2/py3 compatibility via
    ``from builtins import object``).
    """

    def __init__(self, protocol):
        """
        Initiates by storing the protocol on itself and trying to
        determine if the client supports MSDP.

        Args:
            protocol (Protocol): The active protocol.

        """
        self.protocol = protocol
        self.protocol.protocol_flags['OOB'] = False
        self.MSDP = False
        self.GMCP = False
        # ask for the available protocols and assign decoders
        # (note that handshake_done() will be called twice!)
        self.protocol.negotiationMap[MSDP] = self.decode_msdp
        self.protocol.negotiationMap[GMCP] = self.decode_gmcp
        self.protocol.will(MSDP).addCallbacks(self.do_msdp, self.no_msdp)
        self.protocol.will(GMCP).addCallbacks(self.do_gmcp, self.no_gmcp)
        self.oob_reported = {}

    def no_msdp(self, option):
        """
        Client reports No msdp supported or wanted.

        Args:
            option (Option): Not used.

        """
        # no msdp; the GMCP negotiation may still succeed separately
        self.protocol.handshake_done()

    def do_msdp(self, option):
        """
        Client reports that it supports msdp.

        Args:
            option (Option): Not used.

        """
        self.MSDP = True
        self.protocol.protocol_flags['OOB'] = True
        self.protocol.handshake_done()

    def no_gmcp(self, option):
        """
        If this is reached, it means neither MSDP nor GMCP is
        supported.

        Args:
            option (Option): Not used.

        """
        self.protocol.handshake_done()

    def do_gmcp(self, option):
        """
        Called when client confirms that it can do MSDP or GMCP.

        Args:
            option (Option): Not used.

        """
        self.GMCP = True
        self.protocol.protocol_flags['OOB'] = True
        self.protocol.handshake_done()

    # encoders

    def encode_msdp(self, cmdname, *args, **kwargs):
        """
        Encode into a valid MSDP command.

        Args:
            cmdname (str): Name of send instruction.
            args, kwargs (any): Arguments to OOB command.

        Notes:
            The output of this encoding will be
            MSDP structures on these forms:

            [cmdname, [], {}]          -> VAR cmdname VAL ""
            [cmdname, [arg], {}]       -> VAR cmdname VAL arg
            [cmdname, [args],{}]       -> VAR cmdname VAL ARRAYOPEN VAL arg VAL arg ... ARRAYCLOSE
            [cmdname, [], {kwargs}]    -> VAR cmdname VAL TABLEOPEN VAR key VAL val ... TABLECLOSE
            [cmdname, [args], {kwargs}] -> VAR cmdname VAL ARRAYOPEN VAL arg VAL arg ... ARRAYCLOSE
                                           VAR cmdname VAL TABLEOPEN VAR key VAL val ... TABLECLOSE

            Further nesting is not supported, so if an array argument
            consists of an array (for example), that array will be
            json-converted to a string.

        """
        msdp_cmdname = "{msdp_var}{msdp_cmdname}{msdp_val}".format(
            msdp_var=MSDP_VAR, msdp_cmdname=cmdname, msdp_val=MSDP_VAL)

        if not (args or kwargs):
            return msdp_cmdname

        # print("encode_msdp in:", cmdname, args, kwargs)  # DEBUG

        msdp_args = ''
        if args:
            msdp_args = msdp_cmdname
            if len(args) == 1:
                # single argument passes through as-is (assumed to already
                # be a string); multiple args are json-encoded individually
                msdp_args += args[0]
            else:
                msdp_args += "{msdp_array_open}" \
                             "{msdp_args}" \
                             "{msdp_array_close}".format(
                                 msdp_array_open=MSDP_ARRAY_OPEN,
                                 msdp_array_close=MSDP_ARRAY_CLOSE,
                                 msdp_args="".join("%s%s"
                                                   % (MSDP_VAL, json.dumps(val))
                                                   for val in args))

        msdp_kwargs = ""
        if kwargs:
            msdp_kwargs = msdp_cmdname
            # .items() (not py2-only .iteritems()) for py2/py3 compatibility
            msdp_kwargs += "{msdp_table_open}" \
                           "{msdp_kwargs}" \
                           "{msdp_table_close}".format(
                               msdp_table_open=MSDP_TABLE_OPEN,
                               msdp_table_close=MSDP_TABLE_CLOSE,
                               msdp_kwargs="".join("%s%s%s%s"
                                                   % (MSDP_VAR, key, MSDP_VAL,
                                                      json.dumps(val))
                                                   for key, val in kwargs.items()))

        msdp_string = msdp_args + msdp_kwargs
        # print("msdp_string:", msdp_string)  # DEBUG
        return msdp_string

    def encode_gmcp(self, cmdname, *args, **kwargs):
        """
        Encode into GMCP messages.

        Args:
            cmdname (str): GMCP OOB command name.
            args, kwargs (any): Arguments to OOB command.

        Notes:
            GMCP messages will be outgoing on the following
            form (the non-JSON cmdname at the start is what
            IRE games use, supposedly, and what clients appear
            to have adopted):

            [cmdname, [], {}]          -> cmdname
            [cmdname, [arg], {}]       -> cmdname arg
            [cmdname, [args],{}]       -> cmdname [args]
            [cmdname, [], {kwargs}]    -> cmdname {kwargs}
            [cmdname, [args, {kwargs}] -> cmdname [[args],{kwargs}]

        """
        if not (args or kwargs):
            gmcp_string = cmdname
        elif args:
            if len(args) == 1:
                # unwrap a single argument so it serializes as a scalar
                args = args[0]
            if kwargs:
                gmcp_string = "%s %s" % (cmdname, json.dumps([args, kwargs]))
            else:
                gmcp_string = "%s %s" % (cmdname, json.dumps(args))
        else:  # only kwargs
            gmcp_string = "%s %s" % (cmdname, json.dumps(kwargs))
        # print("gmcp string", gmcp_string)  # DEBUG
        return gmcp_string

    def decode_msdp(self, data):
        """
        Decodes incoming MSDP data.

        Args:
            data (str or list): MSDP data.

        Notes:
            Clients should always send MSDP data on
            one of the following forms:

            cmdname ''                 -> [cmdname, [], {}]
            cmdname val                -> [cmdname, [val], {}]
            cmdname array              -> [cmdname, [array], {}]
            cmdname table              -> [cmdname, [], {table}]
            cmdname array cmdname table -> [cmdname, [array], {table}]

            Observe that all MSDP_VARS are used to identify cmdnames,
            so if there are multiple arrays with the same cmdname
            given, they will be merged into one argument array, same
            for tables. Different MSDP_VARS (outside tables) will be
            identified as separate cmdnames.

        """
        if hasattr(data, "__iter__"):
            data = "".join(data)
        # print("decode_msdp in:", data)  # DEBUG
        tables = {}
        arrays = {}
        variables = {}
        # decode tables
        for key, table in msdp_regex_table.findall(data):
            tables[key] = {} if key not in tables else tables[key]
            for varval in msdp_regex_var.split(table)[1:]:
                var, val = msdp_regex_val.split(varval, 1)
                if var:
                    tables[key][var] = val
        # decode arrays from all that was not a table
        data_no_tables = msdp_regex_table.sub("", data)
        for key, array in msdp_regex_array.findall(data_no_tables):
            arrays[key] = [] if key not in arrays else arrays[key]
            parts = msdp_regex_val.split(array)
            if len(parts) == 2:
                arrays[key].append(parts[1])
            elif len(parts) > 1:
                arrays[key].extend(parts[1:])
        # decode remainders from all that were not tables or arrays
        data_no_tables_or_arrays = msdp_regex_array.sub("", data_no_tables)
        for varval in msdp_regex_var.split(data_no_tables_or_arrays):
            # get remaining varvals after cleaning away tables/arrays. If matching
            # an existing key in arrays, it will be added as an argument to that command,
            # otherwise it will be treated as a command without argument.
            parts = msdp_regex_val.split(varval)
            if len(parts) == 2:
                variables[parts[0]] = parts[1]
            elif len(parts) > 1:
                variables[parts[0]] = parts[1:]

        cmds = {}
        # merge matching table/array/variables together
        # (.items() everywhere below: py2/py3-compatible dict iteration)
        for key, table in tables.items():
            args, kwargs = [], table
            if key in arrays:
                args.extend(arrays.pop(key))
            if key in variables:
                args.append(variables.pop(key))
            cmds[key] = [args, kwargs]

        for key, arr in arrays.items():
            args, kwargs = arr, {}
            if key in variables:
                args.append(variables.pop(key))
            cmds[key] = [args, kwargs]

        for key, var in variables.items():
            cmds[key] = [[var], {}]
        # print("msdp data in:", cmds)  # DEBUG
        self.protocol.data_in(**cmds)

    def decode_gmcp(self, data):
        """
        Decodes incoming GMCP data on the form 'varname <structure>'.

        Args:
            data (str or list): GMCP data.

        Notes:
            Clients send data on the form "Module.Submodule.Cmdname <structure>".
            We assume the structure is valid JSON.

            The following is parsed into Evennia's formal structure:

            Core.Name                          -> [name, [], {}]
            Core.Name string                   -> [name, [string], {}]
            Core.Name [arg, arg,...]           -> [name, [args], {}]
            Core.Name {key:arg, key:arg, ...}  -> [name, [], {kwargs}]
            Core.Name [[args], {kwargs}]       -> [name, [args], {kwargs}]

        """
        if hasattr(data, "__iter__"):
            data = "".join(data)
        # print("decode_gmcp in:", data)  # DEBUG
        if data:
            try:
                cmdname, structure = data.split(None, 1)
            except ValueError:
                cmdname, structure = data, ""
            cmdname = cmdname.replace(".", "_")
            try:
                structure = json.loads(structure)
            except ValueError:
                # maybe the structure is not json-serialized at all
                pass
            args, kwargs = [], {}
            if hasattr(structure, "__iter__"):
                if isinstance(structure, dict):
                    # .items() for py2/py3 compatibility
                    kwargs = {key: value for key, value in structure.items() if key}
                else:
                    args = list(structure)
            else:
                args = (structure,)
            if cmdname.lower().startswith("core_"):
                # if Core.cmdname, then use cmdname
                cmdname = cmdname[5:]
            self.protocol.data_in(**{cmdname.lower(): [args, kwargs]})

    # access methods

    def data_out(self, cmdname, *args, **kwargs):
        """
        Return a MSDP- or GMCP-valid subnegotiation across the protocol.

        Args:
            cmdname (str): OOB-command name.
            args, kwargs (any): Arguments to OOB command.

        """
        kwargs.pop("options", None)
        if self.MSDP:
            msdp_cmdname = cmdname
            encoded_oob = self.encode_msdp(msdp_cmdname, *args, **kwargs)
            self.protocol._write(IAC + SB + MSDP + encoded_oob + IAC + SE)
        if self.GMCP:
            if cmdname in EVENNIA_TO_GMCP:
                gmcp_cmdname = EVENNIA_TO_GMCP[cmdname]
            else:
                # no known GMCP equivalent; send as a custom command
                gmcp_cmdname = "Custom.Cmd"
            encoded_oob = self.encode_gmcp(gmcp_cmdname, *args, **kwargs)
            self.protocol._write(IAC + SB + GMCP + encoded_oob + IAC + SE)
| |
from datetime import timedelta
from django.test import TestCase
from django.utils.timezone import now
from pretix.base.models import (
Event, Item, ItemVariation, Organizer, Property, PropertyValue,
)
# Do NOT use relative imports here
from pretix.plugins.timerestriction import signals
from pretix.plugins.timerestriction.models import TimeRestriction
class TimeRestrictionTest(TestCase):
    """
    Exercises the various aspects of the time restriction plugin's
    availability signal handler.
    """

    @classmethod
    def setUpTestData(cls):
        organizer = Organizer.objects.create(name='Dummy', slug='dummy')
        cls.event = Event.objects.create(
            organizer=organizer, name='Dummy', slug='dummy',
            date_from=now(),
        )
        cls.item = Item.objects.create(event=cls.event, name='Dummy', default_price=14)
        cls.property = Property.objects.create(event=cls.event, name='Size')
        cls.value1 = PropertyValue.objects.create(prop=cls.property, value='S')
        cls.value2 = PropertyValue.objects.create(prop=cls.property, value='M')
        cls.value3 = PropertyValue.objects.create(prop=cls.property, value='L')
        cls.variation1 = ItemVariation.objects.create(item=cls.item)
        cls.variation1.values.add(cls.value1)
        cls.variation2 = ItemVariation.objects.create(item=cls.item)
        cls.variation2.values.add(cls.value2)
        cls.variation3 = ItemVariation.objects.create(item=cls.item)
        cls.variation3.values.add(cls.value3)

    def _query_availability(self, sender):
        """Invoke the plugin's availability handler for self.item."""
        return signals.availability_handler(
            sender, item=self.item,
            variations=self.item.get_all_variations(),
            context=None, cache=self.event.get_cache()
        )

    def _restrict(self, days_from, days_to, price, variations=()):
        """Create a TimeRestriction on self.item spanning now+days_from .. now+days_to."""
        restriction = TimeRestriction.objects.create(
            timeframe_from=now() + timedelta(days=days_from),
            timeframe_to=now() + timedelta(days=days_to),
            event=self.event,
            price=price
        )
        restriction.item = self.item
        restriction.save()
        for variation in variations:
            restriction.variations.add(variation)
        return restriction

    def test_nothing(self):
        result = self._query_availability(None)
        self.assertEqual(len(result), 1)
        self.assertTrue('available' not in result[0] or result[0]['available'] is True)

    def test_simple_case_available(self):
        self._restrict(-3, 3, 12)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertTrue(result[0]['available'])
        self.assertEqual(result[0]['price'], 12)

    def test_cached_result(self):
        self._restrict(-3, 3, 12)
        # query twice: the second call should hit the cache with the same result
        for _ in range(2):
            result = self._query_availability(self.event)
            self.assertEqual(len(result), 1)
            self.assertIn('available', result[0])
            self.assertTrue(result[0]['available'])
            self.assertEqual(result[0]['price'], 12)

    def test_simple_case_unavailable(self):
        self._restrict(-5, -3, 12)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertFalse(result[0]['available'])

    def test_multiple_overlapping_now(self):
        self._restrict(-5, 3, 12)
        self._restrict(-3, 5, 8)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertTrue(result[0]['available'])
        self.assertEqual(result[0]['price'], 8)

    def test_multiple_overlapping_tomorrow(self):
        self._restrict(-5, 5, 12)
        self._restrict(1, 7, 8)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertTrue(result[0]['available'])
        self.assertEqual(result[0]['price'], 12)

    def test_multiple_distinct_available(self):
        self._restrict(-5, 2, 12)
        self._restrict(4, 7, 8)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertTrue(result[0]['available'])
        self.assertEqual(result[0]['price'], 12)

    def test_multiple_distinct_unavailable(self):
        self._restrict(-5, -1, 12)
        self._restrict(4, 7, 8)
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 1)
        self.assertIn('available', result[0])
        self.assertFalse(result[0]['available'])

    def test_variation_specific(self):
        self.item.properties.add(self.property)
        self._restrict(-5, 1, 12, variations=[self.variation1])
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 3)
        for entry in result:
            if 'variation' in entry and entry['variation'].pk == self.variation1.pk:
                self.assertTrue(entry['available'])
                self.assertEqual(entry['price'], 12)
            else:
                self.assertTrue(entry['available'])

    def test_variation_specifics(self):
        self.item.properties.add(self.property)
        self._restrict(-5, 1, 12, variations=[self.variation1])
        self._restrict(-5, 1, 8, variations=[self.variation1])
        self._restrict(-5, -1, 8, variations=[self.variation3])
        result = self._query_availability(self.event)
        self.assertEqual(len(result), 3)
        for entry in result:
            if 'variation' in entry and entry['variation'].pk == self.variation1.pk:
                self.assertTrue(entry['available'])
                self.assertEqual(entry['price'], 8)
            elif 'variation' in entry and entry['variation'].pk == self.variation3.pk:
                self.assertFalse(entry['available'])
            else:
                self.assertTrue(entry['available'])
| |
#!/usr/bin/env python
"""
Converted from ElastiCache_Redis.template located at:
http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
In addition to troposphere, this script requires awacs (Amazon Web Access
Control Subsystem)
"""
from __future__ import absolute_import, division, print_function
import troposphere.ec2 as ec2
import troposphere.elasticache as elasticache
import troposphere.iam as iam
import awacs
from awacs.aws import (Allow,
Statement,
Principal,
Policy)
from awacs.sts import AssumeRole
from troposphere import (Base64,
cloudformation,
FindInMap,
GetAtt,
Join,
Parameter,
Output,
Ref,
Tags,
Template)
from troposphere.policies import (CreationPolicy,
ResourceSignal)
def main():
    """
    Create an ElastiCache Redis Node and EC2 Instance.

    Builds the CloudFormation template (mappings, parameters, IAM role
    and instance profile, security groups, the web-server EC2 instance
    and the Redis cluster) and prints it to stdout as JSON.
    """
    template = Template()

    # Description
    # NOTE: trailing spaces on each literal are required so the implicitly
    # concatenated strings do not run words together; also fixes the
    # original "showIing" typo.
    template.add_description(
        'AWS CloudFormation Sample Template ElastiCache_Redis: '
        'Sample template showing how to create an Amazon '
        'ElastiCache Redis Cluster. **WARNING** This template '
        'creates an Amazon EC2 Instance and an Amazon ElastiCache '
        'Cluster. You will be billed for the AWS resources used '
        'if you create a stack from this template.')

    # Mappings
    # EC2 instance type -> virtualization architecture (PV vs HVM)
    template.add_mapping('AWSInstanceType2Arch', {
        't1.micro': {'Arch': 'PV64'},
        't2.micro': {'Arch': 'HVM64'},
        't2.small': {'Arch': 'HVM64'},
        't2.medium': {'Arch': 'HVM64'},
        'm1.small': {'Arch': 'PV64'},
        'm1.medium': {'Arch': 'PV64'},
        'm1.large': {'Arch': 'PV64'},
        'm1.xlarge': {'Arch': 'PV64'},
        'm2.xlarge': {'Arch': 'PV64'},
        'm2.2xlarge': {'Arch': 'PV64'},
        'm2.4xlarge': {'Arch': 'PV64'},
        'm3.medium': {'Arch': 'HVM64'},
        'm3.large': {'Arch': 'HVM64'},
        'm3.xlarge': {'Arch': 'HVM64'},
        'm3.2xlarge': {'Arch': 'HVM64'},
        'c1.medium': {'Arch': 'PV64'},
        'c1.xlarge': {'Arch': 'PV64'},
        'c3.large': {'Arch': 'HVM64'},
        'c3.xlarge': {'Arch': 'HVM64'},
        'c3.2xlarge': {'Arch': 'HVM64'},
        'c3.4xlarge': {'Arch': 'HVM64'},
        'c3.8xlarge': {'Arch': 'HVM64'},
        'c4.large': {'Arch': 'HVM64'},
        'c4.xlarge': {'Arch': 'HVM64'},
        'c4.2xlarge': {'Arch': 'HVM64'},
        'c4.4xlarge': {'Arch': 'HVM64'},
        'c4.8xlarge': {'Arch': 'HVM64'},
        'g2.2xlarge': {'Arch': 'HVMG2'},
        'r3.large': {'Arch': 'HVM64'},
        'r3.xlarge': {'Arch': 'HVM64'},
        'r3.2xlarge': {'Arch': 'HVM64'},
        'r3.4xlarge': {'Arch': 'HVM64'},
        'r3.8xlarge': {'Arch': 'HVM64'},
        'i2.xlarge': {'Arch': 'HVM64'},
        'i2.2xlarge': {'Arch': 'HVM64'},
        'i2.4xlarge': {'Arch': 'HVM64'},
        'i2.8xlarge': {'Arch': 'HVM64'},
        'd2.xlarge': {'Arch': 'HVM64'},
        'd2.2xlarge': {'Arch': 'HVM64'},
        'd2.4xlarge': {'Arch': 'HVM64'},
        'd2.8xlarge': {'Arch': 'HVM64'},
        'hi1.4xlarge': {'Arch': 'HVM64'},
        'hs1.8xlarge': {'Arch': 'HVM64'},
        'cr1.8xlarge': {'Arch': 'HVM64'},
        'cc2.8xlarge': {'Arch': 'HVM64'}
    })

    # Region + architecture -> AMI id
    template.add_mapping('AWSRegionArch2AMI', {
        'us-east-1': {'PV64': 'ami-0f4cfd64',
                      'HVM64': 'ami-0d4cfd66',
                      'HVMG2': 'ami-5b05ba30'},
        'us-west-2': {'PV64': 'ami-d3c5d1e3',
                      'HVM64': 'ami-d5c5d1e5',
                      'HVMG2': 'ami-a9d6c099'},
        'us-west-1': {'PV64': 'ami-85ea13c1',
                      'HVM64': 'ami-87ea13c3',
                      'HVMG2': 'ami-37827a73'},
        'eu-west-1': {'PV64': 'ami-d6d18ea1',
                      'HVM64': 'ami-e4d18e93',
                      'HVMG2': 'ami-72a9f105'},
        'eu-central-1': {'PV64': 'ami-a4b0b7b9',
                         'HVM64': 'ami-a6b0b7bb',
                         'HVMG2': 'ami-a6c9cfbb'},
        'ap-northeast-1': {'PV64': 'ami-1a1b9f1a',
                           'HVM64': 'ami-1c1b9f1c',
                           'HVMG2': 'ami-f644c4f6'},
        'ap-southeast-1': {'PV64': 'ami-d24b4280',
                           'HVM64': 'ami-d44b4286',
                           'HVMG2': 'ami-12b5bc40'},
        'ap-southeast-2': {'PV64': 'ami-ef7b39d5',
                           'HVM64': 'ami-db7b39e1',
                           'HVMG2': 'ami-b3337e89'},
        'sa-east-1': {'PV64': 'ami-5b098146',
                      'HVM64': 'ami-55098148',
                      'HVMG2': 'NOT_SUPPORTED'},
        'cn-north-1': {'PV64': 'ami-bec45887',
                       'HVM64': 'ami-bcc45885',
                       'HVMG2': 'NOT_SUPPORTED'}
    })

    # Region -> service principals for IAM trust policies
    template.add_mapping('Region2Principal', {
        'us-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-2': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'us-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'eu-west-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-northeast-1': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'ap-southeast-2': {'EC2Principal': 'ec2.amazonaws.com',
                           'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'sa-east-1': {'EC2Principal': 'ec2.amazonaws.com',
                      'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
        'cn-north-1': {'EC2Principal': 'ec2.amazonaws.com.cn',
                       'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
        'eu-central-1': {'EC2Principal': 'ec2.amazonaws.com',
                         'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
    })

    # Parameters
    cachenodetype = template.add_parameter(Parameter(
        'ClusterNodeType',
        Description='The compute and memory capacity of the nodes in the Redis'
                    ' Cluster',
        Type='String',
        Default='cache.m1.small',
        AllowedValues=['cache.m1.small',
                       'cache.m1.large',
                       'cache.m1.xlarge',
                       'cache.m2.xlarge',
                       'cache.m2.2xlarge',
                       'cache.m2.4xlarge',
                       'cache.c1.xlarge'],
        ConstraintDescription='must select a valid Cache Node type.',
    ))

    instancetype = template.add_parameter(Parameter(
        'InstanceType',
        Description='WebServer EC2 instance type',
        Type='String',
        Default='t2.micro',
        AllowedValues=['t1.micro',
                       't2.micro',
                       't2.small',
                       't2.medium',
                       'm1.small',
                       'm1.medium',
                       'm1.large',
                       'm1.xlarge',
                       'm2.xlarge',
                       'm2.2xlarge',
                       'm2.4xlarge',
                       'm3.medium',
                       'm3.large',
                       'm3.xlarge',
                       'm3.2xlarge',
                       'c1.medium',
                       'c1.xlarge',
                       'c3.large',
                       'c3.xlarge',
                       'c3.2xlarge',
                       'c3.4xlarge',
                       'c3.8xlarge',
                       'c4.large',
                       'c4.xlarge',
                       'c4.2xlarge',
                       'c4.4xlarge',
                       'c4.8xlarge',
                       'g2.2xlarge',
                       'r3.large',
                       'r3.xlarge',
                       'r3.2xlarge',
                       'r3.4xlarge',
                       'r3.8xlarge',
                       'i2.xlarge',
                       'i2.2xlarge',
                       'i2.4xlarge',
                       'i2.8xlarge',
                       'd2.xlarge',
                       'd2.2xlarge',
                       'd2.4xlarge',
                       'd2.8xlarge',
                       'hi1.4xlarge',
                       'hs1.8xlarge',
                       'cr1.8xlarge',
                       'cc2.8xlarge',
                       'cg1.4xlarge'],
        ConstraintDescription='must be a valid EC2 instance type.',
    ))

    keyname = template.add_parameter(Parameter(
        'KeyName',
        Description='Name of an existing EC2 KeyPair to enable SSH access'
                    ' to the instance',
        Type='AWS::EC2::KeyPair::KeyName',
        ConstraintDescription='must be the name of an existing EC2 KeyPair.',
    ))

    sshlocation = template.add_parameter(Parameter(
        'SSHLocation',
        Description='The IP address range that can be used to SSH to'
                    ' the EC2 instances',
        Type='String',
        MinLength='9',
        MaxLength='18',
        Default='0.0.0.0/0',
        AllowedPattern='(\\d{1,3})\\.(\\d{1,3})\\.'
                       '(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})',
        ConstraintDescription='must be a valid IP CIDR range of the'
                              ' form x.x.x.x/x.'
    ))

    # Resources
    # IAM role assumable by EC2 in the current region
    webserverrole = template.add_resource(iam.Role(
        'WebServerRole',
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[AssumeRole],
                    Principal=Principal('Service',
                                        [FindInMap('Region2Principal',
                                                   Ref('AWS::Region'),
                                                   'EC2Principal')]),
                )
            ]
        ),
        Path='/',
    ))

    # Allow the instance to discover the cache cluster's endpoint
    template.add_resource(iam.PolicyType(
        'WebServerRolePolicy',
        PolicyName='WebServerRole',
        PolicyDocument=awacs.aws.Policy(
            Statement=[awacs.aws.Statement(
                Action=[awacs.aws.Action("elasticache",
                                         "DescribeCacheClusters")],
                Resource=["*"],
                Effect=awacs.aws.Allow
            )]
        ),
        Roles=[Ref(webserverrole)],
    ))

    webserverinstanceprofile = template.add_resource(iam.InstanceProfile(
        'WebServerInstanceProfile',
        Path='/',
        Roles=[Ref(webserverrole)],
    ))

    webserversg = template.add_resource(ec2.SecurityGroup(
        'WebServerSecurityGroup',
        GroupDescription='Enable HTTP and SSH access',
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='22',
                ToPort='22',
                CidrIp=Ref(sshlocation),
            ),
            ec2.SecurityGroupRule(
                IpProtocol='tcp',
                FromPort='80',
                ToPort='80',
                CidrIp='0.0.0.0/0',
            )
        ]
    ))

    # Web server bootstrapped via cfn-init: installs Apache/PHP + phpredis,
    # drops a sample PHP page and a cron job that refreshes the cluster config
    webserverinstance = template.add_resource(ec2.Instance(
        'WebServerInstance',
        Metadata=cloudformation.Metadata(
            cloudformation.Init({
                'config': cloudformation.InitConfig(
                    packages={
                        'yum': {
                            'httpd': [],
                            'php': [],
                            'php-devel': [],
                            'gcc': [],
                            'make': []
                        }
                    },
                    files=cloudformation.InitFiles({
                        '/var/www/html/index.php': cloudformation.InitFile(
                            content=Join('', [
                                '<?php\n',
                                'echo \"<h1>AWS CloudFormation sample'
                                ' application for Amazon ElastiCache'
                                ' Redis Cluster</h1>\";\n',
                                '\n',
                                '$cluster_config = json_decode('
                                'file_get_contents(\'/tmp/cacheclusterconfig\''
                                '), true);\n',
                                '$endpoint = $cluster_config[\'CacheClusters'
                                '\'][0][\'CacheNodes\'][0][\'Endpoint\'][\'Add'
                                'ress\'];\n',
                                '$port = $cluster_config[\'CacheClusters\'][0]'
                                '[\'CacheNodes\'][0][\'Endpoint\'][\'Port\'];'
                                '\n',
                                '\n',
                                'echo \"<p>Connecting to Redis Cache Cluster '
                                'node \'{$endpoint}\' on port {$port}</p>\";'
                                '\n',
                                '\n',
                                '$redis=new Redis();\n',
                                '$redis->connect($endpoint, $port);\n',
                                '$redis->set(\'testkey\', \'Hello World!\');'
                                '\n',
                                '$return = $redis->get(\'testkey\');\n',
                                '\n',
                                'echo \"<p>Retrieved value: $return</p>\";'
                                '\n',
                                '?>\n'
                            ]),
                            mode='000644',
                            owner='apache',
                            group='apache'
                        ),
                        '/etc/cron.d/get_cluster_config':
                            cloudformation.InitFile(
                                content='*/5 * * * * root'
                                        ' /usr/local/bin/get_cluster_config',
                                mode='000644',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/get_cluster_config':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'aws elasticache describe-cache-clusters ',
                                    '         --cache-cluster-id ',
                                    Ref('RedisCluster'),
                                    '         --show-cache-node-info'
                                    ' --region ', Ref('AWS::Region'),
                                    ' > /tmp/cacheclusterconfig\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/usr/local/bin/install_phpredis':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '#! /bin/bash\n',
                                    'cd /tmp\n',
                                    'wget https://github.com/nicolasff/'
                                    'phpredis/zipball/master -O phpredis.zip'
                                    '\n',
                                    'unzip phpredis.zip\n',
                                    'cd nicolasff-phpredis-*\n',
                                    'phpize\n',
                                    './configure\n',
                                    'make && make install\n',
                                    'touch /etc/php.d/redis.ini\n',
                                    'echo extension=redis.so > /etc/php.d/'
                                    'redis.ini\n'
                                ]),
                                mode='000755',
                                owner='root',
                                group='root'
                            ),
                        '/etc/cfn/cfn-hup.conf': cloudformation.InitFile(
                            content=Join('', [
                                '[main]\n',
                                'stack=', Ref('AWS::StackId'), '\n',
                                'region=', Ref('AWS::Region'), '\n'
                            ]),
                            mode='000400',
                            owner='root',
                            group='root'
                        ),
                        '/etc/cfn/hooks.d/cfn-auto-reloader.conf':
                            cloudformation.InitFile(
                                content=Join('', [
                                    '[cfn-auto-reloader-hook]\n',
                                    'triggers=post.update\n',
                                    'path=Resources.WebServerInstance.Metadata'
                                    '.AWS::CloudFormation::Init\n',
                                    'action=/opt/aws/bin/cfn-init -v ',
                                    '         --stack ', Ref('AWS::StackName'),
                                    '         --resource WebServerInstance ',
                                    '         --region ', Ref('AWS::Region'),
                                    '\n',
                                    'runas=root\n'
                                ]),
                                # Why doesn't the Amazon template have this?
                                # mode='000400',
                                # owner='root',
                                # group='root'
                            ),
                    }),
                    commands={
                        '01-install_phpredis': {
                            'command': '/usr/local/bin/install_phpredis'
                        },
                        '02-get-cluster-config': {
                            'command': '/usr/local/bin/get_cluster_config'
                        }
                    },
                    services={
                        "sysvinit": cloudformation.InitServices({
                            "httpd": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                            ),
                            "cfn-hup": cloudformation.InitService(
                                enabled=True,
                                ensureRunning=True,
                                files=['/etc/cfn/cfn-hup.conf',
                                       '/etc/cfn/hooks.d/'
                                       'cfn-auto-reloader.conf']
                            ),
                        }),
                    },
                )
            })
        ),
        ImageId=FindInMap('AWSRegionArch2AMI', Ref('AWS::Region'),
                          FindInMap('AWSInstanceType2Arch',
                                    Ref(instancetype), 'Arch')),
        InstanceType=Ref(instancetype),
        SecurityGroups=[Ref(webserversg)],
        KeyName=Ref(keyname),
        IamInstanceProfile=Ref(webserverinstanceprofile),
        UserData=Base64(Join('', [
            '#!/bin/bash -xe\n',
            'yum update -y aws-cfn-bootstrap\n',
            '# Setup the PHP sample application\n',
            '/opt/aws/bin/cfn-init -v ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n',
            '# Signal the status of cfn-init\n',
            '/opt/aws/bin/cfn-signal -e $? ',
            '         --stack ', Ref('AWS::StackName'),
            '         --resource WebServerInstance ',
            '         --region ', Ref('AWS::Region'), '\n'
        ])),
        CreationPolicy=CreationPolicy(
            ResourceSignal=ResourceSignal(Timeout='PT15M')
        ),
        # 'Troposphere' typo ("Troposhpere") fixed in the tag value
        Tags=Tags(Application=Ref('AWS::StackId'),
                  Details='Created using Troposphere')
    ))

    # Only the web server's EC2 security group may reach the cluster
    redisclustersg = template.add_resource(elasticache.SecurityGroup(
        'RedisClusterSecurityGroup',
        Description='Lock the cluster down',
    ))

    template.add_resource(elasticache.SecurityGroupIngress(
        'RedisClusterSecurityGroupIngress',
        CacheSecurityGroupName=Ref(redisclustersg),
        EC2SecurityGroupName=Ref(webserversg),
    ))

    template.add_resource(elasticache.CacheCluster(
        'RedisCluster',
        Engine='redis',
        CacheNodeType=Ref(cachenodetype),
        NumCacheNodes='1',
        CacheSecurityGroupNames=[Ref(redisclustersg)],
    ))

    # Outputs
    template.add_output([
        Output(
            'WebsiteURL',
            Description='Application URL',
            Value=Join('', [
                'http://',
                GetAtt(webserverinstance, 'PublicDnsName'),
            ])
        )
    ])

    # Print CloudFormation Template
    print(template.to_json())


if __name__ == '__main__':
    main()
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
import netaddr
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import utils
from openstack_dashboard import policy
from openstack_dashboard.utils import settings as setting_utils
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
    """Workflow action collecting the base attributes of a new network
    (name, admin state, shared flag, subnet toggle and AZ hints).
    """

    net_name = forms.CharField(max_length=255,
                               label=_("Network Name"),
                               required=False)
    admin_state = forms.BooleanField(
        label=_("Enable Admin State"),
        initial=True,
        required=False,
        help_text=_("If checked, the network will be enabled."))
    shared = forms.BooleanField(label=_("Shared"), initial=False,
                                required=False)
    # Checkbox toggling the subnet-related workflow tabs; the data-hide-*
    # attrs drive the client-side show/hide of those tabs.
    with_subnet = forms.BooleanField(label=_("Create Subnet"),
                                     widget=forms.CheckboxInput(attrs={
                                         'class': 'switchable',
                                         'data-slug': 'with_subnet',
                                         'data-hide-tab': 'create_network__'
                                                          'createsubnetinfo'
                                                          'action,'
                                                          'create_network__'
                                                          'createsubnetdetail'
                                                          'action',
                                         'data-hide-on-checked': 'false'
                                     }),
                                     initial=True,
                                     required=False)
    az_hints = forms.MultipleChoiceField(
        label=_("Availability Zone Hints"),
        required=False,
        help_text=_("Availability zones where the DHCP agents may be "
                    "scheduled. Leaving this unset is equivalent to "
                    "selecting all availability zones"))

    def __init__(self, request, *args, **kwargs):
        super(CreateNetworkInfoAction, self).__init__(request,
                                                      *args, **kwargs)
        # Hide 'shared' when policy forbids creating shared networks.
        if not policy.check((("network", "create_network:shared"),), request):
            self.fields['shared'].widget = forms.HiddenInput()
        try:
            if api.neutron.is_extension_supported(request,
                                                  'network_availability_zone'):
                zones = api.neutron.list_availability_zones(
                    self.request, 'network', 'available')
                self.fields['az_hints'].choices = [(zone['name'], zone['name'])
                                                   for zone in zones]
            else:
                # Extension not supported: drop the AZ-hints field entirely.
                del self.fields['az_hints']
        except Exception:
            # Warn (not fail) if the AZ listing call errors out, then
            # remove the field so the form stays usable.
            msg = _('Failed to get availability zone list.')
            messages.warning(request, msg)
            del self.fields['az_hints']

    class Meta(object):
        name = _("Network")
        help_text = _('Create a new network. '
                      'In addition, a subnet associated with the network '
                      'can be created in the following steps of this wizard.')
class CreateNetworkInfo(workflows.Step):
    """Workflow step wrapping CreateNetworkInfoAction.

    Publishes the collected network attributes to the workflow context.
    """
    action_class = CreateNetworkInfoAction
    contributes = ("net_name", "admin_state", "with_subnet", "shared",
                   "az_hints")
class CreateSubnetInfoAction(workflows.Action):
    """Second wizard step: addressing information for the new subnet.

    The network address is either typed in manually as a CIDR or
    allocated from a Neutron subnet pool (when the 'subnet_allocation'
    extension is available).  clean() skips all validation when the
    previous step disabled subnet creation.
    """
    subnet_name = forms.CharField(max_length=255,
                                  widget=forms.TextInput(attrs={
                                  }),
                                  label=_("Subnet Name"),
                                  required=False)
    address_source = forms.ChoiceField(
        required=False,
        label=_('Network Address Source'),
        choices=[('manual', _('Enter Network Address manually')),
                 ('subnetpool', _('Allocate Network Address from a pool'))],
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switchable',
            'data-slug': 'source',
        }))
    subnetpool = forms.ChoiceField(
        label=_("Address pool"),
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switched switchable',
            'data-required-when-shown': 'true',
            'data-slug': 'subnetpool',
            'data-switch-on': 'source',
            'data-source-subnetpool': _('Address pool')},
            data_attrs=('name', 'prefixes',
                        'ip_version',
                        'min_prefixlen',
                        'max_prefixlen',
                        'default_prefixlen'),
            transform=lambda x: "%s (%s)" % (x.name, ", ".join(x.prefixes))
            if 'prefixes' in x else "%s" % (x.name)),
        required=False)
    prefixlen = forms.ChoiceField(
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switched',
            'data-switch-on': 'subnetpool',
        }),
        label=_('Network Mask'),
        required=False)
    cidr = forms.IPField(label=_("Network Address"),
                         required=False,
                         initial="",
                         error_messages={
                             'required': _('Specify "Network Address" or '
                                           'clear "Create Subnet" checkbox '
                                           'in previous step.')},
                         widget=forms.TextInput(attrs={
                             'class': 'switched',
                             'data-switch-on': 'source',
                             'data-source-manual': _("Network Address"),
                         }),
                         help_text=_("Network address in CIDR format "
                                     "(e.g. 192.168.0.0/24, 2001:DB8::/48)"),
                         version=forms.IPv4 | forms.IPv6,
                         mask=True)
    ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
                                   widget=forms.ThemableSelectWidget(attrs={
                                       'class': 'switchable',
                                       'data-slug': 'ipversion',
                                   }),
                                   label=_("IP Version"),
                                   required=False)
    gateway_ip = forms.IPField(
        label=_("Gateway IP"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'gateway_ip',
            'data-source-manual': _("Gateway IP")
        }),
        required=False,
        initial="",
        help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
                    "The default value is the first IP of the "
                    "network address "
                    "(e.g. 192.168.0.1 for 192.168.0.0/24, "
                    "2001:DB8::1 for 2001:DB8::/48). "
                    "If you use the default, leave blank. "
                    "If you do not want to use a gateway, "
                    "check 'Disable Gateway' below."),
        version=forms.IPv4 | forms.IPv6,
        mask=False)
    no_gateway = forms.BooleanField(label=_("Disable Gateway"),
                                    widget=forms.CheckboxInput(attrs={
                                        'class': 'switchable',
                                        'data-slug': 'gateway_ip',
                                        'data-hide-on-checked': 'true'
                                    }),
                                    initial=False,
                                    required=False)
    # Subclasses may disable validation against the allowed CIDR ranges.
    check_subnet_range = True

    class Meta(object):
        name = _("Subnet")
        help_text = _('Creates a subnet associated with the network.'
                      ' You need to enter a valid "Network Address"'
                      ' and "Gateway IP". If you did not enter the'
                      ' "Gateway IP", the first value of a network'
                      ' will be assigned by default. If you do not want'
                      ' gateway please check the "Disable Gateway" checkbox.'
                      ' Advanced configuration is available by clicking on'
                      ' the "Subnet Details" tab.')

    def __init__(self, request, context, *args, **kwargs):
        super(CreateSubnetInfoAction, self).__init__(request, context, *args,
                                                     **kwargs)
        if not setting_utils.get_dict_config('OPENSTACK_NEUTRON_NETWORK',
                                             'enable_ipv6'):
            # IPv6 disabled by configuration: pin the field to v4.
            self.fields['ip_version'].widget = forms.HiddenInput()
            self.fields['ip_version'].initial = 4
        try:
            if api.neutron.is_extension_supported(request,
                                                  'subnet_allocation'):
                self.fields['subnetpool'].choices = \
                    self.get_subnetpool_choices(request)
            else:
                self.hide_subnetpool_choices()
        except Exception:
            self.hide_subnetpool_choices()
            msg = _('Unable to initialize subnetpools')
            exceptions.handle(request, msg)
        if len(self.fields['subnetpool'].choices) > 1:
            # Pre-populate prefixlen choices to satisfy Django
            # ChoiceField Validation. This is overridden w/data from
            # subnetpool on select.
            self.fields['prefixlen'].choices = \
                zip(list(range(0, 128 + 1)),
                    list(range(0, 128 + 1)))
            # Populate data-fields for switching the prefixlen field
            # when user selects a subnetpool other than
            # "Provider default pool"
            for (id_, name) in self.fields['subnetpool'].choices:
                if not id_:
                    continue
                key = 'data-subnetpool-' + id_
                self.fields['prefixlen'].widget.attrs[key] = \
                    _('Network Mask')
        else:
            self.hide_subnetpool_choices()

    def get_subnetpool_choices(self, request):
        """Return subnet pool choices headed by a 'Select a pool' entry."""
        subnetpool_choices = [('', _('Select a pool'))]
        for subnetpool in api.neutron.subnetpool_list(request):
            subnetpool_choices.append((subnetpool.id, subnetpool))
        return subnetpool_choices

    def hide_subnetpool_choices(self):
        """Hide every pool-related widget when pools cannot be used."""
        self.fields['address_source'].widget = forms.HiddenInput()
        self.fields['subnetpool'].choices = []
        self.fields['subnetpool'].widget = forms.HiddenInput()
        self.fields['prefixlen'].widget = forms.HiddenInput()

    def _check_subnet_range(self, subnet, allow_cidr):
        """Return True when *subnet* lies inside *allow_cidr*."""
        allowed_net = netaddr.IPNetwork(allow_cidr)
        return subnet in allowed_net

    def _check_cidr_allowed(self, ip_version, subnet):
        """Validate *subnet* against settings.ALLOWED_PRIVATE_SUBNET_CIDR."""
        if not self.check_subnet_range:
            return
        allowed_cidr = settings.ALLOWED_PRIVATE_SUBNET_CIDR
        version_str = 'ipv%s' % ip_version
        allowed_ranges = allowed_cidr.get(version_str, [])
        if allowed_ranges:
            under_range = any(self._check_subnet_range(subnet, allowed_range)
                              for allowed_range in allowed_ranges)
            if not under_range:
                range_str = ', '.join(allowed_ranges)
                msg = (_("CIDRs allowed for user private %(ip_ver)s "
                         "networks are %(allowed)s.") %
                       {'ip_ver': '%s' % version_str,
                        'allowed': range_str})
                raise forms.ValidationError(msg)

    def _check_subnet_data(self, cleaned_data):
        """Cross-field validation of the subnet addressing inputs."""
        cidr = cleaned_data.get('cidr')
        ip_version = int(cleaned_data.get('ip_version'))
        gateway_ip = cleaned_data.get('gateway_ip')
        no_gateway = cleaned_data.get('no_gateway')
        address_source = cleaned_data.get('address_source')
        subnetpool = cleaned_data.get('subnetpool')
        if not subnetpool and address_source == 'subnetpool':
            msg = _('Specify "Address pool" or select '
                    '"Enter Network Address manually" and specify '
                    '"Network Address".')
            raise forms.ValidationError(msg)
        if not cidr and address_source != 'subnetpool':
            msg = _('Specify "Network Address" or '
                    'clear "Create Subnet" checkbox in previous step.')
            raise forms.ValidationError(msg)
        if address_source == 'subnetpool' and 'cidr' in self._errors:
            # The CIDR comes from the pool; a manual-CIDR error
            # collected earlier is irrelevant.
            del self._errors['cidr']
        elif cidr:
            subnet = netaddr.IPNetwork(cidr)
            if subnet.version != ip_version:
                msg = _('Network Address and IP version are inconsistent.')
                raise forms.ValidationError(msg)
            if (ip_version == 4 and subnet.prefixlen == 32) or \
                    (ip_version == 6 and subnet.prefixlen == 128):
                msg = _("The subnet in the Network Address is "
                        "too small (/%s).") % subnet.prefixlen
                self._errors['cidr'] = self.error_class([msg])
            self._check_cidr_allowed(ip_version, subnet)
        if not no_gateway and gateway_ip:
            # BUG FIX: compare the IP versions with '!=' rather than
            # 'is not'.  Identity comparison of ints only works by
            # accident (CPython small-int caching) and is not a
            # correctness guarantee.
            if netaddr.IPAddress(gateway_ip).version != ip_version:
                msg = _('Gateway IP and IP version are inconsistent.')
                raise forms.ValidationError(msg)
        if no_gateway and 'gateway_ip' in self._errors:
            del self._errors['gateway_ip']

    def _remove_fields_errors(self):
        """Discard all field errors (used when the subnet step is skipped)."""
        self._errors = {}

    def clean(self):
        with_subnet = self.initial.get('with_subnet')
        if not with_subnet:
            # Subnet creation was unchecked in the first step; nothing
            # on this form should be validated.
            self._remove_fields_errors()
            return None
        cleaned_data = super(CreateSubnetInfoAction, self).clean()
        self._check_subnet_data(cleaned_data)
        return cleaned_data
class CreateSubnetInfo(workflows.Step):
    """Workflow step wrapping CreateSubnetInfoAction.

    Publishes the collected subnet addressing fields to the context.
    """
    action_class = CreateSubnetInfoAction
    contributes = ("subnet_name", "cidr", "ip_version",
                   "gateway_ip", "no_gateway", "subnetpool",
                   "prefixlen", "address_source")
class CreateSubnetDetailAction(workflows.Action):
    """Third wizard step: optional attributes of the new subnet.

    Collects the DHCP state, the IPv6 address configuration mode and
    three line-oriented text fields (allocation pools, DNS name servers
    and host routes) which are parsed and validated in clean().
    """
    enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
                                     initial=True, required=False)
    # Only shown when IP version 6 is selected in the previous step.
    ipv6_modes = forms.ChoiceField(
        label=_("IPv6 Address Configuration Mode"),
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switched',
            'data-switch-on': 'ipversion',
            'data-ipversion-6': _("IPv6 Address Configuration Mode"),
        }),
        initial=utils.IPV6_DEFAULT_MODE,
        required=False,
        help_text=_("Specifies how IPv6 addresses and additional information "
                    "are configured. We can specify SLAAC/DHCPv6 stateful/"
                    "DHCPv6 stateless provided by OpenStack, "
                    "or specify no option. "
                    "'No options specified' means addresses are configured "
                    "manually or configured by a non-OpenStack system."))
    # One "start_ip,end_ip" pair per line.
    allocation_pools = forms.CharField(
        widget=forms.Textarea(attrs={'rows': 4}),
        label=_("Allocation Pools"),
        help_text=_("IP address allocation pools. Each entry is: "
                    "start_ip_address,end_ip_address "
                    "(e.g., 192.168.1.100,192.168.1.120) "
                    "and one entry per line."),
        required=False)
    # One IP address per line.
    dns_nameservers = forms.CharField(
        widget=forms.widgets.Textarea(attrs={'rows': 4}),
        label=_("DNS Name Servers"),
        help_text=_("IP address list of DNS name servers for this subnet. "
                    "One entry per line."),
        required=False)
    # One "destination_cidr,nexthop" pair per line.
    host_routes = forms.CharField(
        widget=forms.widgets.Textarea(attrs={'rows': 4}),
        label=_("Host Routes"),
        help_text=_("Additional routes announced to the hosts. "
                    "Each entry is: destination_cidr,nexthop "
                    "(e.g., 192.168.200.0/24,10.56.1.254) "
                    "and one entry per line."),
        required=False)

    class Meta(object):
        name = _("Subnet Details")
        help_text = _('Specify additional attributes for the subnet.')

    def __init__(self, request, context, *args, **kwargs):
        super(CreateSubnetDetailAction, self).__init__(request, context,
                                                       *args, **kwargs)
        if not setting_utils.get_dict_config('OPENSTACK_NEUTRON_NETWORK',
                                             'enable_ipv6'):
            # IPv6 disabled by configuration; hide the mode selector.
            self.fields['ipv6_modes'].widget = forms.HiddenInput()

    def populate_ipv6_modes_choices(self, request, context):
        """Return the IPv6 mode choices, tagging the default entry."""
        return [(value, _("%s (Default)") % label)
                if value == utils.IPV6_DEFAULT_MODE
                else (value, label)
                for value, label in utils.IPV6_MODE_CHOICES]

    def _convert_ip_address(self, ip, field_name):
        """Parse *ip* or raise ValidationError naming *field_name*."""
        try:
            return netaddr.IPAddress(ip)
        except (netaddr.AddrFormatError, ValueError):
            msg = (_('%(field_name)s: Invalid IP address (value=%(ip)s)')
                   % {'field_name': field_name, 'ip': ip})
            raise forms.ValidationError(msg)

    def _convert_ip_network(self, network, field_name):
        """Parse CIDR *network* or raise ValidationError naming *field_name*."""
        try:
            return netaddr.IPNetwork(network)
        except (netaddr.AddrFormatError, ValueError):
            msg = (_('%(field_name)s: Invalid IP address (value=%(network)s)')
                   % {'field_name': field_name, 'network': network})
            raise forms.ValidationError(msg)

    def _check_allocation_pools(self, allocation_pools):
        """Validate 'start,end' lines; blank lines are ignored."""
        for p in allocation_pools.splitlines():
            p = p.strip()
            if not p:
                continue
            pool = p.split(',')
            if len(pool) != 2:
                msg = _('Start and end addresses must be specified '
                        '(value=%s)') % p
                raise forms.ValidationError(msg)
            start, end = [self._convert_ip_address(ip, "allocation_pools")
                          for ip in pool]
            if start > end:
                msg = _('Start address is larger than end address '
                        '(value=%s)') % p
                raise forms.ValidationError(msg)

    def _check_dns_nameservers(self, dns_nameservers):
        """Validate one IP address per line; blank lines are ignored."""
        for ns in dns_nameservers.splitlines():
            ns = ns.strip()
            if not ns:
                continue
            self._convert_ip_address(ns, "dns_nameservers")

    def _check_host_routes(self, host_routes):
        """Validate 'destination_cidr,nexthop' lines; blanks ignored."""
        for r in host_routes.splitlines():
            r = r.strip()
            if not r:
                continue
            route = r.split(',')
            if len(route) != 2:
                msg = _('Host Routes format error: '
                        'Destination CIDR and nexthop must be specified '
                        '(value=%s)') % r
                raise forms.ValidationError(msg)
            self._convert_ip_network(route[0], "host_routes")
            self._convert_ip_address(route[1], "host_routes")

    def clean(self):
        cleaned_data = super(CreateSubnetDetailAction, self).clean()
        self._check_allocation_pools(cleaned_data.get('allocation_pools'))
        self._check_host_routes(cleaned_data.get('host_routes'))
        self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
        return cleaned_data
class CreateSubnetDetail(workflows.Step):
    """Workflow step wrapping CreateSubnetDetailAction.

    Publishes the optional subnet attributes to the workflow context.
    """
    action_class = CreateSubnetDetailAction
    contributes = ("enable_dhcp", "ipv6_modes", "allocation_pools",
                   "dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
    """Three-step wizard creating a network and, optionally, one subnet.

    When subnet creation fails after the network was created, the
    network is deleted again so no half-configured network is left
    behind.
    """
    slug = "create_network"
    name = _("Create Network")
    finalize_button_name = _("Create")
    success_message = _('Created network "%s".')
    failure_message = _('Unable to create network "%s".')
    default_steps = (CreateNetworkInfo,
                     CreateSubnetInfo,
                     CreateSubnetDetail)
    wizard = True

    def get_success_url(self):
        return reverse("horizon:project:networks:index")

    def get_failure_url(self):
        return reverse("horizon:project:networks:index")

    def format_status_message(self, message):
        """Fill the success/failure template with the network name (or id)."""
        name = self.context.get('net_name') or self.context.get('net_id', '')
        return message % name

    def _create_network(self, request, data):
        """Create the network; return it, or False after handling the error."""
        try:
            params = {'name': data['net_name'],
                      'admin_state_up': data['admin_state'],
                      'shared': data['shared']}
            if 'az_hints' in data and data['az_hints']:
                params['availability_zone_hints'] = data['az_hints']
            network = api.neutron.network_create(request, **params)
            self.context['net_id'] = network.id
            LOG.debug('Network "%s" was successfully created.',
                      network.name_or_id)
            return network
        except Exception as e:
            LOG.info('Failed to create network: %s', e)
            msg = (_('Failed to create network "%(network)s": %(reason)s') %
                   {"network": data['net_name'], "reason": e})
            redirect = self.get_failure_url()
            exceptions.handle(request, msg, redirect=redirect)
            return False

    def _setup_subnet_parameters(self, params, data, is_create=True):
        """Setup subnet parameters

        This methods setups subnet parameters which are available
        in both create and update.
        """
        is_update = not is_create
        params['enable_dhcp'] = data['enable_dhcp']
        if int(data['ip_version']) == 6:
            ipv6_modes = utils.get_ipv6_modes_attrs_from_menu(
                data['ipv6_modes'])
            # The RA/address modes are only sent at creation time.
            if ipv6_modes[0] and is_create:
                params['ipv6_ra_mode'] = ipv6_modes[0]
            if ipv6_modes[1] and is_create:
                params['ipv6_address_mode'] = ipv6_modes[1]
        if data['allocation_pools']:
            pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
                     for pool in data['allocation_pools'].splitlines()
                     if pool.strip()]
            params['allocation_pools'] = pools
        # On update an empty value is still sent so the attribute gets
        # cleared server-side; on create it is simply omitted.
        if data['host_routes'] or is_update:
            routes = [dict(zip(['destination', 'nexthop'],
                               route.strip().split(',')))
                      for route in data['host_routes'].splitlines()
                      if route.strip()]
            params['host_routes'] = routes
        if data['dns_nameservers'] or is_update:
            nameservers = [ns.strip()
                           for ns in data['dns_nameservers'].splitlines()
                           if ns.strip()]
            params['dns_nameservers'] = nameservers

    def _create_subnet(self, request, data, network=None, tenant_id=None,
                       no_redirect=False):
        """Create the subnet; return it, or False after handling the error."""
        if network:
            network_id = network.id
            network_name = network.name
        else:
            network_id = self.context.get('network_id')
            network_name = self.context.get('network_name')
        try:
            params = {'network_id': network_id,
                      'name': data['subnet_name']}
            if 'cidr' in data and data['cidr']:
                params['cidr'] = data['cidr']
            if 'ip_version' in data and data['ip_version']:
                params['ip_version'] = int(data['ip_version'])
            if tenant_id:
                params['tenant_id'] = tenant_id
            if data['no_gateway']:
                # An explicit None disables the gateway entirely.
                params['gateway_ip'] = None
            elif data['gateway_ip']:
                params['gateway_ip'] = data['gateway_ip']
            if 'subnetpool' in data and data['subnetpool']:
                params['subnetpool_id'] = data['subnetpool']
            if 'prefixlen' in data and data['prefixlen']:
                params['prefixlen'] = data['prefixlen']
            self._setup_subnet_parameters(params, data)
            subnet = api.neutron.subnet_create(request, **params)
            self.context['subnet_id'] = subnet.id
            LOG.debug('Subnet "%s" was successfully created.', data['cidr'])
            return subnet
        except Exception as e:
            if network_name:
                msg = _('Failed to create subnet "%(sub)s" for network '
                        '"%(net)s": %(reason)s')
            else:
                msg = _('Failed to create subnet "%(sub)s": %(reason)s')
            if no_redirect:
                redirect = None
            else:
                redirect = self.get_failure_url()
            exceptions.handle(request,
                              msg % {"sub": data['cidr'], "net": network_name,
                                     "reason": e},
                              redirect=redirect)
            return False

    def _delete_network(self, request, network):
        """Delete the created network when subnet creation failed."""
        try:
            api.neutron.network_delete(request, network.id)
            LOG.debug('Delete the created network %s '
                      'due to subnet creation failure.', network.id)
            msg = _('Delete the created network "%s" '
                    'due to subnet creation failure.') % network.name
            redirect = self.get_failure_url()
            messages.info(request, msg)
            # NOTE(review): this Http302 is raised inside the try block,
            # so the generic handler below catches it; the redirect
            # appears to rely on exceptions.handle() re-raising Http302
            # -- confirm before restructuring this method.
            raise exceptions.Http302(redirect)
        except Exception as e:
            LOG.info('Failed to delete network %(id)s: %(exc)s',
                     {'id': network.id, 'exc': e})
            msg = _('Failed to delete network "%s"') % network.name
            redirect = self.get_failure_url()
            exceptions.handle(request, msg, redirect=redirect)

    def handle(self, request, data):
        """Create the network and, when requested, its subnet."""
        network = self._create_network(request, data)
        if not network:
            return False
        # If we do not need to create a subnet, return here.
        if not data['with_subnet']:
            return True
        subnet = self._create_subnet(request, data, network, no_redirect=True,
                                     tenant_id=network.tenant_id)
        if subnet:
            return True
        else:
            # Roll back: do not leave a network without its subnet.
            self._delete_network(request, network)
            return False
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Database abstraction layer. Simplyfies database
handling a bit.
An example of common usecase could be as such:
# Import the module
from databaselayer import database
# Create the database
myDB = database.Database('SQLite', 'database.sql')
# Create a table
myDB.execute(
'CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username TEXT)'
)
# Insert a few people in the users table
myDB.insert('users', {'username': 'John'})
myDB.insert('users', {'username': 'Tom'})
"""
import threading
import sys
try:
import sqlite3
SQLITE = True
except ImportError:
# Fallback for sqlite3 (custom install)
try:
from pysqlite2 import dbapi2 as sqlite3
SQLITE = True
except ImportError:
SQLITE = False
try:
import MySQLdb
MYSQL = True
except ImportError:
MYSQL = False
class Database(threading.Thread):
    """Higher level database abstraction layer.

    Provides a database abstraction layer, for easy use with multiple
    different database types, without the need to think about SQL
    differences. If you want to execute raw SQL, you can use the
    execute method.

    Throughout the class, a lot of methods take in a filter argument.
    The filter is in the format of {'field': 'value'}. The data
    argument follows the same syntax.

    The add argument is to add additional raw SQL to a constructed
    query (e.g. add="ORDER BY time").

    NOTE(review): bind placeholders are generated in sqlite3's 'qmark'
    style ('?'); MySQLdb uses the 'format' style ('%s'), so
    parameterized queries presumably only work on the SQLite backend
    -- confirm before using MySQL here.
    NOTE(review): table and column names are interpolated directly
    into the SQL text; never pass untrusted identifiers (injection
    risk).
    """

    def __init__(self, dbtype=None, dbname=None, dbserver=None, creden=None):
        """Store the connection settings; no connection is opened yet.

        dbtype   -- 'SQLite' or 'MySQL'
        dbname   -- database file name (SQLite) or schema name (MySQL)
        dbserver -- database host (MySQL only)
        creden   -- optional dict with 'username'/'password' entries
        """
        threading.Thread.__init__(self)
        # The original wrapped these assignments in a try/except
        # NameError, which can never trigger for plain assignments.
        self.dbtype = dbtype
        self.dbname = dbname
        if dbserver is not None:
            self.dbserver = dbserver
        if creden is not None:
            # Missing keys simply mean "no credentials given".
            self.user = creden.get('username')
            self.passwd = creden.get('password')
        else:
            self.user = None
            self.passwd = None
        self.temp_values = None
        self.temp_insert_values = None
        self.last_insert_id = None
        self.conn = None
        self.cursor = None

    def connect(self):
        """Make the connection based on the type of database.

        Types allowed:
            SQLite
            MySQL

        Raises NameError when the requested backend is unavailable.
        """
        if SQLITE and self.dbtype == 'SQLite':
            self.conn = sqlite3.connect(self.dbname)
            self.cursor = self.conn.cursor()
        elif MYSQL and self.dbtype == 'MySQL':
            self.conn = MySQLdb.connect(host=self.dbserver, db=self.dbname,
                                        user=self.user, passwd=self.passwd)
            self.cursor = self.conn.cursor()
        else:
            raise NameError('No database available!')

    def _keys_to_sql(self, keys=None, sep='AND '):
        """Construct the SQL filter from a dict.

        Side effect: stores the bind values in self.temp_values, in
        the same order as the generated placeholders.
        """
        if keys is None:
            keys = {}
        filters = []
        self.temp_values = ()
        for field, value in list(keys.items()):
            filters.append("%s = ? " % field)
            self.temp_values = self.temp_values + (value,)
        return sep.join(filters)

    def _where_clause(self, filters):
        """Return ' WHERE ...' for *filters*, or ' ' when empty.

        The original code unconditionally appended ' WHERE ', which
        produced invalid SQL when no filters were given; an empty
        filter now means "all rows".  Also sets self.temp_values via
        _keys_to_sql.
        """
        conditions = self._keys_to_sql(filters)
        if conditions:
            return ' WHERE ' + conditions
        return ' '

    def _keys_to_insert_sql(self, keys=None, sep=', '):
        """Convert a dict into an SQL field/value pair.

        Side effect: stores the bind values in self.temp_insert_values.
        """
        if keys is None:
            keys = {}
        fields = []
        values = []
        self.temp_insert_values = ()
        for field, value in list(keys.items()):
            fields.append(field)
            values.append('?')
            self.temp_insert_values = self.temp_insert_values + (value,)
        fields = '(' + sep.join(fields) + ') '
        values = 'VALUES(' + sep.join(values) + ') '
        return fields + values

    def execute(self, sql=None):
        """Simply execute the given SQL.

        Returns an 'SQL Error: ...' string on failure, None otherwise.
        """
        if sql is not None:
            self.connect()
            try:
                self.cursor.execute(sql)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                return 'SQL Error: %s' % error
            else:
                self.conn.commit()
                self.cursor.close()
        else:
            raise NameError('There was no SQL to be parsed')

    def fetchall(self, table=None, filters=None, add='', out='none'):
        """Fetches all rows from database based on the filters applied.

        Arg [out] specifies what the output should be:
            none   : do nothing here (simply return)
            output : send output to stdout
        """
        if filters is None:
            filters = {}
        if table is not None:
            # Empty filters now mean "all rows" instead of invalid SQL.
            sql = 'SELECT * FROM ' + table + self._where_clause(filters)
            self.connect()
            try:
                self.cursor.execute(sql + add, self.temp_values)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                del self.temp_values
                if out == 'output':
                    write("Error running SQL: %s" % (sql,))
                return 'SQL Error: %s' % error
            else:
                if out == 'output':
                    write("Successfully ran: %s" % (sql,))
                # Cleanup and return
                del self.temp_values
                result = self.cursor.fetchall()
                self.cursor.close()
                return result
        else:
            raise NameError('Table not specified!')

    def fetchone(self, table=None, filters=None, out='none'):
        """Fetches the first row from database based on the filters applied.

        Arg [out] specifies what the output should be:
            none   : do nothing here (simply return)
            output : send output to stdout
        """
        if filters is None:
            filters = {}
        if table is not None:
            sql = 'SELECT * FROM ' + table + self._where_clause(filters)
            self.connect()
            try:
                self.cursor.execute(sql, self.temp_values)
            except sqlite3.OperationalError as error:
                del self.temp_values
                self.conn.rollback()
                if out == 'output':
                    write("Error running SQL: %s" % (sql,))
                return 'SQL Error: %s' % error
            else:
                if out == 'output':
                    write("Successfully ran: %s" % (sql,))
                # Cleanup and return
                del self.temp_values
                result = self.cursor.fetchone()
                self.cursor.close()
                return result
        else:
            raise NameError('Table not specified!')

    def insert(self, table=None, data=None, out=None):
        """Inserts specified data into the database.

        Arg [out] specifies what the output should be:
            none   : do nothing here (simply return)
            output : send output to stdout
        """
        if data is None:
            data = {}
        if table is not None:
            sql = 'INSERT INTO ' + table + self._keys_to_insert_sql(data)
            self.connect()
            try:
                self.cursor.execute(sql, self.temp_insert_values)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                del self.temp_insert_values
                if out == 'output':
                    write("Error running SQL: %s" % (sql,))
                return 'SQL Error: %s' % error
            else:
                if out == 'output':
                    write("Successfully ran: %s" % (sql,))
                    write("With data : %s" % (self.temp_insert_values,))
                del self.temp_insert_values
                # TODO Fix the last insert id
                # self.last_insert_id = self.cursor.lastrowid
                self.conn.commit()
                self.cursor.close()
                return True
        else:
            raise NameError('Table not specified!')

    def update(self, table=None, data=None, filters=None, out=None):
        """Updates rows where filters apply with given data.

        Arg [out] specifies what the output should be:
            none   : do nothing here (simply return)
            output : send output to stdout
        """
        if data is None:
            data = {}
        if filters is None:
            filters = {}
        if table is not None:
            data = self._keys_to_sql(data, sep=', ')
            values = self.temp_values
            if filters:
                filters = ' WHERE ' + str(self._keys_to_sql(filters))
                # SET values first, then the WHERE values.
                values = values + self.temp_values
            else:
                filters = ''
            sql = 'UPDATE ' + table + ' SET ' + data + filters
            self.connect()
            try:
                self.cursor.execute(sql, values)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                del self.temp_values
                if out == 'output':
                    write("Error running SQL: %s" % (sql,))
                return 'SQL Error: %s' % error
            else:
                if out == 'output':
                    write("Successfully ran: %s" % (sql,))
                del self.temp_values
                self.conn.commit()
                self.cursor.close()
                return True
        else:
            raise NameError('Table not specified!')

    def delete(self, table=None, filters=None):
        """Deletes rows where given filters apply.

        The WHERE clause is deliberately kept mandatory here: an empty
        filter dict keeps producing an SQL error instead of silently
        deleting every row.
        """
        if filters is None:
            filters = {}
        if table is not None:
            filters = self._keys_to_sql(filters)
            sql = 'DELETE FROM ' + table + ' WHERE ' + filters
            self.connect()
            try:
                self.cursor.execute(sql, self.temp_values)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                del self.temp_values
                return 'SQL Error: %s' % error
            else:
                del self.temp_values
                self.conn.commit()
                self.cursor.close()
                return True
        else:
            raise NameError('Table not specified!')

    def count(self, table=None, filters=None):
        """Counts the rows based on the given filters.

        Returns the number of matching rows, or an 'SQL Error: ...'
        string when the query fails.
        """
        if table is not None:
            sql = 'SELECT * FROM ' + table + self._where_clause(filters)
            self.connect()
            try:
                self.cursor.execute(sql, self.temp_values)
            except sqlite3.OperationalError as error:
                self.conn.rollback()
                del self.temp_values
                return 'SQL Error: %s' % error
            else:
                # Cleanup and return
                del self.temp_values
                # BUG FIX: cursor.rowcount is an attribute, not a
                # method (the original called it, raising TypeError),
                # and sqlite3 reports -1 for SELECT anyway.  Count the
                # fetched rows instead.
                rows = self.cursor.fetchall()
                self.cursor.close()
                return len(rows)
        else:
            raise NameError('Table not specified!')
def write(text):
    """Handle the output from the IRC bot.

    Writes *text* to stdout, newline-terminated, and flushes
    immediately so the output is not held in the buffer.
    """
    line = "{0}\n".format(text)
    sys.stdout.write(line)
    sys.stdout.flush()
| |
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
    """Escape *text* so it renders literally in the LaTeX output.

    Backslash and the braces are first moved to sentinel control
    characters so that the replacement macros inserted for the other
    specials are not themselves re-escaped afterwards.
    """
    # Phase 1: hide TeX structural characters behind sentinels.
    for char, sentinel in (('\\', '\x00'), ('{', '\x01'), ('}', '\x02')):
        text = text.replace(char, sentinel)
    # Phase 2: expand sentinels and remaining specials to
    # \<prefix>Zxx{} macros (defined in STYLE_TEMPLATE).
    macro_table = (
        ('\x00', 'Zbs'), ('\x01', 'Zob'), ('\x02', 'Zcb'),
        ('^', 'Zca'), ('_', 'Zus'), ('&', 'Zam'), ('<', 'Zlt'),
        ('>', 'Zgt'), ('#', 'Zsh'), ('%', 'Zpc'), ('$', 'Zdl'),
        ('-', 'Zhy'), ("'", 'Zsq'), ('"', 'Zdq'), ('~', 'Zti'),
    )
    for char, macro in macro_table:
        text = text.replace(char, '\\%s%s{}' % (commandprefix, macro))
    return text
# Skeleton used when the `full` option is set: the generated style
# definitions, preamble and highlighted code are %-interpolated into a
# complete standalone LaTeX document.
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
# Macro machinery emitted ahead of the highlighted output; see the long
# explanatory comment above for how \%(cp)s@tok/@toks resolve the class
# list.  %(cp)s is the commandprefix (default 'PY'); %(styles)s receives
# the per-token-class \def lines generated by the formatter.
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
    """Return the short class name for a token type.

    Token subtypes without an entry in STANDARD_TYPES get a name
    synthesized from the nearest known ancestor's short name plus the
    missing path components.
    """
    direct = STANDARD_TYPES.get(ttype)
    if direct:
        return direct
    suffix_parts = []
    node, shortname = ttype, direct
    # Walk up the token hierarchy until an entry is found, collecting
    # the skipped path components (deepest last, hence the reversal).
    while shortname is None:
        suffix_parts.append(node[-1])
        node = node.parent
        shortname = STANDARD_TYPES.get(node)
    return shortname + ''.join(reversed(suffix_parts))
class LatexFormatter(Formatter):
    r"""
    Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
    standard packages.

    Without the `full` option, code is formatted as one ``Verbatim``
    environment, like this:

    .. sourcecode:: latex

        \begin{Verbatim}[commandchars=\\\{\}]
            \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
                \PY{k}{pass}
        \end{Verbatim}

    The special command used here (``\PY``) and all the other macros it needs
    are output by the `get_style_defs` method.

    With the `full` option, a complete LaTeX document is output, including
    the command definitions in the preamble.

    The `get_style_defs()` method of a `LatexFormatter` returns a string
    containing ``\def`` commands defining the macros needed inside the
    ``Verbatim`` environments.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `full`
        Tells the formatter to output a "full" document, i.e. a complete
        self-contained document (default: ``False``).

    `title`
        If `full` is true, the title that should be used to caption the
        document (default: ``''``).

    `docclass`
        If the `full` option is enabled, this is the document class to use
        (default: ``'article'``).

    `preamble`
        If the `full` option is enabled, this can be further preamble commands,
        e.g. ``\usepackage`` (default: ``''``).

    `linenos`
        If set to ``True``, output line numbers (default: ``False``).

    `linenostart`
        The line number for the first line (default: ``1``).

    `linenostep`
        If set to a number n > 1, only every nth line number is printed.

    `verboptions`
        Additional options given to the Verbatim environment (see the *fancyvrb*
        docs for possible values) (default: ``''``).

    `commandprefix`
        The LaTeX commands used to produce colored output are constructed
        using this prefix and some letters (default: ``'PY'``).

        .. versionadded:: 0.7
        .. versionchanged:: 0.10
           The default is now ``'PY'`` instead of ``'C'``.

    `texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
        in comment tokens is not escaped so that LaTeX can render it (default:
        ``False``).

        .. versionadded:: 1.2

    `mathescape`
        If set to ``True``, enables LaTeX math mode escape in comments. That
        is, ``'$...$'`` inside a comment will trigger math mode (default:
        ``False``).

        .. versionadded:: 1.2

    `escapeinside`
        If set to a string of length 2, enables escaping to LaTeX. Text
        delimited by these 2 characters is read as LaTeX code and
        typeset accordingly. It has no effect in string literals. It has
        no effect in comments if `texcomments` or `mathescape` is
        set. (default: ``''``).

        .. versionadded:: 2.0

    `envname`
        Allows you to pick an alternative environment name replacing Verbatim.
        The alternate environment still has to support Verbatim's option syntax.
        (default: ``'Verbatim'``).

        .. versionadded:: 2.0
    """
    name = 'LaTeX'
    aliases = ['latex', 'tex']
    filenames = ['*.tex']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.docclass = options.get('docclass', 'article')
        self.preamble = options.get('preamble', '')
        self.linenos = get_bool_opt(options, 'linenos', False)
        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
        self.verboptions = options.get('verboptions', '')
        self.nobackground = get_bool_opt(options, 'nobackground', False)
        self.commandprefix = options.get('commandprefix', 'PY')
        self.texcomments = get_bool_opt(options, 'texcomments', False)
        self.mathescape = get_bool_opt(options, 'mathescape', False)
        self.escapeinside = options.get('escapeinside', '')
        # escapeinside is only honored when it is exactly two characters:
        # a left and a right delimiter.
        if len(self.escapeinside) == 2:
            self.left = self.escapeinside[0]
            self.right = self.escapeinside[1]
        else:
            self.escapeinside = ''
        self.envname = options.get('envname', u'Verbatim')

        self._create_stylesheet()

    def _create_stylesheet(self):
        """Build the token-type -> command-name and name -> macro maps."""
        t2n = self.ttype2name = {Token: ''}
        c2d = self.cmd2def = {}
        cp = self.commandprefix

        def rgbcolor(col):
            # 'rrggbb' hex string -> 'r,g,b' with components in [0, 1].
            if col:
                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
                                 for i in (0, 2, 4)])
            else:
                return '1,1,1'

        for ttype, ndef in self.style:
            name = _get_ttype_name(ttype)
            cmndef = ''
            if ndef['bold']:
                cmndef += r'\let\$$@bf=\textbf'
            if ndef['italic']:
                cmndef += r'\let\$$@it=\textit'
            if ndef['underline']:
                cmndef += r'\let\$$@ul=\underline'
            if ndef['roman']:
                cmndef += r'\let\$$@ff=\textrm'
            if ndef['sans']:
                cmndef += r'\let\$$@ff=\textsf'
            if ndef['mono']:
                # BUG FIX: 'mono' must select the typewriter family
                # (\texttt); it previously duplicated the 'sans' branch
                # by mapping to \textsf.
                cmndef += r'\let\$$@ff=\texttt'
            if ndef['color']:
                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
                           rgbcolor(ndef['color']))
            if ndef['border']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
                           (rgbcolor(ndef['border']),
                            rgbcolor(ndef['bgcolor'])))
            elif ndef['bgcolor']:
                cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
                           r'\colorbox[rgb]{%s}{\strut ##1}}' %
                           rgbcolor(ndef['bgcolor']))
            if cmndef == '':
                continue
            cmndef = cmndef.replace('$$', cp)
            t2n[ttype] = name
            c2d[name] = cmndef

    def get_style_defs(self, arg=''):
        """
        Return the command sequences needed to define the commands
        used to format text in the verbatim environment. ``arg`` is ignored.
        """
        cp = self.commandprefix
        styles = []
        for name, definition in iteritems(self.cmd2def):
            styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
                          (cp, name, definition))
        return STYLE_TEMPLATE % {'cp': self.commandprefix,
                                 'styles': '\n'.join(styles)}

    def format_unencoded(self, tokensource, outfile):
        # TODO: add support for background colors
        t2n = self.ttype2name
        cp = self.commandprefix

        if self.full:
            # Buffer the highlighted body so it can be embedded in the
            # full-document template at the end.
            realoutfile = outfile
            outfile = StringIO()

        outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
        if self.linenos:
            start, step = self.linenostart, self.linenostep
            outfile.write(u',numbers=left' +
                          (start and u',firstnumber=%d' % start or u'') +
                          (step and u',stepnumber=%d' % step or u''))
        if self.mathescape or self.texcomments or self.escapeinside:
            outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
        if self.verboptions:
            outfile.write(u',' + self.verboptions)
        outfile.write(u']\n')

        for ttype, value in tokensource:
            if ttype in Token.Comment:
                if self.texcomments:
                    # Try to guess comment starting lexeme and escape it ...
                    start = value[0:1]
                    for i in xrange(1, len(value)):
                        if start[0] != value[i]:
                            break
                        start += value[i]

                    value = value[len(start):]
                    start = escape_tex(start, cp)

                    # ... but do not escape inside comment.
                    value = start + value
                elif self.mathescape:
                    # Only escape parts not inside a math environment.
                    parts = value.split('$')
                    in_math = False
                    for i, part in enumerate(parts):
                        if not in_math:
                            parts[i] = escape_tex(part, cp)
                        in_math = not in_math
                    value = '$'.join(parts)
                elif self.escapeinside:
                    # Pass left...right delimited spans through unescaped.
                    text = value
                    value = ''
                    while text:
                        a, sep1, text = text.partition(self.left)
                        if sep1:
                            b, sep2, text = text.partition(self.right)
                            if sep2:
                                value += escape_tex(a, cp) + b
                            else:
                                value += escape_tex(a + sep1 + b, cp)
                        else:
                            value += escape_tex(a, cp)
                else:
                    value = escape_tex(value, cp)
            elif ttype not in Token.Escape:
                value = escape_tex(value, cp)
            styles = []
            while ttype is not Token:
                try:
                    styles.append(t2n[ttype])
                except KeyError:
                    # not in current style
                    styles.append(_get_ttype_name(ttype))
                ttype = ttype.parent
            styleval = '+'.join(reversed(styles))
            if styleval:
                # Emit one \PY{...}{...} command per line of the value.
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
            else:
                outfile.write(value)

        outfile.write(u'\\end{' + self.envname + u'}\n')

        if self.full:
            encoding = self.encoding or 'utf8'
            # map known existings encodings from LaTeX distribution
            encoding = {
                'utf_8': 'utf8',
                'latin_1': 'latin1',
                'iso_8859_1': 'latin1',
            }.get(encoding.replace('-', '_'), encoding)
            realoutfile.write(DOC_TEMPLATE %
                dict(docclass  = self.docclass,
                     preamble  = self.preamble,
                     title     = self.title,
                     encoding  = encoding,
                     styledefs = self.get_style_defs(),
                     code      = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
    """
    This lexer takes one lexer as argument, the lexer for the language
    being formatted, and the left and right delimiters for escaped text.

    First everything is scanned using the language lexer to obtain
    strings and comments. All other consecutive tokens are merged and
    the resulting text is scanned for escaped segments, which are given
    the Token.Escape type. Finally text that is not escaped is scanned
    again with the language lexer.
    """
    def __init__(self, left, right, lang, **options):
        # left/right: one-or-more-character delimiters bracketing text
        # that should be passed through as raw LaTeX.
        # lang: the lexer for the language being formatted.
        self.left = left
        self.right = right
        self.lang = lang
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Merge consecutive non-string/non-comment tokens into `buf` so a
        # delimiter split across several tokens is still recognized;
        # string and comment tokens are yielded through unchanged.
        buf = ''
        idx = 0
        for i, t, v in self.lang.get_tokens_unprocessed(text):
            if t in Token.Comment or t in Token.String:
                if buf:
                    # Flush the merged run before the string/comment token.
                    for x in self.get_tokens_aux(idx, buf):
                        yield x
                    buf = ''
                yield i, t, v
            else:
                if not buf:
                    # Remember where this merged run starts.
                    idx = i
                buf += v
        if buf:
            # Flush any trailing merged run.
            for x in self.get_tokens_aux(idx, buf):
                yield x

    def get_tokens_aux(self, index, text):
        # Scan `text` for left...right escape spans: the part before a left
        # delimiter is re-lexed with the language lexer, a complete span is
        # yielded as Token.Escape, and an unmatched left delimiter is
        # yielded as Token.Error (lexing then continues after it).
        while text:
            a, sep1, text = text.partition(self.left)
            if a:
                for i, t, v in self.lang.get_tokens_unprocessed(a):
                    yield index + i, t, v
                index += len(a)
            if sep1:
                b, sep2, text = text.partition(self.right)
                if sep2:
                    yield index + len(sep1), Token.Escape, b
                    index += len(sep1) + len(b) + len(sep2)
                else:
                    yield index, Token.Error, sep1
                    index += len(sep1)
                    text = b
| |
#
"""Sliver manager API.
This module exposes an XMLRPC interface that allows PlanetLab users to
create/destroy slivers with delegated instantiation, start and stop
slivers, make resource loans, and examine resource allocations. The
XMLRPC is provided on a localhost-only TCP port as well as via a Unix
domain socket that is accessible by ssh-ing into a delegate account
with the forward_api_calls shell.
"""
import SimpleXMLRPCServer
import SocketServer
import errno
import os
import pwd
import socket
import struct
import threading
import xmlrpclib
try:
    from PLC.Parameter import Parameter, Mixed
except ImportError:
    # PLC is only available inside a MyPLC deployment.  Fall back to
    # no-op stubs so this module can still be imported (e.g. by the
    # DocBook documentation extractor).  The bare ``except:`` was
    # narrowed to ImportError so real errors are not swallowed.
    def Parameter(a = None, b = None): pass
    def Mixed(a = None, b = None, c = None): pass
import accounts
import logger
# TODO: These try/excepts are a hack to allow doc/DocBookLocal.py to
# import this file in order to extract the documentation from each
# exported function.
# A better approach will involve more extensive code splitting, I think.
# Fall back to ``logger`` stand-ins when the real modules are missing
# (documentation extraction); bare ``except:`` narrowed to ImportError.
try:
    import database
except ImportError:
    import logger as database
try:
    import sliver_vs
except ImportError:
    import logger as sliver_vs
import ticket as ticket_module
import tools
deliver_ticket = None  # set in slivermanager.start()

# Registries filled in by the decorators below: exported callables and
# the number of arguments each expects.
api_method_dict = {}
nargs_dict = {}

def export_to_api(nargs):
    """Decorator factory: register *method* as an API call taking ``nargs`` arguments."""
    def register(method):
        nargs_dict[method.__name__] = nargs
        api_method_dict[method.__name__] = method
        return method
    return register
def export_to_docbook(**kwargs):
    """Decorator factory: attach DocBook metadata attributes to a method.

    Default metadata (group, status, roles, ...) is applied first, then
    overridden by any keyword arguments given to the decorator.
    """
    metadata = {
        "group" : "NMAPI",
        "status" : "current",
        "name": None,
        "args": None,
        "roles": [],
        "accepts": [],
        "returns": [],
    }
    def annotate(method):
        def args():
            # Inspect method. Remove self from the argument list.
            max_args = method.func_code.co_varnames[0:method.func_code.co_argcount]
            arg_defaults = method.func_defaults
            if arg_defaults is None:
                arg_defaults = ()
            min_args = max_args[0:len(max_args) - len(arg_defaults)]
            arg_defaults = tuple([None for arg in min_args]) + arg_defaults
            return (min_args, max_args, arg_defaults)

        metadata['name'] = method.__name__
        metadata['args'] = args
        for key in metadata:
            setattr(method, key, metadata[key])
        for key in kwargs:
            setattr(method, key, kwargs[key])
        return method
    return annotate
# Each exported call is annotated (via export_to_docbook) with status,
# roles, accepts, and returns metadata for DocBook extraction.
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns=Parameter([], 'A list of supported functions'))
@export_to_api(0)
def Help():
    """Get a list of functions currently supported by the Node Manager API"""
    # One "**** name\n<doc>\n" entry per exported call, in sorted order.
    entries = ['**** ' + api_method_dict[name].__name__ + '\n' +
               api_method_dict[name].__doc__ + '\n'
               for name in sorted(api_method_dict.keys())]
    return ''.join(entries)
@export_to_docbook(roles=['self'],
                   accepts=[Parameter(str, 'A ticket returned from GetSliceTicket()')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Ticket(ticket):
    """The Node Manager periodically polls the PLC API for a list of all
    slices that are allowed to exist on the given node. Before
    actions are performed on a delegated slice (such as creation),
    a controller slice must deliver a valid slice ticket to NM.

    This ticket is the value returned by PLC's GetSliceTicket() API call."""
    try:
        data = ticket_module.verify(ticket)
        # BUG FIX: the name was previously extracted *before* checking
        # data for None, so a None result raised TypeError instead of
        # being skipped.
        if data is not None:
            name = data['slivers'][0]['name']
            deliver_ticket(data)
            logger.log('api_calls: Ticket delivered for %s' % name)
            Create(database.db.get(name))
    except Exception as err:
        # Surface any failure to the XMLRPC caller as fault 102.
        raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
                   accepts=[Parameter(str, 'A ticket returned from GetSlivers()')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def AdminTicket(ticket):
    """Admin interface to create slivers based on ticket returned by GetSlivers()."""
    try:
        data, = xmlrpclib.loads(ticket)[0]
        # BUG FIX: extract the name only after the None check (mirrors
        # the fix in Ticket()); previously a None payload raised
        # TypeError before the check could run.
        if data is not None:
            name = data['slivers'][0]['name']
            deliver_ticket(data)
            logger.log('api_calls: Admin Ticket delivered for %s' % name)
            Create(database.db.get(name))
    except Exception as err:
        raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns={'sliver_name' : Parameter(int, 'the associated xid')})
@export_to_api(0)
def GetXIDs():
    """Return a dictionary mapping Slice names to XIDs"""
    # pwent[0] = login name, pwent[2] = uid, pwent[6] = login shell;
    # only accounts using the sliver shell are slivers.
    xids = {}
    for pwent in pwd.getpwall():
        if pwent[6] == sliver_vs.Sliver_VS.SHELL:
            xids[pwent[0]] = pwent[2]
    return xids
@export_to_docbook(roles=['self'],
                   accepts=[],
                   returns={ 'sliver_name' : Parameter(str, 'the associated SSHKey')})
@export_to_api(0)
def GetSSHKeys():
    """Return a dictionary mapping slice names to SSH keys"""
    # Only records that carry a 'keys' entry contribute.
    return dict((rec['name'], rec['keys'])
                for rec in database.db.itervalues() if 'keys' in rec)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Create(sliver_name):
    """Create a non-PLC-instantiated sliver"""
    rec = sliver_name
    if rec['instantiation'] == 'delegated':
        accounts.get(rec['name']).ensure_created(rec)
        logger.log("api_calls: Create %s"%rec['name'])
    else:
        # Call form of raise works on Python 2 and 3; the old
        # "raise Exception, msg" statement is Python-2-only syntax.
        raise Exception("Only PLC can create non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Destroy(sliver_name):
    """Destroy a non-PLC-instantiated sliver"""
    rec = sliver_name
    if rec['instantiation'] == 'delegated':
        accounts.get(rec['name']).ensure_destroyed()
        logger.log("api_calls: Destroy %s"%rec['name'])
    else:
        # Call form of raise works on Python 2 and 3; the old
        # "raise Exception, msg" statement is Python-2-only syntax.
        raise Exception("Only PLC can destroy non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Start(sliver_name):
    """Configure and start sliver."""
    record = sliver_name
    account = accounts.get(record['name'])
    account.start(record)
    logger.log("api_calls: Start %s"%record['name'])
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def Stop(sliver_name):
    """Kill all processes belonging to the specified sliver"""
    record = sliver_name
    account = accounts.get(record['name'])
    account.stop()
    logger.log("api_calls: Stop %s"%record['name'])
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(1)
def ReCreate(sliver_name):
    """Stop, Destroy, Create, Start sliver in order to reinstall it."""
    record = sliver_name
    name = record['name']
    accounts.get(name).stop()
    accounts.get(name).ensure_created(record)
    accounts.get(name).start(record)
    logger.log("api_calls: ReCreate %s"%name)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=Parameter(dict, "A resource specification"))
@export_to_api(1)
def GetEffectiveRSpec(sliver_name):
    """Return the RSpec allocated to the specified sliver, including loans"""
    record = sliver_name
    effective = record.get('_rspec', {})
    # Hand back a copy so callers cannot mutate the stored rspec.
    return effective.copy()
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns={"resource name" : Parameter(int, "amount")})
@export_to_api(1)
def GetRSpec(sliver_name):
    """Return the RSpec allocated to the specified sliver, excluding loans"""
    record = sliver_name
    allocated = record.get('rspec', {})
    # Hand back a copy so callers cannot mutate the stored rspec.
    return allocated.copy()
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[Parameter(str, 'A sliver/slice name.')],
                   returns=[Mixed(Parameter(str, 'recipient slice name'),
                                  Parameter(str, 'resource name'),
                                  Parameter(int, 'resource amount'))])
@export_to_api(1)
def GetLoans(sliver_name):
    """Return the list of loans made by the specified sliver"""
    record = sliver_name
    # Shallow copy so callers cannot mutate the stored loan list.
    return list(record.get('_loans', []))
def validate_loans(loans):
    """Check that *loans* is a list of valid loan specifications.

    A valid loan is a (recipient slice name, resource name, amount)
    list or tuple whose resource is loanable and whose amount is a
    non-negative int.
    """
    def validate_loan(loan):
        return (type(loan)==list or type(loan)==tuple) and len(loan)==3 \
            and type(loan[0])==str and type(loan[1])==str \
            and loan[1] in database.LOANABLE_RESOURCES \
            and type(loan[2])==int and loan[2]>=0
    # BUG FIX: the original called validate_loan(load) -- an undefined
    # name -- so any non-empty list raised NameError instead of being
    # validated.
    return type(loans)==list and all(validate_loan(loan) for loan in loans)
@export_to_docbook(roles=['nm-controller', 'self'],
                   accepts=[ Parameter(str, 'A sliver/slice name.'),
                             [Mixed(Parameter(str, 'recipient slice name'),
                                    Parameter(str, 'resource name'),
                                    Parameter(int, 'resource amount'))], ],
                   returns=Parameter(int, '1 if successful'))
@export_to_api(2)
def SetLoans(sliver_name, loans):
    """Overwrite the list of loans made by the specified sliver.

    Also, note that SetLoans will not throw an error if more capacity than the
    RSpec is handed out, but it will silently discard those loans that would
    put it over capacity. This behavior may be replaced with error semantics
    in the future. As well, there is currently no asynchronous notification
    of loss of resources."""
    record = sliver_name
    if not validate_loans(loans):
        raise xmlrpclib.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
    record['_loans'] = loans
    database.db.sync()
@export_to_docbook(roles=['nm-controller', 'self'],
                   returns=Parameter(dict, 'Record dictionary'))
@export_to_api(0)
def GetRecord(sliver_name):
    """Return sliver record"""
    # The record argument is handed back exactly as received.
    return sliver_name
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflow_v2.types import audio_config
from google.cloud.dialogflow_v2.types import fulfillment as gcd_fulfillment
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# proto-plus module registration: binds each message class named in
# ``manifest`` to the given protobuf package.
__protobuf__ = proto.module(
    package="google.cloud.dialogflow.v2",
    manifest={
        "Environment",
        "TextToSpeechSettings",
        "ListEnvironmentsRequest",
        "ListEnvironmentsResponse",
        "GetEnvironmentRequest",
        "CreateEnvironmentRequest",
        "UpdateEnvironmentRequest",
        "DeleteEnvironmentRequest",
        "GetEnvironmentHistoryRequest",
        "EnvironmentHistory",
    },
)
class Environment(proto.Message):
    r"""You can create multiple versions of your agent and publish them to
    separate environments.

    When you edit an agent, you are editing the draft agent. At any
    point, you can save the draft agent as an agent version, which is an
    immutable snapshot of your agent.

    When you save the draft agent, it is published to the default
    environment. When you create agent versions, you can publish them to
    custom environments. You can create a variety of custom environments
    for:

    -  testing
    -  development
    -  production
    -  etc.

    For more information, see the `versions and environments
    guide <https://cloud.google.com/dialogflow/docs/agents-versions>`__.

    Attributes:
        name (str):
            Output only. The unique identifier of this agent
            environment. Supported formats:

            -  ``projects/<Project ID>/agent/environments/<Environment ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``

            The environment ID for the default environment is ``-``.
        description (str):
            Optional. The developer-provided description
            for this environment. The maximum length is 500
            characters. If exceeded, the request is
            rejected.
        agent_version (str):
            Optional. The agent version loaded into this environment.
            Supported formats:

            -  ``projects/<Project ID>/agent/versions/<Version ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/versions/<Version ID>``
        state (google.cloud.dialogflow_v2.types.Environment.State):
            Output only. The state of this environment.
            This field is read-only, i.e., it cannot be set
            by create and update methods.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The last update time of this
            environment. This field is read-only, i.e., it
            cannot be set by create and update methods.
        text_to_speech_settings (google.cloud.dialogflow_v2.types.TextToSpeechSettings):
            Optional. Text to speech settings for this
            environment.
        fulfillment (google.cloud.dialogflow_v2.types.Fulfillment):
            Optional. The fulfillment settings to use for
            this environment.
    """

    class State(proto.Enum):
        r"""Represents an environment state. When an environment is pointed to a
        new agent version, the environment is temporarily set to the
        ``LOADING`` state. During that time, the environment keeps on
        serving the previous version of the agent. After the new agent
        version is done loading, the environment is set back to the
        ``RUNNING`` state.
        """
        STATE_UNSPECIFIED = 0
        STOPPED = 1
        LOADING = 2
        RUNNING = 3

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    # NOTE(review): tag 6 is skipped here -- presumably reserved in the
    # .proto schema; confirm before reusing it.
    name = proto.Field(proto.STRING, number=1,)
    description = proto.Field(proto.STRING, number=2,)
    agent_version = proto.Field(proto.STRING, number=3,)
    state = proto.Field(proto.ENUM, number=4, enum=State,)
    update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
    text_to_speech_settings = proto.Field(
        proto.MESSAGE, number=7, message="TextToSpeechSettings",
    )
    fulfillment = proto.Field(
        proto.MESSAGE, number=8, message=gcd_fulfillment.Fulfillment,
    )
class TextToSpeechSettings(proto.Message):
    r"""Instructs the speech synthesizer on how to generate the
    output audio content.

    Attributes:
        enable_text_to_speech (bool):
            Optional. Indicates whether text to speech is
            enabled. Even when this field is false, other
            settings in this proto are still retained.
        output_audio_encoding (google.cloud.dialogflow_v2.types.OutputAudioEncoding):
            Required. Audio encoding of the synthesized
            audio content.
        sample_rate_hertz (int):
            Optional. The synthesis sample rate (in
            hertz) for this audio. If not provided, then the
            synthesizer will use the default sample rate
            based on the audio encoding. If this is
            different from the voice's natural sample rate,
            then the synthesizer will honor this request by
            converting to the desired sample rate (which
            might result in worse audio quality).
        synthesize_speech_configs (Sequence[google.cloud.dialogflow_v2.types.TextToSpeechSettings.SynthesizeSpeechConfigsEntry]):
            Optional. Configuration of how speech should
            be synthesized, mapping from language
            (https://cloud.google.com/dialogflow/docs/reference/language)
            to SynthesizeSpeechConfig.
    """

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    enable_text_to_speech = proto.Field(proto.BOOL, number=1,)
    output_audio_encoding = proto.Field(
        proto.ENUM, number=2, enum=audio_config.OutputAudioEncoding,
    )
    sample_rate_hertz = proto.Field(proto.INT32, number=3,)
    # Map from language code string to per-language synthesis config.
    synthesize_speech_configs = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=4,
        message=audio_config.SynthesizeSpeechConfig,
    )
class ListEnvironmentsRequest(proto.Message):
    r"""The request message for
    [Environments.ListEnvironments][google.cloud.dialogflow.v2.Environments.ListEnvironments].

    Attributes:
        parent (str):
            Required. The agent to list all environments from. Format:

            -  ``projects/<Project ID>/agent``
            -  ``projects/<Project ID>/locations/<Location ID>/agent``
        page_size (int):
            Optional. The maximum number of items to
            return in a single page. By default 100 and at
            most 1000.
        page_token (str):
            Optional. The next_page_token value returned from a previous
            list request.
    """

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListEnvironmentsResponse(proto.Message):
    r"""The response message for
    [Environments.ListEnvironments][google.cloud.dialogflow.v2.Environments.ListEnvironments].

    Attributes:
        environments (Sequence[google.cloud.dialogflow_v2.types.Environment]):
            The list of agent environments. There will be a maximum
            number of items returned based on the page_size field in the
            request.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
    """

    @property
    def raw_page(self):
        # NOTE(review): presumably consumed by the client-side pager,
        # which treats this response as its own page -- confirm.
        return self

    environments = proto.RepeatedField(proto.MESSAGE, number=1, message="Environment",)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetEnvironmentRequest(proto.Message):
    r"""The request message for
    [Environments.GetEnvironment][google.cloud.dialogflow.v2.Environments.GetEnvironment].

    Attributes:
        name (str):
            Required. The name of the environment. Supported formats:

            -  ``projects/<Project ID>/agent/environments/<Environment ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``

            The environment ID for the default environment is ``-``.
    """

    # proto-plus field descriptor; ``number=`` is the protobuf wire tag.
    name = proto.Field(proto.STRING, number=1,)
class CreateEnvironmentRequest(proto.Message):
    r"""The request message for
    [Environments.CreateEnvironment][google.cloud.dialogflow.v2.Environments.CreateEnvironment].

    Attributes:
        parent (str):
            Required. The agent to create an environment for. Supported
            formats:

            -  ``projects/<Project ID>/agent``
            -  ``projects/<Project ID>/locations/<Location ID>/agent``
        environment (google.cloud.dialogflow_v2.types.Environment):
            Required. The environment to create.
        environment_id (str):
            Required. The unique id of the new
            environment.
    """

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    parent = proto.Field(proto.STRING, number=1,)
    environment = proto.Field(proto.MESSAGE, number=2, message="Environment",)
    environment_id = proto.Field(proto.STRING, number=3,)
class UpdateEnvironmentRequest(proto.Message):
    r"""The request message for
    [Environments.UpdateEnvironment][google.cloud.dialogflow.v2.Environments.UpdateEnvironment].

    Attributes:
        environment (google.cloud.dialogflow_v2.types.Environment):
            Required. The environment to update.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The mask to control which fields
            get updated.
        allow_load_to_draft_and_discard_changes (bool):
            Optional. This field is used to prevent accidental overwrite
            of the default environment, which is an operation that
            cannot be undone. To confirm that the caller desires this
            overwrite, this field must be explicitly set to true when
            updating the default environment (environment ID = ``-``).
    """

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    environment = proto.Field(proto.MESSAGE, number=1, message="Environment",)
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )
    allow_load_to_draft_and_discard_changes = proto.Field(proto.BOOL, number=3,)
class DeleteEnvironmentRequest(proto.Message):
    r"""The request message for
    [Environments.DeleteEnvironment][google.cloud.dialogflow.v2.Environments.DeleteEnvironment].

    Attributes:
        name (str):
            Required. The name of the environment to delete. / Format:

            -  ``projects/<Project ID>/agent/environments/<Environment ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``

            The environment ID for the default environment is ``-``.
    """

    # proto-plus field descriptor; ``number=`` is the protobuf wire tag.
    name = proto.Field(proto.STRING, number=1,)
class GetEnvironmentHistoryRequest(proto.Message):
    r"""The request message for
    [Environments.GetEnvironmentHistory][google.cloud.dialogflow.v2.Environments.GetEnvironmentHistory].

    Attributes:
        parent (str):
            Required. The name of the environment to retrieve history
            for. Supported formats:

            -  ``projects/<Project ID>/agent/environments/<Environment ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``

            The environment ID for the default environment is ``-``.
        page_size (int):
            Optional. The maximum number of items to
            return in a single page. By default 100 and at
            most 1000.
        page_token (str):
            Optional. The next_page_token value returned from a previous
            list request.
    """

    # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class EnvironmentHistory(proto.Message):
    r"""The response message for
    [Environments.GetEnvironmentHistory][google.cloud.dialogflow.v2.Environments.GetEnvironmentHistory].

    Attributes:
        parent (str):
            Output only. The name of the environment this history is
            for. Supported formats:

            -  ``projects/<Project ID>/agent/environments/<Environment ID>``
            -  ``projects/<Project ID>/locations/<Location ID>/agent/environments/<Environment ID>``

            The environment ID for the default environment is ``-``.
        entries (Sequence[google.cloud.dialogflow_v2.types.EnvironmentHistory.Entry]):
            Output only. The list of agent environments. There will be a
            maximum number of items returned based on the page_size
            field in the request.
        next_page_token (str):
            Output only. Token to retrieve the next page
            of results, or empty if there are no more
            results in the list.
    """

    class Entry(proto.Message):
        r"""Represents an environment history entry.

        Attributes:
            agent_version (str):
                The agent version loaded into this
                environment history entry.
            description (str):
                The developer-provided description for this
                environment history entry.
            create_time (google.protobuf.timestamp_pb2.Timestamp):
                The creation time of this environment history
                entry.
        """

        # proto-plus field descriptors; ``number=`` is the protobuf wire tag.
        agent_version = proto.Field(proto.STRING, number=1,)
        description = proto.Field(proto.STRING, number=2,)
        create_time = proto.Field(
            proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
        )

    @property
    def raw_page(self):
        # NOTE(review): presumably consumed by the client-side pager,
        # which treats this response as its own page -- confirm.
        return self

    parent = proto.Field(proto.STRING, number=1,)
    entries = proto.RepeatedField(proto.MESSAGE, number=2, message=Entry,)
    next_page_token = proto.Field(proto.STRING, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroup_lbmonitor_binding(base_resource) :
""" Binding class showing the lbmonitor that can be bound to servicegroup.
"""
    def __init__(self) :
        # Backing fields for the binding's writable options
        # (servicegroupname, port, state, hashid, serverid,
        # customserverid, weight, monitor_name, passive) and the
        # monitor attributes reported back by the NetScaler
        # (monweight, monstate).
        self._monitor_name = ""
        self._monweight = 0
        self._monstate = ""
        self._weight = 0
        self._passive = False
        self._servicegroupname = ""
        self._port = 0
        self._customserverid = ""
        self._serverid = 0
        self._state = ""
        self._hashid = 0
        # Count of bindings returned by a count query (generated SDK
        # convention uses a triple-underscore name).
        self.___count = 0
@property
def servicegroupname(self) :
ur"""Name of the service group.<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
ur"""Name of the service group.<br/>Minimum length = 1
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def port(self) :
ur"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@port.setter
def port(self, port) :
ur"""Port number of the service. Each service must have a unique port number.<br/>Range 1 - 65535
"""
try :
self._port = port
except Exception as e:
raise e
@property
def state(self) :
ur"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
ur"""Initial state of the service after binding.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def hashid(self) :
ur"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1.
"""
try :
return self._hashid
except Exception as e:
raise e
@hashid.setter
def hashid(self, hashid) :
ur"""Unique numerical identifier used by hash based load balancing methods to identify a service.<br/>Minimum value = 1
"""
try :
self._hashid = hashid
except Exception as e:
raise e
@property
def serverid(self) :
ur"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
return self._serverid
except Exception as e:
raise e
@serverid.setter
def serverid(self, serverid) :
ur"""The identifier for the service. This is used when the persistency type is set to Custom Server ID.
"""
try :
self._serverid = serverid
except Exception as e:
raise e
@property
def customserverid(self) :
    # NITRO-generated accessor pair; try/except merely re-raises.
    ur"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None".
    """
    try :
        return self._customserverid
    except Exception as e:
        raise e
@customserverid.setter
def customserverid(self, customserverid) :
    ur"""Unique service identifier. Used when the persistency type for the virtual server is set to Custom Server ID.<br/>Default value: "None"
    """
    try :
        self._customserverid = customserverid
    except Exception as e:
        raise e
@property
def weight(self) :
    # NITRO-generated accessor pair; try/except merely re-raises.
    ur"""Weight to assign to the servers in the service group. Specifies the capacity of the servers relative to the other servers in the load balancing configuration. The higher the weight, the higher the percentage of requests sent to the service.<br/>Minimum value = 1<br/>Maximum value = 100.
    """
    try :
        return self._weight
    except Exception as e:
        raise e
@weight.setter
def weight(self, weight) :
    ur"""Weight to assign to the servers in the service group. Specifies the capacity of the servers relative to the other servers in the load balancing configuration. The higher the weight, the higher the percentage of requests sent to the service.<br/>Minimum value = 1<br/>Maximum value = 100
    """
    try :
        self._weight = weight
    except Exception as e:
        raise e
@property
def monitor_name(self) :
    # NITRO-generated accessor pair; try/except merely re-raises.
    ur"""Monitor name.
    """
    try :
        return self._monitor_name
    except Exception as e:
        raise e
@monitor_name.setter
def monitor_name(self, monitor_name) :
    ur"""Monitor name.
    """
    try :
        self._monitor_name = monitor_name
    except Exception as e:
        raise e
@property
def passive(self) :
    # NITRO-generated accessor pair; try/except merely re-raises.
    ur"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
    """
    try :
        return self._passive
    except Exception as e:
        raise e
@passive.setter
def passive(self, passive) :
    ur"""Indicates if load monitor is passive. A passive load monitor does not remove service from LB decision when threshold is breached.
    """
    try :
        self._passive = passive
    except Exception as e:
        raise e
@property
def monstate(self) :
    # Accessor pair for the monitor state; see the nested Monstate class
    # below for the allowed values.
    ur"""Monitor state.<br/>Possible values = ENABLED, DISABLED.
    """
    try :
        return self._monstate
    except Exception as e:
        raise e
@monstate.setter
def monstate(self, monstate) :
    ur"""Monitor state.<br/>Possible values = ENABLED, DISABLED
    """
    try :
        self._monstate = monstate
    except Exception as e:
        raise e
@property
def monweight(self) :
    # Read-only: the NetScaler reports this value; there is no setter.
    ur"""weight of the monitor that is bound to servicegroup.
    """
    try :
        return self._monweight
    except Exception as e:
        raise e
def _get_nitro_response(self, service, response) :
    ur""" converts nitro response into object and returns the object array in case of get request.
    """
    try :
        # Deserialize the raw payload into the typed response wrapper.
        result = service.payload_formatter.string_to_resource(servicegroup_lbmonitor_binding_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # Error code 444 means the NITRO session expired: clear it so the
            # next request re-authenticates.
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                # Only severity == "ERROR" is fatal; warnings fall through.
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # No severity reported but a non-zero code: treat as an error.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.servicegroup_lbmonitor_binding
    except Exception as e :
        raise e
def _get_object_name(self) :
    ur""" Returns the value of object identifier argument
    """
    try :
        # The service group name is the identifying key for this binding.
        if self.servicegroupname is not None :
            return str(self.servicegroupname)
        return None
    except Exception as e :
        raise e
@classmethod
def add(cls, client, resource) :
    # Bind a monitor (or a list of monitors) to a service group.
    # A single resource goes through update_resource; a list is sent as one
    # bulk request.  Note: an empty list falls through and returns None.
    try :
        if resource and type(resource) is not list :
            updateresource = servicegroup_lbmonitor_binding()
            updateresource.servicegroupname = resource.servicegroupname
            updateresource.port = resource.port
            updateresource.monitor_name = resource.monitor_name
            updateresource.monstate = resource.monstate
            updateresource.passive = resource.passive
            updateresource.weight = resource.weight
            updateresource.customserverid = resource.customserverid
            updateresource.serverid = resource.serverid
            updateresource.state = resource.state
            updateresource.hashid = resource.hashid
            return updateresource.update_resource(client)
        else :
            if resource and len(resource) > 0 :
                updateresources = [servicegroup_lbmonitor_binding() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    updateresources[i].servicegroupname = resource[i].servicegroupname
                    updateresources[i].port = resource[i].port
                    updateresources[i].monitor_name = resource[i].monitor_name
                    updateresources[i].monstate = resource[i].monstate
                    updateresources[i].passive = resource[i].passive
                    updateresources[i].weight = resource[i].weight
                    updateresources[i].customserverid = resource[i].customserverid
                    updateresources[i].serverid = resource[i].serverid
                    updateresources[i].state = resource[i].state
                    updateresources[i].hashid = resource[i].hashid
                return cls.update_bulk_request(client, updateresources)
    except Exception as e :
        raise e
@classmethod
def delete(cls, client, resource) :
    # Unbind a monitor (or list of monitors) from a service group.  Only the
    # identifying fields (servicegroupname, port, monitor_name) are copied.
    # Note: an empty list falls through and returns None.
    try :
        if resource and type(resource) is not list :
            deleteresource = servicegroup_lbmonitor_binding()
            deleteresource.servicegroupname = resource.servicegroupname
            deleteresource.port = resource.port
            deleteresource.monitor_name = resource.monitor_name
            return deleteresource.delete_resource(client)
        else :
            if resource and len(resource) > 0 :
                deleteresources = [servicegroup_lbmonitor_binding() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    deleteresources[i].servicegroupname = resource[i].servicegroupname
                    deleteresources[i].port = resource[i].port
                    deleteresources[i].monitor_name = resource[i].monitor_name
                return cls.delete_bulk_request(client, deleteresources)
    except Exception as e :
        raise e
@classmethod
def get(cls, service, servicegroupname) :
    ur""" Use this API to fetch servicegroup_lbmonitor_binding resources.
    """
    try :
        # Fetch every monitor binding of the named service group.
        obj = servicegroup_lbmonitor_binding()
        obj.servicegroupname = servicegroupname
        response = obj.get_resources(service)
        return response
    except Exception as e:
        raise e
@classmethod
def get_filtered(cls, service, servicegroupname, filter_) :
    ur""" Use this API to fetch filtered set of servicegroup_lbmonitor_binding resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        obj = servicegroup_lbmonitor_binding()
        obj.servicegroupname = servicegroupname
        # The filter is passed through to the NITRO API unmodified.
        option_ = options()
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        return response
    except Exception as e:
        raise e
@classmethod
def count(cls, service, servicegroupname) :
    ur""" Use this API to count servicegroup_lbmonitor_binding resources configued on NetScaler.
    """
    try :
        obj = servicegroup_lbmonitor_binding()
        obj.servicegroupname = servicegroupname
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response :
            # The count arrives as a pseudo-attribute named '___count' on the
            # first element of the response.
            return response[0].__dict__['___count']
        return 0
    except Exception as e:
        raise e
@classmethod
def count_filtered(cls, service, servicegroupname, filter_) :
    ur""" Use this API to count the filtered set of servicegroup_lbmonitor_binding resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        obj = servicegroup_lbmonitor_binding()
        obj.servicegroupname = servicegroupname
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response :
            # Count is reported as the '___count' pseudo-attribute.
            return response[0].__dict__['___count']
        return 0
    except Exception as e:
        raise e
class Monstate:
    # Allowed values for the monstate attribute.
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class State:
    # Allowed values for the state attribute.
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class servicegroup_lbmonitor_binding_response(base_response) :
    """Typed NITRO response wrapper carrying servicegroup_lbmonitor_binding
    resources plus the standard error/session metadata.

    length - number of empty binding objects to pre-allocate in the list.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # The original assigned an empty list here that was immediately
        # overwritten by this pre-allocation; the redundant first assignment
        # has been removed.
        self.servicegroup_lbmonitor_binding = [servicegroup_lbmonitor_binding() for _ in range(length)]
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.api_schema.response.compute.v2_1 import flavors as schema
from tempest.lib.api_schema.response.compute.v2_1 import flavors_access \
as schema_access
from tempest.lib.api_schema.response.compute.v2_1 import flavors_extra_specs \
as schema_extra_specs
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class FlavorsClient(base_compute_client.BaseComputeClient):
    """REST client for the compute (Nova) v2.1 flavors API.

    Covers flavor CRUD, flavor extra specs, and flavor access management.
    Every method returns a rest_client.ResponseBody whose JSON payload has
    been validated against the matching v2.1 response schema.
    """

    def list_flavors(self, detail=False, **params):
        """Lists flavors.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#listFlavors
        """
        url = 'flavors'
        _schema = schema.list_flavors
        if detail:
            # The /detail variant returns full flavor bodies and therefore
            # validates against a different schema.
            url += '/detail'
            _schema = schema.list_flavors_details
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(_schema, resp, body)
        return rest_client.ResponseBody(resp, body)

    def show_flavor(self, flavor_id):
        """Shows details for a flavor.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#showFlavor
        """
        resp, body = self.get("flavors/%s" % flavor_id)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return rest_client.ResponseBody(resp, body)

    def create_flavor(self, **kwargs):
        """Create a new flavor or instance type.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#createFlavor
        """
        # Translate the convenience keys into the wire-format attribute
        # names.  Membership (not truthiness) is tested so falsy-but-valid
        # values such as ephemeral=0 or is_public=False are still renamed;
        # the old truthiness check silently left them untranslated.
        if 'ephemeral' in kwargs:
            kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
        if 'is_public' in kwargs:
            kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
        post_body = json.dumps({'flavor': kwargs})
        resp, body = self.post('flavors', post_body)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return rest_client.ResponseBody(resp, body)

    def delete_flavor(self, flavor_id):
        """Delete the given flavor.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#deleteFlavor
        """
        resp, body = self.delete("flavors/{0}".format(flavor_id))
        self.validate_response(schema.delete_flavor, resp, body)
        return rest_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        # Did not use show_flavor(id) for verification as it gives
        # 200 ok even for deleted id. LP #981263
        # we can remove the loop here and use get by ID when bug gets sortedout
        flavors = self.list_flavors(detail=True)['flavors']
        for flavor in flavors:
            if flavor['id'] == id:
                return False
        return True

    @property
    def resource_type(self):
        """Return the primary type of resource this client works with."""
        return 'flavor'

    def set_flavor_extra_spec(self, flavor_id, **kwargs):
        """Set extra Specs to the mentioned flavor.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#createFlavorExtraSpec
        """
        post_body = json.dumps({'extra_specs': kwargs})
        resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def list_flavor_extra_specs(self, flavor_id):
        """Get extra Specs details of the mentioned flavor.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#listFlavorExtraSpecs
        """
        resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def show_flavor_extra_spec(self, flavor_id, key):
        """Get extra Specs key-value of the mentioned flavor and key.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#showFlavorExtraSpec
        """
        resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
                              key))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return rest_client.ResponseBody(resp, body)

    def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
        """Update specified extra Specs of the mentioned flavor and key.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#updateFlavorExtraSpec
        """
        resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                              (flavor_id, key), json.dumps(kwargs))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return rest_client.ResponseBody(resp, body)

    def unset_flavor_extra_spec(self, flavor_id, key):  # noqa
        # NOTE: This noqa is for passing T111 check and we cannot rename
        # to keep backwards compatibility.
        """Unset extra Specs from the mentioned flavor.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#deleteFlavorExtraSpec
        """
        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
                                 (flavor_id, key))
        self.validate_response(schema.unset_flavor_extra_specs, resp, body)
        return rest_client.ResponseBody(resp, body)

    def list_flavor_access(self, flavor_id):
        """Get flavor access information given the flavor id.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#listFlavorAccess
        """
        resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def add_flavor_access(self, flavor_id, tenant_id):
        """Add flavor access for the specified tenant.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#addFlavorAccess
        """
        post_body = {
            'addTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)

    def remove_flavor_access(self, flavor_id, tenant_id):
        """Remove flavor access from the specified tenant.
        For a full list of available parameters, please refer to the official
        API reference:
        http://developer.openstack.org/api-ref-compute-v2.1.html#removeFlavorAccess
        """
        post_body = {
            'removeTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return rest_client.ResponseBody(resp, body)
| |
"""
Classes that represent alignments between multiple sequences.
"""
import random
import weakref
from bx.misc.readlengths import read_lengths_file
# DNA reverse complement table: maps each base to its complement,
# preserving case.  The commented-out block below is the legacy 256-char
# translate table this maketrans call replaced; kept for reference.
# DNA_COMP = " - " \
#     " TVGH CD M KN YSA BWXR tvgh cd m kn ysa bwxr " \
#     " " \
#     " "
DNA_COMP = str.maketrans("ACGTacgt", "TGCAtgca")
class Alignment:
    """A multiple-sequence alignment: a scored, attributed list of Components
    that all share the same (gapped) text length.
    """

    def __init__(self, score=0, attributes=None, species_to_lengths=None):
        # species_to_lengths is needed only for file formats that don't provide
        # chromosome lengths; it maps each species name to one of these:
        #   - the name of a file that contains a list of chromosome length pairs
        #   - a dict mapping chromosome names to their length
        #   - a single length value (useful when we just have one sequence and no chromosomes)
        # internally a file name is replaced by a dict, but only on an "as
        # needed" basis
        if attributes is None:
            attributes = {}
        self.score = score
        # Number of alignment columns; fixed by the first component added.
        self.text_size = 0
        self.attributes = attributes
        if species_to_lengths is None:
            self.species_to_lengths = {}
        else:
            self.species_to_lengths = species_to_lengths
        self.components = []

    def add_component(self, component):
        """Append *component*, back-linking it weakly to this alignment and
        checking that every component text has the same length."""
        component._alignment = weakref.ref(self)
        self.components.append(component)
        if component.text is not None:
            if self.text_size == 0:
                self.text_size = len(component.text)
            elif self.text_size != len(component.text):
                raise Exception("Components must have same text length")

    def get_score(self):
        return self.__score

    def set_score(self, score):
        # Scores read from files arrive as strings: coerce to int, then
        # float; keep the raw string if neither parse succeeds.
        if isinstance(score, str):
            try:
                score = int(score)
            except ValueError:
                try:
                    score = float(score)
                except ValueError:
                    pass
        self.__score = score

    score = property(fget=get_score, fset=set_score)

    def __str__(self):
        """Render in MAF-like form: an 'a' line followed by component lines."""
        s = "a score=" + str(self.score)
        for key in self.attributes:
            s += " {}={}".format(key, self.attributes[key])
        s += "\n"
        # Components
        for c in self.components:
            s += str(c)
            s += "\n"
        return s

    def src_size(self, src):
        """Return the length of the sequence named *src*, consulting (and
        caching) species_to_lengths; raises ValueError when unknown."""
        species, chrom = src_split(src)
        if species in self.species_to_lengths:
            chrom_to_length = self.species_to_lengths[species]
        elif chrom in self.species_to_lengths:
            chrom_to_length = self.species_to_lengths
        else:
            raise ValueError("no src_size (no length file for %s)" % species)
        if isinstance(chrom_to_length, int):  # (if it's a single length)
            return chrom_to_length
        if isinstance(chrom_to_length, str):  # (if it's a file name)
            # Lazily replace the file name with its parsed dict.
            chrom_to_length = read_lengths_file(chrom_to_length)
            self.species_to_lengths[species] = chrom_to_length
        if chrom not in chrom_to_length:
            raise ValueError(f"no src_size ({species} has no length for {chrom})")
        return chrom_to_length[chrom]

    def get_component_by_src(self, src):
        """Return the first component whose src equals *src*, or None."""
        for c in self.components:
            if c.src == src:
                return c
        return None

    def get_components_by_src(self, src):
        """Yield every component whose src equals *src*."""
        for c in self.components:
            if c.src == src:
                yield c

    def get_component_by_src_start(self, src):
        """Return the first component whose src starts with *src*, or None."""
        for c in self.components:
            if c.src.startswith(src):
                return c
        return None

    def slice(self, start, end):
        """Return a new alignment restricted to columns [start, end)."""
        new = Alignment(score=self.score, attributes=self.attributes)
        for component in self.components:
            new.components.append(component.slice(start, end))
        new.text_size = end - start
        return new

    def reverse_complement(self):
        """Return a new alignment with every component reverse-complemented."""
        new = Alignment(score=self.score, attributes=self.attributes)
        for component in self.components:
            new.components.append(component.reverse_complement())
        new.text_size = self.text_size
        return new

    def slice_by_component(self, component_index, start, end):
        """
        Return a slice of the alignment, corresponding to an coordinate interval in a specific component.

        component_index is one of
            an integer offset into the components list
            a string indicating the src of the desired component
            a component

        start and end are relative to the + strand, regardless of the component's strand.
        """
        if isinstance(component_index, int):
            ref = self.components[component_index]
        elif isinstance(component_index, str):
            ref = self.get_component_by_src(component_index)
        elif isinstance(component_index, Component):
            ref = component_index
        else:
            raise ValueError("can't figure out what to do")
        start_col = ref.coord_to_col(start)
        end_col = ref.coord_to_col(end)
        if ref.strand == '-':
            # Minus-strand columns come back reversed; swap to slice forward.
            (start_col, end_col) = (end_col, start_col)
        return self.slice(start_col, end_col)

    def column_iter(self):
        # FIXME: The empty component are not present
        # in column_iter.
        # Maybe it would be good to use - and =
        for i in range(self.text_size):
            yield [c.text[i] for c in self.components if not c.empty]

    def limit_to_species(self, species):
        """Return a new alignment keeping only components whose species
        (the src prefix before the first '.') is in *species*."""
        new = Alignment(score=self.score, attributes=self.attributes)
        new.text_size = self.text_size
        for component in self.components:
            if component.src.split('.')[0] in species:
                new.add_component(component)
        return new

    def remove_all_gap_columns(self):
        """
        Remove any columns containing only gaps from alignment components,
        text of components is modified IN PLACE.
        """
        seqs = []
        for c in self.components:
            # BUG FIX: empty components previously appended None and then
            # fell through to the try below, where list(None) raised
            # TypeError and appended a SECOND None.  That shifted seqs out
            # of step with self.components, so the write-back loop assigned
            # each later component the text of its predecessor.
            if c.empty:
                seqs.append(None)
                continue
            try:
                seqs.append(list(c.text))
            except TypeError:
                seqs.append(None)
        i = 0
        text_size = self.text_size
        while i < text_size:
            all_gap = True
            for seq in seqs:
                if seq is None:
                    continue
                if seq[i] != '-':
                    all_gap = False
            if all_gap:
                # Delete this column from every (non-empty) component.
                for seq in seqs:
                    if seq is None:
                        continue
                    del seq[i]
                text_size -= 1
            else:
                i += 1
        for i in range(len(self.components)):
            if seqs[i] is None:
                continue
            self.components[i].text = ''.join(seqs[i])
        self.text_size = text_size

    def __eq__(self, other):
        if other is None or not isinstance(other, type(self)):
            return False
        if self.score != other.score:
            return False
        if self.attributes != other.attributes:
            return False
        if len(self.components) != len(other.components):
            return False
        for c1, c2 in zip(self.components, other.components):
            if c1 != c2:
                return False
        return True

    def __ne__(self, other):
        return not(self.__eq__(other))

    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = Alignment(score=self.score, attributes=deepcopy(self.attributes), species_to_lengths=deepcopy(self.species_to_lengths))
        for component in self.components:
            new.add_component(deepcopy(component))
        return new
class Component:
    """One row of an Alignment: a sequence interval (MAF-style coordinates)
    plus its gapped alignment text.
    """
    def __init__(self, src='', start=0, size=0, strand=None, src_size=None, text=''):
        # Weak back-reference to the owning Alignment (set by add_component).
        self._alignment = None
        self.src = src
        self.start = start          # Nota Bene: start,size,strand are as they
        self.size = size            # .. appear in a MAF file-- origin-zero, end
        self.strand = strand        # .. excluded, and minus strand counts from
        self._src_size = src_size   # .. end of sequence
        self.text = text
        self.quality = None
        # Optional fields to keep track of synteny status (only makes sense
        # when the alignment is part of an ordered set)
        self.synteny_left = None
        self.synteny_right = None
        self.synteny_empty = None
        # If true, this component actually represents a non-aligning region,
        # and text is None.
        self.empty = False
        # Index maps a coordinate (distance along + strand from + start) to alignment column
        self.index = None

    def __str__(self):
        # MAF-style rendering: 'e' line for empty components, 's' otherwise,
        # optionally followed by an 'i' synteny line.
        if self.empty:
            rval = "e %s %d %d %s %d %s" % (
                self.src, self.start, self.size, self.strand, self.src_size, self.synteny_empty)
        else:
            rval = "s %s %d %d %s %d %s" % (
                self.src, self.start, self.size, self.strand, self.src_size, self.text)
        if self.synteny_left and self.synteny_right:
            rval += "\ni %s %s %d %s %d" % (
                self.src, self.synteny_left[0], self.synteny_left[1],
                self.synteny_right[0], self.synteny_right[1])
        return rval

    def get_end(self):
        # Exclusive end coordinate on the component's own strand.
        return self.start + self.size
    end = property(fget=get_end)

    def get_src_size(self):
        # Lazily resolved through the owning alignment's length tables.
        if self._src_size is None:
            if self._alignment is None:
                raise Exception("component has no src_size")
            self._src_size = self._alignment().src_size(self.src)
        return self._src_size

    def set_src_size(self, src_size):
        self._src_size = src_size
    src_size = property(fget=get_src_size, fset=set_src_size)

    def get_forward_strand_start(self):
        # Convert a minus-strand start into + strand coordinates.
        if self.strand == '-':
            return self.src_size - self.end
        else:
            return self.start
    forward_strand_start = property(fget=get_forward_strand_start)

    def get_forward_strand_end(self):
        if self.strand == '-':
            return self.src_size - self.start
        else:
            return self.end
    forward_strand_end = property(fget=get_forward_strand_end)

    def reverse_complement(self):
        """Return a new Component on the opposite strand with complemented,
        reversed text; synteny left/right annotations swap sides."""
        start = self.src_size - self.end
        if self.strand == "+":
            strand = "-"
        else:
            strand = "+"
        if self.empty:
            text = None
        else:
            comp = [ch for ch in self.text.translate(DNA_COMP)]
            comp.reverse()
            text = "".join(comp)
        new = Component(self.src, start, self.size, strand, self._src_size, text)
        if self.empty:
            new.empty = True
            new.synteny_empty = self.synteny_empty
        # Propagate supplementary info
        if self.synteny_left:
            new.synteny_right = self.synteny_left
        if self.synteny_right:
            new.synteny_left = self.synteny_right
        new._alignment = self._alignment
        return new

    def slice(self, start, end):
        """Return a new Component restricted to alignment columns
        [start, end); start/size are recomputed from the gap content."""
        new = Component(src=self.src, start=self.start, strand=self.strand, src_size=self._src_size)
        new._alignment = self._alignment
        if self.empty:
            # Empty components have no text to slice; copy as-is.
            new.empty = True
            new.size = self.size
            new.text = None
            new.synteny_empty = self.synteny_empty
            return new
        new.text = self.text[start:end]
        # for i in range( 0, start ):
        #     if self.text[i] != '-': new.start += 1
        # for c in new.text:
        #     if c != '-': new.size += 1
        # Equivalent to the commented-out loops above, done with counts.
        new.start += start - self.text.count('-', 0, start)
        new.size = len(new.text) - new.text.count('-')
        # FIXME: This annotation probably means nothing after slicing if
        # one of the ends changes. In general the 'i' rows of a MAF only
        # make sense in context (relative to the previous and next alignments
        # in a stream, slicing breaks that).
        # LD: Indeed, I think it is wrong to keep them. Let's keep the info
        # only when the boundaries are kept.
        if self.synteny_left:
            if start == 0:
                new.synteny_left = self.synteny_left
        if self.synteny_right:
            if end == len(self.text):
                new.synteny_right = self.synteny_right
        return new

    def slice_by_coord(self, start, end):
        """
        Return the slice of the component corresponding to a coordinate interval.
        start and end are relative to the + strand, regardless of the component's strand.
        """
        start_col = self.coord_to_col(start)
        end_col = self.coord_to_col(end)
        if (self.strand == '-'):
            (start_col, end_col) = (end_col, start_col)
        return self.slice(start_col, end_col)

    def coord_to_col(self, pos):
        """
        Return the alignment column index corresponding to coordinate pos.
        pos is relative to the + strand, regardless of the component's strand.
        """
        if self.empty:
            raise ValueError("There is no column index. It is empty.")
        start, end = self.get_forward_strand_start(), self.get_forward_strand_end()
        if pos < start or pos > end:
            raise ValueError("Range error: %d not in %d-%d" % (pos, start, end))
        # The index is built once and cached on the component.
        if not self.index:
            self.index = list()
        if self.strand == '-':
            # nota bene: for - strand self.index[x] maps to one column
            # higher than is actually associated with the position; thus
            # when slice_by_component() and slice_by_coord() flip the ends,
            # the resulting slice is correct
            for x in range(len(self.text)-1, -1, -1):
                if not self.text[x] == '-':
                    self.index.append(x + 1)
                self.index.append(0)
        else:
            for x in range(len(self.text)):
                if not self.text[x] == '-':
                    self.index.append(x)
                self.index.append(len(self.text))
        x = None
        try:
            x = self.index[pos - start]
        except IndexError:
            raise Exception("Error in index.")
        return x

    def __eq__(self, other):
        if other is None or not isinstance(other, type(self)):
            return False
        return (self.src == other.src
                and self.start == other.start
                and self.size == other.size
                and self.strand == other.strand
                and self._src_size == other._src_size
                and self.text == other.text
                and self.synteny_left == other.synteny_left
                and self.synteny_right == other.synteny_right
                and self.synteny_empty == other.synteny_empty
                and self.empty == other.empty)

    def __ne__(self, other):
        return not(self.__eq__(other))

    def __deepcopy__(self, memo):
        # NOTE(review): quality, synteny fields, _alignment and index are
        # shared (not copied) — looks intentional for this codebase; verify.
        new = Component(src=self.src, start=self.start, size=self.size, strand=self.strand, src_size=self._src_size, text=self.text)
        new._alignment = self._alignment
        new.quality = self.quality
        new.synteny_left = self.synteny_left
        new.synteny_right = self.synteny_right
        new.synteny_empty = self.synteny_empty
        new.empty = self.empty
        new.index = self.index
        return new
def get_reader(format, infile, species_to_lengths=None):
    """Instantiate an alignment reader for *format* ("maf", "axt" or "lav")."""
    import bx.align.axt
    import bx.align.lav
    import bx.align.maf
    if format == "maf":
        return bx.align.maf.Reader(infile, species_to_lengths)
    if format == "axt":
        return bx.align.axt.Reader(infile, species_to_lengths)
    if format == "lav":
        return bx.align.lav.Reader(infile)
    raise ValueError("Unknown alignment format %s" % format)
def get_writer(format, outfile, attributes=None):
    """Instantiate an alignment writer for *format* ("maf", "axt" or "lav")."""
    import bx.align.axt
    import bx.align.lav
    import bx.align.maf
    if attributes is None:
        attributes = {}
    if format == "maf":
        return bx.align.maf.Writer(outfile, attributes)
    if format == "axt":
        return bx.align.axt.Writer(outfile, attributes)
    if format == "lav":
        return bx.align.lav.Writer(outfile, attributes)
    raise ValueError("Unknown alignment format %s" % format)
def get_indexed(format, filename, index_filename=None, keep_open=False, species_to_lengths=None):
    """Instantiate an indexed alignment accessor for *format* ("maf" or "axt";
    "lav" is recognized but not implemented)."""
    import bx.align.axt
    import bx.align.lav
    import bx.align.maf
    if format == "maf":
        return bx.align.maf.Indexed(filename, index_filename, keep_open, species_to_lengths)
    if format == "axt":
        return bx.align.axt.Indexed(filename, index_filename, keep_open, species_to_lengths)
    if format == "lav":
        raise Exception("LAV support for Indexed has not been implemented")
    raise ValueError("Unknown alignment format %s" % format)
def shuffle_columns(a):
    """Randomize the columns of an alignment"""
    order = list(range(a.text_size))
    random.shuffle(order)
    for comp in a.components:
        if comp.empty:
            continue
        comp.text = "".join(comp.text[j] for j in order)
def src_split(src):  # splits src into species,chrom
    """Split a src name on its LAST dot into (species, chrom); species is
    None when there is no dot."""
    species, sep, chrom = src.rpartition(".")
    if not sep:
        return None, src
    return species, chrom
def src_merge(species, chrom, contig=None):  # creates src (inverse of src_split)
    """Join species and chrom back into a src name; an optional contig is
    appended in square brackets."""
    src = chrom if species is None else "%s.%s" % (species, chrom)
    if contig is not None:
        src += "[%s]" % contig
    return src
# ---- Read C extension if available ---------------------------------------
try:
    from ._core import coord_to_col
except ImportError:
    def coord_to_col(start, text, pos):
        """Pure-Python fallback: map sequence coordinate *pos* to an
        alignment column of *text*, counting non-gap characters from
        coordinate *start*."""
        col = 0
        cur = start
        while cur < pos:
            if text[col] != '-':
                cur += 1
            col += 1
        return col
| |
#!/usr/bin/env python3
# coding: utf-8
"""
Script to manage communication between analog gear and main computer running
Cockos Reaper.
MIDI & OSC & Sensors In - OSC Out
"""
__author__ = "Arnaud E"
__version__ = "0.5"
__license__ = "MIT"
import serial
from time import sleep
import datetime
import threading
import argparse
import RPi.GPIO as GPIO
# https://github.com/attwad/python-osc
from pythonosc import dispatcher
from pythonosc import osc_server, udp_client
import Adafruit_Trellis
#So the official Trellis lib is not up to date, I host a fixed one
# https://github.com/ArnaudE/Adafruit_Trellis_Python_Fixed
# Global variables to track the state of Reaper's transport, shared between
# the MIDI-reading and button-polling threads.
# NOTE(review): nothing in this view ever sets isRecording to True — verify
# it is updated elsewhere (e.g. an OSC callback) or the record branch in
# polling_buttons can never see it set.
isPlaying = False
isRecording = False
def calcTempo(ticks, client):
    """Compute the tempo in BPM from accumulated MIDI-clock tick intervals.

    ticks  - list of inter-tick delays in microseconds; the caller accumulates
             one beat's worth (24 ticks), so their sum is the beat duration.
    client - OSC client; currently unused because sending the raw tempo to
             Reaper this way proved unreliable (kept for compatibility).

    The list is cleared IN PLACE so the caller can keep accumulating into the
    same object.  Returns the rounded BPM, or None when no time has been
    accumulated (previously this raised ZeroDivisionError on an empty list).
    """
    total_us = sum(ticks)
    del ticks[:]  # reset the caller's accumulator in place
    if total_us <= 0:
        return None
    bpm = round(60 / (total_us / 1000000))
    # Not a great way to do it actually
    # client.send_message("/tempo/raw", bpm)
    return bpm
def readingMidi(ser, event, client, grid):
    '''
    Get MIDI In messages and communicate with Reaper the tempo and Play / Stop
    The Play message, if reaper already in Play, make it go to the beginning of
    the loop, note the project.
    Is thread.

    ser    - open serial port carrying raw MIDI bytes
    event  - threading.Event; the loop runs while it is set
    client - OSC client used to drive Reaper
    grid   - Trellis LED grid used as a 16-step beat indicator
    '''
    message = [0, 0, 0]     # current (up to) 3-byte MIDI message
    compteur = 0            # tick counter, wraps every 24 (one beat)
    pad = 0                 # current LED pad on the 16-pad grid
    timeElapsed = [0.5]     # microsecond deltas between clock ticks
    now = datetime.datetime.now()
    lastNow = now
    channelPlayed = 0
    notePlayed = 0
    global isPlaying
    while event.is_set():
        i = 0
        while i < 3 and event.is_set():
            try:
                data = ord(ser.read(1))
            except TypeError:
                # When timed out, ord(serial) outputs a TypeError
                # Might be specific to MIDI in this way, but it is a MIDI
                # specific script
                # done _ make the loop exit without actually being considered
                message = [0, 0, 0]
                break
            if data >> 7 != 0:
                # High bit set -> this is a MIDI status byte (0x80-0xFF);
                # data bytes always have the high bit clear.
                # see https://www.midi.org/specifications/item/table-1-summary-of-midi-message
                if data == 0xf8:
                    # Tick message
                    # The clock is 24 tick per beat
                    # Actual division
                    # todo _ Need to add a controlled divider
                    # done _ get Tempo and send it to Reaper
                    now = datetime.datetime.now()
                    delay = now - lastNow
                    timeElapsed.append(delay.microseconds)
                    lastNow = now
                    if not compteur:
                        # Once per beat: recompute tempo and advance the LED.
                        calcTempo(timeElapsed, client)
                        grid.clrLED((pad - 1)%16)
                        grid.setLED(pad)
                        grid.writeDisplay()
                        # todo _ send the number of pads to the function
                        # todo _ Controller for the modulo of pads
                        pad = (pad + 1)%16
                    compteur = (compteur + 1 )%24
                elif data == 0xfc:
                    # Stop message: halt Reaper and reset the beat display.
                    client.send_message("/stop", 1.0)
                    grid.clrLED((pad - 1)%16)
                    grid.writeDisplay()
                    isPlaying = False
                    compteur = 0
                    pad = 0
                elif data == 0xfa:
                    # Play message
                    if not isPlaying:
                        client.send_message("/play", 1.0)
                        isPlaying = True
                    else:
                        # If already Playing, go back to beginning of loop
                        client.send_message("/action", 40632)
                # A status byte starts a new message: reset the write index.
                i = 0
            message[i] = data
            i += 1
            # Program Change (0xC_) messages are only 2 bytes long; pad the
            # third byte and terminate the inner loop early.
            if i == 2 and message[0] >> 4 == 12:
                message[2] = 0
                i = 3
        print("{0} - {1} - {2}".format(hex(message[0]), hex(message[1]), hex(message[2])))
        messageType = message[0] >> 4
        # The latency is super huge, it is not usable
        if messageType == 9:
            # Note On: forward to Reaper's virtual keyboard.
            channelPlayed = message[0] & 15
            notePlayed = message[1]
            client.send_message("/vkb_midi/{0}/note/{1}".format(channelPlayed, notePlayed), message[2])
        elif messageType == 15:
            # System message: re-send using the last channel/note seen.
            # NOTE(review): this reuses stale channelPlayed/notePlayed —
            # confirm this running-status handling is intended.
            client.send_message("/vkb_midi/{0}/note/{1}".format(channelPlayed, notePlayed), message[2])
    print("Exit MIDI reading")
def print_play(unused_addr, args, state):
    '''
    LED status of Reaper transport - Green LED on Play.

    OSC callback mapped to "/play" (see dispatcher.map in __main__).

    unused_addr -- OSC address pattern (unused)
    args -- extra arguments bound at dispatcher.map() time (unused)
    state -- truthy turns the LED on, falsy turns it off
    '''
    # todo _ send the LED pin to the function instead of writing it in
    # bool() collapses the former if/else into a single GPIO call.
    GPIO.output(11, bool(state))
def print_record(unused_addr, args, state):
    '''
    LED status of Reaper transport - Red LED on Record.

    OSC callback mapped to "/record" (see dispatcher.map in __main__);
    the original docstring said "on Stop", which contradicts that mapping.

    unused_addr -- OSC address pattern (unused)
    args -- extra arguments bound at dispatcher.map() time (unused)
    state -- truthy turns the LED on, falsy turns it off
    '''
    # todo _ send the LED pin to the function instead of writing it in
    # bool() collapses the former if/else into a single GPIO call.
    GPIO.output(13, bool(state))
def polling_buttons(event, client):
    '''
    Two buttons, one Play, one Record, both at the same time Stop
    Is thread

    Polls GPIO pins 15 (Play) and 16 (Record) every 0.1 s and sends the
    matching OSC transport message on ``client`` until ``event`` is cleared.
    '''
    # There they are
    global isPlaying
    # NOTE(review): isRecording is read below but never assigned in this
    # function -- confirm it is updated elsewhere, otherwise the elif branch
    # condition never changes.
    global isRecording
    while event.is_set():
        # todo _ send the input pins to the function instead of writing it in
        input_play = GPIO.input(15)
        input_record = GPIO.input(16)
        # Pull up, so inverse reading (a pressed button reads False)
        if not input_play and input_record:
            # Only the Play button is pressed
            if isPlaying:
                # Already playing: jump back to loop start (Reaper action 40632)
                client.send_message("/action", 40632)
            elif not isRecording or not isPlaying:
                client.send_message("/play", 1.0)
                isPlaying = True
        elif input_play and not input_record:
            # Only the Record button is pressed
            client.send_message("/record", 1.0)
        elif not input_play and not input_record:
            # Both buttons pressed at the same time: Stop
            client.send_message("/stop", 1.0)
            isPlaying = False
        # todo _ real delay and not sleep, but it's a thread so ?
        sleep(0.1)
    print("Stop polling")
if __name__ == '__main__':
    # Create the matrix, at the time of writing only one tile of 4x4
    matrix0 = Adafruit_Trellis.Adafruit_Trellis()
    trellis = Adafruit_Trellis.Adafruit_TrellisSet(matrix0)
    I2C_BUS = 1
    trellis.begin((0x70, I2C_BUS))
    # Create serial - the baud rate is actually the MIDI 31250
    # NOTE(review): the port is opened at 38400 although the comment says MIDI
    # 31250 -- presumably the UART clock is remapped elsewhere; confirm.
    ser = serial.Serial('/dev/ttyAMA0', baudrate=38400, timeout=10)
    # Use physical number of pin
    # https://fr.pinout.xyz/pinout/masse
    GPIO.setmode(GPIO.BOARD)
    # Play LED
    GPIO.setup(11, GPIO.OUT)
    GPIO.output(11, False)
    # Record LED
    GPIO.setup(13, GPIO.OUT)
    GPIO.output(13, False)
    # Play Button
    GPIO.setup(15, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    # Setup OSC receive message and callbacks
    dispatcher = dispatcher.Dispatcher()
    dispatcher.map("/play", print_play, "PLAY")
    dispatcher.map("/record", print_record, "RECORD")
    # Setup OSC listening server
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default="192.168.0.23", help="The ip to listen on")
    parser.add_argument("--port", type=int, default=9000, help="The port to listen on")
    args = parser.parse_args()
    server = osc_server.ForkingOSCUDPServer((args.ip, args.port), dispatcher)
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    print("Serving on {}".format(server.server_address))
    # Setup OSC sending client
    # NOTE(review): this second parser re-parses the same sys.argv with the
    # same --ip/--port flags, so any flag passed on the command line feeds
    # both the server and the client addresses -- confirm that is intended.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default="192.168.0.10", help="The ip to send to")
    parser.add_argument("--port", type=int, default=8000, help="The port to send on")
    args = parser.parse_args()
    client = udp_client.SimpleUDPClient(args.ip, args.port)
    # Start MIDI input thread
    e = threading.Event()
    e.set()
    thread = threading.Thread(target=readingMidi, args=(ser,e,client, trellis))
    thread.start()
    thread_polling = threading.Thread(target=polling_buttons, args=(e,client))
    thread_polling.start()
    # Idle in the main thread; Ctrl-C during sleep() triggers the shutdown path.
    while True:
        try:
            sleep(2)
        except (KeyboardInterrupt, SystemExit):
            print("\nLet's quit")
            break
    # Orderly shutdown: OSC server first, then signal the worker threads
    # (they watch e.is_set()) and join them before releasing the GPIO pins.
    server.shutdown()
    server_thread.join()
    print(".. osc")
    e.clear()
    print(".. midi")
    thread.join()
    thread_polling.join()
    GPIO.cleanup()
    print("Bye from Main")
| |
# Copyright 2011 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import httplib
import mock
from oslo.config import cfg
import six
import stubout
from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.pci import pci_stats
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
from nova.scheduler.filters import ram_filter
from nova.scheduler.filters import trusted_filter
from nova import servicegroup
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
class TestFilter(filters.BaseHostFilter):
    """Minimal BaseHostFilter subclass used as a fixture by filter tests."""
    pass
class TestBogusFilter(object):
    """Class that doesn't inherit from BaseHostFilter (negative fixture)."""
    pass
class ExtraSpecsOpsTestCase(test.NoDBTestCase):
    """Tests for the operator matching in scheduler extra_specs_ops.match()."""
    def _do_extra_specs_ops_test(self, value, req, matches):
        """Assert that extra_specs_ops.match(value, req) equals `matches`."""
        assertion = self.assertTrue if matches else self.assertFalse
        assertion(extra_specs_ops.match(value, req))
    def test_extra_specs_matches_simple(self):
        self._do_extra_specs_ops_test(
            value='1',
            req='1',
            matches=True)
    def test_extra_specs_fails_simple(self):
        self._do_extra_specs_ops_test(
            value='',
            req='1',
            matches=False)
    def test_extra_specs_fails_simple2(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='1',
            matches=False)
    def test_extra_specs_fails_simple3(self):
        self._do_extra_specs_ops_test(
            value='222',
            req='2',
            matches=False)
    def test_extra_specs_fails_with_bogus_ops(self):
        # '>' is not a recognized operator, so the spec cannot match.
        self._do_extra_specs_ops_test(
            value='4',
            req='> 2',
            matches=False)
    def test_extra_specs_matches_with_op_eq(self):
        self._do_extra_specs_ops_test(
            value='123',
            req='= 123',
            matches=True)
    def test_extra_specs_matches_with_op_eq2(self):
        # NOTE(review): 124 matching '= 123' implies '=' behaves as numeric
        # >= rather than strict equality -- that is what this expectation pins.
        self._do_extra_specs_ops_test(
            value='124',
            req='= 123',
            matches=True)
    def test_extra_specs_fails_with_op_eq(self):
        self._do_extra_specs_ops_test(
            value='34',
            req='= 234',
            matches=False)
    def test_extra_specs_fails_with_op_eq3(self):
        self._do_extra_specs_ops_test(
            value='34',
            req='=',
            matches=False)
    def test_extra_specs_matches_with_op_seq(self):
        # 's'-prefixed operators compare as strings.
        self._do_extra_specs_ops_test(
            value='123',
            req='s== 123',
            matches=True)
    def test_extra_specs_fails_with_op_seq(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s== 123',
            matches=False)
    def test_extra_specs_matches_with_op_sneq(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s!= 123',
            matches=True)
    def test_extra_specs_fails_with_op_sneq(self):
        self._do_extra_specs_ops_test(
            value='123',
            req='s!= 123',
            matches=False)
    def test_extra_specs_fails_with_op_sge(self):
        self._do_extra_specs_ops_test(
            value='1000',
            req='s>= 234',
            matches=False)
    def test_extra_specs_fails_with_op_sle(self):
        self._do_extra_specs_ops_test(
            value='1234',
            req='s<= 1000',
            matches=False)
    def test_extra_specs_fails_with_op_sl(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='s< 12',
            matches=False)
    def test_extra_specs_fails_with_op_sg(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='s> 2',
            matches=False)
    def test_extra_specs_matches_with_op_in(self):
        # '<in>' is a substring test on the value.
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 11',
            matches=True)
    def test_extra_specs_matches_with_op_in2(self):
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 12311321',
            matches=True)
    def test_extra_specs_matches_with_op_in3(self):
        self._do_extra_specs_ops_test(
            value='12311321',
            req='<in> 12311321 <in>',
            matches=True)
    def test_extra_specs_fails_with_op_in(self):
        self._do_extra_specs_ops_test(
            value='12310321',
            req='<in> 11',
            matches=False)
    def test_extra_specs_fails_with_op_in2(self):
        self._do_extra_specs_ops_test(
            value='12310321',
            req='<in> 11 <in>',
            matches=False)
    def test_extra_specs_matches_with_op_or(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='<or> 11 <or> 12',
            matches=True)
    def test_extra_specs_matches_with_op_or2(self):
        self._do_extra_specs_ops_test(
            value='12',
            req='<or> 11 <or> 12 <or>',
            matches=True)
    def test_extra_specs_fails_with_op_or(self):
        self._do_extra_specs_ops_test(
            value='13',
            req='<or> 11 <or> 12',
            matches=False)
    def test_extra_specs_fails_with_op_or2(self):
        self._do_extra_specs_ops_test(
            value='13',
            req='<or> 11 <or> 12 <or>',
            matches=False)
    def test_extra_specs_matches_with_op_le(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='<= 10',
            matches=True)
    def test_extra_specs_fails_with_op_le(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='<= 2',
            matches=False)
    def test_extra_specs_matches_with_op_ge(self):
        self._do_extra_specs_ops_test(
            value='3',
            req='>= 1',
            matches=True)
    def test_extra_specs_fails_with_op_ge(self):
        self._do_extra_specs_ops_test(
            value='2',
            req='>= 3',
            matches=False)
class HostFiltersTestCase(test.NoDBTestCase):
"""Test case for host filters."""
# FIXME(sirp): These tests still require DB access until we can separate
# the testing of the DB API code from the host-filter code.
USES_DB = True
    def fake_oat_request(self, *args, **kwargs):
        """Stubs out the response from OAT service."""
        # Record that attestation was attempted and which hosts were queried,
        # then return the canned payload prepared in setUp().
        self.oat_attested = True
        self.oat_hosts = args[2]
        return httplib.OK, self.oat_data
    def setUp(self):
        """Stub OAT attestation and build a name -> class map of all filters."""
        super(HostFiltersTestCase, self).setUp()
        self.oat_data = ''
        self.oat_attested = False
        self.stubs = stubout.StubOutForTesting()
        # Route TrustedFilter's attestation HTTP call to fake_oat_request.
        self.stubs.Set(trusted_filter.AttestationService, '_request',
                       self.fake_oat_request)
        self.context = context.RequestContext('fake', 'fake')
        # Sample query reused by json-filter style tests.
        self.json_query = jsonutils.dumps(
            ['and', ['>=', '$free_ram_mb', 1024],
                    ['>=', '$free_disk_mb', 200 * 1024]])
        filter_handler = filters.HostFilterHandler()
        classes = filter_handler.get_matching_classes(
            ['nova.scheduler.filters.all_filters'])
        self.class_map = {}
        for cls in classes:
            self.class_map[cls.__name__] = cls
    def test_all_filters(self):
        """Filter discovery finds at least the well-known filter classes."""
        # Double check at least a couple of known filters exist
        self.assertIn('AllHostsFilter', self.class_map)
        self.assertIn('ComputeFilter', self.class_map)
    def test_all_host_filter(self):
        """AllHostsFilter passes any host unconditionally."""
        filt_cls = self.class_map['AllHostsFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(self, service):
return ret_value
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
    def test_affinity_different_filter_passes(self):
        """DifferentHostFilter passes when the hinted instance is elsewhere."""
        filt_cls = self.class_map['DifferentHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host2'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'different_host': [instance_uuid], }}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_different_filter_no_list_passes(self):
        """The 'different_host' hint also accepts a bare uuid (not a list)."""
        filt_cls = self.class_map['DifferentHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host2'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'different_host': instance_uuid}}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_different_filter_fails(self):
        """DifferentHostFilter rejects the host the hinted instance is on."""
        filt_cls = self.class_map['DifferentHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host1'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'different_host': [instance_uuid], }}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_affinity_different_filter_handles_none(self):
        """A missing scheduler_hints dict is treated as no constraint."""
        filt_cls = self.class_map['DifferentHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': None}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_different_filter_handles_deleted_instance(self):
        """A hint referencing a deleted instance no longer constrains hosts."""
        filt_cls = self.class_map['DifferentHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host1'})
        instance_uuid = instance.uuid
        db.instance_destroy(self.context, instance_uuid)
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'different_host': [instance_uuid], }}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_same_filter_no_list_passes(self):
        """The 'same_host' hint also accepts a bare uuid (not a list)."""
        filt_cls = self.class_map['SameHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host1'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'same_host': instance_uuid}}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_same_filter_passes(self):
        """SameHostFilter passes the host the hinted instance lives on."""
        filt_cls = self.class_map['SameHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host1'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'same_host': [instance_uuid], }}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_same_filter_fails(self):
        """SameHostFilter rejects hosts the hinted instance is not on."""
        filt_cls = self.class_map['SameHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host2'})
        instance_uuid = instance.uuid
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'same_host': [instance_uuid], }}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_affinity_same_filter_handles_none(self):
        """A missing scheduler_hints dict is treated as no constraint."""
        filt_cls = self.class_map['SameHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': None}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_same_filter_handles_deleted_instance(self):
        """A deleted hinted instance means no host can satisfy same_host."""
        filt_cls = self.class_map['SameHostFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        instance = fakes.FakeInstance(context=self.context,
                                      params={'host': 'host1'})
        instance_uuid = instance.uuid
        db.instance_destroy(self.context, instance_uuid)
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'same_host': [instance_uuid], }}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_affinity_simple_cidr_filter_passes(self):
        """Host IP inside the hinted /24 of the affinity IP passes."""
        filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'cidr': '/24',
                                 'build_near_host_ip': affinity_ip}}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_affinity_simple_cidr_filter_fails(self):
        """A /32 mask requires an exact IP match, so a different IP fails."""
        filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': {
                                 'cidr': '/32',
                                 'build_near_host_ip': affinity_ip}}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_affinity_simple_cidr_filter_handles_none(self):
        """A missing scheduler_hints dict is treated as no constraint."""
        filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
        host = fakes.FakeHostState('host1', 'node1', {})
        # affinity_ip is built but unused once hints are None; kept as-is.
        affinity_ip = CONF.my_ip.split('.')[0:3]
        affinity_ip.append('100')
        affinity_ip = str.join('.', affinity_ip)
        filter_properties = {'context': self.context.elevated(),
                             'scheduler_hints': None}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_compute_filter_passes(self):
        """ComputeFilter passes an enabled host with a live service."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_type_filter(self):
        """TypeAffinityFilter only allows hosts homogeneous in flavor id."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['TypeAffinityFilter']()
        filter_properties = {'context': self.context,
                             'instance_type': {'id': 1}}
        filter2_properties = {'context': self.context,
                              'instance_type': {'id': 2}}
        service = {'disabled': False}
        host = fakes.FakeHostState('fake_host', 'fake_node',
                {'service': service})
        # True since empty
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # FakeInstance registers an instance of type 1 on fake_host (side
        # effect only; the return value is intentionally unused).
        fakes.FakeInstance(context=self.context,
                           params={'host': 'fake_host', 'instance_type_id': 1})
        # True since same type
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since different type
        self.assertFalse(filt_cls.host_passes(host, filter2_properties))
        # False since node not homogeneous
        fakes.FakeInstance(context=self.context,
                           params={'host': 'fake_host', 'instance_type_id': 2})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_aggregate_type_filter(self):
        """AggregateTypeAffinityFilter matches flavor name against aggregate
        metadata."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateTypeAffinityFilter']()
        filter_properties = {'context': self.context,
                             'instance_type': {'name': 'fake1'}}
        filter2_properties = {'context': self.context,
                              'instance_type': {'name': 'fake2'}}
        service = {'disabled': False}
        host = fakes.FakeHostState('fake_host', 'fake_node',
                {'service': service})
        # True since no aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # True since type matches aggregate, metadata
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['fake_host'], metadata={'instance_type': 'fake1'})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # False since type 'fake2' does not match the aggregate metadata
        self.assertFalse(filt_cls.host_passes(host, filter2_properties))
    def test_ram_filter_fails_on_memory(self):
        """RamFilter fails when free RAM is 1 MB short of the request."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        ram_filter.RamFilter.ram_allocation_ratio = 1.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_ram_filter_passes(self):
        """RamFilter passes when free RAM exactly covers the request."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        ram_filter.RamFilter.ram_allocation_ratio = 1.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_ram_filter_oversubscribe(self):
        """With ratio 2.0 a negative free_ram host still passes and the
        memory limit is set to total * ratio."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['RamFilter']()
        ram_filter.RamFilter.ram_allocation_ratio = 2.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
    def test_aggregate_ram_filter_value_error(self):
        """An unparseable aggregate ratio falls back to the config flag."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
                 'service': service})
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': 'XXX'})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
    def test_aggregate_ram_filter_default_value(self):
        """Aggregate metadata overrides the default ram_allocation_ratio."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        # False: fallback to default flag w/o aggregates
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        self._create_aggregate_with_host(name='fake_aggregate',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '2.0'})
        # True: use ratio from aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
    def test_aggregate_ram_filter_conflict_values(self):
        """With conflicting aggregate ratios the minimum (1.5) wins."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['AggregateRamFilter']()
        self.flags(ram_allocation_ratio=1.0)
        filter_properties = {'context': self.context,
                             'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
                 'service': service})
        self._create_aggregate_with_host(name='fake_aggregate1',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '1.5'})
        self._create_aggregate_with_host(name='fake_aggregate2',
                hosts=['host1'],
                metadata={'ram_allocation_ratio': '2.0'})
        # use the minimum ratio from aggregates
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
    def test_disk_filter_passes(self):
        """DiskFilter passes when root+ephemeral+swap fits in free disk."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'root_gb': 1,
            'ephemeral_gb': 1, 'swap': 512}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_disk_filter_fails(self):
        """DiskFilter fails when the requested disk exceeds free disk."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {'instance_type': {'root_gb': 10,
            'ephemeral_gb': 1, 'swap': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_disk_filter_oversubscribe(self):
        """Ratio 10.0: limit is 12*10=120 GB; 119 GB requested still fits."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=10.0)
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 18, 'swap': 1024}}
        service = {'disabled': False}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                 'service': service})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])
    def test_disk_filter_oversubscribe_fail(self):
        """One extra ephemeral GB (120 GB total) exceeds the 119 GB room."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['DiskFilter']()
        self.flags(disk_allocation_ratio=10.0)
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 19, 'swap': 1024}}
        service = {'disabled': False}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
                {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
                 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def _test_compute_filter_fails_on_service_disabled(self,
                                                       reason=None):
        """ComputeFilter must reject a disabled service, with or without a
        disabled_reason recorded."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': True}
        if reason:
            service['disabled_reason'] = reason
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_compute_filter_fails_on_service_disabled_no_reason(self):
        """Disabled service with no reason recorded still fails."""
        self._test_compute_filter_fails_on_service_disabled()
    def test_compute_filter_fails_on_service_disabled(self):
        """Disabled service with a reason recorded fails."""
        self._test_compute_filter_fails_on_service_disabled(reason='Test')
    def test_compute_filter_fails_on_service_down(self):
        """An enabled but dead (not 'up') service fails ComputeFilter."""
        self._stub_service_is_up(False)
        filt_cls = self.class_map['ComputeFilter']()
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = fakes.FakeHostState('host1', 'node1',
                {'free_ram_mb': 1024, 'service': service})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_image_properties_filter_passes_same_inst_props_and_version(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
img_props = {'properties': {'_architecture': 'x86_64',
'hypervisor_type': 'kvm',
'vm_mode': 'hvm',
'hypervisor_version_requires': '>=6.0,<6.2'
}}
filter_properties = {'request_spec': {'image': img_props}}
hypervisor_version = utils.convert_version_to_int('6.0.0')
capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
'hypervisor_version': hypervisor_version}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_fails_different_inst_props(self):
        """arm/qemu image props fail against an x86_64/kvm host."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'arm',
                                    'hypervisor_type': 'qemu',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_fails_different_hyper_version(self):
        """A '>=6.2' requirement fails against a 6.0.0 hypervisor."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.2'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_passes_partial_inst_props(self):
        """A subset of image props (arch + vm_mode) still matches the host."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_fails_partial_inst_props(self):
        """Partial props fail when vm_mode (hvm vs xen) does not match."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances': [('x86_64', 'xen', 'xen')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_passes_without_inst_props(self):
        """No image in the request spec means no constraint: host passes."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        filter_properties = {'request_spec': {}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_fails_without_host_props(self):
        """A host with no supported_instances fails a props-carrying image."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_passes_without_hyper_version(self):
        """A version requirement is waived when the host reports no
        hypervisor_version."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances': [('x86_64', 'kvm', 'hvm')]}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
        """A raw hypervisor_version of 5000 fails the '>=6.0' requirement."""
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'architecture': 'x86_64',
                                    'hypervisor_type': 'kvm',
                                    'vm_mode': 'hvm',
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': 5000}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_pv_mode_compat(self):
        # if an old image has 'pv' for a vm_mode it should be treated as xen
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'vm_mode': 'pv'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        # Host only supports vm_mode 'xen', so passing proves the 'pv' alias.
        capabilities = {'supported_instances': [('x86_64', 'xapi', 'xen')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def test_image_properties_filter_hvm_mode_compat(self):
        # if an old image has 'hv' for a vm_mode it should be treated as hvm
        # (the host below only supports 'hvm', so the pass proves the alias;
        # the previous comment said "xen", contradicting the assertion)
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ImagePropertiesFilter']()
        img_props = {'properties': {'vm_mode': 'hv'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances': [('x86_64', 'kvm', 'hvm')],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
    def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
        """In real OpenStack runtime environment,compute capabilities
        value may be number, so we should use number to do unit test.

        ecaps  -- capability dict merged into the fake host state
        especs -- flavor extra_specs the filter must match against
        passes -- expected host_passes() result
        """
        self._stub_service_is_up(True)
        filt_cls = self.class_map['ComputeCapabilitiesFilter']()
        capabilities = {}
        capabilities.update(ecaps)
        service = {'disabled': False}
        filter_properties = {'instance_type': {'memory_mb': 1024,
                                               'extra_specs': especs}}
        host_state = {'free_ram_mb': 1024, 'service': service}
        host_state.update(capabilities)
        host = fakes.FakeHostState('host1', 'node1', host_state)
        assertion = self.assertTrue if passes else self.assertFalse
        assertion(filt_cls.host_passes(host, filter_properties))
    def test_compute_filter_pass_cpu_info_as_text_type(self):
        """A JSON cpu_info string is parsed so scoped keys can match."""
        cpu_info = """ { "vendor": "Intel", "model": "core2duo",
        "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
        {"cores": 1, "threads":1, "sockets": 1}} """
        cpu_info = six.text_type(cpu_info)
        self._do_test_compute_filter_extra_specs(
            ecaps={'cpu_info': cpu_info},
            especs={'capabilities:cpu_info:vendor': 'Intel'},
            passes=True)
    def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
        """A non-JSON cpu_info string cannot satisfy a scoped match."""
        cpu_info = "cpu_info"
        cpu_info = six.text_type(cpu_info)
        self._do_test_compute_filter_extra_specs(
            ecaps={'cpu_info': cpu_info},
            especs={'capabilities:cpu_info:vendor': 'Intel'},
            passes=False)
def test_compute_filter_passes_extra_specs_simple(self):
    """Un-scoped specs match numeric values in the host stats."""
    especs = {'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'}
    self._do_test_compute_filter_extra_specs(
        ecaps={'stats': {'opt1': 1, 'opt2': 2}},
        especs=especs,
        passes=True)
def test_compute_filter_fails_extra_specs_simple(self):
    """A mismatched spec value ('222' vs 2) rejects the host."""
    especs = {'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'}
    self._do_test_compute_filter_extra_specs(
        ecaps={'stats': {'opt1': 1, 'opt2': 2}},
        especs=especs,
        passes=False)
def test_compute_filter_pass_extra_specs_simple_with_scope(self):
    """'capabilities:'-scoped specs apply to this filter and match."""
    especs = {'capabilities:opt1': '1',
              'trust:trusted_host': 'true'}
    self._do_test_compute_filter_extra_specs(
        ecaps={'stats': {'opt1': 1, 'opt2': 2}},
        especs=especs,
        passes=True)
def test_compute_filter_pass_extra_specs_same_as_scope(self):
    """A spec key literally named 'capabilities' still matches."""
    # Make sure this works even if the key is the same as the scope.
    self._do_test_compute_filter_extra_specs(
        ecaps={'capabilities': 1},
        especs={'capabilities': '1'},
        passes=True)
def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
    """Specs with a foreign scope are ignored, so the host passes."""
    especs = {'wrong_scope:opt1': '1',
              'trust:trusted_host': 'true'}
    self._do_test_compute_filter_extra_specs(
        ecaps={'opt1': 1, 'opt2': 2},
        especs=especs,
        passes=True)
def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
    """Nested capability dicts are reachable via ':'-separated keys."""
    especs = {'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
              'trust:trusted_host': 'true'}
    self._do_test_compute_filter_extra_specs(
        ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
        especs=especs,
        passes=True)
def test_aggregate_filter_passes_no_extra_specs(self):
    """Without extra_specs the aggregate filter accepts any host."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateInstanceExtraSpecsFilter']()
    host = fakes.FakeHostState('host1', 'node1', {'opt1': 1, 'opt2': 2})
    props = {'context': self.context,
             'instance_type': {'memory_mb': 1024}}
    self.assertTrue(filt.host_passes(host, props))
def _create_aggregate_with_host(self, name='fake_aggregate',
                                metadata=None,
                                hosts=None):
    """Create an aggregate in 'fake_avail_zone' and add hosts to it.

    :param name: aggregate name
    :param metadata: optional aggregate metadata; an availability_zone
                     entry is always added on top of it
    :param hosts: hosts to add to the aggregate (default: ['host1'])
    :returns: the aggregate row created by db.aggregate_create

    Fixes: the old signature used a mutable default ``hosts=['host1']``
    (shared across calls) and mutated the caller's ``metadata`` dict in
    place; both are avoided here while keeping the same behavior.
    """
    if hosts is None:
        hosts = ['host1']
    values = {'name': name}
    if metadata:
        # Copy so the caller's dict is not modified.
        metadata = dict(metadata, availability_zone='fake_avail_zone')
    else:
        metadata = {'availability_zone': 'fake_avail_zone'}
    result = db.aggregate_create(self.context.elevated(), values, metadata)
    for host in hosts:
        db.aggregate_host_add(self.context.elevated(), result['id'], host)
    return result
def _do_test_aggregate_filter_extra_specs(self, emeta, especs, passes):
    """Run AggregateInstanceExtraSpecsFilter against aggregate metadata."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateInstanceExtraSpecsFilter']()
    self._create_aggregate_with_host(name='fake2', metadata=emeta)
    props = {'context': self.context,
             'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
    host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024})
    expect = self.assertTrue if passes else self.assertFalse
    expect(filt.host_passes(host, props))
def test_aggregate_filter_fails_extra_specs_deleted_host(self):
    """Specs satisfied only by a since-removed aggregate must fail."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateInstanceExtraSpecsFilter']()
    self._create_aggregate_with_host(metadata={'opt1': '1'})
    agg2 = self._create_aggregate_with_host(name='fake2',
                                            metadata={'opt2': '2'})
    extra_specs = {'opt1': 's== 1', 'opt2': 's== 2',
                   'trust:trusted_host': 'true'}
    props = {'context': self.context, 'instance_type':
             {'memory_mb': 1024, 'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {'free_ram_mb': 1024})
    # Drop host1 from the aggregate providing opt2; match must now fail.
    db.aggregate_host_delete(self.context.elevated(), agg2['id'], 'host1')
    self.assertFalse(filt.host_passes(host, props))
def test_aggregate_filter_passes_extra_specs_simple(self):
    """Un-scoped and properly scoped specs both match; foreign scopes
    are ignored."""
    especs = {
        # Un-scoped extra spec
        'opt1': '1',
        # Scoped extra spec that applies to this filter
        'aggregate_instance_extra_specs:opt2': '2',
        # Scoped extra spec that does not apply to this filter
        'trust:trusted_host': 'true',
    }
    self._do_test_aggregate_filter_extra_specs(
        emeta={'opt1': '1', 'opt2': '2'}, especs=especs, passes=True)
def test_aggregate_filter_passes_with_key_same_as_scope(self):
    """A spec key equal to the filter's own scope name still matches."""
    especs = {
        # Un-scoped extra spec, make sure we don't blow up if it
        # happens to match our scope.
        'aggregate_instance_extra_specs': '1',
    }
    self._do_test_aggregate_filter_extra_specs(
        emeta={'aggregate_instance_extra_specs': '1'},
        especs=especs, passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self):
    """A mismatched spec value rejects the host."""
    especs = {'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'}
    self._do_test_aggregate_filter_extra_specs(
        emeta={'opt1': '1', 'opt2': '2'},
        especs=especs,
        passes=False)
def _do_test_isolated_hosts(self, host_in_list, image_in_list,
                            set_flags=True,
                            restrict_isolated_hosts_to_isolated_images=True):
    """Run IsolatedHostsFilter for the requested host/image combination."""
    if set_flags:
        self.flags(isolated_images=['isolated_image'],
                   isolated_hosts=['isolated_host'],
                   restrict_isolated_hosts_to_isolated_images=
                   restrict_isolated_hosts_to_isolated_images)
    image_ref = 'isolated_image' if image_in_list else 'free_image'
    filter_properties = {
        'request_spec': {
            'instance_properties': {'image_ref': image_ref}
        }
    }
    host_name = 'isolated_host' if host_in_list else 'free_host'
    host = fakes.FakeHostState(host_name, 'node', {})
    return self.class_map['IsolatedHostsFilter']().host_passes(
        host, filter_properties)
def test_isolated_hosts_fails_isolated_on_non_isolated(self):
    """An isolated image may not land on a non-isolated host."""
    result = self._do_test_isolated_hosts(False, True)
    self.assertFalse(result)
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
    """A non-isolated image may not land on an isolated host."""
    result = self._do_test_isolated_hosts(True, False)
    self.assertFalse(result)
def test_isolated_hosts_passes_isolated_on_isolated(self):
    """An isolated image on an isolated host is allowed."""
    result = self._do_test_isolated_hosts(True, True)
    self.assertTrue(result)
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
    """A non-isolated image on a non-isolated host is allowed."""
    result = self._do_test_isolated_hosts(False, False)
    self.assertTrue(result)
def test_isolated_hosts_no_config(self):
    """With neither isolated hosts nor images configured (the default),
    the filter does not filter at all."""
    for host_in, image_in in ((False, True), (True, False),
                              (True, True), (False, False)):
        self.assertTrue(
            self._do_test_isolated_hosts(host_in, image_in, False))
def test_isolated_hosts_no_hosts_config(self):
    """With only images configured, just listed images are restricted."""
    self.flags(isolated_images=['isolated_image'])
    cases = [(False, True, False),
             (True, False, True),
             (True, True, False),
             (False, False, True)]
    for host_in, image_in, expected in cases:
        check = self.assertTrue if expected else self.assertFalse
        check(self._do_test_isolated_hosts(host_in, image_in, False))
def test_isolated_hosts_no_images_config(self):
    """With only hosts configured, just the isolated hosts filter."""
    self.flags(isolated_hosts=['isolated_host'])
    cases = [(False, True, True),
             (True, False, False),
             (True, True, False),
             (False, False, True)]
    for host_in, image_in, expected in cases:
        check = self.assertTrue if expected else self.assertFalse
        check(self._do_test_isolated_hosts(host_in, image_in, False))
def test_isolated_hosts_less_restrictive(self):
    """Without the restriction flag, isolated hosts may run any image,
    but isolated images still require isolated hosts."""
    cases = [(True, False, True),    # isolated host, free image
             (True, True, True),     # isolated host, isolated image
             (False, False, True),   # free host, free image
             (False, True, False)]   # free host, isolated image
    for host_in, image_in, expected in cases:
        check = self.assertTrue if expected else self.assertFalse
        check(self._do_test_isolated_hosts(host_in, image_in, True, False))
def test_json_filter_passes(self):
    """A host meeting the RAM and disk demands of the query passes."""
    filt = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024})
    self.assertTrue(filt.host_passes(host, props))
def test_json_filter_passes_with_no_query(self):
    """Without a query hint the filter passes even a starved host."""
    filt = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 0,
                                'free_disk_mb': 0})
    self.assertTrue(filt.host_passes(host, props))
def test_json_filter_fails_on_memory(self):
    """One MB short of the queried RAM rejects the host."""
    filt = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 1023,
                                'free_disk_mb': 200 * 1024})
    self.assertFalse(filt.host_passes(host, props))
def test_json_filter_fails_on_disk(self):
    """One MB short of the queried disk rejects the host."""
    filt = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': (200 * 1024) - 1})
    self.assertFalse(filt.host_passes(host, props))
def test_json_filter_fails_on_service_disabled(self):
    """The query can also inspect service state and reject on it."""
    query = jsonutils.dumps(
        ['and',
         ['>=', '$free_ram_mb', 1024],
         ['>=', '$free_disk_mb', 200 * 1024],
         ['not', '$service.disabled']])
    filt = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'local_gb': 200},
             'scheduler_hints': {'query': query}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024})
    self.assertFalse(filt.host_passes(host, props))
def test_json_filter_happy_day(self):
    """Exercise a compound query: capabilities.enabled AND
    opt1 == 'match' AND (RAM/disk both low OR both high)."""
    # Test json filter more thoroughly.
    filt_cls = self.class_map['JsonFilter']()
    raw = ['and',
           '$capabilities.enabled',
           ['=', '$capabilities.opt1', 'match'],
           ['or',
            ['and',
             ['<', '$free_ram_mb', 30],
             ['<', '$free_disk_mb', 300]],
            ['and',
             ['>', '$free_ram_mb', 30],
             ['>', '$free_disk_mb', 300]]]]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    # Passes: RAM and disk both below the low bounds.
    # (No 'enabled' key here; unknown variables are ignored by the
    # filter, as test_json_filter_unknown_variable_ignored shows.)
    capabilities = {'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 10,
                                'free_disk_mb': 200,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # Passes: RAM and disk both above the high bounds.
    capabilities = {'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # Fails due to capabilities being disabled
    capabilities = {'enabled': False, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to being exact memory/disk we don't want
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 30,
                                'free_disk_mb': 300,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to memory lower but disk higher
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to capabilities 'opt1' not equal
    # NOTE(review): this service dict uses 'enabled': True while the
    # siblings use 'disabled': False; the query here rejects on opt1
    # regardless, so the value looks irrelevant — confirm intent.
    capabilities = {'enabled': True, 'opt1': 'no-match'}
    service = {'enabled': True}
    host = fakes.FakeHostState('host1', 'node1',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
    """Table-drive every JSON query operator against an empty host."""
    filt_cls = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {})
    # (operator, arguments, expected_result)
    ops_to_test = [
        ['=', [1, 1], True],
        ['=', [1, 2], False],
        ['<', [1, 2], True],
        ['<', [1, 1], False],
        ['<', [2, 1], False],
        ['>', [2, 1], True],
        ['>', [2, 2], False],
        ['>', [2, 3], False],
        ['<=', [1, 2], True],
        ['<=', [1, 1], True],
        ['<=', [2, 1], False],
        ['>=', [2, 1], True],
        ['>=', [2, 2], True],
        ['>=', [2, 3], False],
        ['in', [1, 1], True],
        ['in', [1, 1, 2, 3], True],
        ['in', [4, 1, 2, 3], False],
        ['not', [True], False],
        ['not', [False], True],
        ['or', [True, False], True],
        ['or', [False, False], False],
        ['and', [True, True], True],
        ['and', [False, False], False],
        ['and', [True, False], False],
        # Nested ((True or False) and (2 > 1)) == Passes
        ['and', [['or', True, False], ['>', 2, 1]], True]]
    for (op, args, expected) in ops_to_test:
        raw = [op] + args
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertEqual(expected,
                         filt_cls.host_passes(host, filter_properties))
    # 'not' maps over its arguments:
    # This results in [False, True, False, True] and if any are True
    # then it passes...
    raw = ['not', True, False, True, False]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # This results in [False, False, False] and if any are True
    # then it passes...which this doesn't
    raw = ['not', True, True, True]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
    """An unrecognized operator ('!=') in the query raises KeyError."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    props = {'scheduler_hints':
             {'query': jsonutils.dumps(['!=', 1, 2])}}
    self.assertRaises(KeyError,
                      filt.host_passes, host, props)
def test_json_filter_empty_filters_pass(self):
    """An empty query (list or dict) puts no constraint on the host."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    for raw in ([], {}):
        props = {'scheduler_hints': {'query': jsonutils.dumps(raw)}}
        self.assertTrue(filt.host_passes(host, props))
def test_json_filter_invalid_num_arguments_fails(self):
    """Operators lacking usable arguments make the query fail closed."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    bad_queries = (
        ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]],
        ['>', 1],
    )
    for raw in bad_queries:
        props = {'scheduler_hints': {'query': jsonutils.dumps(raw)}}
        self.assertFalse(filt.host_passes(host, props))
def test_json_filter_unknown_variable_ignored(self):
    """References to unknown host variables do not fail the query."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    for raw in (['=', '$........', 1, 1],
                ['=', '$foo', 2, 2]):
        props = {'scheduler_hints': {'query': jsonutils.dumps(raw)}}
        self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_default_passes(self):
    """Without trust extra_specs every host passes the filter."""
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    props = {'context': self.context.elevated(),
             'instance_type': {'memory_mb': 1024}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_trusted_and_trusted_passes(self):
    """A trusted node satisfies a request for a trusted host."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "trusted",
                                "vtime": timeutils.isotime()}]}
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    specs = {'trust:trusted_host': 'trusted'}
    props = {'context': self.context.elevated(),
             'instance_type': {'memory_mb': 1024,
                               'extra_specs': specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_trusted_and_untrusted_fails(self):
    """An untrusted node cannot satisfy a request for a trusted host."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "untrusted",
                                "vtime": timeutils.isotime()}]}
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    specs = {'trust:trusted_host': 'trusted'}
    props = {'context': self.context.elevated(),
             'instance_type': {'memory_mb': 1024,
                               'extra_specs': specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertFalse(filt.host_passes(host, props))
def test_trusted_filter_untrusted_and_trusted_fails(self):
    """Attestation data for a different node ('node') leaves 'node1'
    unattested, so an 'untrusted' request fails."""
    self.oat_data = {"hosts": [{"host_name": "node",
                                "trust_lvl": "trusted",
                                "vtime": timeutils.isotime()}]}
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    specs = {'trust:trusted_host': 'untrusted'}
    props = {'context': self.context.elevated(),
             'instance_type': {'memory_mb': 1024,
                               'extra_specs': specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertFalse(filt.host_passes(host, props))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
    """An untrusted node satisfies a request for an untrusted host."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "untrusted",
                                "vtime": timeutils.isotime()}]}
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    specs = {'trust:trusted_host': 'untrusted'}
    props = {'context': self.context.elevated(),
             'instance_type': {'memory_mb': 1024,
                               'extra_specs': specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_update_cache(self):
    """Attestation results are cached until the auth timeout expires."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "untrusted",
                                "vtime": timeutils.isotime()}]}
    filt_cls = self.class_map['TrustedFilter']()
    extra_specs = {'trust:trusted_host': 'untrusted'}
    filter_properties = {'context': self.context.elevated(),
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    filt_cls.host_passes(host, filter_properties)  # Fill the caches
    # A second call inside the cache window must not re-attest.
    self.oat_attested = False
    filt_cls.host_passes(host, filter_properties)
    self.assertFalse(self.oat_attested)
    # Advancing past attestation_auth_timeout forces re-attestation.
    self.oat_attested = False
    timeutils.set_time_override(timeutils.utcnow())
    timeutils.advance_time_seconds(
        CONF.trusted_computing.attestation_auth_timeout + 80)
    filt_cls.host_passes(host, filter_properties)
    self.assertTrue(self.oat_attested)
    timeutils.clear_time_override()
def test_trusted_filter_update_cache_timezone(self):
    """Cache timing honours a timezone-aware attestation timestamp."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "untrusted",
                                "vtime": "2012-09-09T05:10:40-04:00"}]}
    filt_cls = self.class_map['TrustedFilter']()
    extra_specs = {'trust:trusted_host': 'untrusted'}
    filter_properties = {'context': self.context.elevated(),
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    # 09:10:40Z is the same instant as the 05:10:40-04:00 vtime above,
    # so "now" coincides with the attestation time.
    timeutils.set_time_override(
        timeutils.normalize_time(
            timeutils.parse_isotime("2012-09-09T09:10:40Z")))
    filt_cls.host_passes(host, filter_properties)  # Fill the caches
    self.oat_attested = False
    filt_cls.host_passes(host, filter_properties)
    self.assertFalse(self.oat_attested)
    # Still inside the timeout window: no re-attestation expected.
    self.oat_attested = False
    timeutils.advance_time_seconds(
        CONF.trusted_computing.attestation_auth_timeout - 10)
    filt_cls.host_passes(host, filter_properties)
    self.assertFalse(self.oat_attested)
    timeutils.clear_time_override()
@mock.patch('nova.db.compute_node_get_all')
def test_trusted_filter_combine_hosts(self, mockdb):
    """Nodes from all compute services are attested in one request."""
    self.oat_data = {"hosts": [{"host_name": "node1",
                                "trust_lvl": "untrusted",
                                "vtime": "2012-09-09T05:10:40-04:00"}]}
    fake_compute_nodes = [
        {'hypervisor_hostname': 'node1',
         'service': {'host': 'host1'},
         },
        {'hypervisor_hostname': 'node2',
         'service': {'host': 'host2'},
         }, ]
    mockdb.return_value = fake_compute_nodes
    filt_cls = self.class_map['TrustedFilter']()
    extra_specs = {'trust:trusted_host': 'trusted'}
    filter_properties = {'context': self.context.elevated(),
                         'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'node1', {})
    filt_cls.host_passes(host, filter_properties)  # Fill the caches
    # Both nodes, from both services, should appear in the attestation.
    self.assertEqual(set(self.oat_hosts), set(['node1', 'node2']))
def test_core_filter_passes(self):
    """vcpus_used below vcpus_total * allocation ratio passes."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {'vcpus_total': 4, 'vcpus_used': 7})
    self.assertTrue(
        filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
def test_core_filter_fails_safe(self):
    """A host reporting no vcpu data passes (fail-safe behavior)."""
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(
        filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
def test_core_filter_fails(self):
    """vcpus_used at vcpus_total * allocation ratio is rejected."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {'vcpus_total': 4, 'vcpus_used': 8})
    self.assertFalse(
        filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
def test_aggregate_core_filter_value_error(self):
    """A malformed aggregate ratio falls back to the configured flag."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['AggregateCoreFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {'vcpus_total': 4, 'vcpus_used': 7})
    self._create_aggregate_with_host(
        name='fake_aggregate',
        hosts=['host1'],
        metadata={'cpu_allocation_ratio': 'XXX'})
    props = {'context': self.context, 'instance_type': {'vcpus': 1}}
    self.assertTrue(filt.host_passes(host, props))
    # Limit reflects the fallback flag ratio, not the bogus metadata.
    self.assertEqual(4 * 2, host.limits['vcpu'])
def test_aggregate_core_filter_default_value(self):
    """Aggregate metadata overrides the flag once an aggregate exists."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['AggregateCoreFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {'vcpus_total': 4, 'vcpus_used': 8})
    props = {'context': self.context, 'instance_type': {'vcpus': 1}}
    # Without an aggregate the default flag (ratio 2) rejects the host.
    self.assertFalse(filt.host_passes(host, props))
    self._create_aggregate_with_host(
        name='fake_aggregate',
        hosts=['host1'],
        metadata={'cpu_allocation_ratio': '3'})
    # With the aggregate's ratio of 3 the host now fits.
    self.assertTrue(filt.host_passes(host, props))
    self.assertEqual(4 * 3, host.limits['vcpu'])
def test_aggregate_core_filter_conflict_values(self):
    """Conflicting aggregate ratios resolve to the minimum value."""
    self.flags(cpu_allocation_ratio=1)
    filt = self.class_map['AggregateCoreFilter']()
    host = fakes.FakeHostState('host1', 'node1',
                               {'vcpus_total': 4, 'vcpus_used': 8})
    for agg_name, ratio in (('fake_aggregate1', '2'),
                            ('fake_aggregate2', '3')):
        self._create_aggregate_with_host(
            name=agg_name,
            hosts=['host1'],
            metadata={'cpu_allocation_ratio': ratio})
    props = {'context': self.context, 'instance_type': {'vcpus': 1}}
    # The minimum ratio (2) wins, so 8 used vcpus exceed the limit.
    self.assertFalse(filt.host_passes(host, props))
    self.assertEqual(4 * 2, host.limits['vcpu'])
@staticmethod
def _make_zone_request(zone, is_admin=False):
    """Build filter_properties requesting the given availability zone."""
    ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
    request_spec = {'instance_properties': {'availability_zone': zone}}
    return {'context': ctxt, 'request_spec': request_spec}
def test_availability_zone_filter_same(self):
    """A host in the requested availability zone passes."""
    filt = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState(
        'host1', 'node1',
        {'service': {'availability_zone': 'nova'}})
    self.assertTrue(
        filt.host_passes(host, self._make_zone_request('nova')))
def test_availability_zone_filter_different(self):
    """A host outside the requested availability zone is rejected."""
    filt = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState(
        'host1', 'node1',
        {'service': {'availability_zone': 'nova'}})
    self.assertFalse(
        filt.host_passes(host, self._make_zone_request('bad')))
def test_retry_filter_disabled(self):
    """Without retry info (re-scheduling disabled) nothing is rejected."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(filt.host_passes(host, {}))
def test_retry_filter_pass(self):
    """A (host, node) pair not previously attempted passes."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'nodeX', {})
    tried = [['host1', 'node1'],  # same host, different node
             ['host2', 'node2']]  # different host and node
    props = {'retry': {'num_attempts': 2, 'hosts': tried}}
    self.assertTrue(filt.host_passes(host, props))
def test_retry_filter_fail(self):
    """A (host, node) pair already attempted is rejected."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'node1', {})
    props = {'retry': {'num_attempts': 1,
                       'hosts': [['host1', 'node1']]}}
    self.assertFalse(filt.host_passes(host, props))
def test_filter_num_iops_passes(self):
    """Hosts below max_io_ops_per_host pass."""
    self.flags(max_io_ops_per_host=8)
    filt = self.class_map['IoOpsFilter']()
    host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 7})
    self.assertTrue(filt.host_passes(host, {}))
def test_filter_num_iops_fails(self):
    """Hosts at max_io_ops_per_host are rejected."""
    self.flags(max_io_ops_per_host=8)
    filt = self.class_map['IoOpsFilter']()
    host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 8})
    self.assertFalse(filt.host_passes(host, {}))
def test_filter_num_instances_passes(self):
    """Hosts below max_instances_per_host pass."""
    self.flags(max_instances_per_host=5)
    filt = self.class_map['NumInstancesFilter']()
    host = fakes.FakeHostState('host1', 'node1', {'num_instances': 4})
    self.assertTrue(filt.host_passes(host, {}))
def test_filter_num_instances_fails(self):
    """Hosts at max_instances_per_host are rejected."""
    self.flags(max_instances_per_host=5)
    filt = self.class_map['NumInstancesFilter']()
    host = fakes.FakeHostState('host1', 'node1', {'num_instances': 5})
    self.assertFalse(filt.host_passes(host, {}))
def _test_group_anti_affinity_filter_passes(self, cls, policy):
    """Anti-affinity passes with no group, a foreign policy, an empty
    host list, or a group on a different host."""
    filt = self.class_map[cls]()
    host = fakes.FakeHostState('host1', 'node1', {})
    self.assertTrue(filt.host_passes(host, {}))
    self.assertTrue(filt.host_passes(
        host, {'group_policies': ['affinity']}))
    self.assertTrue(filt.host_passes(
        host, {'group_policies': [policy], 'group_hosts': []}))
    self.assertTrue(filt.host_passes(
        host, {'group_policies': [policy], 'group_hosts': ['host2']}))
def test_group_anti_affinity_filter_passes(self):
    """New-style server-group anti-affinity filter passes correctly."""
    self._test_group_anti_affinity_filter_passes(
        cls='ServerGroupAntiAffinityFilter', policy='anti-affinity')
def test_group_anti_affinity_filter_passes_legacy(self):
    """Legacy group anti-affinity filter passes correctly."""
    self._test_group_anti_affinity_filter_passes(
        cls='GroupAntiAffinityFilter', policy='legacy')
def _test_group_anti_affinity_filter_fails(self, cls, policy):
    """Anti-affinity must reject a host the group already uses."""
    filt = self.class_map[cls]()
    host = fakes.FakeHostState('host1', 'node1', {})
    props = {'group_policies': [policy],
             'group_hosts': ['host1']}
    self.assertFalse(filt.host_passes(host, props))
def test_group_anti_affinity_filter_fails(self):
    """New-style server-group anti-affinity filter rejects correctly."""
    self._test_group_anti_affinity_filter_fails(
        cls='ServerGroupAntiAffinityFilter', policy='anti-affinity')
def test_group_anti_affinity_filter_fails_legacy(self):
    """Legacy group anti-affinity filter rejects correctly."""
    self._test_group_anti_affinity_filter_fails(
        cls='GroupAntiAffinityFilter', policy='legacy')
def _test_group_affinity_filter_passes(self, cls, policy):
    """Affinity-style filter passes with no group info, with a foreign
    policy, and when the host is in the group's host list.

    Fix: this helper previously ignored its ``cls`` and ``policy``
    arguments and always tested ServerGroupAffinityFilter with the
    'affinity' policy, so the legacy variant never exercised
    GroupAffinityFilter (compare _test_group_anti_affinity_filter_passes
    and _test_group_affinity_filter_fails, which both use their args).
    """
    filt_cls = self.class_map[cls]()
    host = fakes.FakeHostState('host1', 'node1', {})
    # No group info at all: the filter is a no-op.
    filter_properties = {}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # A non-matching policy is ignored by this filter.
    filter_properties = {'group_policies': ['anti-affinity']}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # Matching policy and the host is in the group: passes.
    filter_properties = {'group_policies': [policy],
                         'group_hosts': ['host1']}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_group_affinity_filter_passes(self):
    """New-style server-group affinity filter passes correctly."""
    self._test_group_affinity_filter_passes(
        cls='ServerGroupAffinityFilter', policy='affinity')
def test_group_affinity_filter_passes_legacy(self):
    """Legacy group affinity filter passes correctly."""
    self._test_group_affinity_filter_passes(
        cls='GroupAffinityFilter', policy='legacy')
def _test_group_affinity_filter_fails(self, cls, policy):
    """Affinity must reject a host outside the group's host list."""
    filt = self.class_map[cls]()
    host = fakes.FakeHostState('host1', 'node1', {})
    props = {'group_policies': [policy],
             'group_hosts': ['host2']}
    self.assertFalse(filt.host_passes(host, props))
def test_group_affinity_filter_fails(self):
    """New-style server-group affinity filter rejects correctly."""
    self._test_group_affinity_filter_fails(
        cls='ServerGroupAffinityFilter', policy='affinity')
def test_group_affinity_filter_fails_legacy(self):
    """Legacy group affinity filter rejects correctly."""
    self._test_group_affinity_filter_fails(
        cls='GroupAffinityFilter', policy='legacy')
def test_aggregate_multi_tenancy_isolation_with_meta_passes(self):
    """A tenant listed in the aggregate's filter_tenant_id passes."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateMultiTenancyIsolation']()
    self._create_aggregate_with_host(
        name='fake1',
        metadata={'filter_tenant_id': 'my_tenantid'},
        hosts=['host1'])
    props = {'context': self.context,
             'request_spec': {
                 'instance_properties': {'project_id': 'my_tenantid'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, props))
def test_aggregate_multi_tenancy_isolation_fails(self):
    """A tenant not listed in filter_tenant_id is rejected."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateMultiTenancyIsolation']()
    self._create_aggregate_with_host(
        name='fake1',
        metadata={'filter_tenant_id': 'other_tenantid'},
        hosts=['host1'])
    props = {'context': self.context,
             'request_spec': {
                 'instance_properties': {'project_id': 'my_tenantid'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertFalse(filt.host_passes(host, props))
def test_aggregate_multi_tenancy_isolation_no_meta_passes(self):
    """An aggregate without tenant metadata does not isolate anyone."""
    self._stub_service_is_up(True)
    filt = self.class_map['AggregateMultiTenancyIsolation']()
    self._create_aggregate_with_host(name='fake1', metadata={},
                                     hosts=['host1'])
    props = {'context': self.context,
             'request_spec': {
                 'instance_properties': {'project_id': 'my_tenantid'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, props))
def _fake_pci_support_requests(self, pci_requests):
    # Stub for PciDeviceStats.support_requests: records the requests it
    # was handed and returns the canned self.pci_request_result.
    self.pci_requests = pci_requests
    return self.pci_request_result
def test_pci_passthrough_pass(self):
    """Filter passes when pci stats report the request is supported."""
    self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
                   self._fake_pci_support_requests)
    filt = self.class_map['PciPassthroughFilter']()
    requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
    host = fakes.FakeHostState(
        'host1', 'node1',
        attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
    self.pci_request_result = True
    self.assertTrue(filt.host_passes(host, {'pci_requests': requests}))
    # The stub must have been handed the original requests.
    self.assertEqual(self.pci_requests, requests)
def test_pci_passthrough_fail(self):
    """Filter rejects when pci stats report the request unsupported."""
    self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
                   self._fake_pci_support_requests)
    filt = self.class_map['PciPassthroughFilter']()
    requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
    host = fakes.FakeHostState(
        'host1', 'node1',
        attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
    self.pci_request_result = False
    self.assertFalse(filt.host_passes(host, {'pci_requests': requests}))
    # The stub must have been handed the original requests.
    self.assertEqual(self.pci_requests, requests)
def test_pci_passthrough_no_pci_request(self):
    """Without pci_requests the filter passes any host."""
    filt = self.class_map['PciPassthroughFilter']()
    host = fakes.FakeHostState('h1', 'n1', {})
    self.assertTrue(filt.host_passes(host, {}))
def test_pci_passthrough_comopute_stats(self):
    """A host missing pci_stats raises AttributeError."""
    # NOTE(review): name misspells 'compute'; kept to preserve the
    # public test id.
    self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
                   self._fake_pci_support_requests)
    filt = self.class_map['PciPassthroughFilter']()
    requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
    host = fakes.FakeHostState('host1', 'node1', attribute_dict={})
    self.pci_request_result = True
    self.assertRaises(AttributeError, filt.host_passes,
                      host, {'pci_requests': requests})
def test_aggregate_image_properties_isolation_passes(self):
    """Image properties matching the aggregate metadata pass the filter."""
    self._stub_service_is_up(True)
    self._create_aggregate_with_host(name='fake1',
                                     metadata={'foo': 'bar'},
                                     hosts=['host1'])
    isolation_filter = self.class_map['AggregateImagePropertiesIsolation']()
    request_spec = {'image': {'properties': {'foo': 'bar'}}}
    filter_properties = {'context': self.context, 'request_spec': request_spec}
    host_state = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(isolation_filter.host_passes(host_state, filter_properties))
def test_aggregate_image_properties_isolation_multi_props_passes(self):
    # Every image property matches the aggregate metadata, so the host
    # passes the isolation filter.
    self._stub_service_is_up(True)
    filt_cls = self.class_map['AggregateImagePropertiesIsolation']()
    aggr_meta = {'foo': 'bar', 'foo2': 'bar2'}
    self._create_aggregate_with_host(name='fake1',
                                     metadata=aggr_meta,
                                     hosts=['host1'])
    filter_properties = {'context': self.context,
                         'request_spec': {
                             'image': {
                                 'properties': {'foo': 'bar',
                                                'foo2': 'bar2'}}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_aggregate_image_properties_isolation_props_with_meta_passes(self):
    # An image with no properties cannot conflict with aggregate
    # metadata, so the host passes.
    self._stub_service_is_up(True)
    filt_cls = self.class_map['AggregateImagePropertiesIsolation']()
    aggr_meta = {'foo': 'bar'}
    self._create_aggregate_with_host(name='fake1',
                                     metadata=aggr_meta,
                                     hosts=['host1'])
    filter_properties = {'context': self.context,
                         'request_spec': {
                             'image': {
                                 'properties': {}}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_aggregate_image_properties_isolation_props_imgprops_passes(self):
    # An aggregate with empty metadata imposes no constraints, so any
    # image properties pass.
    self._stub_service_is_up(True)
    filt_cls = self.class_map['AggregateImagePropertiesIsolation']()
    aggr_meta = {}
    self._create_aggregate_with_host(name='fake1',
                                     metadata=aggr_meta,
                                     hosts=['host1'])
    filter_properties = {'context': self.context,
                         'request_spec': {
                             'image': {
                                 'properties': {'foo': 'bar'}}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_aggregate_image_properties_isolation_props_not_match_fails(self):
    """A host is rejected when an image property conflicts with its
    aggregate metadata."""
    self._stub_service_is_up(True)
    self._create_aggregate_with_host(name='fake1',
                                     metadata={'foo': 'bar'},
                                     hosts=['host1'])
    isolation_filter = self.class_map['AggregateImagePropertiesIsolation']()
    request_spec = {'image': {'properties': {'foo': 'no-bar'}}}
    filter_properties = {'context': self.context, 'request_spec': request_spec}
    host_state = fakes.FakeHostState('host1', 'compute', {})
    self.assertFalse(isolation_filter.host_passes(host_state, filter_properties))
def test_aggregate_image_properties_isolation_props_not_match2_fails(self):
    # One of several image properties conflicts with the aggregate
    # metadata (foo2: bar3 vs bar2), so the host is rejected.
    self._stub_service_is_up(True)
    filt_cls = self.class_map['AggregateImagePropertiesIsolation']()
    aggr_meta = {'foo': 'bar', 'foo2': 'bar2'}
    self._create_aggregate_with_host(name='fake1',
                                     metadata=aggr_meta,
                                     hosts=['host1'])
    filter_properties = {'context': self.context,
                         'request_spec': {
                             'image': {
                                 'properties': {'foo': 'bar',
                                                'foo2': 'bar3'}}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_aggregate_image_properties_isolation_props_namespace(self):
    # With aggregate_image_properties_isolation_namespace set to "np",
    # only properties in that namespace are compared; the mismatching
    # un-namespaced foo2 is ignored, so the host still passes.
    self._stub_service_is_up(True)
    filt_cls = self.class_map['AggregateImagePropertiesIsolation']()
    self.flags(aggregate_image_properties_isolation_namespace="np")
    aggr_meta = {'np.foo': 'bar', 'foo2': 'bar2'}
    self._create_aggregate_with_host(name='fake1',
                                     metadata=aggr_meta,
                                     hosts=['host1'])
    filter_properties = {'context': self.context,
                         'request_spec': {
                             'image': {
                                 'properties': {'np.foo': 'bar',
                                                'foo2': 'bar3'}}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_metrics_filter_pass(self):
    """Hosts reporting every configured metric pass the MetricsFilter."""
    self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
    host_state = fakes.FakeHostState(
        'host1', 'node1', attribute_dict={'metrics': {'foo': 1, 'bar': 2}})
    metrics_filter = self.class_map['MetricsFilter']()
    self.assertTrue(metrics_filter.host_passes(host_state, None))
def test_metrics_filter_missing_metrics(self):
    """A host missing one of the configured metrics is rejected."""
    self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
    host_state = fakes.FakeHostState(
        'host1', 'node1', attribute_dict={'metrics': {'foo': 1}})
    metrics_filter = self.class_map['MetricsFilter']()
    self.assertFalse(metrics_filter.host_passes(host_state, None))
| |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import yaml
import numpy as np
# RiBuild Modules:
from delphin_6_automation.database_interactions.db_templates import delphin_entry as delphin_db, delphin_entry
from delphin_6_automation.database_interactions.db_templates import result_raw_entry as result_db
from delphin_6_automation.database_interactions.db_templates import weather_entry as weather_db
from delphin_6_automation.database_interactions.db_templates import material_entry as material_db
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# MATERIAL INTERACTIONS
def download_raw_result(result_id: str, download_path: str) -> bool:
    """
    Downloads a raw result entry from the database to a local folder.

    The files are written to ``<download_path>/<result_id>``, which is
    created if it does not exist yet.

    :param result_id: Database entry id
    :param download_path: Path where the result should be written
    :return: True
    """
    result_obj = result_db.Result.objects(id=result_id).first()
    result_folder = os.path.join(download_path, str(result_id))
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # previous os.path.exists + os.mkdir pair and creates missing parents.
    os.makedirs(result_folder, exist_ok=True)
    delphin_interactions.download_result_files(result_obj, result_folder)
    return True
def queue_priorities(priority: str) -> int:
    """
    Translate a priority label into a numeric queue priority.

    :param priority: High, medium or low priority
    :return: Priority number
    :raises ValueError: if the label is not high, medium or low.
    """
    existing = [entry.queue_priority
                for entry in delphin_db.Delphin.objects.order_by('queue_priority')]

    # An empty queue always starts at priority 1.
    if not existing:
        return 1

    lowest = min(existing)
    highest = max(existing)
    spread = highest - lowest

    # NOTE(review): medium/low mix the spread with a fraction of the
    # minimum rather than interpolating between min and max -- presumably
    # intentional weighting, confirm against the scheduler's expectations.
    if priority == 'high':
        return int(highest)
    if priority == 'medium':
        return int(spread + 0.5 * lowest)
    if priority == 'low':
        return int(spread + 0.25 * lowest)

    raise ValueError('priority has to be: high, medium or low. Value given was: ' + str(priority))
def add_to_simulation_queue(delphin_file: str, priority: str) -> str:
    """
    Uploads and adds a Delphin project file to the simulation queue.

    :param delphin_file: Delphin 6 project file path
    :param priority: High, medium or low priority
    :return: Database entry id
    :raises ValueError: via queue_priorities() when the priority label is
        not one of high/medium/low.
    """
    priority_number = queue_priorities(priority)
    simulation_id = delphin_interactions.upload_delphin_to_database(delphin_file, priority_number)

    logger.debug(f'Added Delphin project with ID: {simulation_id} to queue with priority: {priority}')

    return simulation_id
def is_simulation_finished(sim_id: str) -> bool:
    """
    Report whether a Delphin project entry has been simulated.

    :param sim_id: Database entry to check
    :return: True if it is simulated otherwise False.
    """
    entry = delphin_db.Delphin.objects(id=sim_id).first()
    finished = bool(entry.simulated)

    if finished:
        logger.debug(f'Delphin project with ID: {sim_id} is finished simulating')
    else:
        logger.debug(f'Delphin project with ID: {sim_id} is not finished simulating')

    return finished
def list_finished_simulations() -> list:
    """Return the database IDs of all Delphin entries already simulated."""
    return [doc.id for doc in delphin_db.Delphin.objects() if doc.simulated]
def download_full_project_from_database(document_id: str, folder: str) -> bool:
    """
    Downloads a Delphin project file from the database together with all
    of its materials and weather; restart data is fetched too when present.

    :param document_id: Database entry id
    :param folder: Path where the files should be written.
    :return: True
    """
    entry = delphin_db.Delphin.objects(id=document_id).first()

    material_interactions.download_materials(entry, os.path.join(folder, 'materials'))
    weather_interactions.download_weather(entry, os.path.join(folder, 'weather'))
    delphin_interactions.download_delphin_entry(entry, folder)

    if entry.restart_data:
        delphin_interactions.download_restart_data(entry, os.path.join(folder, document_id))

    logger.debug(f'Download Delphin project with ID: {document_id} from database with weather and materials.')

    return True
def list_weather_stations() -> dict:
    """
    List the weather stations currently in database.

    :return: ``{station_name: {'location': ..., 'years': [...]}}``
    """
    weather_stations = dict()

    for document in weather_db.Weather.objects():
        # BUG FIX: the membership test used the raw location_name while
        # keys were stored as str(location_name); normalize once so the
        # lookup and the stored key always agree.
        name = str(document.location_name)
        station = weather_stations.setdefault(
            name, {'location': document.location, 'years': []})
        station['years'].append(document.year)

    return weather_stations
def print_weather_stations_dict(weather_station_dict):
    """Pretty-print the mapping produced by ``list_weather_stations``."""
    for name, station in weather_station_dict.items():
        print(f'Weather Station: {name} at location: {station["location"]} contains '
              f'{len(station["years"])} years.\n'
              f'\t The years are: {station["years"]}')
def list_materials():
    """List materials currently in the database.

    :return: ``{material_name: {'material_id': ..., 'database_id': ...}}``
    """
    materials = dict()

    for document in material_db.Material.objects():
        materials[str(document.material_name)] = {
            'material_id': document.material_id,
            'database_id': document.id,
        }

    return materials
def print_material_dict(materials):
    """Pretty-print the mapping produced by ``list_materials``."""
    for name, info in materials.items():
        print(f'Material:\n'
              f'\tName: {name}\n'
              f'\tDelphin Material ID: {info["material_id"]}\n'
              f'\tDatabase ID: {info["database_id"]}\n')
def does_simulation_exists(sim_id: str) -> bool:
    """
    Checks if a Delphin project entry is in the database or not.

    :param sim_id: Database entry to check
    :return: True if it is in database otherwise returns False.
    """
    # .first() yields None when no document matches, so the original
    # if/else ladder collapses to a single None check.
    return delphin_db.Delphin.objects(id=sim_id).first() is not None
def compute_simulation_time(sim_id: str) -> int:
    """
    Get the average time for this type of construction (2D or 1D)

    :param sim_id: Delphin entry id from database
    :return: Average simulation time in minutes
    """
    sim_obj = delphin_entry.Delphin.objects(id=sim_id).first()
    dimension = sim_obj.dimensions
    predicted_time = sim_obj.estimated_simulation_time

    if predicted_time:
        logger.debug(f'Predicted simulation time for Delphin project in {dimension}D: {predicted_time}min')
        return predicted_time

    sim_time = delphin_entry.Delphin.objects(dimensions=dimension, simulation_time__exists=True).average(
        'simulation_time')

    if sim_time:
        # sim_time is presumably seconds (it is divided by 60 to report
        # minutes) -- TODO confirm against the writer of simulation_time.
        avg_time = int(np.ceil(sim_time / 60))
        logger.debug(f'Average simulation time for Delphin projects in {dimension}D: {avg_time}min')
        return avg_time
    elif dimension == 2:
        # BUG FIX: the log message claimed 180min while the function
        # returned 240; the message now matches the returned value.
        logger.debug('No previous simulations found. Setting time to 240min for a 2D simulation')
        return 240
    else:
        # BUG FIX: the log message claimed 60min while the function
        # returned 120; the message now matches the returned value.
        logger.debug('No previous simulations found. Setting time to 120min for a 1D simulation')
        return 120
def download_sample_data(delphin_id, folder):
    """Dump a Delphin entry's sample data as YAML to ``<folder>/sample_data.txt``.

    Creates ``folder`` first when it does not exist.
    """
    entry = delphin_db.Delphin.objects(id=delphin_id).first()

    if not os.path.exists(folder):
        os.mkdir(folder)

    # Convert the (presumably mongoengine-backed) mappings into plain
    # dicts so yaml.dump can serialize them -- confirm field type.
    sample_data = dict(entry.sample_data)
    sample_data['design_option'] = dict(sample_data['design_option'])

    with open(os.path.join(folder, 'sample_data.txt'), 'w') as outfile:
        yaml.dump(sample_data, outfile)

    return None
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pytest
from pandas.compat import lrange, string_types
from pandas import DataFrame, Series
import pandas.util.testing as tm
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(subset):
    # GH 19730: an unknown column label (alone or inside a list) must
    # raise KeyError for both duplicated() and drop_duplicates().
    df = DataFrame({'A': [0, 0, 1],
                    'B': [0, 0, 1],
                    'C': [0, 0, 1]})

    with pytest.raises(KeyError):
        df.duplicated(subset)

    with pytest.raises(KeyError):
        df.drop_duplicates(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
    # gh-21524
    # Given the wide dataframe with a lot of columns
    # with different (important!) values
    data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
            for i in range(100)}
    df = DataFrame(data).T
    result = df.duplicated()

    # Then duplicates produce the bool Series as a result and don't fail during
    # calculation. Actual values doesn't matter here, though usually it's all
    # False in this case
    assert isinstance(result, Series)
    # BUG FIX: np.bool was a deprecated alias of the builtin bool (removed
    # in NumPy 1.24); comparing the dtype against bool is equivalent.
    assert result.dtype == bool
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_keep(keep, expected):
    # ``keep`` controls which occurrence stays unmarked: the first, the
    # last, or none (every duplicated row flagged).
    df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']})

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH#21720; nan/None falsely considered equal")
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
    # np.nan and None should be treated as distinct values in an object
    # column; currently they compare equal, hence the xfail above.
    df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object)

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A'])
def test_duplicated_subset(subset, keep):
    # duplicated(subset=...) must agree with projecting the frame onto the
    # subset first and then calling duplicated().
    df = DataFrame({'A': [0, 1, 1, 2, 0],
                    'B': ['a', 'b', 'b', 'c', 'a'],
                    'C': [np.nan, 3, 3, None, np.nan]})

    if subset is None:
        subset = list(df.columns)
    elif isinstance(subset, string_types):
        # need to have a DataFrame, not a Series
        # -> select columns with singleton list, not string
        subset = [subset]

    expected = df[subset].duplicated(keep=keep)
    result = df.duplicated(keep=keep, subset=subset)
    tm.assert_series_equal(result, expected)
def test_drop_duplicates():
    """End-to-end checks of DataFrame.drop_duplicates: single columns,
    list/tuple/ndarray subsets, every ``keep`` policy, integer dtypes,
    and historical regressions GH 11376 / GH 11864."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('AAA')
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep='last')
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep=False)
    expected = df.loc[[]]
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column: the subset may be given as ndarray, list or tuple
    expected = df.loc[[0, 1, 2, 3]]
    result = df.drop_duplicates(np.array(['AAA', 'B']))
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'])
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep='last')
    expected = df.loc[[0, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep=False)
    expected = df.loc[[0]]
    tm.assert_frame_equal(result, expected)

    # consider everything
    df2 = df.loc[:, ['AAA', 'B', 'C']]

    result = df2.drop_duplicates()
    # in this case only
    expected = df2.drop_duplicates(['AAA', 'B'])
    tm.assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep='last')
    expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
    tm.assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
    tm.assert_frame_equal(result, expected)

    # integers
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)

    # same values but downcast to int8 must behave identically
    df['E'] = df['C'].astype('int8')
    result = df.drop_duplicates('E')
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates('E', keep='last')
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)

    # GH 11376: only rows 2 and 3 are duplicates, so index 3 is dropped
    df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                    'y': [0, 6, 5, 5, 9, 1, 2]})
    expected = df.loc[df.index != 3]
    tm.assert_frame_equal(df.drop_duplicates(), expected)

    df = DataFrame([[1, 0], [0, 2]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    df = DataFrame([[-2, 0], [0, -4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    # values close to the int64 range exercise the hashing of large floats
    x = np.iinfo(np.int64).max / 3 * 2
    df = DataFrame([[-x, x], [0, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    df = DataFrame([[-x, x], [x, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    # GH 11864: 17 pairwise-distinct rows, so nothing is ever flagged
    df = DataFrame([i] * 9 for i in range(16))
    df = df.append([[1] + [0] * 8], ignore_index=True)

    for keep in ['first', 'last', False]:
        assert df.duplicated(keep=keep).sum() == 0
def test_duplicated_on_empty_frame():
    # GH 25184: duplicated() on an empty frame must yield an empty mask
    # that selects nothing.
    df = DataFrame(columns=['a', 'b'])
    mask = df.duplicated('a')
    tm.assert_frame_equal(df[mask], df.copy())
def test_drop_duplicates_with_duplicate_column_names():
    # GH17836: duplicate column labels must not break drop_duplicates,
    # whether deduplicating on all columns or on the repeated label.
    df = DataFrame([[1, 2, 5],
                    [3, 4, 6],
                    [3, 4, 7]], columns=['a', 'a', 'b'])

    tm.assert_frame_equal(df.drop_duplicates(), df)
    tm.assert_frame_equal(df.drop_duplicates('a'), df[:2])
def test_drop_duplicates_for_take_all():
    """Frames containing both duplicated and singleton values ('baz',
    'qux') keep the singletons under every ``keep`` policy."""
    df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
                            'foo', 'bar', 'qux', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('AAA')
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep='last')
    expected = df.iloc[[2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep=False)
    expected = df.iloc[[2, 6]]
    tm.assert_frame_equal(result, expected)

    # multiple columns
    result = df.drop_duplicates(['AAA', 'B'])
    expected = df.iloc[[0, 1, 2, 3, 4, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['AAA', 'B'], keep='last')
    expected = df.iloc[[0, 1, 2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['AAA', 'B'], keep=False)
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
    """A tuple is a valid single column label; nesting it inside another
    tuple selects a multi-column subset."""
    df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
                                   'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates(('AA', 'AB'))
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep='last')
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep=False)
    expected = df.loc[[]]  # empty df
    assert len(result) == 0
    tm.assert_frame_equal(result, expected)

    # multi column
    expected = df.loc[[0, 1, 2, 3]]
    result = df.drop_duplicates((('AA', 'AB'), 'B'))
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('df', [
    DataFrame(),
    DataFrame(columns=[]),
    DataFrame(columns=['A', 'B', 'C']),
    DataFrame(index=[]),
    DataFrame(index=['A', 'B', 'C'])
])
def test_drop_duplicates_empty(df):
    # GH 20516: empty frames survive drop_duplicates unchanged, both as a
    # returned copy and when deduplicating in place.
    tm.assert_frame_equal(df.drop_duplicates(), df)

    inplace_df = df.copy()
    inplace_df.drop_duplicates(inplace=True)
    tm.assert_frame_equal(inplace_df, df)
def test_drop_duplicates_NA():
    """Missing values (None in an object column, np.nan in a float
    column) count as equal to each other when deduplicating."""
    # none
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('A')
    expected = df.loc[[0, 2, 3]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep='last')
    expected = df.loc[[1, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column
    result = df.drop_duplicates(['A', 'B'])
    expected = df.loc[[0, 2, 3, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep='last')
    expected = df.loc[[1, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep=False)
    expected = df.loc[[6]]
    tm.assert_frame_equal(result, expected)

    # nan
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('C')
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep='last')
    expected = df.loc[[3, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column
    result = df.drop_duplicates(['C', 'B'])
    expected = df.loc[[0, 1, 2, 4]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep='last')
    expected = df.loc[[1, 3, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep=False)
    expected = df.loc[[1]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
    """With NA values present, singleton rows survive every ``keep``
    policy while NA rows deduplicate against each other."""
    # none
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'baz', 'bar', 'qux'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})

    # single column
    result = df.drop_duplicates('A')
    expected = df.iloc[[0, 2, 3, 5, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep='last')
    expected = df.iloc[[1, 4, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep=False)
    expected = df.iloc[[5, 7]]
    tm.assert_frame_equal(result, expected)

    # nan
    # single column
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 1, 5, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[3, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep=False)
    expected = df.iloc[[5, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
    """drop_duplicates(inplace=True) must mutate the frame to exactly
    what the non-inplace call would have returned."""
    orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                      'B': ['one', 'one', 'two', 'two',
                            'two', 'two', 'one', 'two'],
                      'C': [1, 1, 2, 2, 2, 2, 1, 2],
                      'D': lrange(8)})

    # single column
    df = orig.copy()
    df.drop_duplicates('A', inplace=True)
    expected = orig[:2]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep='last', inplace=True)
    expected = orig.loc[[6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep=False, inplace=True)
    expected = orig.loc[[]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert len(df) == 0

    # multi column
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], inplace=True)
    expected = orig.loc[[0, 1, 2, 3]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
    expected = orig.loc[[0, 5, 6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
    expected = orig.loc[[0]]
    result = df
    tm.assert_frame_equal(result, expected)

    # consider everything
    orig2 = orig.loc[:, ['A', 'B', 'C']].copy()

    df2 = orig2.copy()
    df2.drop_duplicates(inplace=True)
    # in this case only
    expected = orig2.drop_duplicates(['A', 'B'])
    result = df2
    tm.assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep='last', inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep='last')
    result = df2
    tm.assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep=False)
    result = df2
    tm.assert_frame_equal(result, expected)
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import json
import pandas as pd
import time
from airflow.utils.log.logging_mixin import LoggingMixin
class SalesforceHook(BaseHook, LoggingMixin):
    """Connect to a Salesforce instance, run SOQL queries and write the
    results to CSV/JSON/ndjson files for use by other Airflow operators.

    Relies on the ``simple_salesforce`` package; credentials come from an
    Airflow connection whose ``Extras`` JSON carries the security token.
    """

    def __init__(
        self,
        conn_id,
        *args,
        **kwargs
    ):
        """
        Create new connection to Salesforce
        and allows you to pull data out of SFDC and save it to a file.

        You can then use that file with other
        Airflow operators to move the data into another data source

        :param conn_id:     the name of the connection that has the parameters
                            we need to connect to Salesforce.
                            The connection should be type `http` and include a
                            user's security token in the `Extras` field.
        .. note::
            For the HTTP connection type, you can include a
            JSON structure in the `Extras` field.
            We need a user's security token to connect to Salesforce.
            So we define it in the `Extras` field as:
                `{"security_token":"YOUR_SECURITY_TOKEN"}`
        """
        self.conn_id = conn_id
        self._args = args
        self._kwargs = kwargs

        # get the connection parameters
        self.connection = self.get_connection(conn_id)
        self.extras = self.connection.extra_dejson

    def sign_in(self):
        """
        Sign into Salesforce.

        If we have already signed in, this will just return the original object
        """
        if hasattr(self, 'sf'):
            return self.sf

        # connect to Salesforce
        sf = Salesforce(
            username=self.connection.login,
            password=self.connection.password,
            security_token=self.extras['security_token'],
            instance_url=self.connection.host
        )
        self.sf = sf
        return sf

    def make_query(self, query):
        """
        Make a query to Salesforce.  Returns result in dictionary

        :param query:    The query to make to Salesforce
        """
        self.sign_in()

        self.log.info("Querying for all objects")
        query = self.sf.query_all(query)

        self.log.info(
            "Received results: Total size: %s; Done: %s",
            query['totalSize'], query['done']
        )

        # round-trip through JSON to turn simple_salesforce's OrderedDict
        # tree into plain dicts and lists
        query = json.loads(json.dumps(query))
        return query

    def describe_object(self, obj):
        """
        Get the description of an object from Salesforce.

        This description is the object's schema
        and some extra metadata that Salesforce stores for each object

        :param obj:    Name of the Salesforce object
                       that we are getting a description of.
        """
        self.sign_in()

        # __getattr__ resolves the Salesforce object by name on the session
        return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))

    def get_available_fields(self, obj):
        """
        Get a list of all available fields for an object.

        This only returns the names of the fields.
        """
        self.sign_in()

        desc = self.describe_object(obj)

        return [f['name'] for f in desc['fields']]

    def _build_field_list(self, fields):
        # join all of the fields in a comma separated list
        return ",".join(fields)

    def get_object_from_salesforce(self, obj, fields):
        """
        Get all instances of the `object` from Salesforce.
        For each model, only get the fields specified in fields.

        All we really do underneath the hood is run:
            SELECT <fields> FROM <obj>;
        """
        field_string = self._build_field_list(fields)

        query = "SELECT {0} FROM {1}".format(field_string, obj)
        # long queries are abbreviated in the log to keep it readable
        self.log.info(
            "Making query to Salesforce: %s",
            query if len(query) < 30 else " ... ".join([query[:15], query[-15:]])
        )
        return self.make_query(query)

    @classmethod
    def _to_timestamp(cls, col):
        """
        Convert a column of a dataframe to UNIX timestamps if applicable

        :param col:     A Series object representing a column of a dataframe.
        """
        # try and convert the column to datetimes
        # the column MUST have a four digit year somewhere in the string
        # there should be a better way to do this,
        # but just letting pandas try and convert every column without a format
        # caused it to convert floats as well
        # For example, a column of integers
        # between 0 and 10 are turned into timestamps
        # if the column cannot be converted,
        # just return the original column untouched
        try:
            col = pd.to_datetime(col)
        except ValueError:
            log = LoggingMixin().log
            log.warning(
                "Could not convert field to timestamps: %s", col.name
            )
            return col

        # now convert the newly created datetimes into timestamps
        # we have to be careful here
        # because NaT cannot be converted to a timestamp
        # so we have to return NaN
        converted = []

        for i in col:
            try:
                converted.append(i.timestamp())
            except (ValueError, AttributeError):
                # BUG FIX: pd.np was removed in pandas 2.0; a plain float
                # NaN behaves identically in the resulting Series.
                converted.append(float('nan'))

        # return a new series that maintains the same index as the original
        return pd.Series(converted, index=col.index)

    def write_object_to_file(
        self,
        query_results,
        filename,
        fmt="csv",
        coerce_to_timestamp=False,
        record_time_added=False
    ):
        """
        Write query results to file.

        Acceptable formats are:
            - csv:
                comma-separated-values file.  This is the default format.
            - json:
                JSON array.  Each element in the array is a different row.
            - ndjson:
                JSON array but each element is new-line delimited
                instead of comma delimited like in `json`

        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps in JSON.

        By default, this function will try and leave all values as
        they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes
        to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your
        datetime fields look the same,
        and makes it easier to work with in other database environments

        :param query_results:       the results from a SQL query
        :param filename:            the name of the file where the data
                                    should be dumped to
        :param fmt:                 the format you want the output in.
                                    *Default:* csv.
        :param coerce_to_timestamp: True if you want all datetime fields to be
                                    converted into Unix timestamps.
                                    False if you want them to be left in the
                                    same format as they were in Salesforce.
                                    Leaving the value as False will result
                                    in datetimes being strings.
                                    *Defaults to False*
        :param record_time_added:   *(optional)* True if you want to add a
                                    Unix timestamp field to the resulting data
                                    that marks when the data
                                    was fetched from Salesforce.
                                    *Default: False*.
        """
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError("Format value is not recognized: {0}".format(fmt))

        # this line right here will convert all integers to floats if there are
        # any None/np.nan values in the column
        # that's because None/np.nan cannot exist in an integer column
        # we should write all of our timestamps as FLOATS in our final schema
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])

        df.columns = [c.lower() for c in df.columns]

        # convert columns with datetime strings to datetimes
        # not all strings will be datetimes, so we ignore any errors that occur
        # we get the object's definition at this point and only consider
        # features that are DATE or DATETIME
        if coerce_to_timestamp and df.shape[0] > 0:
            # get the object name out of the query results
            # it's stored in the "attributes" dictionary
            # for each returned record
            object_name = query_results[0]['attributes']['type']

            self.log.info("Coercing timestamps for: %s", object_name)

            schema = self.describe_object(object_name)

            # possible columns that can be converted to timestamps
            # are the ones that are either date or datetime types
            # strings are too general and we risk unintentional conversion
            possible_timestamp_cols = [
                i['name'].lower()
                for i in schema['fields']
                if i['type'] in ["date", "datetime"] and
                i['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
                lambda x: self._to_timestamp(x)
            )

        if record_time_added:
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time

        # write the CSV or JSON file depending on the option
        # NOTE:
        #   datetimes here are an issue.
        #   There is no good way to manage the difference
        #   for to_json, the options are an epoch or a ISO string
        #   but for to_csv, it will be a string output by datetime
        #   For JSON we decided to output the epoch timestamp in seconds
        #   (as is fairly standard for JavaScript)
        #   And for csv, we do a string
        if fmt == "csv":
            # there are also a ton of newline objects
            # that mess up our ability to write to csv
            # we remove these newlines so that the output is a valid CSV format
            self.log.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\r\n", "")
            )
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\n", "")
            )

            # write the dataframe
            df.to_csv(filename, index=False)
        elif fmt == "json":
            df.to_json(filename, "records", date_unit="s")
        elif fmt == "ndjson":
            df.to_json(filename, "records", lines=True, date_unit="s")

        return df
| |
#!/usr/bin/env python3
from serverpanel import create_app
from serverpanel.ext.serverinfo import ServerInfo
from flask_testing import TestCase
import unittest
import json
import unittest.mock as mock
import tempfile
import os
from collections import namedtuple
# Minimal stand-in for an HTTP response object: only the .text attribute is
# read by the code under test (serverpanel.ext.serverinfo.get is patched in
# the tests below to return these).  NOTE(review): presumably mimicking a
# requests.Response -- confirm against serverinfo's implementation.
Request = namedtuple('Request', ['text'])

# Canned JSON payloads for the mocked external-IP and pi-hole API calls.
valid_ip_data = Request(text='{"ip": "127.0.0.1", "country": "BE"}')
valid_pihole_data = Request(text='{"domains_being_blocked": 1,"dns_queries_today":2, '
                                 '"ads_blocked_today": 3, "ads_percentage_today": 50.0}')
class MyTest(TestCase):
    """Integration tests for the serverpanel Flask application.

    flask_testing.TestCase supplies ``self.client``, ``assert200`` and
    ``assert_template_used``; ``create_app`` below builds the app under test.
    External HTTP lookups (external IP, pi-hole) are mocked out so the tests
    never touch the network.
    """

    def create_app(self):
        """Required by flask_testing: build and return the app under test."""
        app = create_app('config')
        app.config['DEBUG'] = False
        return app

    def _get_json(self, url):
        """GET *url*, assert an HTTP 200 response, return the decoded JSON body.

        Factors out the request/assert/decode boilerplate shared by every
        API test below.
        """
        response = self.client.get(url)
        self.assert200(response)
        return json.loads(response.data.decode('utf-8'))

    def _write_cpu_temp_file(self, contents):
        """Create a temp file holding *contents* and point the extension's
        ``cpu_temp`` path at it.  Returns the path; the caller must remove it.
        """
        fd, path = tempfile.mkstemp()
        try:
            with open(path, "w") as f:
                f.write(contents)
        finally:
            # the raw fd from mkstemp is not needed once the contents are
            # written through the separate file object
            os.close(fd)
        self.app.extensions['flask-serverinfo'].cpu_temp = path
        return path

    def test_creation(self):
        # ServerInfo reads the pi-hole settings from the app config at
        # construction time.
        self.app.config['ENABLE_PIHOLE'] = False
        server_info = ServerInfo(self.app)
        self.assertFalse(server_info.pihole_enabled)

        self.app.config['ENABLE_PIHOLE'] = True
        self.app.config['PIHOLE_API'] = None
        server_info = ServerInfo(self.app)
        self.assertTrue(server_info.pihole_enabled)

    def test_main(self):
        # both the root and the /network/ routes render the main template
        response = self.client.get('/')
        self.assert_template_used('main.html')
        self.assert200(response)

        response = self.client.get('/network/')
        self.assert_template_used('main.html')
        self.assert200(response)

    def test_api(self):
        data = self._get_json('/api/')
        for key in ('version', 'server', 'system', 'network', 'pihole'):
            self.assertIn(key, data)

    def test_api_details(self):
        data = self._get_json('/api/version')
        self.assertIn('name', data)
        self.assertIn('version', data)

    def test_api_server(self):
        data = self._get_json('/api/server')
        for key in ('hostname', 'os', 'uptime'):
            self.assertIn(key, data)

    def test_route_hostname(self):
        data = self._get_json('/api/server/hostname')
        self.assertIn('hostname', data)

    def test_route_os(self):
        data = self._get_json('/api/server/os')
        self.assertIn('os_name', data)

    def test_route_uptime(self):
        data = self._get_json('/api/server/uptime')
        self.assertIn('uptime', data)

    def test_route_cpu_cores(self):
        data = self._get_json('/api/system/cpu/cores')
        self.assertIn('logical_cores', data)
        self.assertIn('physical_cores', data)

    def test_route_cpu_load(self):
        # one load figure per core is expected; just check it is non-empty
        data = self._get_json('/api/system/cpu/load')
        self.assertTrue(len(data) > 0)

    def test_route_memory(self):
        data = self._get_json('/api/system/memory')
        for key in ('available', 'free', 'percent', 'total', 'used'):
            self.assertIn(key, data)

    def test_route_swap(self):
        data = self._get_json('/api/system/swap')
        for key in ('sin', 'sout', 'free', 'percent', 'total', 'used'):
            self.assertIn(key, data)

    def test_route_disk_space(self):
        data = self._get_json('/api/system/disk/space')
        for disk in data:
            for key in ('device', 'fstype', 'mountpoint', 'opts', 'usage'):
                self.assertIn(key, disk)
            for key in ('free', 'percent', 'total', 'used'):
                self.assertIn(key, disk['usage'])

    def test_route_disk_io(self):
        data = self._get_json('/api/system/disk/io')
        for disk in data:
            self.assertIn('device', disk)
            self.assertIn('io', disk)
            for key in ('read_bytes', 'read_count', 'read_time',
                        'write_bytes', 'write_count', 'write_time'):
                self.assertIn(key, disk['io'])

    def test_route_network_io(self):
        data = self._get_json('/api/network/io')
        for network in data:
            for key in ('device', 'address', 'io'):
                self.assertIn(key, network)
            for key in ('bytes_recv', 'bytes_sent', 'dropin', 'dropout',
                        'errin', 'errout', 'packets_recv', 'packets_sent'):
                self.assertIn(key, network['io'])

    @mock.patch('serverpanel.ext.serverinfo.get', return_value=valid_ip_data)
    def test_route_network_external_success(self, mocked_get):
        # the external-IP lookup is mocked, so no real request is made
        data = self._get_json('/api/network/external')
        self.assertIn('ip', data)
        self.assertIn('country', data)
        self.assertEqual(data['ip'], '127.0.0.1')
        self.assertEqual(data['country'], 'BE')

    @mock.patch('serverpanel.ext.serverinfo.get', return_value=None)
    def test_route_network_external_fail(self, mocked_get):
        # when the lookup fails, the endpoint reports 'Unknown' values
        data = self._get_json('/api/network/external')
        self.assertIn('ip', data)
        self.assertIn('country', data)
        self.assertEqual(data['ip'], 'Unknown')
        self.assertEqual(data['country'], 'Unknown')

    def test_route_temperature_success(self):
        # 100000 (millidegrees) should be reported as 100 degrees
        path = self._write_cpu_temp_file('100000')
        try:
            data = self._get_json('/api/system/temp')
            self.assertIn('cpu', data)
            self.assertEqual(data['cpu'], 100)
        finally:
            # fix: the mkstemp file used to be left behind on disk
            os.remove(path)

    def test_route_temperature_fail(self):
        # unparsable file contents should yield the sentinel value -1
        path = self._write_cpu_temp_file('not a number')
        try:
            data = self._get_json('/api/system/temp')
            self.assertIn('cpu', data)
            self.assertEqual(data['cpu'], -1)
        finally:
            os.remove(path)

    def test_route_processes(self):
        data = self._get_json('/api/system/processes')
        for proc in data:
            for key in ('pid', 'name', 'cpu_percentage'):
                self.assertIn(key, proc)

    def test_pihole_disabled(self):
        self.app.extensions['flask-serverinfo'].pihole_enabled = False
        data = self._get_json('/api/pihole/stats')
        self.assertIn('enabled', data)
        self.assertEqual(data['enabled'], False)
        self.assertEqual(data['error'], False)

    @mock.patch('serverpanel.ext.serverinfo.get', return_value=valid_pihole_data)
    def test_pihole_enabled_success(self, mocked_get):
        self.app.extensions['flask-serverinfo'].pihole_enabled = True
        self.app.extensions['flask-serverinfo'].pihole_api = ''
        data = self._get_json('/api/pihole/stats')
        self.assertIn('enabled', data)
        self.assertEqual(data['error'], False)

    @mock.patch('serverpanel.ext.serverinfo.get', return_value=None)
    def test_pihole_enabled_fail(self, mocked_get):
        # a failed pi-hole API call must be reported via the 'error' flag
        self.app.extensions['flask-serverinfo'].pihole_enabled = True
        self.app.extensions['flask-serverinfo'].pihole_api = ''
        data = self._get_json('/api/pihole/stats')
        self.assertIn('enabled', data)
        self.assertEqual(data['error'], True)
# Allow running this test module directly (python <file>.py) as well as
# through a test runner.
if __name__ == '__main__':
    unittest.main()
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_surface_description1564
except ImportError:
bt_surface_description1564 = sys.modules[
"onshape_client.oas.models.bt_surface_description1564"
]
try:
from onshape_client.oas.models import bt_torus_description1834_all_of
except ImportError:
bt_torus_description1834_all_of = sys.modules[
"onshape_client.oas.models.bt_torus_description1834_all_of"
]
try:
from onshape_client.oas.models import bt_vector3d389
except ImportError:
bt_vector3d389 = sys.modules["onshape_client.oas.models.bt_vector3d389"]
class BTTorusDescription1834(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Composed ("allOf") model: BTSurfaceDescription1564 plus the torus-specific
    fields from BTTorusDescription1834AllOf.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum values accepted for the 'type' attribute (kinds of surface).
    allowed_values = {
        ("type",): {
            "PLANE": "PLANE",
            "CYLINDER": "CYLINDER",
            "CONE": "CONE",
            "SPHERE": "SPHERE",
            "TORUS": "TORUS",
            "SPUN": "SPUN",
            "SWEEP": "SWEEP",
            "OFFSET": "OFFSET",
            "BLEND": "BLEND",
            "BSURFACE": "BSURFACE",
            "OTHER": "OTHER",
            "UNKNOWN": "UNKNOWN",
        },
    }

    # No length/range/regex constraints are declared for this model.
    validations = {}

    # None: instances do not accept arbitrary additional properties.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "axis": (bt_vector3d389.BTVector3d389,),  # noqa: E501
            "bt_type": (str,),  # noqa: E501
            "major_radius": (float,),  # noqa: E501
            "minor_radius": (float,),  # noqa: E501
            "origin": (bt_vector3d389.BTVector3d389,),  # noqa: E501
            "type": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This composed model does not use a discriminator field.
        return None

    # Maps python attribute names to the JSON keys used on the wire.
    attribute_map = {
        "axis": "axis",  # noqa: E501
        "bt_type": "btType",  # noqa: E501
        "major_radius": "majorRadius",  # noqa: E501
        "minor_radius": "minorRadius",  # noqa: E501
        "origin": "origin",  # noqa: E501
        "type": "type",  # noqa: E501
    }

    # Internal bookkeeping attribute names (never part of the API payload).
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_torus_description1834.BTTorusDescription1834 - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            axis (bt_vector3d389.BTVector3d389): [optional] # noqa: E501
            bt_type (str): [optional] # noqa: E501
            major_radius (float): [optional] # noqa: E501
            minor_radius (float): [optional] # noqa: E501
            origin (bt_vector3d389.BTVector3d389): [optional] # noqa: E501
            type (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        # constant args are forwarded unchanged to every composed instance
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # distribute the incoming values over the allOf component models
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_surface_description1564.BTSurfaceDescription1564,
                bt_torus_description1834_all_of.BTTorusDescription1834AllOf,
            ],
            "oneOf": [],
        }
| |
""" The tests below don't use translation at all. They run the GCs by
instantiating them and asking them to allocate memory by calling their
methods directly. The tests need to maintain by hand what the GC should
see as the list of roots (stack and prebuilt objects).
"""
# XXX VERY INCOMPLETE, low coverage
import py
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.memory.gctypelayout import TypeLayoutBuilder, FIN_HANDLER_ARRAY
from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int
from rpython.memory.gc import minimark, incminimark
from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers
from rpython.rlib.debug import debug_print
import pdb
# Machine word size in bytes (LONG_BIT is in bits).
WORD = LONG_BIT // 8

# Raw (non-GC) array of addresses; DirectRootWalker uses it to hand the
# stack roots to the GC in a form the GC can update in place.
ADDR_ARRAY = lltype.Array(llmemory.Address)

# Doubly-linked-list node; the forward reference lets 'S' point to itself.
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
                         ('x', lltype.Signed),
                         ('prev', lltype.Ptr(S)),
                         ('next', lltype.Ptr(S))))

# Prebuilt non-GC struct holding pointers into the GC heap.
RAW = lltype.Struct('RAW', ('p', lltype.Ptr(S)), ('q', lltype.Ptr(S)))

# GC array of pointers, and a GC struct wrapping such an array.
VAR = lltype.GcArray(lltype.Ptr(S))
VARNODE = lltype.GcStruct('VARNODE', ('a', lltype.Ptr(VAR)))
class DirectRootWalker(object):
    """Root walker driven directly by the test harness.

    Instead of scanning a real stack, it reports the tester's manually
    maintained 'stackroots' list, plus the static (prebuilt) pointers
    recorded by the layout builder.
    """

    def __init__(self, tester):
        self.tester = tester

    def walk_roots(self, collect_stack_root,
                   collect_static_in_prebuilt_nongc,
                   collect_static_in_prebuilt_gc,
                   is_minor=False):
        # Each callback may be None, meaning that kind of root is not
        # wanted for this collection.
        gc = self.tester.gc
        layoutbuilder = self.tester.layoutbuilder
        if collect_static_in_prebuilt_gc:
            for addrofaddr in layoutbuilder.addresses_of_static_ptrs:
                if addrofaddr.address[0]:
                    collect_static_in_prebuilt_gc(gc, addrofaddr)
        if collect_static_in_prebuilt_nongc:
            for addrofaddr in layoutbuilder.addresses_of_static_ptrs_in_nongc:
                if addrofaddr.address[0]:
                    collect_static_in_prebuilt_nongc(gc, addrofaddr)
        if collect_stack_root:
            stackroots = self.tester.stackroots
            # Copy the stack roots into a raw address array so the GC can
            # update each slot in place, then read the (possibly changed)
            # addresses back into the Python-level list.
            a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw')
            for i in range(len(a)):
                a[i] = llmemory.cast_ptr_to_adr(stackroots[i])
            a_base = lltype.direct_arrayitems(a)
            for i in range(len(a)):
                ai = lltype.direct_ptradd(a_base, i)
                collect_stack_root(gc, llmemory.cast_ptr_to_adr(ai))
            for i in range(len(a)):
                PTRTYPE = lltype.typeOf(stackroots[i])
                stackroots[i] = llmemory.cast_adr_to_ptr(a[i], PTRTYPE)
            lltype.free(a, flavor='raw')

    def _walk_prebuilt_gc(self, callback):
        # Nothing to do here: prebuilt GC objects are registered through
        # BaseDirectGCTest.consider_constant() instead.
        pass

    def finished_minor_collection(self):
        # Hook invoked by the GC after a minor collection; unused in tests.
        pass
class BaseDirectGCTest(object):
    """Common harness: builds a fresh GC instance (self.GCClass) for every
    test method and talks to it directly, maintaining the root set by hand
    through self.stackroots and consider_constant()."""

    GC_PARAMS = {}

    def setup_method(self, meth):
        from rpython.config.translationoption import get_combined_translation_config
        config = get_combined_translation_config(translating=True).translation
        self.stackroots = []
        # Per-method GC parameters (set as 'testmethod.GC_PARAMS = {...}')
        # override the class-level defaults.
        GC_PARAMS = self.GC_PARAMS.copy()
        if hasattr(meth, 'GC_PARAMS'):
            GC_PARAMS.update(meth.GC_PARAMS)
        GC_PARAMS['translated_to_c'] = False
        self.gc = self.GCClass(config, **GC_PARAMS)
        self.gc.DEBUG = True
        self.rootwalker = DirectRootWalker(self)
        self.gc.set_root_walker(self.rootwalker)
        self.layoutbuilder = TypeLayoutBuilder(self.GCClass)
        self.get_type_id = self.layoutbuilder.get_type_id
        gcdata = self.layoutbuilder.initialize_gc_query_function(self.gc)
        # no finalizer handlers are used by these tests: empty array
        ll_handlers = lltype.malloc(FIN_HANDLER_ARRAY, 0, immortal=True)
        gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
        self.gc.setup()

    def consider_constant(self, p):
        # Register a prebuilt (immortal) object so its GC pointers are
        # reported as static roots by the root walker.
        obj = p._obj
        TYPE = lltype.typeOf(obj)
        self.layoutbuilder.consider_constant(TYPE, obj, self.gc)

    def write(self, p, fieldname, newvalue):
        # Pointer-field store with write barrier, as translated code would do.
        if self.gc.needs_write_barrier:
            addr_struct = llmemory.cast_ptr_to_adr(p)
            self.gc.write_barrier(addr_struct)
        setattr(p, fieldname, newvalue)

    def writearray(self, p, index, newvalue):
        # Array-item store with write barrier; uses the array (card-marking)
        # variant when the GC provides one.
        if self.gc.needs_write_barrier:
            addr_struct = llmemory.cast_ptr_to_adr(p)
            if hasattr(self.gc, 'write_barrier_from_array'):
                self.gc.write_barrier_from_array(addr_struct, index)
            else:
                self.gc.write_barrier(addr_struct)
        p[index] = newvalue

    def malloc(self, TYPE, n=None):
        # Allocate through the GC under test; 'n' is the length for varsized
        # types and None for fixed-size ones.
        addr = self.gc.malloc(self.get_type_id(TYPE), n)
        debug_print(self.gc)
        obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
        if not self.gc.malloc_zero_filled:
            zero_gc_pointers_inside(obj_ptr, TYPE)
        return obj_ptr
class DirectGCTest(BaseDirectGCTest):
    """GC-independent tests, inherited by one subclass per GC implementation.

    The allocation / stackroots / collect() ordering in each test is the
    substance of the test itself.
    """

    def test_simple(self):
        # an object reachable from a stack root survives a collection
        p = self.malloc(S)
        p.x = 5
        self.stackroots.append(p)
        self.gc.collect()
        p = self.stackroots[0]
        assert p.x == 5

    def test_missing_stack_root(self):
        # an object with no root must be freed by collect()
        p = self.malloc(S)
        p.x = 5
        self.gc.collect()    # 'p' should go away
        py.test.raises(RuntimeError, 'p.x')

    def test_prebuilt_gc(self):
        # a chain hanging off a prebuilt GC object stays alive
        k = lltype.malloc(S, immortal=True)
        k.x = 42
        self.consider_constant(k)
        self.write(k, 'next', self.malloc(S))
        k.next.x = 43
        self.write(k.next, 'next', self.malloc(S))
        k.next.next.x = 44
        self.gc.collect()
        assert k.x == 42
        assert k.next.x == 43
        assert k.next.next.x == 44

    def test_prebuilt_nongc(self):
        # GC objects referenced only from a prebuilt non-GC struct survive
        raw = lltype.malloc(RAW, immortal=True)
        self.consider_constant(raw)
        raw.p = self.malloc(S)
        raw.p.x = 43
        raw.q = self.malloc(S)
        raw.q.x = 44
        self.gc.collect()
        assert raw.p.x == 43
        assert raw.q.x == 44

    def test_many_objects(self):
        # grow three doubly-linked loops, rooted in a stack root, a prebuilt
        # GC node and a prebuilt non-GC struct respectively
        def alloc2(i):
            a1 = self.malloc(S)
            a1.x = i
            # a1 must be kept alive while a2 is allocated (which may collect)
            self.stackroots.append(a1)
            a2 = self.malloc(S)
            a1 = self.stackroots.pop()
            a2.x = i + 1000
            return a1, a2
        def growloop(loop, a1, a2):
            self.write(a1, 'prev', loop.prev)
            self.write(a1.prev, 'next', a1)
            self.write(a1, 'next', loop)
            self.write(loop, 'prev', a1)
            self.write(a2, 'prev', loop)
            self.write(a2, 'next', loop.next)
            self.write(a2.next, 'prev', a2)
            self.write(loop, 'next', a2)
        def newloop():
            p = self.malloc(S)
            p.next = p          # initializing stores, no write barrier
            p.prev = p
            return p
        # a loop attached to a stack root
        self.stackroots.append(newloop())
        # another loop attached to a prebuilt gc node
        k = lltype.malloc(S, immortal=True)
        k.next = k
        k.prev = k
        self.consider_constant(k)
        # a third loop attached to a prebuilt nongc
        raw = lltype.malloc(RAW, immortal=True)
        self.consider_constant(raw)
        raw.p = newloop()
        # run!
        for i in range(100):
            a1, a2 = alloc2(i)
            growloop(self.stackroots[0], a1, a2)
            a1, a2 = alloc2(i)
            growloop(k, a1, a2)
            a1, a2 = alloc2(i)
            growloop(raw.p, a1, a2)

    def test_varsized_from_stack(self):
        # scatter S objects into varsized arrays held as stack roots and
        # check everything survives collections
        expected = {}
        def verify():
            for (index, index2), value in expected.items():
                assert self.stackroots[index][index2].x == value
        x = 0
        for i in range(40):
            assert 'DEAD' not in repr(self.stackroots)
            a = self.malloc(VAR, i)
            assert 'DEAD' not in repr(a)
            self.stackroots.append(a)
            print 'ADDED TO STACKROOTS:', llmemory.cast_adr_to_int(
                llmemory.cast_ptr_to_adr(a))
            assert 'DEAD' not in repr(self.stackroots)
            for j in range(5):
                assert 'DEAD' not in repr(self.stackroots)
                p = self.malloc(S)
                assert 'DEAD' not in repr(self.stackroots)
                p.x = x
                index = x % len(self.stackroots)
                if index > 0:
                    index2 = (x / len(self.stackroots)) % index
                    a = self.stackroots[index]
                    assert len(a) == index
                    self.writearray(a, index2, p)
                    expected[index, index2] = x
                x += 1291
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()

    def test_varsized_from_prebuilt_gc(self):
        # same idea, but the arrays hang off prebuilt VARNODE structs
        expected = {}
        def verify():
            for (index, index2), value in expected.items():
                assert prebuilt[index].a[index2].x == value
        x = 0
        prebuilt = [lltype.malloc(VARNODE, immortal=True, zero=True)
                    for i in range(40)]
        for node in prebuilt:
            self.consider_constant(node)
        for i in range(len(prebuilt)):
            self.write(prebuilt[i], 'a', self.malloc(VAR, i))
            for j in range(20):
                p = self.malloc(S)
                p.x = x
                index = x % (i+1)
                if index > 0:
                    index2 = (x / (i+1)) % index
                    a = prebuilt[index].a
                    assert len(a) == index
                    self.writearray(a, index2, p)
                    expected[index, index2] = x
                x += 1291
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()

    def test_id(self):
        # gc.id() of an object must be stable across collections
        ids = {}
        def allocate_bunch(count=50):
            base = len(self.stackroots)
            for i in range(count):
                p = self.malloc(S)
                self.stackroots.append(p)
            for i in range(count):
                j = base + (i*1291) % count
                pid = self.gc.id(self.stackroots[j])
                assert isinstance(pid, int)
                ids[j] = pid
        def verify():
            for j, expected in ids.items():
                assert self.gc.id(self.stackroots[j]) == expected
        allocate_bunch(5)
        verify()
        allocate_bunch(75)
        verify()
        allocate_bunch(5)
        verify()
        self.gc.collect()
        verify()
        self.gc.collect()
        verify()

    def test_identityhash(self):
        # a "does not crash" kind of test
        p_const = lltype.malloc(S, immortal=True)
        self.consider_constant(p_const)
        # (1) p is in the nursery
        self.gc.collect()
        p = self.malloc(S)
        hash = self.gc.identityhash(p)
        print hash
        assert is_valid_int(hash)
        assert hash == self.gc.identityhash(p)
        self.stackroots.append(p)
        for i in range(6):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (2) p is an older object
        p = self.malloc(S)
        self.stackroots.append(p)
        self.gc.collect()
        hash = self.gc.identityhash(self.stackroots[-1])
        print hash
        assert is_valid_int(hash)
        for i in range(6):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (3) p is a gen3 object (for hybrid)
        p = self.malloc(S)
        self.stackroots.append(p)
        for i in range(6):
            self.gc.collect()
        hash = self.gc.identityhash(self.stackroots[-1])
        print hash
        assert is_valid_int(hash)
        for i in range(2):
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (4) p is a prebuilt object
        hash = self.gc.identityhash(p_const)
        print hash
        assert is_valid_int(hash)
        assert hash == self.gc.identityhash(p_const)
        # (5) p is actually moving (for the markcompact gc only?)
        p0 = self.malloc(S)
        self.stackroots.append(p0)
        p = self.malloc(S)
        self.stackroots.append(p)
        hash = self.gc.identityhash(p)
        self.stackroots.pop(-2)
        self.gc.collect()     # p0 goes away, p shifts left
        assert hash == self.gc.identityhash(self.stackroots[-1])
        self.gc.collect()
        assert hash == self.gc.identityhash(self.stackroots[-1])
        self.stackroots.pop()
        # (6) ask for the hash of varsized objects, larger and larger
        for i in range(10):
            self.gc.collect()
            p = self.malloc(VAR, i)
            self.stackroots.append(p)
            hash = self.gc.identityhash(p)
            self.gc.collect()
            assert hash == self.gc.identityhash(self.stackroots[-1])
            self.stackroots.pop()
        # (7) the same, but the objects are dying young
        for i in range(10):
            self.gc.collect()
            p = self.malloc(VAR, i)
            self.stackroots.append(p)
            hash1 = self.gc.identityhash(p)
            hash2 = self.gc.identityhash(p)
            assert hash1 == hash2
            self.stackroots.pop()

    def test_memory_alignment(self):
        # char arrays of every small size keep their contents after collect
        A1 = lltype.GcArray(lltype.Char)
        for i in range(50):
            p1 = self.malloc(A1, i)
            if i:
                p1[i-1] = chr(i)
            self.stackroots.append(p1)
        self.gc.collect()
        for i in range(1, 50):
            p = self.stackroots[-50+i]
            assert p[i-1] == chr(i)
class TestSemiSpaceGC(DirectGCTest):
    """Run all DirectGCTest tests with the semispace GC."""
    from rpython.memory.gc.semispace import SemiSpaceGC as GCClass

    def test_shrink_array(self):
        # shrink a 2-element varsized struct down to 1 element in place;
        # the fixed header field and the remaining item must be preserved
        S1 = lltype.GcStruct('S1', ('h', lltype.Char),
                             ('v', lltype.Array(lltype.Char)))
        p1 = self.malloc(S1, 2)
        p1.h = '?'
        for i in range(2):
            p1.v[i] = chr(50 + i)
        addr = llmemory.cast_ptr_to_adr(p1)
        ok = self.gc.shrink_array(addr, 1)
        assert ok
        assert p1.h == '?'
        assert len(p1.v) == 1
        for i in range(1):
            assert p1.v[i] == chr(50 + i)
class TestGenerationGC(TestSemiSpaceGC):
    """Same tests with the two-generation GC, plus generation-specific ones."""
    from rpython.memory.gc.generation import GenerationGC as GCClass

    def test_collect_gen(self):
        # collect(gen) must pick minor vs. full collection by generation:
        # 0 -> nursery only, anything else (or no arg) -> full semispace
        gc = self.gc
        old_semispace_collect = gc.semispace_collect
        old_collect_nursery = gc.collect_nursery
        calls = []
        def semispace_collect():
            calls.append('semispace_collect')
            return old_semispace_collect()
        def collect_nursery():
            calls.append('collect_nursery')
            return old_collect_nursery()
        gc.collect_nursery = collect_nursery
        gc.semispace_collect = semispace_collect

        gc.collect()
        assert calls == ['semispace_collect']
        calls = []

        gc.collect(0)
        assert calls == ['collect_nursery']
        calls = []

        gc.collect(1)
        assert calls == ['semispace_collect']
        calls = []

        gc.collect(9)
        assert calls == ['semispace_collect']
        calls = []

    def test_write_barrier_direct(self):
        # an old->young pointer installed with an explicit write barrier
        # must keep the young object alive across a minor collection
        s0 = lltype.malloc(S, immortal=True)
        self.consider_constant(s0)
        s = self.malloc(S)
        s.x = 1
        s0.next = s
        self.gc.write_barrier(llmemory.cast_ptr_to_adr(s0))
        self.gc.collect(0)
        assert s0.next.x == 1
class TestHybridGC(TestGenerationGC):
    """Hybrid GC tests, with tiny sizes so objects count as 'large' quickly."""
    from rpython.memory.gc.hybrid import HybridGC as GCClass

    GC_PARAMS = {'space_size': 48*WORD,
                 'min_nursery_size': 12*WORD,
                 'nursery_size': 12*WORD,
                 'large_object': 3*WORD,
                 'large_object_gcptrs': 3*WORD,
                 'generation3_collect_threshold': 5,
                 }

    def test_collect_gen(self):
        # like TestGenerationGC.test_collect_gen, but additionally checks
        # whether gen3 is being collected: only gen >= 2 (or no argument)
        # triggers a gen3 collection
        gc = self.gc
        old_semispace_collect = gc.semispace_collect
        old_collect_nursery = gc.collect_nursery
        calls = []
        def semispace_collect():
            gen3 = gc.is_collecting_gen3()
            calls.append(('semispace_collect', gen3))
            return old_semispace_collect()
        def collect_nursery():
            calls.append('collect_nursery')
            return old_collect_nursery()
        gc.collect_nursery = collect_nursery
        gc.semispace_collect = semispace_collect

        gc.collect()
        assert calls == [('semispace_collect', True)]
        calls = []

        gc.collect(0)
        assert calls == ['collect_nursery']
        calls = []

        gc.collect(1)
        assert calls == [('semispace_collect', False)]
        calls = []

        gc.collect(2)
        assert calls == [('semispace_collect', True)]
        calls = []

        gc.collect(9)
        assert calls == [('semispace_collect', True)]
        calls = []

    def test_identityhash(self):
        py.test.skip("does not support raw_mallocs(sizeof(S)+sizeof(hash))")
class TestMiniMarkGCSimple(DirectGCTest):
    from rpython.memory.gc.minimark import MiniMarkGC as GCClass
    from rpython.memory.gc.minimarktest import SimpleArenaCollection
    # test the GC itself, providing a simple class for ArenaCollection
    GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection}

    def test_card_marker(self):
        # stores into large-ish arrays go through the card-marking write
        # barrier; the stored objects must all survive
        for arraylength in (range(4, 17)
                            + [69]      # 3 bytes
                            + [300]):   # 10 bytes
            print 'array length:', arraylength
            nums = {}
            a = self.malloc(VAR, arraylength)
            self.stackroots.append(a)
            for i in range(50):
                p = self.malloc(S)
                p.x = -i
                a = self.stackroots[-1]
                index = (i*i) % arraylength
                self.writearray(a, index, p)
                nums[index] = p.x
                #
                for index, expected_x in nums.items():
                    assert a[index].x == expected_x
            self.stackroots.pop()
    test_card_marker.GC_PARAMS = {"card_page_indices": 4}

    def test_writebarrier_before_copy(self):
        largeobj_size = self.gc.nonlarge_max + 1
        self.gc.next_major_collection_threshold = 99999.0
        p_src = self.malloc(VAR, largeobj_size)
        p_dst = self.malloc(VAR, largeobj_size)
        # make them old
        self.stackroots.append(p_src)
        self.stackroots.append(p_dst)
        self.gc.collect()
        p_dst = self.stackroots.pop()
        p_src = self.stackroots.pop()
        #
        addr_src = llmemory.cast_ptr_to_adr(p_src)
        addr_dst = llmemory.cast_ptr_to_adr(p_dst)
        hdr_src = self.gc.header(addr_src)
        hdr_dst = self.gc.header(addr_dst)
        #
        assert hdr_src.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        #
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert res
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
        #
        hdr_src.tid &= ~minimark.GCFLAG_TRACK_YOUNG_PTRS  # pretend we have young ptrs
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert res  # we optimized it
        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS == 0  # and we copied the flag
        #
        self.gc.card_page_indices = 128  # force > 0
        hdr_src.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
        hdr_dst.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
        hdr_src.tid |= minimark.GCFLAG_HAS_CARDS
        hdr_src.tid |= minimark.GCFLAG_CARDS_SET
        # hdr_dst.tid does not have minimark.GCFLAG_HAS_CARDS
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
        assert not res  # there might be young ptrs, let ll_arraycopy to find them

    def test_writebarrier_before_copy_preserving_cards(self):
        from rpython.rtyper.lltypesystem import llarena
        tid = self.get_type_id(VAR)
        largeobj_size = self.gc.nonlarge_max + 1
        self.gc.next_major_collection_threshold = 99999.0
        addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True)
        addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True)
        hdr_src = self.gc.header(addr_src)
        hdr_dst = self.gc.header(addr_dst)
        #
        assert hdr_src.tid & minimark.GCFLAG_HAS_CARDS
        assert hdr_dst.tid & minimark.GCFLAG_HAS_CARDS
        #
        # mark two cards of the source: index 0 and one in the third page
        self.gc.write_barrier_from_array(addr_src, 0)
        index_in_third_page = int(2.5 * self.gc.card_page_indices)
        assert index_in_third_page < largeobj_size
        self.gc.write_barrier_from_array(addr_src, index_in_third_page)
        #
        assert hdr_src.tid & minimark.GCFLAG_CARDS_SET
        addr_byte = self.gc.get_card(addr_src, 0)
        assert ord(addr_byte.char[0]) == 0x01 | 0x04  # bits 0 and 2
        #
        res = self.gc.writebarrier_before_copy(addr_src, addr_dst,
                                               0, 0, 2*self.gc.card_page_indices)
        assert res
        #
        # the card bits must have been copied over to the destination
        assert hdr_dst.tid & minimark.GCFLAG_CARDS_SET
        addr_byte = self.gc.get_card(addr_dst, 0)
        assert ord(addr_byte.char[0]) == 0x01 | 0x04  # bits 0 and 2

    test_writebarrier_before_copy_preserving_cards.GC_PARAMS = {
        "card_page_indices": 4}
class TestMiniMarkGCFull(DirectGCTest):
    """Run the full direct-GC test suite against the (non-incremental)
    MiniMark GC."""
    from rpython.memory.gc.minimark import MiniMarkGC as GCClass
class TestIncrementalMiniMarkGCSimple(TestMiniMarkGCSimple):
    """Re-run the simple MiniMark tests against the incremental GC, plus
    tests specific to the incremental marking/sweeping state machine."""
    from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass

    def test_write_barrier_marking_simple(self):
        # A write into an old object that was already visited during the
        # MARKING phase must re-queue it via the write barrier.
        for i in range(2):
            curobj = self.malloc(S)
            curobj.x = i
            self.stackroots.append(curobj)

        oldobj = self.stackroots[-1]
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))

        assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0
        self.gc.debug_gc_step_until(incminimark.STATE_MARKING)
        oldobj = self.stackroots[-1]
        # object shifted by minor collect
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
        assert oldhdr.tid & incminimark.GCFLAG_VISITED == 0

        self.gc._minor_collection()
        self.gc.visit_all_objects_step(1)

        assert oldhdr.tid & incminimark.GCFLAG_VISITED

        # at this point the first object should have been processed
        newobj = self.malloc(S)
        self.write(oldobj,'next',newobj)
        # the write barrier must have re-queued oldobj
        assert self.gc.header(self.gc.old_objects_pointing_to_young.tolist()[0]) == oldhdr

        self.gc._minor_collection()
        self.gc.debug_check_consistency()

    def test_sweeping_simple(self):
        # Objects allocated while the GC is in the SWEEPING state must
        # not be freed by that same sweep.
        assert self.gc.gc_state == incminimark.STATE_SCANNING

        for i in range(2):
            curobj = self.malloc(S)
            curobj.x = i
            self.stackroots.append(curobj)

        self.gc.debug_gc_step_until(incminimark.STATE_SWEEPING)
        oldobj = self.stackroots[-1]
        oldhdr = self.gc.header(llmemory.cast_ptr_to_adr(oldobj))
        assert oldhdr.tid & incminimark.GCFLAG_VISITED

        newobj1 = self.malloc(S)
        newobj2 = self.malloc(S)
        newobj1.x = 1337
        newobj2.x = 1338
        self.write(oldobj,'next',newobj1)
        self.gc.debug_gc_step_until(incminimark.STATE_SCANNING)
        # should not be cleared even though it was allocated while sweeping
        newobj1 = oldobj.next
        assert newobj1.x == 1337

    def test_obj_on_escapes_on_stack(self):
        # An object unlinked from its owner during MARKING but re-rooted
        # directly on the stack must survive the full GC cycle.
        obj0 = self.malloc(S)

        self.stackroots.append(obj0)
        obj0.next = self.malloc(S)
        self.gc.debug_gc_step_until(incminimark.STATE_MARKING)
        obj0 = self.stackroots[-1]
        obj1 = obj0.next
        obj1.x = 13
        obj0.next = lltype.nullptr(S)
        self.stackroots.append(obj1)
        self.gc.debug_gc_step_until(incminimark.STATE_SCANNING)
        assert self.stackroots[1].x == 13
class TestIncrementalMiniMarkGCFull(DirectGCTest):
    """Direct tests for the incremental MiniMark GC: freshly allocated
    GC objects must have their pointer fields zeroed, while plain data
    fields stay uninitialized (reading them raises)."""
    from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass

    def test_malloc_fixedsize_no_cleanup(self):
        p = self.malloc(S)
        import pytest
        # ensure the non-pointer memory is uninitialized
        with pytest.raises(lltype.UninitializedMemoryAccess):
            x1 = p.x
        # ensure all the ptr fields are zeroed
        assert p.prev == lltype.nullptr(S)
        assert p.next == lltype.nullptr(S)

    def test_malloc_varsize_no_cleanup(self):
        x = lltype.Signed
        VAR1 = lltype.GcArray(x)
        p = self.malloc(VAR1, 5)
        import pytest
        with pytest.raises(lltype.UninitializedMemoryAccess):
            # NOTE(review): reading p[0] in the assert below already
            # raises UninitializedMemoryAccess, so the isinstance check
            # never completes and the following line is dead code --
            # it probably meant to peek at the raw storage instead;
            # confirm against upstream before changing.
            assert isinstance(p[0], lltype._uninitialized)
            x1 = p[0]

    def test_malloc_varsize_no_cleanup2(self):
        # VAR is a GcArray of pointers, so every item must come out
        # zeroed (null) without any explicit cleanup.
        p = self.malloc(VAR, 100)
        for i in range(100):
            assert p[i] == lltype.nullptr(S)

    def test_malloc_varsize_no_cleanup3(self):
        # A raw (non-GC) array is not zeroed: the first read raises.
        VAR1 = lltype.Array(lltype.Ptr(S))
        p1 = lltype.malloc(VAR1, 10, flavor='raw', track_allocation=False)
        import pytest
        with pytest.raises(lltype.UninitializedMemoryAccess):
            for i in range(10):
                assert p1[i] == lltype.nullptr(S)
                p1[i]._free()
            p1._free()

    def test_malloc_struct_of_ptr_struct(self):
        # Pointer fields of an inlined substructure are zeroed too.
        S3 = lltype.GcForwardReference()
        S3.become(lltype.GcStruct('S3',
                                  ('gcptr_struct', S),
                                  ('prev', lltype.Ptr(S)),
                                  ('next', lltype.Ptr(S))))
        s3 = self.malloc(S3)
        assert s3.gcptr_struct.prev == lltype.nullptr(S)
        assert s3.gcptr_struct.next == lltype.nullptr(S)

    def test_malloc_array_of_ptr_struct(self):
        ARR_OF_PTR_STRUCT = lltype.GcArray(lltype.Ptr(S))
        arr_of_ptr_struct = self.malloc(ARR_OF_PTR_STRUCT, 5)
        for i in range(5):
            # each slot starts out null... (an exact duplicate of this
            # assert was removed: repeating a side-effect-free check
            # added nothing)
            assert arr_of_ptr_struct[i] == lltype.nullptr(S)
            # ...and a freshly stored struct has zeroed ptr fields
            arr_of_ptr_struct[i] = self.malloc(S)
            assert arr_of_ptr_struct[i].prev == lltype.nullptr(S)
            assert arr_of_ptr_struct[i].next == lltype.nullptr(S)

    # fail for now
    def xxx_test_malloc_array_of_ptr_arr(self):
        ARR_OF_PTR_ARR = lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S))))
        arr_of_ptr_arr = self.malloc(ARR_OF_PTR_ARR, 10)
        self.stackroots.append(arr_of_ptr_arr)
        for i in range(10):
            assert arr_of_ptr_arr[i] == lltype.nullptr(lltype.GcArray(lltype.Ptr(S)))
        for i in range(10):
            self.writearray(arr_of_ptr_arr, i,
                            self.malloc(lltype.GcArray(lltype.Ptr(S)), i))
            #self.stackroots.append(arr_of_ptr_arr[i])
            #debug_print(arr_of_ptr_arr[i])
            for elem in arr_of_ptr_arr[i]:
                #self.stackroots.append(elem)
                assert elem == lltype.nullptr(S)
                elem = self.malloc(S)
                assert elem.prev == lltype.nullptr(S)
                assert elem.next == lltype.nullptr(S)
| |
from copy import deepcopy
from django.contrib import admin
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import NoReverseMatch
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from mezzanine.conf import settings
from mezzanine.core.admin import DisplayableAdmin, DisplayableAdminForm
from mezzanine.pages.models import Page, RichTextPage, Link
from mezzanine.utils.urls import admin_url
# Clone Displayable's fieldsets and extend the first (main) section with
# the page-specific navigation/login fields.
page_fieldsets = deepcopy(DisplayableAdmin.fieldsets)
page_fieldsets[0][1]["fields"] += ("in_menus", "login_required",)
class PageAdminForm(DisplayableAdminForm):

    def clean_slug(self):
        """
        Record the previous slug when it is about to change: the old
        value is stashed on the instance as ``_old_slug``, which
        ``PageAdmin.save_model()`` later uses to propagate the slug
        change down the page tree.
        """
        new_slug = self.cleaned_data['slug']
        if new_slug != self.instance.slug:
            self.instance._old_slug = self.instance.slug
        return new_slug
class PageAdmin(DisplayableAdmin):
    """
    Admin class for the ``Page`` model and all subclasses of
    ``Page``. Handles redirections between admin interfaces for the
    ``Page`` model and its subclasses.
    """

    form = PageAdminForm
    fieldsets = page_fieldsets
    change_list_template = "admin/pages/page/change_list.html"

    def __init__(self, *args, **kwargs):
        """
        For ``Page`` subclasses that are registered with an Admin class
        that doesn't implement fieldsets, add any extra model fields
        to this instance's fieldsets. This mimics Django's behaviour of
        adding all model fields when no fieldsets are defined on the
        Admin class.
        """
        super(PageAdmin, self).__init__(*args, **kwargs)
        # Test that the fieldsets don't differ from PageAdmin's.
        if self.model is not Page and self.fieldsets == PageAdmin.fieldsets:
            # Make a copy so that we aren't modifying other Admin
            # classes' fieldsets.
            self.fieldsets = deepcopy(self.fieldsets)
            # Insert each field between the publishing fields and nav
            # fields. Do so in reverse order to retain the order of
            # the model's fields.
            exclude_fields = Page._meta.get_all_field_names() + ["page_ptr"]
            try:
                exclude_fields.extend(self.exclude)
            except (AttributeError, TypeError):
                pass
            try:
                exclude_fields.extend(self.form.Meta.exclude)
            except (AttributeError, TypeError):
                pass
            fields = self.model._meta.fields + self.model._meta.many_to_many
            for field in reversed(fields):
                if field.name not in exclude_fields and field.editable:
                    self.fieldsets[0][1]["fields"].insert(3, field.name)

    def in_menu(self):
        """
        Hide subclasses from the admin menu.
        """
        return self.model is Page

    def _check_permission(self, request, page, permission):
        """
        Runs the custom permission check and raises an
        exception if False.
        """
        if not getattr(page, "can_" + permission)(request):
            raise PermissionDenied

    def add_view(self, request, **kwargs):
        """
        For the ``Page`` model, redirect to the add view for the
        first page model, based on the ``ADD_PAGE_ORDER`` setting.
        """
        if self.model is Page:
            return HttpResponseRedirect(self.get_content_models()[0].add_url)
        return super(PageAdmin, self).add_view(request, **kwargs)

    def change_view(self, request, object_id, **kwargs):
        """
        For the ``Page`` model, check ``page.get_content_model()``
        for a subclass and redirect to its admin change view.
        Also enforce custom change permissions for the page instance.
        """
        page = get_object_or_404(Page, pk=object_id)
        content_model = page.get_content_model()
        self._check_permission(request, content_model, "change")
        if self.model is Page:
            if content_model is not None:
                change_url = admin_url(content_model.__class__, "change",
                                       content_model.id)
                return HttpResponseRedirect(change_url)
        kwargs.setdefault("extra_context", {})
        kwargs["extra_context"].update({
            "hide_delete_link": not content_model.can_delete(request),
            "hide_slug_field": content_model.overridden(),
        })
        return super(PageAdmin, self).change_view(request, object_id, **kwargs)

    def delete_view(self, request, object_id, **kwargs):
        """
        Enforce custom delete permissions for the page instance.
        """
        page = get_object_or_404(Page, pk=object_id)
        content_model = page.get_content_model()
        self._check_permission(request, content_model, "delete")
        return super(PageAdmin, self).delete_view(request, object_id, **kwargs)

    def changelist_view(self, request, extra_context=None):
        """
        Redirect to the ``Page`` changelist view for ``Page``
        subclasses.
        """
        if self.model is not Page:
            return HttpResponseRedirect(admin_url(Page, "changelist"))
        if not extra_context:
            extra_context = {}
        extra_context["page_models"] = self.get_content_models()
        return super(PageAdmin, self).changelist_view(request, extra_context)

    def save_model(self, request, obj, form, change):
        """
        Set the ID of the parent page if passed in via querystring, and make
        sure the new slug propagates to all descendant pages.
        """
        if change and hasattr(obj, "_old_slug"):
            # _old_slug was set in PageAdminForm.clean_slug().
            new_slug = obj.slug
            obj.slug = obj._old_slug
            obj.set_slug(new_slug)
        # Force parent to be saved to trigger handling of ordering and slugs.
        parent = request.GET.get("parent")
        if parent is not None and not change:
            obj.parent_id = parent
            obj.save()
        super(PageAdmin, self).save_model(request, obj, form, change)

    def _maintain_parent(self, request, response):
        """
        Maintain the parent ID in the querystring for response_add and
        response_change.
        """
        location = response._headers.get("location")
        parent = request.GET.get("parent")
        if parent and location and "?" not in location[1]:
            url = "%s?parent=%s" % (location[1], parent)
            return HttpResponseRedirect(url)
        return response

    def response_add(self, request, obj):
        """
        Enforce page permissions and maintain the parent ID in the
        querystring.
        """
        response = super(PageAdmin, self).response_add(request, obj)
        return self._maintain_parent(request, response)

    def response_change(self, request, obj):
        """
        Enforce page permissions and maintain the parent ID in the
        querystring.
        """
        response = super(PageAdmin, self).response_change(request, obj)
        return self._maintain_parent(request, response)

    @classmethod
    def get_content_models(cls):
        """
        Return all Page subclasses that are admin registered, ordered
        based on the ``ADD_PAGE_ORDER`` setting.
        """
        models = []
        for model in Page.get_content_models():
            try:
                admin_url(model, "add")
            except NoReverseMatch:
                continue
            else:
                setattr(model, "name", model._meta.verbose_name)
                setattr(model, "add_url", admin_url(model, "add"))
                models.append(model)
        order = [name.lower() for name in settings.ADD_PAGE_ORDER]

        def sort_key(page):
            name = "%s.%s" % (page._meta.app_label, page._meta.object_name)
            try:
                # BUGFIX: the previous version called order.index() but
                # fell through and returned None on success, so the
                # ADD_PAGE_ORDER positions were never used. Models named
                # in ADD_PAGE_ORDER now sort first, in that order...
                return (order.index(name.lower()), "")
            except ValueError:
                # ...and every unlisted model follows, sorted by its
                # verbose name.
                return (len(order), page.name)
        return sorted(models, key=sort_key)
# Drop the meta data fields, and move slug towards the top.
# Links reuse only the first (main) section of the page fieldsets.
link_fieldsets = deepcopy(page_fieldsets[:1])
link_fieldsets[0][1]["fields"] = link_fieldsets[0][1]["fields"][:-1]
link_fieldsets[0][1]["fields"].insert(1, "slug")
class LinkAdmin(PageAdmin):

    fieldsets = link_fieldsets

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Mark the slug field as mandatory for links.
        """
        is_slug = db_field.name == "slug"
        if is_slug:
            kwargs["required"] = True
        formfield = super(LinkAdmin, self).formfield_for_dbfield(db_field,
                                                                 **kwargs)
        return formfield

    def save_form(self, request, form, change):
        """
        Keep newly created links out of the sitemap.
        """
        obj = form.save(commit=False)
        is_new = not obj.id
        if is_new and "in_sitemap" not in form.fields:
            obj.in_sitemap = False
        return super(LinkAdmin, self).save_form(request, form, change)
# Register the page content types with their admin classes.
admin.site.register(Page, PageAdmin)
admin.site.register(RichTextPage, PageAdmin)
admin.site.register(Link, LinkAdmin)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova.conf
from nova.notifications.objects import base
from nova.notifications.objects import flavor as flavor_payload
from nova.notifications.objects import keypair as keypair_payload
from nova.objects import base as nova_base
from nova.objects import fields
CONF = nova.conf.CONF
@nova_base.NovaObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
    """Common payload carried by versioned instance notifications.

    Most fields are copied straight off the ``instance`` object through
    SCHEMA by populate_schema(); the remaining fields (ip_addresses,
    flavor, block_devices, request_id and the action initiator pair)
    are computed in __init__.
    """
    # Maps payload field name -> (keyword passed to populate_schema(),
    # attribute read from that object).
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),
        'image_uuid': ('instance', 'image_ref'),
        'key_name': ('instance', 'key_name'),
        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),
        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),
        'updated_at': ('instance', 'updated_at'),
        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),
        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
        'auto_disk_config': ('instance', 'auto_disk_config')
    }
    # Version 1.0: Initial version
    # Version 1.1: add locked and display_description field
    # Version 1.2: Add auto_disk_config field
    # Version 1.3: Add key_name field
    # Version 1.4: Add BDM related data
    # Version 1.5: Add updated_at field
    # Version 1.6: Add request_id field
    # Version 1.7: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.7'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),
        'ip_addresses': fields.ListOfObjectsField('IpPayload'),
        'block_devices': fields.ListOfObjectsField('BlockDevicePayload',
                                                   nullable=True),
        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
        'auto_disk_config': fields.DiskConfigField(),
        'request_id': fields.StringField(nullable=True),
        'action_initiator_user': fields.StringField(nullable=True),
        'action_initiator_project': fields.StringField(nullable=True),
    }

    def __init__(self, context, instance, bdms=None):
        """Build the payload from a request context and an instance;
        ``bdms``, when given, overrides loading block device mappings
        from the instance itself.
        """
        super(InstancePayload, self).__init__()
        network_info = instance.get_network_info()
        self.ip_addresses = IpPayload.from_network_info(network_info)
        self.flavor = flavor_payload.FlavorPayload(flavor=instance.flavor)
        # Prefer explicitly passed BDMs; otherwise derive them from the
        # instance (which may yield None when BDM reporting is disabled).
        if bdms is not None:
            self.block_devices = BlockDevicePayload.from_bdms(bdms)
        else:
            self.block_devices = BlockDevicePayload.from_instance(instance)
        # NOTE(Kevin_Zheng): Don't include request_id for periodic tasks,
        # RequestContext for periodic tasks does not include project_id
        # and user_id. Consider modify this once periodic tasks got a
        # consistent request_id.
        self.request_id = context.request_id if (context.project_id and
                                                 context.user_id) else None
        self.action_initiator_user = context.user_id
        self.action_initiator_project = context.project_id
        # Copy all the SCHEMA-mapped attributes off the instance.
        self.populate_schema(instance=instance)
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionPayload(InstancePayload):
    """Payload for instance action notifications; extends the common
    instance payload with the fault (if any) raised by the action."""
    # No SCHEMA as all the additional fields are calculated

    # Version 1.1: locked and display_description added to InstancePayload
    # Version 1.2: Added auto_disk_config field to InstancePayload
    # Version 1.3: Added key_name field to InstancePayload
    # Version 1.4: Add BDM related data
    # Version 1.5: Added updated_at field to InstancePayload
    # Version 1.6: Added request_id field to InstancePayload
    # Version 1.7: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.7'
    fields = {
        'fault': fields.ObjectField('ExceptionPayload', nullable=True),
        'request_id': fields.StringField(nullable=True),
    }

    def __init__(self, context, instance, fault, bdms=None):
        super(InstanceActionPayload, self).__init__(context=context,
                                                    instance=instance,
                                                    bdms=bdms)
        self.fault = fault
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumePayload(InstanceActionPayload):
    """Payload for instance actions that concern a single volume."""
    # Version 1.0: Initial version
    # Version 1.1: Added key_name field to InstancePayload
    # Version 1.2: Add BDM related data
    # Version 1.3: Added updated_at field to InstancePayload
    # Version 1.4: Added request_id field to InstancePayload
    # Version 1.5: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.5'
    fields = {
        'volume_id': fields.UUIDField()
    }

    def __init__(self, context, instance, fault, volume_id):
        super(InstanceActionVolumePayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault)
        self.volume_id = volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapPayload(InstanceActionPayload):
    """Payload for the volume swap action; carries the IDs of both the
    volume being replaced and its replacement."""
    # No SCHEMA as all the additional fields are calculated

    # Version 1.1: locked and display_description added to InstancePayload
    # Version 1.2: Added auto_disk_config field to InstancePayload
    # Version 1.3: Added key_name field to InstancePayload
    # Version 1.4: Add BDM related data
    # Version 1.5: Added updated_at field to InstancePayload
    # Version 1.6: Added request_id field to InstancePayload
    # Version 1.7: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.7'
    fields = {
        'old_volume_id': fields.UUIDField(),
        'new_volume_id': fields.UUIDField(),
    }

    def __init__(self, context, instance, fault, old_volume_id, new_volume_id):
        super(InstanceActionVolumeSwapPayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault)
        self.old_volume_id = old_volume_id
        self.new_volume_id = new_volume_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreatePayload(InstanceActionPayload):
    """Payload for instance.create notifications; adds keypairs, tags
    and trusted image certificate IDs on top of the action payload."""
    # No SCHEMA as all the additional fields are calculated

    # Version 1.2: Initial version. It starts at 1.2 to match with the version
    #              of the InstanceActionPayload at the time when this specific
    #              payload is created as a child of it so that the
    #              instance.create notification using this new payload does not
    #              have decreasing version.
    #         1.3: Add keypairs field
    #         1.4: Add key_name field to InstancePayload
    #         1.5: Add BDM related data to InstancePayload
    #         1.6: Add tags field to InstanceCreatePayload
    #         1.7: Added updated_at field to InstancePayload
    #         1.8: Added request_id field to InstancePayload
    #         1.9: Add trusted_image_certificates field to
    #              InstanceCreatePayload
    #         1.10: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.10'
    fields = {
        'keypairs': fields.ListOfObjectsField('KeypairPayload'),
        'tags': fields.ListOfStringsField(),
        'trusted_image_certificates': fields.ListOfStringsField(
            nullable=True)
    }

    def __init__(self, context, instance, fault, bdms):
        super(InstanceCreatePayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault,
                bdms=bdms)
        # Wrap each keypair in its own versioned payload.
        self.keypairs = [keypair_payload.KeypairPayload(keypair=keypair)
                         for keypair in instance.keypairs]
        self.tags = [instance_tag.tag
                     for instance_tag in instance.tags]
        # Only populated when the instance carries trusted certs.
        self.trusted_image_certificates = None
        if instance.trusted_certs:
            self.trusted_image_certificates = instance.trusted_certs.ids
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepPayload(InstanceActionPayload):
    """Payload for resize-prep notifications; adds the target flavor."""
    # No SCHEMA as all the additional fields are calculated

    # Version 1.0: Initial version
    # Version 1.1: Added request_id field to InstancePayload
    # Version 1.2: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.2'
    fields = {
        'new_flavor': fields.ObjectField('FlavorPayload', nullable=True)
    }

    def __init__(self, context, instance, fault, new_flavor):
        super(InstanceActionResizePrepPayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault)
        self.new_flavor = new_flavor
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdatePayload(InstancePayload):
    """Payload for instance.update notifications; adds the state
    transition, audit period, bandwidth usage, previous display name
    and current tags."""
    # Version 1.0: Initial version
    # Version 1.1: locked and display_description added to InstancePayload
    # Version 1.2: Added tags field
    # Version 1.3: Added auto_disk_config field to InstancePayload
    # Version 1.4: Added key_name field to InstancePayload
    # Version 1.5: Add BDM related data
    # Version 1.6: Added updated_at field to InstancePayload
    # Version 1.7: Added request_id field to InstancePayload
    # Version 1.8: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.8'
    fields = {
        'state_update': fields.ObjectField('InstanceStateUpdatePayload'),
        'audit_period': fields.ObjectField('AuditPeriodPayload'),
        'bandwidth': fields.ListOfObjectsField('BandwidthPayload'),
        'old_display_name': fields.StringField(nullable=True),
        'tags': fields.ListOfStringsField(),
    }

    def __init__(self, context, instance, state_update, audit_period,
                 bandwidth, old_display_name):
        super(InstanceUpdatePayload, self).__init__(
            context=context, instance=instance)
        self.state_update = state_update
        self.audit_period = audit_period
        self.bandwidth = bandwidth
        self.old_display_name = old_display_name
        # NOTE(review): iterates instance.tags.objects here while
        # InstanceCreatePayload iterates instance.tags directly --
        # confirm both produce the same tag sequence.
        self.tags = [instance_tag.tag
                     for instance_tag in instance.tags.objects]
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescuePayload(InstanceActionPayload):
    """Payload for rescue notifications; adds the rescue image ref."""
    # Version 1.0: Initial version
    # Version 1.1: Added request_id field to InstancePayload
    # Version 1.2: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.2'
    fields = {
        'rescue_image_ref': fields.UUIDField(nullable=True)
    }

    def __init__(self, context, instance, fault, rescue_image_ref):
        super(InstanceActionRescuePayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault)
        self.rescue_image_ref = rescue_image_ref
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRebuildPayload(InstanceActionPayload):
    """Payload for instance.rebuild notifications; adds the trusted
    image certificate IDs, when the instance carries any."""
    # No SCHEMA as all the additional fields are calculated

    # Version 1.7: Initial version. It starts at 1.7 to equal one more than
    #              the version of the InstanceActionPayload at the time
    #              when this specific payload is created so that the
    #              instance.rebuild.* notifications using this new payload
    #              signal the change of nova_object.name.
    # Version 1.8: Added action_initiator_user and action_initiator_project to
    #              InstancePayload
    VERSION = '1.8'
    fields = {
        'trusted_image_certificates': fields.ListOfStringsField(
            nullable=True)
    }

    def __init__(self, context, instance, fault, bdms=None):
        super(InstanceActionRebuildPayload, self).__init__(
                context=context,
                instance=instance,
                fault=fault,
                bdms=bdms)
        # Only populated when the instance carries trusted certs.
        self.trusted_image_certificates = None
        if instance.trusted_certs:
            self.trusted_image_certificates = instance.trusted_certs.ids
@nova_base.NovaObjectRegistry.register_notification
class IpPayload(base.NotificationPayloadBase):
    """One fixed IP of one virtual interface, as reported in the
    ``ip_addresses`` list of instance notification payloads."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'label': fields.StringField(),
        'mac': fields.MACAddressField(),
        'meta': fields.DictOfStringsField(),
        'port_uuid': fields.UUIDField(nullable=True),
        'version': fields.IntegerField(),
        'address': fields.IPV4AndV6AddressField(),
        'device_name': fields.StringField(nullable=True)
    }

    def __init__(self, label, mac, meta, port_uuid, version, address,
                 device_name):
        super(IpPayload, self).__init__()
        # Copy every constructor argument onto the matching field.
        values = (('label', label), ('mac', mac), ('meta', meta),
                  ('port_uuid', port_uuid), ('version', version),
                  ('address', address), ('device_name', device_name))
        for field_name, value in values:
            setattr(self, field_name, value)

    @classmethod
    def from_network_info(cls, network_info):
        """Returns a list of IpPayload object based on the passed
        network_info.
        """
        if network_info is None:
            return []
        # One payload per fixed IP per virtual interface.
        return [
            cls(label=vif["network"]["label"],
                mac=vif["address"],
                meta=vif["meta"],
                port_uuid=vif["id"],
                version=ip["version"],
                address=ip["address"],
                device_name=vif["devname"])
            for vif in network_info
            for ip in vif.fixed_ips()
        ]
@nova_base.NovaObjectRegistry.register_notification
class BandwidthPayload(base.NotificationPayloadBase):
    """Per-network bandwidth counters; used by the ``bandwidth`` list
    of InstanceUpdatePayload."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'network_name': fields.StringField(),
        'in_bytes': fields.IntegerField(),
        'out_bytes': fields.IntegerField(),
    }

    def __init__(self, network_name, in_bytes, out_bytes):
        super(BandwidthPayload, self).__init__()
        self.network_name = network_name
        self.in_bytes = in_bytes
        self.out_bytes = out_bytes
@nova_base.NovaObjectRegistry.register_notification
class AuditPeriodPayload(base.NotificationPayloadBase):
    """The start/end of the audit period covered; used by the
    ``audit_period`` field of InstanceUpdatePayload."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'audit_period_beginning': fields.DateTimeField(),
        'audit_period_ending': fields.DateTimeField(),
    }

    def __init__(self, audit_period_beginning, audit_period_ending):
        super(AuditPeriodPayload, self).__init__()
        self.audit_period_beginning = audit_period_beginning
        self.audit_period_ending = audit_period_ending
@nova_base.NovaObjectRegistry.register_notification
class BlockDevicePayload(base.NotificationPayloadBase):
    """One volume-backed block device mapping of an instance, as
    reported in the ``block_devices`` list of instance payloads."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    # Maps payload field name -> (keyword passed to populate_schema(),
    # attribute read from the bdm object).
    SCHEMA = {
        'device_name': ('bdm', 'device_name'),
        'boot_index': ('bdm', 'boot_index'),
        'delete_on_termination': ('bdm', 'delete_on_termination'),
        'volume_id': ('bdm', 'volume_id'),
        'tag': ('bdm', 'tag')
    }
    fields = {
        'device_name': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'delete_on_termination': fields.BooleanField(default=False),
        'volume_id': fields.UUIDField(),
        'tag': fields.StringField(nullable=True)
    }

    def __init__(self, bdm):
        super(BlockDevicePayload, self).__init__()
        self.populate_schema(bdm=bdm)

    @classmethod
    def from_instance(cls, instance):
        """Returns a list of BlockDevicePayload objects based on the passed
        bdms.
        """
        # BDM reporting can be switched off entirely via config.
        if not CONF.notifications.bdms_in_notifications:
            return None
        instance_bdms = instance.get_bdms()
        if instance_bdms is None:
            return []
        return cls.from_bdms(instance_bdms)

    @classmethod
    def from_bdms(cls, bdms):
        """Returns a list of BlockDevicePayload objects based on the passed
        BlockDeviceMappingList.
        """
        # Only volume-backed mappings are reported.
        return [cls(bdm) for bdm in bdms if bdm.volume_id is not None]
@nova_base.NovaObjectRegistry.register_notification
class InstanceStateUpdatePayload(base.NotificationPayloadBase):
    """The vm_state/task_state transition reported by the
    ``state_update`` field of InstanceUpdatePayload."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'old_state': fields.StringField(nullable=True),
        'state': fields.StringField(nullable=True),
        'old_task_state': fields.StringField(nullable=True),
        'new_task_state': fields.StringField(nullable=True),
    }

    def __init__(self, old_state, state, old_task_state, new_task_state):
        super(InstanceStateUpdatePayload, self).__init__()
        self.old_state = old_state
        self.state = state
        self.old_task_state = old_task_state
        self.new_task_state = new_task_state
@base.notification_sample('instance-delete-start.json')
@base.notification_sample('instance-delete-end.json')
@base.notification_sample('instance-pause-start.json')
@base.notification_sample('instance-pause-end.json')
@base.notification_sample('instance-unpause-start.json')
@base.notification_sample('instance-unpause-end.json')
@base.notification_sample('instance-resize-start.json')
@base.notification_sample('instance-resize-end.json')
@base.notification_sample('instance-resize-error.json')
@base.notification_sample('instance-suspend-start.json')
@base.notification_sample('instance-suspend-end.json')
@base.notification_sample('instance-power_on-start.json')
@base.notification_sample('instance-power_on-end.json')
@base.notification_sample('instance-power_off-start.json')
@base.notification_sample('instance-power_off-end.json')
@base.notification_sample('instance-reboot-start.json')
@base.notification_sample('instance-reboot-end.json')
@base.notification_sample('instance-reboot-error.json')
@base.notification_sample('instance-shutdown-start.json')
@base.notification_sample('instance-shutdown-end.json')
@base.notification_sample('instance-interface_attach-start.json')
@base.notification_sample('instance-interface_attach-end.json')
@base.notification_sample('instance-interface_attach-error.json')
@base.notification_sample('instance-shelve-start.json')
@base.notification_sample('instance-shelve-end.json')
@base.notification_sample('instance-resume-start.json')
@base.notification_sample('instance-resume-end.json')
@base.notification_sample('instance-restore-start.json')
@base.notification_sample('instance-restore-end.json')
@base.notification_sample('instance-evacuate.json')
@base.notification_sample('instance-resize_finish-start.json')
@base.notification_sample('instance-resize_finish-end.json')
@base.notification_sample('instance-live_migration_pre-start.json')
@base.notification_sample('instance-live_migration_pre-end.json')
@base.notification_sample('instance-live_migration_abort-start.json')
@base.notification_sample('instance-live_migration_abort-end.json')
@base.notification_sample('instance-live_migration_post-start.json')
@base.notification_sample('instance-live_migration_post-end.json')
@base.notification_sample('instance-live_migration_post_dest-start.json')
@base.notification_sample('instance-live_migration_post_dest-end.json')
@base.notification_sample('instance-live_migration_rollback-start.json')
@base.notification_sample('instance-live_migration_rollback-end.json')
@base.notification_sample('instance-live_migration_rollback_dest-start.json')
@base.notification_sample('instance-live_migration_rollback_dest-end.json')
@base.notification_sample('instance-interface_detach-start.json')
@base.notification_sample('instance-interface_detach-end.json')
@base.notification_sample('instance-resize_confirm-start.json')
@base.notification_sample('instance-resize_confirm-end.json')
@base.notification_sample('instance-resize_revert-start.json')
@base.notification_sample('instance-resize_revert-end.json')
@base.notification_sample('instance-live_migration_force_complete-start.json')
@base.notification_sample('instance-live_migration_force_complete-end.json')
@base.notification_sample('instance-shelve_offload-start.json')
@base.notification_sample('instance-shelve_offload-end.json')
@base.notification_sample('instance-soft_delete-start.json')
@base.notification_sample('instance-soft_delete-end.json')
@base.notification_sample('instance-trigger_crash_dump-start.json')
@base.notification_sample('instance-trigger_crash_dump-end.json')
@base.notification_sample('instance-unrescue-start.json')
@base.notification_sample('instance-unrescue-end.json')
@base.notification_sample('instance-unshelve-start.json')
@base.notification_sample('instance-unshelve-end.json')
@base.notification_sample('instance-lock.json')
@base.notification_sample('instance-unlock.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
    """Notification wrapping InstanceActionPayload; the decorators above
    register one sample file per instance action/phase it is used for."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionPayload')
    }
@base.notification_sample('instance-update.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceUpdateNotification(base.NotificationBase):
    """Versioned notification emitted when an instance is updated.

    The sample decorator links the documented JSON example for this
    notification type.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceUpdatePayload')
    }
@base.notification_sample('instance-volume_swap-start.json')
@base.notification_sample('instance-volume_swap-end.json')
@base.notification_sample('instance-volume_swap-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeSwapNotification(base.NotificationBase):
    """Notification for the start/end/error phases of a volume swap."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionVolumeSwapPayload')
    }
@base.notification_sample('instance-volume_attach-start.json')
@base.notification_sample('instance-volume_attach-end.json')
@base.notification_sample('instance-volume_attach-error.json')
@base.notification_sample('instance-volume_detach-start.json')
@base.notification_sample('instance-volume_detach-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionVolumeNotification(base.NotificationBase):
    """Notification covering volume attach and detach phases.

    Note there is no volume_detach-error sample registered here.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionVolumePayload')
    }
@base.notification_sample('instance-create-start.json')
@base.notification_sample('instance-create-end.json')
@base.notification_sample('instance-create-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceCreateNotification(base.NotificationBase):
    """Notification for the start/end/error phases of instance creation."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceCreatePayload')
    }
@base.notification_sample('instance-resize_prep-start.json')
@base.notification_sample('instance-resize_prep-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionResizePrepNotification(base.NotificationBase):
    """Notification for the resize_prep start/end phases."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionResizePrepPayload')
    }
@base.notification_sample('instance-snapshot-start.json')
@base.notification_sample('instance-snapshot-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotNotification(base.NotificationBase):
    """Notification for instance snapshot start/end phases."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionSnapshotPayload')
    }
@base.notification_sample('instance-rescue-start.json')
@base.notification_sample('instance-rescue-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRescueNotification(base.NotificationBase):
    """Notification for instance rescue start/end phases."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionRescuePayload')
    }
@base.notification_sample('instance-rebuild_scheduled.json')
@base.notification_sample('instance-rebuild-start.json')
@base.notification_sample('instance-rebuild-end.json')
@base.notification_sample('instance-rebuild-error.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionRebuildNotification(base.NotificationBase):
    """Notification for rebuild scheduling and its start/end/error phases."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceActionRebuildPayload')
    }
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionSnapshotPayload(InstanceActionPayload):
    """Payload for instance.snapshot.start/.end, adding the snapshot image.

    Extends InstanceActionPayload with the id of the image created by the
    snapshot operation.
    """
    # Version 1.6: Initial version. It starts at version 1.6 as
    # instance.snapshot.start and .end notifications are switched
    # from using InstanceActionPayload 1.5 to this new payload and
    # also it added a new field so we wanted to keep the version
    # number increasing to signal the change.
    # Version 1.7: Added request_id field to InstancePayload
    # Version 1.8: Added action_initiator_user and action_initiator_project to
    # InstancePayload
    VERSION = '1.8'
    fields = {
        'snapshot_image_id': fields.UUIDField(),
    }

    def __init__(self, context, instance, fault, snapshot_image_id):
        # Delegate the common instance/fault fields to the parent payload,
        # then record the image created by this snapshot.
        super(InstanceActionSnapshotPayload, self).__init__(
            context=context,
            instance=instance,
            fault=fault)
        self.snapshot_image_id = snapshot_image_id
@nova_base.NovaObjectRegistry.register_notification
class InstanceExistsPayload(InstancePayload):
    """Payload for the periodic instance.exists audit notification."""
    # Version 1.0: Initial version
    # Version 1.1: Added action_initiator_user and action_initiator_project to
    # InstancePayload
    VERSION = '1.1'
    fields = {
        'audit_period': fields.ObjectField('AuditPeriodPayload'),
        'bandwidth': fields.ListOfObjectsField('BandwidthPayload'),
    }

    def __init__(self, context, instance, audit_period, bandwidth):
        # Common instance fields come from the parent; audit_period and
        # bandwidth describe the usage interval being reported.
        super(InstanceExistsPayload, self).__init__(context=context,
                                                    instance=instance)
        self.audit_period = audit_period
        self.bandwidth = bandwidth
@base.notification_sample('instance-exists.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceExistsNotification(base.NotificationBase):
    """Periodic audit notification that an instance exists."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'payload': fields.ObjectField('InstanceExistsPayload')
    }
| |
import sys
from time import time
import pandas as pd
from pandas import DataFrame
import numpy as np
import matplotlib.pyplot as plt
import itertools
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)  # fixed seed so repeated runs of KMeans are comparable
# Positional CLI arguments (kept as strings; only used to build the CSV path
# and as KMeans parameters further below).
set = sys.argv[1]; # dataset set id -- NOTE(review): shadows the builtin `set`;
                   # renaming would require touching the path construction below
numSpeakers = sys.argv[2]; # number of speakers in the recording
blockLength = sys.argv[3]; # analysis block length (part of the file name)
hopLength = sys.argv[4]; # analysis hop length (part of the file name)
fileID = sys.argv[5]; # which feature file of the set to load
#fileIDMin = sys.argv[5]; #fileID - lower limit
#fileIDMax = sys.argv[6]; #fileID - higher limit
#########################################################################
class ClusteringParams(object):
    """Bundle of the quality metrics produced by one clustering run."""

    def __init__(self, time_to_process, estimator_intertia, homogeneity_score,
                 completeness_score, v_measure_score, adjusted_rand_score,
                 adjusted_mutual_info_score, silhouette_score):
        # Every constructor argument is stored verbatim under its own name.
        names = ('time_to_process', 'estimator_intertia', 'homogeneity_score',
                 'completeness_score', 'v_measure_score', 'adjusted_rand_score',
                 'adjusted_mutual_info_score', 'silhouette_score')
        values = (time_to_process, estimator_intertia, homogeneity_score,
                  completeness_score, v_measure_score, adjusted_rand_score,
                  adjusted_mutual_info_score, silhouette_score)
        for attr, value in zip(names, values):
            setattr(self, attr, value)
class Params(object):
    """Aggregates ClusteringParams across numReps runs.

    For every metric it tracks the running sum, the minimum/maximum value
    observed, and the index (feature number) at which each extremum
    occurred.  calculateAverages() turns the sums into means.

    BUG FIX: calculateAverages() was defined without `self` and divided by
    the unbound name `numReps`, so calling it as a method always raised
    TypeError; it now takes `self` and uses self.numReps.
    """

    # Attribute-name stems of the eight tracked metrics; the per-metric
    # bookkeeping attributes are derived as <stem>_sum, <stem>_avg,
    # <stem>_min, <stem>_max, <stem>_min_ind, <stem>_max_ind.
    _METRICS = (
        'time_to_process',
        'estimator_intertia',
        'homogeneity_score',
        'completeness_score',
        'v_measure_score',
        'adjusted_rand_score',
        'adjusted_mutual_info_score',
        'silhouette_score',
    )

    # Class-level defaults, shadowed by instance attributes once addData()
    # runs.  Minima (and the *_min_ind sentinels) start at 100 -- this
    # assumes every metric stays below 100, which holds for the [0, 1]
    # sklearn scores but not necessarily for inertia or wall time.
    time_to_process_avg = estimator_intertia_avg = homogeneity_score_avg = \
        completeness_score_avg = v_measure_score_avg = \
        adjusted_rand_score_avg = adjusted_mutual_info_score_avg = \
        silhouette_score_avg = 0
    time_to_process_sum = estimator_intertia_sum = homogeneity_score_sum = \
        completeness_score_sum = v_measure_score_sum = \
        adjusted_rand_score_sum = adjusted_mutual_info_score_sum = \
        silhouette_score_sum = 0
    time_to_process_min = estimator_intertia_min = homogeneity_score_min = \
        completeness_score_min = v_measure_score_min = \
        adjusted_rand_score_min = adjusted_mutual_info_score_min = \
        silhouette_score_min = 100
    time_to_process_max = estimator_intertia_max = homogeneity_score_max = \
        completeness_score_max = v_measure_score_max = \
        adjusted_rand_score_max = adjusted_mutual_info_score_max = \
        silhouette_score_max = 0
    time_to_process_max_ind = estimator_intertia_max_ind = \
        homogeneity_score_max_ind = completeness_score_max_ind = \
        v_measure_score_max_ind = adjusted_rand_score_max_ind = \
        adjusted_mutual_info_score_max_ind = silhouette_score_max_ind = 0
    time_to_process_min_ind = estimator_intertia_min_ind = \
        homogeneity_score_min_ind = completeness_score_min_ind = \
        v_measure_score_min_ind = adjusted_rand_score_min_ind = \
        adjusted_mutual_info_score_min_ind = silhouette_score_min_ind = 100

    def __init__(self, numReps):
        """numReps -- number of runs (one slot in self.result per run)."""
        self.numReps = numReps
        self.result = [ClusteringParams(0, 0, 0, 0, 0, 0, 0, 0)
                       for i in range(numReps)]

    def addData(self, index, cluster_params):
        """Record the metrics of run `index` and update sums and extrema."""
        for name in self._METRICS:
            value = getattr(cluster_params, name)
            # Copy the raw value into the per-run record.
            setattr(self.result[index], name, value)
            # Running sum (used later by calculateAverages()).
            setattr(self, name + '_sum',
                    getattr(self, name + '_sum') + value)
            # Track the maximum and where it occurred (strict >, so the
            # first index wins ties, as in the original unrolled code).
            if value > getattr(self, name + '_max'):
                setattr(self, name + '_max', value)
                setattr(self, name + '_max_ind', index)
            # Track the minimum and where it occurred (strict <).
            if value < getattr(self, name + '_min'):
                setattr(self, name + '_min', value)
                setattr(self, name + '_min_ind', index)

    def calculateAverages(self):
        """Populate <metric>_avg from <metric>_sum / numReps."""
        for name in self._METRICS:
            setattr(self, name + '_avg',
                    getattr(self, name + '_sum') / float(self.numReps))
def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data` and score the clustering.

    Returns a ClusteringParams holding the wall time, the estimator's
    inertia and the standard sklearn cluster-quality scores.

    NOTE(review): relies on the module-level globals `labels`, `features`
    and `sample_size` defined later in this script; `name` is unused.
    """
    t_start = time()
    estimator.fit(data)
    predicted = estimator.labels_
    # Supervised scores compare the predicted assignment with the
    # ground-truth speaker labels.
    hom = metrics.homogeneity_score(labels, predicted)
    comp = metrics.completeness_score(labels, predicted)
    vmeas = metrics.v_measure_score(labels, predicted)
    rand = metrics.adjusted_rand_score(labels, predicted)
    mutual = metrics.adjusted_mutual_info_score(labels, predicted)
    # Silhouette is unsupervised; it subsamples `sample_size` points.
    sil = metrics.silhouette_score(features, predicted,
                                   metric='euclidean',
                                   sample_size=sample_size)
    # Elapsed time is taken here, after scoring, matching the original
    # placement of `time() - t0` in the return expression.
    return ClusteringParams(time() - t_start, estimator.inertia_,
                            hom, comp, vmeas, rand, mutual, sil)
def visualize_kmeans(feature_vector):
    """Project `feature_vector` to 2-D with PCA, fit k-means on the
    projection, and plot decision regions, per-speaker samples and
    centroids.

    NOTE(review): uses the module-level globals `n_speakers`,
    `speaker_ids` and `labels`; blocks on plt.show().
    """
    #Visualize data
    reduced_data = PCA(n_components=2).fit_transform(feature_vector)
    kmeans = KMeans(init='k-means++',n_clusters=n_speakers,n_init=10)
    kmeans.fit(reduced_data)
    #step size of mesh
    h = .02
    #Plot the decision boundary
    x_min, x_max = reduced_data[:,0].min() - 1, reduced_data[:,0].max() + 1
    y_min, y_max = reduced_data[:,1].min() - 1, reduced_data[:,1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    #Obtain labels for each point in mesh
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    #Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1)
    plt.clf()
    plt.imshow(Z, interpolation='nearest',
               extent=(xx.min(), xx.max(), yy.min(), yy.max()),
               cmap=plt.cm.Paired,
               aspect='auto', origin='lower')
    #Colour Cycler -- note 'b' appears twice, so two speakers can share blue
    colorcycler = itertools.cycle(['r', 'g', 'b', 'y','b','w','c','m'])
    for speaker in speaker_ids:
        # Indices of the frames belonging to this ground-truth speaker.
        speaker_labels = np.argwhere(labels==speaker)
        plt.scatter(reduced_data[speaker_labels,0],
                    reduced_data[speaker_labels,1],
                    color=next(colorcycler))
    #plt.plot(reduced_data[:,0], reduced_data[:,1], 'k.',markersize=2)
    #plt.plot(reduced_data[:,0],reduced_data[:,1],'g^', reduced_data[:,0])
    #plot the centroids as white X
    centroids = kmeans.cluster_centers_
    plt.scatter(centroids[:,0],centroids[:,1],
                marker='x', s=169, linewidths=3,
                color='w', zorder=10)
    plt.title('K-means clustering on the speakers (PCA-reduced data)')
    plt.xlim(x_min,x_max)
    plt.ylim(y_min,y_max)
    plt.xticks(())
    plt.yticks(())
    plt.show()
#########################################################################
path = "/Users/avrosh/Documents/Coursework/7100_Spring_16/Dataset/dataset/"+set+"/features/set"+set+"_"+hopLength+"_"+blockLength+"_S"+numSpeakers+"_"+fileID+".csv"
f = open(path)
headers = f.readline().strip().split(",")
data = np.loadtxt(fname = f, delimiter=',')
labels = data[:,0]
#print labels
#features = scale(data[:,1:])
features = data[:,1:]
#print features
#
n_samples, n_features = features.shape
n_speakers = len(np.unique(labels))
speaker_ids = np.unique(labels)
print speaker_ids
print ("n_speakers %d \nn_samples %d \nn_features %d" % (n_speakers,n_samples,n_features))
paramsKMeans = Params(n_features)
paramsKMeansRandInit = Params(n_features)
#paramsKMeansPCA = Params(n_features)
for x in xrange(0,n_features):
#for x in xrange(0,1):
feature_vector = features[:,x].reshape(-1, 1)
#repeat if you want to visualize the data
# feature_vector = np.repeat(feature_vector,2,axis=1)
# print feature_vector.shape
# print x
sample_size = 300
# print feature_vector
#KMeans
paramsKMeans.addData(x,bench_k_means(KMeans(init='k-means++', n_clusters=n_speakers, n_init=10),
name='k-means++',
data=feature_vector))
#KMeans with random initialization
paramsKMeansRandInit.addData(x,bench_k_means(KMeans(init='random', n_clusters=n_speakers, n_init=10),
name='Random',
data=feature_vector))
# #KMeans PCA
# #in this case the seeding of the centers in deterministic, hence we run the algorithm only once
# pca = PCA(n_components=n_speakers).fit(feature_vector)
# paramsKMeansPCA[x] = bench_k_means(KMeans(init=pca.components_, n_clusters=n_speakers, n_init=1),
# name='PCA-based',
# data=feature_vector)
# print feature_vector.shape
# visualize_kmeans(feature_vector)
print(79 * '_')
print "And the best feature is: "
print headers[paramsKMeans.completeness_score_max_ind+1]
#
#for param in paramsKMeans.result:
# print param.completeness_score
#
#for param in paramsKMeans.result:
# print param.adjusted_rand_score
paramsKMeans.calculateAverages;
print('Name: % 9s \n'
'Time: %.2fs %s \n'
'Estimator Inertia: %i %s \n'
'Homogeneity Score: %.3f %s \n'
'Completeness Score: %.3f %s \n'
'V Measure score: %.3f %s \n'
'Adjusted rand score: %.3f %s \n'
'Adjusted Mutual Info score: %.3f %s \n'
'Silhouette Score: %.3f %s'
% ("Kmeans", paramsKMeans.time_to_process_min,headers[paramsKMeans.time_to_process_min_ind + 1],
paramsKMeans.estimator_intertia_max,headers[paramsKMeans.estimator_intertia_max_ind + 1],
paramsKMeans.homogeneity_score_max,headers[paramsKMeans.homogeneity_score_max_ind + 1],
paramsKMeans.completeness_score_max,headers[paramsKMeans.completeness_score_max_ind + 1],
paramsKMeans.v_measure_score_max,headers[paramsKMeans.v_measure_score_max_ind + 1],
paramsKMeans.adjusted_rand_score_max,headers[paramsKMeans.adjusted_rand_score_max_ind + 1],
paramsKMeans.adjusted_mutual_info_score_max,headers[paramsKMeans.adjusted_mutual_info_score_max_ind + 1],
paramsKMeans.silhouette_score_max,headers[paramsKMeans.silhouette_score_max_ind + 1]))
| |
import pickle
from django import forms
from django.core.exceptions import ValidationError
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.utils.functional import lazy
from .models import (
Bar, Choiceful, Foo, RenamedField, VerboseNameField, Whiz, WhizDelayed,
WhizIter, WhizIterEmpty,
)
class Nested:
    # Deliberately nested: used by the tests below to verify that
    # __repr__() and deconstruct() emit __qualname__
    # ('model_fields.tests.Nested.Field') for nested field classes.
    class Field(models.Field):
        pass
class BasicFieldTests(SimpleTestCase):
    """Behavior common to all model fields: repr/str, naming, ordering,
    formfield() options, and pickling."""

    def test_show_hidden_initial(self):
        """
        Fields with choices respect show_hidden_initial as a kwarg to
        formfield().
        """
        choices = [(0, 0), (1, 1)]
        model_field = models.Field(choices=choices)
        form_field = model_field.formfield(show_hidden_initial=True)
        self.assertTrue(form_field.show_hidden_initial)
        form_field = model_field.formfield(show_hidden_initial=False)
        self.assertFalse(form_field.show_hidden_initial)

    def test_field_repr(self):
        """
        __repr__() of a field displays its name.
        """
        f = Foo._meta.get_field('a')
        self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
        # An unbound field (not attached to a model) has no name to show.
        f = models.fields.CharField()
        self.assertEqual(repr(f), '<django.db.models.fields.CharField>')

    def test_field_repr_nested(self):
        """__repr__() uses __qualname__ for nested class support."""
        self.assertEqual(repr(Nested.Field()), '<model_fields.tests.Nested.Field>')

    def test_field_name(self):
        """
        A defined field name (name="fieldname") is used instead of the model
        model's attribute name (modelname).
        """
        instance = RenamedField()
        self.assertTrue(hasattr(instance, 'get_fieldname_display'))
        self.assertFalse(hasattr(instance, 'get_modelname_display'))

    def test_field_verbose_name(self):
        """verbose_name is honored for every field type and for the pk."""
        m = VerboseNameField
        for i in range(1, 23):
            self.assertEqual(m._meta.get_field('field%d' % i).verbose_name, 'verbose field%d' % i)
        self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')

    def test_choices_form_class(self):
        """Can supply a custom choices form class to Field.formfield()"""
        choices = [('a', 'a')]
        field = models.CharField(choices=choices)
        klass = forms.TypedMultipleChoiceField
        self.assertIsInstance(field.formfield(choices_form_class=klass), klass)

    def test_formfield_disabled(self):
        """Field.formfield() sets disabled for fields with choices."""
        field = models.CharField(choices=[('a', 'b')])
        form_field = field.formfield(disabled=True)
        self.assertIs(form_field.disabled, True)

    def test_field_str(self):
        """str() shows the dotted model path for bound fields, repr-style
        output for unbound ones."""
        f = models.Field()
        self.assertEqual(str(f), '<django.db.models.fields.Field>')
        f = Foo._meta.get_field('a')
        self.assertEqual(str(f), 'model_fields.Foo.a')

    def test_field_ordering(self):
        """Fields are ordered based on their creation."""
        f1 = models.Field()
        f2 = models.Field(auto_created=True)
        f3 = models.Field()
        self.assertLess(f2, f1)
        self.assertGreater(f3, f1)
        self.assertIsNotNone(f1)
        self.assertNotIn(f2, (None, 1, ''))

    def test_field_instance_is_picklable(self):
        """Field instances can be pickled."""
        field = models.Field(max_length=100, default='a string')
        # Must be picklable with this cached property populated (#28188).
        field._get_default
        pickle.dumps(field)

    def test_deconstruct_nested_field(self):
        """deconstruct() uses __qualname__ for nested class support."""
        name, path, args, kwargs = Nested.Field().deconstruct()
        self.assertEqual(path, 'model_fields.tests.Nested.Field')
class ChoicesTests(SimpleTestCase):
    """Field.choices handling: normalization, validation and formfield()."""

    @classmethod
    def setUpClass(cls):
        # Cache the Choiceful model's fields once for all tests.
        super().setUpClass()
        cls.no_choices = Choiceful._meta.get_field('no_choices')
        cls.empty_choices = Choiceful._meta.get_field('empty_choices')
        cls.empty_choices_bool = Choiceful._meta.get_field('empty_choices_bool')
        cls.empty_choices_text = Choiceful._meta.get_field('empty_choices_text')
        cls.with_choices = Choiceful._meta.get_field('with_choices')

    def test_choices(self):
        """choices is None when unset, otherwise kept as declared."""
        self.assertIsNone(self.no_choices.choices)
        self.assertEqual(self.empty_choices.choices, ())
        self.assertEqual(self.with_choices.choices, [(1, 'A')])

    def test_flatchoices(self):
        """flatchoices is always a flat list, empty when there are none."""
        self.assertEqual(self.no_choices.flatchoices, [])
        self.assertEqual(self.empty_choices.flatchoices, [])
        self.assertEqual(self.with_choices.flatchoices, [(1, 'A')])

    def test_check(self):
        self.assertEqual(Choiceful.check(), [])

    def test_invalid_choice(self):
        """validate() rejects values not in choices (fields with choices
        declared), and accepts anything when there are no choices."""
        model_instance = None  # Actual model instance not needed.
        self.no_choices.validate(0, model_instance)
        msg = "['Value 99 is not a valid choice.']"
        with self.assertRaisesMessage(ValidationError, msg):
            self.empty_choices.validate(99, model_instance)
        with self.assertRaisesMessage(ValidationError, msg):
            self.with_choices.validate(99, model_instance)

    def test_formfield(self):
        """Any declared choices (even empty) switch the form field to a
        ChoiceField; no choices keeps the type-specific form field."""
        no_choices_formfield = self.no_choices.formfield()
        self.assertIsInstance(no_choices_formfield, forms.IntegerField)
        fields = (
            self.empty_choices, self.with_choices, self.empty_choices_bool,
            self.empty_choices_text,
        )
        for field in fields:
            with self.subTest(field=field):
                self.assertIsInstance(field.formfield(), forms.ChoiceField)
class GetFieldDisplayTests(SimpleTestCase):
    """get_FIELD_display() behavior for grouped, iterator and empty
    choices."""

    def test_choices_and_field_display(self):
        """
        get_choices() interacts with get_FIELD_display() to return the expected
        values.
        """
        self.assertEqual(Whiz(c=1).get_c_display(), 'First')    # A nested value
        self.assertEqual(Whiz(c=0).get_c_display(), 'Other')    # A top level value
        self.assertEqual(Whiz(c=9).get_c_display(), 9)          # Invalid value
        self.assertIsNone(Whiz(c=None).get_c_display())         # Blank value
        self.assertEqual(Whiz(c='').get_c_display(), '')        # Empty value
        self.assertEqual(WhizDelayed(c=0).get_c_display(), 'Other')  # Delayed choices

    def test_get_FIELD_display_translated(self):
        """A translated display value is coerced to str."""
        val = Whiz(c=5).get_c_display()
        self.assertIsInstance(val, str)
        self.assertEqual(val, 'translated')

    def test_iterator_choices(self):
        """
        get_choices() works with Iterators.
        """
        self.assertEqual(WhizIter(c=1).c, 1)        # A nested value
        self.assertEqual(WhizIter(c=9).c, 9)        # Invalid value
        self.assertIsNone(WhizIter(c=None).c)       # Blank value
        self.assertEqual(WhizIter(c='').c, '')      # Empty value

    def test_empty_iterator_choices(self):
        """
        get_choices() works with empty iterators.
        """
        self.assertEqual(WhizIterEmpty(c="a").c, "a")   # A nested value
        self.assertEqual(WhizIterEmpty(c="b").c, "b")   # Invalid value
        self.assertIsNone(WhizIterEmpty(c=None).c)      # Blank value
        self.assertEqual(WhizIterEmpty(c='').c, '')     # Empty value
class GetChoicesTests(SimpleTestCase):
    """Field.get_choices(): blank handling and lazy group labels."""

    def test_empty_choices(self):
        choices = []
        f = models.CharField(choices=choices)
        self.assertEqual(f.get_choices(include_blank=False), choices)

    def test_blank_in_choices(self):
        # A choice with an empty-string value counts as the blank choice,
        # so no extra blank entry is prepended.
        choices = [('', '<><>'), ('a', 'A')]
        f = models.CharField(choices=choices)
        self.assertEqual(f.get_choices(include_blank=True), choices)

    def test_blank_in_grouped_choices(self):
        # The blank choice may live inside a named group.
        choices = [
            ('f', 'Foo'),
            ('b', 'Bar'),
            ('Group', (
                ('', 'No Preference'),
                ('fg', 'Foo'),
                ('bg', 'Bar'),
            )),
        ]
        f = models.CharField(choices=choices)
        self.assertEqual(f.get_choices(include_blank=True), choices)

    def test_lazy_strings_not_evaluated(self):
        # Scanning for an existing blank choice must not force lazy group
        # labels to evaluate.
        lazy_func = lazy(lambda x: 0 / 0, int)  # raises ZeroDivisionError if evaluated.
        f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
        self.assertEqual(f.get_choices(include_blank=True)[0], ('', '---------'))
class GetChoicesOrderingTests(TestCase):
    """get_choices(ordering=...) on forward and reverse related fields."""

    @classmethod
    def setUpTestData(cls):
        cls.foo1 = Foo.objects.create(a='a', d='12.34')
        cls.foo2 = Foo.objects.create(a='b', d='12.34')
        cls.bar1 = Bar.objects.create(a=cls.foo1, b='a')
        cls.bar2 = Bar.objects.create(a=cls.foo2, b='a')
        cls.field = Bar._meta.get_field('a')

    def assertChoicesEqual(self, choices, objs):
        # Choices for a related field are (pk, str(obj)) pairs.
        self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs])

    def test_get_choices(self):
        self.assertChoicesEqual(
            self.field.get_choices(include_blank=False, ordering=('a',)),
            [self.foo1, self.foo2]
        )
        self.assertChoicesEqual(
            self.field.get_choices(include_blank=False, ordering=('-a',)),
            [self.foo2, self.foo1]
        )

    def test_get_choices_reverse_related_field(self):
        self.assertChoicesEqual(
            self.field.remote_field.get_choices(include_blank=False, ordering=('a',)),
            [self.bar1, self.bar2]
        )
        self.assertChoicesEqual(
            self.field.remote_field.get_choices(include_blank=False, ordering=('-a',)),
            [self.bar2, self.bar1]
        )
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for forward-mode automatic differentiation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Dictionary mapping from op names to special-cased jvp functions. Otherwise
# backward functions are transposed on the tape.
_SPECIAL_CASES = {}
def _identity_jvp(attr_tuple, inputs, outputs, tangents):
  """Special-cased JVP for Identity: tangents pass through unchanged."""
  # Special-cased mostly for resource handles, where creating ones Tensors
  # from handle data for transposing the backward function on the tape is
  # error-prone (even if we get good handle data, partially defined shapes
  # are an issue).
  del attr_tuple, inputs, outputs  # Unused by the identity rule.
  forwarded = []
  for tangent in tangents:
    forwarded.append(array_ops.identity(tangent))
  return forwarded
_SPECIAL_CASES["Identity"] = _identity_jvp
def _read_variable_jvp(attr_tuple, inputs, outputs, tangents):
  """Special-cased JVP for ReadVariableOp: identity on each tangent."""
  # Like for Identity, this special case means we don't need to create
  # variable-shaped Tensors from resource handles.
  del attr_tuple, inputs, outputs  # Unused by this rule.
  return list(map(array_ops.identity, tangents))
_SPECIAL_CASES["ReadVariableOp"] = _read_variable_jvp
# Guards increments of _TRACE_COUNT; reads are deliberately performed
# without taking this lock.
_TRACE_COUNT_CONSISTENCY_LOCK = threading.Lock()
# Map from op names to number of traces of _jvp_helper. Used to cap the number
# of traces due to shape differences while still specializing where possible.
_TRACE_COUNT = {}
def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents):
"""Computes a Jacobian-vector product for an op.
Note that this function would be wasteful if executed eagerly. It runs the
backward gradient function and throws away the result just to record its
operations on a GradientTape. These unused ops are pruned away when this
function is traced.
Args:
op_name: A string, the type of operation being executed.
attr_tuple: Attributes of the operation.
inputs: A flat list of input Tensors to the operation.
outputs: A flat list of output Tensors from the operation.
tangents: A flat list of Tensors, same shape as `inputs`.
Returns:
A flat list of tangents corresponding to `outputs`.
"""
with _TRACE_COUNT_CONSISTENCY_LOCK:
# Just make sure writes don't clobber each other's increments; reads in
# _jvp_dispatch do not lock.
_TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1
special_case = _SPECIAL_CASES.get(op_name, None)
if special_case is not None:
return special_case(attr_tuple, inputs, outputs, tangents)
if not outputs:
# tape.gradients([], inputs) doesn't make much sense
return []
# Generally inner GradientTapes won't function while outer accumulators are
# recording. We temporarily reset forwardprop state to allow GradientTapes to
# function here.
with forwardprop_util.push_forwardprop_state():
trainable_inputs = []
trainable_indices = []
nontrivial_tangents = []
for input_index, tensor in enumerate(inputs):
if backprop_util.IsTrainable(tensor):
trainable_inputs.append(tensor)
trainable_indices.append(input_index)
nontrivial_tangents.append(tangents[input_index])
with backprop.GradientTape() as transpose_tape:
with backprop.GradientTape() as backfunc_tape:
backfunc_tape.watch(trainable_inputs)
execute.record_gradient(op_name, inputs, attr_tuple, outputs)
forwardprop_aids = []
trainable_outputs = []
nontrivial_output_indices = []
for output_index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
forwardprop_aids.append(
array_ops.ones_like(output, name="unused_forwardprop_aid"))
trainable_outputs.append(output)
nontrivial_output_indices.append(output_index)
transpose_tape.watch(forwardprop_aids)
grads = backfunc_tape.gradient(
trainable_outputs,
trainable_inputs,
forwardprop_aids,
unconnected_gradients=UnconnectedGradients.ZERO)
nontrivial_output_tangents = transpose_tape.gradient(
grads, forwardprop_aids, output_gradients=nontrivial_tangents)
output_tangents = [None] * len(outputs)
for index, tangent in zip(nontrivial_output_indices,
nontrivial_output_tangents):
output_tangents[index] = tangent
return output_tangents
# TODO(allenl): experimental_relax_shapes for gradients which rely on static
# shape information are underspecialized. We may want hand-written forward
# implementations, or a more satisfying story about how we re-specialize
# gradients which were traced with relaxed shapes (e.g. use conds instead of
# trace-time Python logic).
_jvp_relaxed_shapes = def_function.function(
_jvp_helper, experimental_relax_shapes=True)
_jvp_exact_shapes = def_function.function(
_jvp_helper, experimental_relax_shapes=False)
# The maximum number of exact-shape traces to perform for a single op before
# switching to shape relaxation.
_TRACE_COUNT_LIMIT = 32
def _jvp_dispatch(op_name, attr_tuple, inputs, outputs, tangents):
"""Determine which forwardprop function to call."""
# Note that this _TRACE_COUNT read races with writes. That's fine, it just
# means we may trace a few more exact shapes before moving on to relaxation.
if _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT:
return _jvp_exact_shapes(
op_name, attr_tuple, inputs, outputs, tangents)
else:
return _jvp_relaxed_shapes(
op_name, attr_tuple, inputs, outputs, tangents)
pywrap_tfe.TFE_Py_RegisterJVPFunction(_jvp_dispatch)
@tf_export("autodiff.ForwardAccumulator", v1=[])
class ForwardAccumulator(object):
"""Computes Jacobian-vector products ("JVP"s) using forward-mode autodiff.
Compare to `tf.GradientTape` which computes vector-Jacobian products ("VJP"s)
using reverse-mode autodiff (backprop). Reverse mode is more attractive when
computing gradients of a scalar-valued function with respect to many inputs
(e.g. a neural network with many parameters and a scalar loss). Forward mode
works best on functions with many outputs and few inputs. Since it does not
hold on to intermediate activations, it is much more memory efficient than
backprop where it is applicable.
Consider a simple linear regression:
>>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
>>> dense = tf.keras.layers.Dense(1)
>>> dense.build([2])
>>> with tf.autodiff.ForwardAccumulator(
... primals=dense.kernel,
... tangents=tf.constant([[1.], [0.]])) as acc:
... loss = tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.)
>>> acc.jvp(loss)
<tf.Tensor: shape=(), dtype=float32, numpy=...>
The example has two variables containing parameters, `dense.kernel` (2
parameters) and `dense.bias` (1 parameter). Considering the training data `x`
as a constant, this means the Jacobian matrix for the function mapping from
parameters to loss has one row and three columns.
With forwardprop, we specify a length-three vector in advance which multiplies
the Jacobian. The `primals` constructor argument is the parameter (a
`tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the
`tangents` argument is the "vector" in Jacobian-vector product. If our goal is
to compute the entire Jacobian matrix, forwardprop computes one column at a
time while backprop computes one row at a time. Since the Jacobian in the
linear regression example has only one row, backprop requires fewer
invocations:
>>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
>>> dense = tf.keras.layers.Dense(1)
>>> dense.build([2])
>>> loss_fn = lambda: tf.reduce_sum((dense(x) - tf.constant([1., -1.])) ** 2.)
>>> kernel_fprop = []
>>> with tf.autodiff.ForwardAccumulator(
... dense.kernel, tf.constant([[1.], [0.]])) as acc:
... kernel_fprop.append(acc.jvp(loss_fn()))
>>> with tf.autodiff.ForwardAccumulator(
... dense.kernel, tf.constant([[0.], [1.]])) as acc:
... kernel_fprop.append(acc.jvp(loss_fn()))
>>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc:
... bias_fprop = acc.jvp(loss_fn())
>>> with tf.GradientTape() as tape:
... loss = loss_fn()
>>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias))
>>> np.testing.assert_allclose(
... kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis])
>>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis])
Implicit in the `tape.gradient` call is a length-one vector which
left-multiplies the Jacobian, a vector-Jacobian product.
`ForwardAccumulator` maintains JVPs corresponding primal tensors it is
watching, derived from the original `primals` specified in the constructor. As
soon as a primal tensor is deleted, `ForwardAccumulator` deletes the
corresponding JVP.
`acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It
does not perform any computation. `acc.jvp` calls can be repeated as long as
`acc` is accessible, whether the context manager is active or not. New JVPs
are only computed while the context manager is active.
Note that `ForwardAccumulator`s are always applied in the order their context
managers were entered, so inner accumulators will not see JVP computation from
outer accumulators. Take higher-order JVPs from outer accumulators:
>>> primal = tf.constant(1.1)
>>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer:
... with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner:
... primal_out = primal ** tf.constant(3.5)
>>> inner_jvp = inner.jvp(primal_out)
>>> inner_jvp # 3.5 * 1.1 ** 2.5
<tf.Tensor: shape=(), dtype=float32, numpy=4.4417057>
>>> outer.jvp(inner_jvp) # 3.5 * 2.5 * 1.1 ** 1.5
<tf.Tensor: shape=(), dtype=float32, numpy=10.094786>
Reversing the collection in the last line to instead retrieve
`inner.jvp(outer.jvp(primal_out))` will not work.
Strict nesting also applies to combinations of `ForwardAccumulator` and
`tf.GradientTape`. More deeply nested `GradientTape` objects will ignore the
products of outer `ForwardAccumulator` objects. This allows (for example)
memory-efficient forward-over-backward computation of Hessian-vector products,
where the inner `GradientTape` would otherwise hold on to all intermediate
JVPs:
>>> v = tf.Variable([1., 2.])
>>> with tf.autodiff.ForwardAccumulator(
... v,
... # The "vector" in Hessian-vector product.
... tf.constant([1., 0.])) as acc:
... with tf.GradientTape() as tape:
... y = tf.reduce_sum(v ** 3.)
... backward = tape.gradient(y, v)
>>> backward # gradient from backprop
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 3., 12.], dtype=float32)>
>>> acc.jvp(backward) # forward-over-backward Hessian-vector product
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 0.], dtype=float32)>
"""
def __init__(self, primals, tangents):
"""Specify tensors to watch and their Jacobian-vector products.
Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
(a Jacobian-vector product) for the function computed while this accumulator
is active. Since JVPs are computed in forward mode as the computation
happens, this vector must be supplied in advance.
Listing a single tensor multiple times in `primals` raises an
exception. Excluding a tensor from `primals` is equivalent to watching it
with a tangent tensor of zeros.
Args:
primals: A tensor or nested structure of tensors to watch.
tangents: A tensor or nested structure of tensors, with the same nesting
structure as `primals`, with each element being a vector with the same
size as the corresponding primal element.
Raises:
ValueError: If the same tensor or variable is specified multiple times in
`primals`.
"""
self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew()
self._recording = False
primal_ids = set()
for primal in nest.flatten(primals):
if id(primal) in primal_ids:
raise ValueError(
"Tensor {} was specified as a primal multiple times. This may "
"indicate an error. If it was intended, please sum the "
"corresponding tangents.")
primal_ids.add(id(primal))
self._watch(primals, tangents)
def __enter__(self):
self._push_accumulator()
return self
def __exit__(self, typ, value, traceback):
if self._recording:
self._pop_accumulator()
def _push_accumulator(self):
if self._recording:
raise ValueError("Accumulator is already recording.")
pywrap_tfe.TFE_Py_ForwardAccumulatorSetAdd(self._accumulator)
self._recording = True
def _pop_accumulator(self):
if not self._recording:
raise ValueError("Accumulator is not recording.")
pywrap_tfe.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator)
self._recording = False
def _watch(self, primals, tangents):
"""Ensures that `primals` are being traced by this accumulator.
Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
(a Jacobian-vector product) for the function computed while this accumulator
is active. Since JVPs are computed in forward mode as the computation
happens, this vector must be supplied in advance.
Watching a single tensor multiple times sums each of its `tangents`. Any
un-watched tensor has zeros for its tangent vector.
Args:
primals: A Tensor or list of Tensors.
tangents: A Tensor or list of Tensors matching `primals`.
"""
nest.assert_same_structure(primals, tangents)
for t, g in zip(nest.flatten(primals), nest.flatten(tangents)):
if not t.dtype.is_floating:
logging.log_first_n(
logging.WARN, "The dtype of the watched primal must be "
"floating (e.g. tf.float32), got %r", 5, t.dtype)
g = ops.convert_to_tensor(g, dtype=t.dtype)
if hasattr(t, "handle"):
# Run convert_to_tensor to get the captured handle from whichever
# function we're running if necessary.
t = ops.convert_to_tensor(t.handle)
pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g)
def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE):
"""Fetches the Jacobian-vector product computed for `primals`.
Note that this method performs no computation, and simply looks up a JVP
that was already computed (unlike backprop using a `tf.GradientTape`, where
the computation happens on the call to `tape.gradient`).
Args:
primals: A watched Tensor or structure of Tensors to fetch the JVPs for.
unconnected_gradients: A value which can either hold 'none' or 'zero' and
alters the value which will be returned if no JVP was computed for
`primals`. The possible values and effects are detailed in
'tf.UnconnectedGradients' and it defaults to 'none'.
Returns:
Tensors with the same shapes and dtypes as `primals`, or None if no JVP
is available.
"""
unconnected_gradients = UnconnectedGradients(unconnected_gradients)
if self._accumulator is None:
raise ValueError("Called jvp() without first tracing anything.")
def _fetch_jvp(tensor):
if hasattr(tensor, "handle"):
tensor = ops.convert_to_tensor(tensor.handle)
result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator,
tensor)
if result is None and unconnected_gradients == UnconnectedGradients.ZERO:
return array_ops.zeros_like(tensor)
return result
return nest.map_structure(_fetch_jvp, primals)
| |
import os
from math import radians, degrees, fabs
from copy import deepcopy
import rospy
import tf
from std_msgs.msg import Header
from geometry_msgs.msg import Vector3
from baxter_interface import Limb
class PoseGenerator(object):
def __init__(self, mode, arm_mode, step=5):
self.mode = mode
self.arm_mode = arm_mode
self._step = step
self._right_limb = Limb('right')
self._left_limb = Limb('left')
self._subscribe()
def _subscribe(self):
self._last_data_0 = (Vector3(), Vector3())
self._last_data_1 = (Vector3(), Vector3())
self._calib_data_0 = (Vector3(), Vector3())
self._calib_data_1 = (Vector3(), Vector3())
self._sub_ori_0 = rospy.Subscriber("/low_myo/imu_rpy",
Vector3,
self._orientation_callback_0)
self._sub_pos_0 = rospy.Subscriber("/low_myo/imu_pos",
Vector3,
self._position_callback_0)
self._sub_ori_1 = rospy.Subscriber("/top_myo/imu_rpy",
Vector3,
self._orientation_callback_1)
self._sub_pos_1 = rospy.Subscriber("/top_myo/imu_pos",
Vector3,
self._position_callback_1)
def _orientation_callback_0(self, data):
self._last_data_0[0].x = data.x
self._last_data_0[0].y = data.y
self._last_data_0[0].z = data.z
def _position_callback_0(self, data):
self._last_data_0[1].x = data.x
self._last_data_0[1].y = data.y
self._last_data_0[1].z = data.z
def _orientation_callback_1(self, data):
self._last_data_1[0].x = data.x
self._last_data_1[0].y = data.y
self._last_data_1[0].z = data.z
def _position_callback_1(self, data):
self._last_data_1[1].x = data.x
self._last_data_1[1].y = data.y
self._last_data_1[1].z = data.z
def calibrate(self):
"""
Calibrate position of the robot arm wrt the myo data
"""
raw_input("Press enter when user is at the right pose")
self._calib_data_0 = deepcopy(self._last_data_0)
self._calib_data_1 = deepcopy(self._last_data_1)
self._right_calib_pose = self._right_limb.joint_angles()
self._left_calib_pose = self._left_limb.joint_angles()
def _is_vector_valid(self, data):
"""
Check if data is likely to be valid.
"""
test = data.x + data.y + data.z
return test != 0
def _is_over_step(self, change):
return abs(change) > self._step
def generate_pose(self):
"""
Given new data and position of the arm, calculate new
joint positions.
"""
if self.mode == "one_arm":
return self.one_arm_generate_pose()
elif self.mode == "two_arms":
return self.two_arms_generate_pose()
else:
raise ValueError("Mode %s is invalid!" % self.mode)
def one_arm_generate_pose(self):
rospy.logdebug("Generating pose")
data_0 = deepcopy(self._last_data_0[0])
data_1 = deepcopy(self._last_data_1[0])
this_pose = deepcopy(self._right_calib_pose)
if self.arm_mode == "first":
if self._is_vector_valid(data_1):
change_1 = Vector3()
change_1.x = data_1.x - self._calib_data_1[0].x
change_1.y = data_1.y - self._calib_data_1[0].y
change_1.z = data_1.z - self._calib_data_1[0].z
if (change_1.x < -179):
change_1.x += 360
if (change_1.y < -179):
change_1.y += 360
if (change_1.z < -179):
change_1.z += 360
if (change_1.x > 179):
change_1.x -= 360
if (change_1.y > 179):
change_1.y -= 360
if (change_1.z > 179):
change_1.z -= 360
# print "MYO_1 (Upper arm)"
rospy.logdebug("Data_1.x %f" % data_1.x)
rospy.logdebug("CalibData_1.x %f" % self._calib_data_1[0].x)
rospy.logdebug("Data_1.y %f" % data_1.y)
rospy.logdebug("CalibData_1.y %f" % self._calib_data_1[0].y)
rospy.logdebug("Data_1.z %f" % data_1.z)
rospy.logdebug("CalibData_1.z %f" % self._calib_data_1[0].z)
# print "ROLL: ", change_1.z
if (self._is_over_step(change_1.z)):
this_pose["right_e0"] += radians(-1 * change_1.z)
# print "PITCH: ", change_1.y
if (self._is_over_step(change_1.y)):
# this_pose["right_e1"] += radians(-1 * change_1.y)
this_pose["right_s1"] += radians(1 * change_1.y)
# print "YAW: ", change_1.x
if (self._is_over_step(change_1.x)):
this_pose["right_s0"] += radians(-1 * change_1.x)
if self._is_vector_valid(data_0):
change_0 = Vector3()
change_0.x = data_0.x - self._calib_data_0[0].x - change_1.x
change_0.y = data_0.y - self._calib_data_0[0].y - change_1.y
change_0.z = data_0.z - self._calib_data_0[0].z - change_1.z
if (change_0.x < -179):
change_0.x += 360
if (change_0.y < -179):
change_0.y += 360
if (change_0.z < -179):
change_0.z += 360
if (change_0.x > 179):
change_0.x -= 360
if (change_0.y > 179):
change_0.y -= 360
if (change_0.z > 179):
change_0.z -= 360
# print "MYO_0 (Forearm)"
rospy.logdebug("Data_0.x %f" % data_0.x)
rospy.logdebug("CalibData_0.x %f" % self._calib_data_0[0].x)
rospy.logdebug("Data_0.y %f" % data_0.y)
rospy.logdebug("CalibData_0.y %f" % self._calib_data_0[0].y)
rospy.logdebug("Data_0.z %f" % data_0.z)
rospy.logdebug("CalibData_0.z %f" % self._calib_data_0[0].z)
# print "ROLL: ", change_0.z
if (self._is_over_step(change_0.z)):
this_pose["right_w0"] += radians(-1 * change_0.z)
# this_pose["right_w0"] += radians(1 * change_0.z)
# print "PITCH: ", change_0.y
if (self._is_over_step(change_0.y)):
# this_pose["right_w1"] += radians(-1 * change_0.y)
# this_pose["right_w1"] += radians(-1 * change_0.y)
this_pose["right_w1"] += radians(1 * change_0.y)
# print "YAW: ", change_0.x
if (self._is_over_step(change_0.x)):
# this_pose["right_w2"] += radians(-1 * change_0.x)
# this_pose["right_w2"] += radians(1 * change_0.x)
this_pose["right_e1"] += radians(-1 * change_0.x)
elif self.arm_mode == "second":
print "SECOND!"
# Alex you need to change below
if self._is_vector_valid(data_1):
change_1 = Vector3()
change_1.x = data_1.x - self._calib_data_1[0].x
change_1.y = data_1.y - self._calib_data_1[0].y
change_1.z = data_1.z - self._calib_data_1[0].z
print "MYO_1 (Upper arm)"
print "ROLL: ", change_1.x
if (self._is_over_step(change_1.x)):
this_pose["right_e0"] += radians(change_1.x)
print "PITCH: ", data_1.y
if (self._is_over_step(change_1.y)):
this_pose["right_s1"] += radians(-1 * change_1.y)
print "YAW: ", change_1.z
if (self._is_over_step(change_1.z)):
this_pose["right_s0"] += radians(change_1.z)
if self._is_vector_valid(data_0):
change_0 = Vector3()
change_0.x = data_0.x - self._calib_data_0[0].x
change_0.y = data_0.y - self._calib_data_0[0].y
change_0.z = data_0.z - self._calib_data_0[0].z
print "MYO_0 (Forearm)"
print "PITCH: ", data_0.y
print "ROLL: ", data_0.x
if (self._is_over_step(change_0.x)):
this_pose["right_w2"] += radians(-1 * change_0.x)
if (self._is_over_step(change_0.y)):
change_0.y += change_1.y
this_pose["right_e1"] += radians(-1 * change_0.y)
print "YAW: ", data_0.z
if (self._is_over_step(change_0.z)):
this_pose["right_w1"] += radians(change_0.z)
return this_pose
def two_arms_generate_pose(self):
rospy.logdebug("Generating pose")
data_0 = deepcopy(self._last_data_0[0])
data_1 = deepcopy(self._last_data_1[0])
right_pose = deepcopy(self._right_calib_pose)
left_pose = deepcopy(self._left_calib_pose)
if self.arm_mode == "first":
if self._is_vector_valid(data_0):
change_0 = Vector3()
change_0.x = data_0.x - self._calib_data_0[0].x
change_0.y = data_0.y - self._calib_data_0[0].y
change_0.z = data_0.z - self._calib_data_0[0].z
print "MYO_0 (Right forearm)"
print "PITCH: ", change_0.y
if (self._is_over_step(change_0.y)):
right_pose["right_w1"] += radians(-1 * change_0.y)
print "YAW: ", change_0.z
if (self._is_over_step(change_0.z)):
right_pose["right_w0"] += radians(-1 * change_0.z)
print "ROLL: ", change_0.x
if (self._is_over_step(change_0.x)):
right_pose["right_w2"] += radians(-1 * change_0.x)
if self._is_vector_valid(data_1):
change_1 = Vector3()
change_1.x = data_1.x - self._calib_data_1[0].x
change_1.y = data_1.y - self._calib_data_1[0].y
change_1.z = data_1.z - self._calib_data_1[0].z
print "MYO_1 (Left forearm)"
print "PITCH: ", change_1.y
if (self._is_over_step(change_1.y)):
left_pose["left_w1"] += radians(-1 * change_1.y)
print "YAW: ", change_1.z
if (self._is_over_step(change_1.z)):
left_pose["left_w0"] += radians(-1 * change_1.z)
print "ROLL: ", change_1.x
if (self._is_over_step(change_1.x)):
left_pose["left_w2"] += radians(-1 * change_1.x)
elif self.arm_mode == "second":
if self._is_vector_valid(data_0):
change_0 = Vector3()
change_0.x = data_0.x - self._calib_data_0[0].x
change_0.y = data_0.y - self._calib_data_0[0].y
change_0.z = data_0.z - self._calib_data_0[0].z
print "MYO_0 (Right forearm)"
print "PITCH: ", change_0.y
if (self._is_over_step(change_0.y)):
right_pose["right_e1"] += radians(change_0.y)
print "YAW: ", change_0.z
if (self._is_over_step(change_0.z)):
right_pose["right_w1"] += radians(change_0.z)
print "ROLL: ", change_0.x
if (self._is_over_step(change_0.x)):
right_pose["right_w2"] += radians(-1 * change_0.x)
if self._is_vector_valid(data_1):
change_1 = Vector3()
change_1.x = data_1.x - self._calib_data_1[0].x
change_1.y = data_1.y - self._calib_data_1[0].y
change_1.z = data_1.z - self._calib_data_1[0].z
print "MYO_1 (Left forearm)"
print "PITCH: ", change_1.y
if (self._is_over_step(change_1.y)):
left_pose["left_e1"] += radians(-1 * change_1.y)
print "YAW: ", change_1.z
if (self._is_over_step(change_1.z)):
left_pose["left_w1"] += radians(-1 * change_1.z)
print "ROLL: ", change_1.x
if (self._is_over_step(change_1.x)):
left_pose["left_w2"] += radians(change_1.x)
return (right_pose, left_pose)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
# Specify file path below if want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| |
from __future__ import absolute_import
import os
import json
import pytest
import mock
from jsonschema import validate, SchemaError, ValidationError
from app.utils import drop_foreign_fields
from app.validation import (
validates_against_schema,
is_valid_service_id,
is_valid_date,
is_valid_acknowledged_state,
get_validation_errors,
is_valid_email_address,
is_valid_string,
min_price_less_than_max_price,
translate_json_schema_errors,
buyer_email_address_has_approved_domain,
is_approved_buyer_domain,
)
from tests.helpers import load_example_listing
def drop_api_exported_fields_so_that_api_import_will_validate(data):
return drop_foreign_fields(
data, ['id', 'lot', 'supplierId', 'supplierName', 'links', 'status',
'frameworkSlug', 'frameworkName', 'lotName', 'createdAt', 'updatedAt'])
def test_supplier_validates():
data = load_example_listing("supplier_creation")
errs = get_validation_errors("new-supplier", data)
assert len(errs) == 0
def test_supplier_validates_with_no_companies_house_number():
data = load_example_listing("supplier_creation")
data.pop("companiesHouseNumber", None)
errs = get_validation_errors("new-supplier", data)
assert len(errs) == 0
def test_supplier_fails_with_bad_companies_house_number():
data = load_example_listing("supplier_creation")
data["companiesHouseNumber"] = "short"
errs = get_validation_errors("new-supplier", data)
assert len(errs) == 1
@pytest.mark.parametrize(
'duns', ['12345678', '1234567890']
)
def test_new_supplier_fails_with_bad_duns(duns):
data = load_example_listing("new-supplier")
data["dunsNumber"] = duns
errs = get_validation_errors("new-supplier", data)
assert len(errs) == 1
def test_for_valid_date():
cases = [
("2010-01-01", True),
("2010-02-29", False),
("invalid", False)
]
for example, expected in cases:
assert is_valid_date(example) == expected
def test_for_valid_acknowledged_state():
cases = [
("all", True),
("true", True),
("false", True),
("2010-02-29", False),
("invalid", False)
]
for example, expected in cases:
assert is_valid_acknowledged_state(example) == expected
def test_for_valid_service_id():
cases = [
("valid-service-id", True),
("5-g5-0379-325", True),
("1234567890123456", True),
("VALID-service-id", True),
("invalid.service.id", False),
("invalid*service-id", False),
("", False),
("0123456789", True),
("012345678", False),
("01234567890123456789", True),
("012345678901234567890", False)
]
for example, expected in cases:
assert is_valid_service_id(example) == expected
def test_for_valid_string():
cases = [
("valid-string", 1, 200, True),
("tooshort", 100, 200, False),
("toolong", 1, 1, False),
("1234567890123456", 1, 200, True),
("THIS-IS-VALID-id", 1, 200, True),
("invalid%chars&here", 1, 200, False),
("no spaces", 1, 200, False),
("no\nnewlines", 1, 200, False),
("123-and-strings", 1, 200, True),
]
for example, min, max, expected in cases:
assert is_valid_string(example, min, max) == expected
def test_all_schemas_are_valid():
    """Every .json file under json_schemas/ must itself be a valid JSON schema."""
    for file_name in os.listdir('json_schemas'):
        # os.path.join instead of '%s' interpolation for portable path building.
        file_path = os.path.join('json_schemas', file_name)
        if os.path.isfile(file_path) and file_path.endswith(".json"):
            check_schema_file(file_path)
def test_updater_json_validates_correctly():
    """
    This schema currently allows extra fields as part of a 2 stage
    migration of API validation rules. This test will change back to
    not allowing the invalid fields when the utils is updated.
    :return:
    """
    # 'updated_by' is the only required field; extra keys are tolerated for now.
    invalid_updater_no_fields = {}
    invalid_updater_extra_fields = {'updated_by': 'this', 'invalid': 'this'}
    invalid_updater_only_invalid_fields = {'invalid': 'this'}
    valid_updater = {'updated_by': 'this'}
    assert validates_against_schema('services-update', invalid_updater_no_fields) is False
    # Extra fields alongside 'updated_by' currently pass (see docstring).
    assert validates_against_schema('services-update', invalid_updater_extra_fields) is True
    assert validates_against_schema('services-update', invalid_updater_only_invalid_fields) is False
    assert validates_against_schema('services-update', valid_updater) is True
def test_user_creation_validates():
    """Exercise the 'users' schema across valid and invalid payload variants.

    Each case carries a human-readable label which is now passed to the
    assertion so a failing variant can be identified immediately (previously
    `message` was collected but never used).
    """
    longer_than_255 = "a" * 256
    exactly_255 = "a" * 255
    case = [
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255}, True, "valid"),
        ({'emailAddress': 'thisthat.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255}, False, "invalid email thisthat.com"),
        ({'emailAddress': 'this@that',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255}, False, "invalid email this@that"),
        ({'emailAddress': 'this@t@hat.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255}, False, "invalid email this@t@hat.com"),
        ({'emailAddress': '',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255}, False, "Missing email"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'password': exactly_255}, False, "missing name"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255}, False, "missing password"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': longer_than_255}, False, "too long password"),
        # This case was mislabelled "too short password": it is a duplicate of
        # the over-long password case above (password=longer_than_255).
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': longer_than_255}, False, "too long password (duplicate case)"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': ''}, False, "too short password"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': '',
          'password': exactly_255}, False, "too short name"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255,
          'hashpw': True}, True, "valid with hashpw"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255,
          'hashpw': False}, True, "valid with dont hashpw"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'password': exactly_255,
          'hashpw': 'dewdew'}, False, "invalid hashpw"),
        ({'emailAddress': 'this@that.com',
          'role': 'invalid',
          'name': exactly_255,
          'password': exactly_255}, False, "invalid role"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'supplierId': 123,
          'password': exactly_255}, True, "valid supplier id"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'supplierId': '',
          'password': exactly_255}, False, "invalid supplier id (to short)"),
        ({'emailAddress': 'this@that.com',
          'role': 'buyer',
          'name': exactly_255,
          'supplierId': longer_than_255,
          'password': exactly_255}, False, "invalid supplier id (to long)")
    ]
    for example, expected, message in case:
        result = validates_against_schema('users', example)
        # Surface the case label so a failure names the offending variant.
        assert result == expected, message
def test_auth_user_validates():
    """Exercise the 'users-auth' schema; case labels now surface in assertions.

    Previously `message` was collected but never used, and one case was
    mislabelled (see inline comment).
    """
    longer_than_255 = "a" * 256
    exactly_255 = "a" * 255
    case = [
        ({'emailAddress': 'this@that.com',
          'password': exactly_255}, True, "valid"),
        ({'emailAddress': 'thisthat.com',
          'password': exactly_255}, False, "invalid email thisthat.com"),
        ({'emailAddress': 'this@that',
          'password': exactly_255}, False, "invalid email this@that"),
        ({'emailAddress': 'this@t@hat.com',
          'password': exactly_255}, False, "invalid email this@t@hat.com"),
        ({'emailAddress': '',
          'password': exactly_255}, False, "Missing email"),
        ({'emailAddress': 'this@that.com',
          'name': exactly_255}, False, "missing password"),
        ({'emailAddress': 'this@that.com',
          'password': longer_than_255}, False, "too long password"),
        # This case was mislabelled "too short password": it duplicates the
        # over-long password case above (password=longer_than_255).
        ({'emailAddress': 'this@that.com',
          'password': longer_than_255}, False, "too long password (duplicate case)"),
        ({'emailAddress': 'this@that.com',
          'password': ''}, False, "too short password")
    ]
    for example, expected, message in case:
        result = validates_against_schema('users-auth', example)
        # Surface the case label so a failure names the offending variant.
        assert result == expected, message
def test_valid_g4_service_has_no_validation_errors():
    """The canonical G4 example listing passes schema validation cleanly."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G4"))
    assert not get_validation_errors("services-g-cloud-4", listing)
def test_valid_g5_service_has_no_validation_errors():
    """The canonical G5 example listing passes schema validation cleanly."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G5"))
    assert not get_validation_errors("services-g-cloud-5", listing)
def test_valid_g6_service_has_no_validation_errors():
    """The canonical G6 PaaS example listing passes schema validation cleanly."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G6-PaaS"))
    assert not get_validation_errors("services-g-cloud-6-paas", listing)
def test_valid_g7_service_has_no_validation_errors():
    """The canonical G7 SCS example listing passes schema validation cleanly."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G7-SCS"))
    assert not get_validation_errors("services-g-cloud-7-scs", listing)
def test_g7_missing_required_field_has_validation_error():
    """Dropping the required serviceSummary question triggers answer_required."""
    listing = load_example_listing("G7-SCS")
    listing.pop("serviceSummary", None)
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert "answer_required" in errors['serviceSummary']
def test_enforce_required_false_allows_missing_fields():
    """With enforce_required=False, absent questions produce no errors."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G7-SCS"))
    for field in ("serviceSummary", "serviceDefinitionDocumentURL"):
        listing.pop(field, None)
    assert not get_validation_errors(
        "services-g-cloud-7-scs", listing, enforce_required=False)
def test_required_fields_param_requires_specified_fields():
    """required_fields re-enables answer_required only for the named questions."""
    listing = load_example_listing("G7-SCS")
    for field in ("serviceSummary", "serviceDefinitionDocumentURL"):
        listing.pop(field, None)
    errors = get_validation_errors(
        "services-g-cloud-7-scs", listing,
        enforce_required=False,
        required_fields=['serviceSummary'])
    assert "answer_required" in errors['serviceSummary']
def test_additional_properties_has_validation_error():
    """Unknown top-level keys are rejected via additionalProperties."""
    listing = drop_api_exported_fields_so_that_api_import_will_validate(
        load_example_listing("G7-SCS"))
    listing['newKey'] = 1
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    form_errors = "{}".format(errors['_form'])
    assert "Additional properties are not allowed ('newKey' was unexpected)" in form_errors
def test_invalid_enum_values_has_validation_error():
    """A value outside the question's enum is reported with the offending value."""
    listing = load_example_listing("G7-SCS")
    listing['minimumContractPeriod'] = 'Fortnight'
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert "'Fortnight' is not one of" in errors['minimumContractPeriod']
def test_invalid_url_field_has_validation_error():
    """A non-URL in a URL-formatted field is reported as invalid_format."""
    listing = load_example_listing("G7-SCS")
    listing['serviceDefinitionDocumentURL'] = 'not_a_url'
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert errors['serviceDefinitionDocumentURL'] == 'invalid_format'
def test_too_many_words_causes_validation_error():
    """A benefit entry over the word limit is reported as under_10_words."""
    listing = load_example_listing("G7-SCS")
    listing['serviceBenefits'] = ['more than ten words 5 6 7 8 9 10 11']
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert "under_10_words" in errors['serviceBenefits']
def test_too_many_list_items_causes_validation_error():
    """An 11-item benefits list exceeds maxItems and yields max_items_limit."""
    listing = load_example_listing("G7-SCS")
    listing['serviceBenefits'] = [str(index) for index in range(1, 12)]
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert "max_items_limit" in errors['serviceBenefits']
def test_string_too_long_causes_validation_error():
    """A 101-character serviceName breaches the limit: under_character_limit."""
    listing = load_example_listing("G7-SCS")
    listing['serviceName'] = "a" * 101
    errors = get_validation_errors("services-g-cloud-7-scs", listing)
    assert "under_character_limit" in errors['serviceName']
def test_percentage_out_of_range_causes_validation_error():
    """An availability percentage above 100 fails validation."""
    # NOTE(review): value 101 is out of range yet the reported code is
    # 'not_a_number' — confirm that is the intended code for range failures.
    listing = load_example_listing("G6-PaaS")
    listing['serviceAvailabilityPercentage'] = {
        "value": 101, "assurance": "Service provider assertion"}
    errors = get_validation_errors("services-g-cloud-7-paas", listing)
    assert "not_a_number" in errors['serviceAvailabilityPercentage']
def test_assurance_only_causes_validation_error():
    """An assurance answer with no value is reported as answer_required."""
    listing = load_example_listing("G6-PaaS")
    listing['serviceAvailabilityPercentage'] = {
        "assurance": "Service provider assertion"}
    errors = get_validation_errors("services-g-cloud-7-paas", listing)
    assert "answer_required" in errors['serviceAvailabilityPercentage']
def test_non_number_value_causes_validation_error():
    """A non-numeric availability value is reported as not_a_number."""
    listing = load_example_listing("G6-PaaS")
    listing['serviceAvailabilityPercentage'] = {
        "value": "a99.9", "assurance": "Service provider assertion"}
    errors = get_validation_errors("services-g-cloud-7-paas", listing)
    assert "not_a_number" in errors['serviceAvailabilityPercentage']
def test_value_only_causes_validation_error():
    """A value with no assurance answer is reported as assurance_required."""
    listing = load_example_listing("G6-PaaS")
    listing['serviceAvailabilityPercentage'] = {"value": 99.9}
    errors = get_validation_errors("services-g-cloud-7-paas", listing)
    assert "assurance_required" in errors['serviceAvailabilityPercentage']
def test_price_not_money_format_validation_error():
    """Malformed price strings are reported as not_money_format."""
    bad_prices = [
        "foo",       # not numeric
        "12.",       # too few decimal places
        "12.000001",  # too many decimal places
        ".1",        # too few digits
    ]
    data = load_example_listing("G7-SCS")

    def assert_not_money(field, price):
        # Mutates the shared listing, mirroring the original test's behaviour.
        data[field] = price
        errors = get_validation_errors("services-g-cloud-7-scs", data)
        assert field in errors
        assert "not_money_format" in errors[field]

    assert_not_money('priceMin', "")  # an empty minimum price is also invalid
    for price in bad_prices:
        assert_not_money('priceMin', price)
        assert_not_money('priceMax', price)
def test_price_not_money_format_valid_cases():
    """Well-formed price strings (1 to 5 decimal places) raise no money-format error."""
    good_prices = ['12', '12.1', '12.11', '12.111', '12.1111', '12.11111']
    data = load_example_listing("G7-SCS")

    def assert_money_ok(field, price):
        # Mutates the shared listing, mirroring the original test's behaviour.
        data[field] = price
        errors = get_validation_errors("services-g-cloud-7-scs", data)
        assert "not_money_format" not in errors.get(field, "")

    assert_money_ok('priceMax', "")  # an empty maximum price is allowed
    for price in good_prices:
        assert_money_ok('priceMin', price)
        assert_money_ok('priceMax', price)
def test_min_price_larger_than_max_price_causes_validation_error():
    """A priceMax below the listing's priceMin is flagged as max_less_than_min."""
    for price_max in ('32.20', '9.00'):
        listing = load_example_listing("G7-SCS")
        listing["priceMax"] = price_max
        errors = get_validation_errors("services-g-cloud-7-scs", listing)
        assert errors['priceMax'] == 'max_less_than_min'
def test_max_price_larger_than_min_price():
    """A priceMax above priceMin — or left empty — yields no priceMax error."""
    for price_max in ('132.20', ''):
        listing = load_example_listing("G7-SCS")
        listing["priceMax"] = price_max
        errors = get_validation_errors("services-g-cloud-7-scs", listing)
        assert 'priceMax' not in errors
def test_max_price_larger_than_min_price_with_multiple_price_fields():
    """Only the role whose max is below its min is flagged; other roles pass."""
    prices = {
        'agileCoachPriceMin': '200',
        'agileCoachPriceMax': '250',
        'developerPriceMin': '200',
        'developerPriceMax': '25',
        'windowCleanerPriceMin': '12.50',
        'windowCleanerPriceMax': '300',
    }
    assert min_price_less_than_max_price({}, prices) == {
        'developerPriceMax': 'max_less_than_min'}
def test_max_price_larger_than_min_price_with_multiple_price_errors():
    """Every role whose max is below its min is flagged independently."""
    prices = {
        'agileCoachPriceMin': '200',
        'agileCoachPriceMax': '250',
        'developerPriceMin': '200',
        'developerPriceMax': '25',
        'designerPriceMin': '300',
        'designerPriceMax': '299.99',
        'windowCleanerPriceMin': '12.50',
        'windowCleanerPriceMax': '300',
    }
    assert min_price_less_than_max_price({}, prices) == {
        'developerPriceMax': 'max_less_than_min',
        'designerPriceMax': 'max_less_than_min',
    }
def test_max_price_larger_than_min_does_not_overwrite_previous_errors():
    """A field that already has an error is skipped by the min/max check."""
    prices = {
        'agileCoachPriceMin': '200',
        'agileCoachPriceMax': '250',
        'developerPriceMin': '200',
        'developerPriceMax': '25',
        'designerPriceMin': '300',
        'designerPriceMax': '299.99',
        'windowCleanerPriceMin': '12.50',
        'windowCleanerPriceMax': '300',
    }
    # designerPriceMax already carries an error, so it must not be re-reported.
    existing = {'designerPriceMax': 'non_curly_quotes'}
    assert min_price_less_than_max_price(existing, prices) == {
        'developerPriceMax': 'max_less_than_min'}
@pytest.mark.parametrize("schema_name", (
    "brief-responses-digital-outcomes-and-specialists-digital-specialists",
    "brief-responses-digital-outcomes-and-specialists-2-digital-specialists",
))
def test_brief_response_essential_requirements(schema_name):
    """Over-long and missing essential-requirement evidence are both reported with indices."""
    brief_response = {
        "availability": "valid start date",
        "dayRate": "100",
        "essentialRequirementsMet": True,
        "essentialRequirements": [
            {"evidence": "valid evidence"},
            {"evidence": "word " * 100},  # over the 100-word limit
            {"evidence": "some more valid evidence"},
            {},  # evidence missing entirely
        ],
        "niceToHaveRequirements": [
            {"yesNo": False},
        ],
        "respondToEmailAddress": "valid@email.com",
    }
    expected = {
        'essentialRequirements': [
            {
                'error': 'under_100_words',
                'field': 'evidence',
                'index': 1,
            },
            {
                'error': 'answer_required',
                'field': 'evidence',
                'index': 3,
            },
        ],
    }
    assert get_validation_errors(schema_name, brief_response) == expected
@pytest.mark.parametrize("schema_name", (
    "brief-responses-digital-outcomes-and-specialists-digital-specialists",
    "brief-responses-digital-outcomes-and-specialists-2-digital-specialists",
))
class TestBriefResponseNiceToHaveRequirements:
    """Validation of the optional niceToHaveRequirements section of a brief response."""
    def setup_method(self, method):
        # Minimal valid brief response; each test adds niceToHaveRequirements on top.
        self.data = {
            "availability": "valid start date",
            "dayRate": "100",
            "essentialRequirementsMet": True,
            "essentialRequirements": [
                {"evidence": "valid evidence"},
            ],
            "respondToEmailAddress": "valid@email.com",
        }
    def test_nice_to_have_optional(self, schema_name):
        """Omitting niceToHaveRequirements entirely is valid."""
        assert not get_validation_errors(schema_name, self.data)
    def test_error_messages(self, schema_name):
        """Each invalid entry is reported with its list index and an error code."""
        self.data["niceToHaveRequirements"] = [
            {},                                             # missing yesNo -> answer_required
            {"yesNo": True, "evidence": "valid evidence"},
            {"yesNo": True, "evidence": "word " * 100},     # over the 100-word limit
            {"yesNo": True},                                # yes but no evidence
            {"yesNo": False},
            {"yesNo": False, "evidence": "shouldnt be here"},  # evidence forbidden when yesNo is False
        ]
        assert get_validation_errors(schema_name, self.data)["niceToHaveRequirements"] == [
            {
                'error': 'answer_required',
                'field': 'yesNo',
                'index': 0
            },
            {
                'error': 'under_100_words',
                'field': 'evidence',
                'index': 2
            },
            {
                'error': 'answer_required',
                'field': 'evidence',
                'index': 3
            },
            {
                'error': (
                    # python 3.6+ guarantees consistent dict ordering
                    "{'yesNo': False, 'evidence': 'shouldnt be here'} is not valid under any of the given schemas"
                ),
                'index': 5,
            },
        ]
    def test_pure_booleans(self, schema_name):
        """A bare boolean entry is rejected with the raw schema type error."""
        self.data["niceToHaveRequirements"] = [True, True, True]
        assert get_validation_errors(schema_name, self.data)["niceToHaveRequirements"] == "True is not of type 'object'"
    def test_dict_bool_mix(self, schema_name):
        """A boolean mixed into otherwise dict entries still yields the type error."""
        self.data["niceToHaveRequirements"] = [
            {"yesNo": False, "evidence": "shouldnt be here"},
            True,
            {'yesNo': False},
            {"yesNo": False, "evidence": "shouldnt be here"},
        ]
        assert get_validation_errors(schema_name, self.data)["niceToHaveRequirements"] == "True is not of type 'object'"
@pytest.mark.parametrize("schema_name", ("services-g-cloud-9-cloud-software", "services-g-cloud-9-cloud-hosting"))
@pytest.mark.parametrize("required_fields,input_data,expected_errors", (
    # Valid responses for boolean question with followup:
    (["freeVersionTrial"], {"freeVersionTrialOption": False}, {}),
    (
        ["freeVersionTrialOption"],
        {
            "freeVersionTrialOption": True,
            "freeVersionDescription": "description",
            "freeVersionLink": "https://gov.uk",
        },
        {},
    ),
    # Missing followup answers:
    (
        ["freeVersionTrialOption"],
        {"freeVersionTrialOption": True},
        {"freeVersionDescription": "answer_required"},
    ),
    (
        ["freeVersionTrialOption"],
        {"freeVersionTrialOption": True, "freeVersionLink": "https://gov.uk"},
        {"freeVersionDescription": "answer_required"},
    ),
    # Missing followup question is not required if it's optional:
    (
        ["freeVersionTrialOption"],
        {"freeVersionTrialOption": True, "freeVersionDescription": "description"},
        {},
    ),
    # Followup answers when none should be present. These return the original
    # schema error since they shouldn't happen when request is coming from the
    # frontend app so we don't need to display an error message.
    (
        ["freeVersionTrialOption"],
        {
            "freeVersionTrialOption": False,
            "freeVersionLink": "https://gov.uk",
        },
        {'_form': ['{} is not valid under any of the given schemas'.format({
            "freeVersionTrialOption": False,
            "freeVersionLink": "https://gov.uk",
        })]},
    ),
    (
        ["freeVersionTrialOption"],
        {
            "freeVersionTrialOption": False,
            "freeVersionDescription": "description",
        },
        {'_form': [u'{} is not valid under any of the given schemas'.format({
            "freeVersionTrialOption": False,
            "freeVersionDescription": "description",
        })]},
    ),
    # Followup answers when the original question answer is missing
    (
        ["freeVersionTrialOption"],
        {"freeVersionDescription": "description", "freeVersionLink": "https://gov.uk"},
        {"freeVersionTrialOption": "answer_required"},
    ),
    # Valid responses for checkbox question with followups
    (
        ["securityGovernanceStandards"],
        {"securityGovernanceAccreditation": True, "securityGovernanceStandards": ["csa_ccm"]},
        {},
    ),
    (
        ["securityGovernanceStandards"],
        {"securityGovernanceAccreditation": False, "securityGovernanceApproach": "some other approach"},
        {},
    ),
    (
        ["securityGovernanceStandards"],
        {
            "securityGovernanceAccreditation": True,
            "securityGovernanceStandards": ["csa_ccm", "other"],
            "securityGovernanceStandardsOther": "some other standards",
        },
        {},
    ),
    # Missing followup answers for checkbox question
    (
        ["securityGovernanceStandards"],
        {"securityGovernanceAccreditation": True, "securityGovernanceStandards": ["csa_ccm", "other"]},
        {"securityGovernanceStandardsOther": "answer_required"},
    ),
    # Followup answers when none should be present
    (
        ["securityGovernanceStandards"],
        {
            "securityGovernanceAccreditation": True,
            "securityGovernanceStandards": ["csa_ccm"],
            "securityGovernanceStandardsOther": "some other standards",
        },
        {"_form": [u'{} is not valid under any of the given schemas'.format({
            "securityGovernanceAccreditation": True,
            "securityGovernanceStandards": ["csa_ccm"],
            "securityGovernanceStandardsOther": "some other standards",
        })]},
    ),
    # Followup answers when the original question answer is missing
    (
        ["securityGovernanceAccreditation"],
        {"securityGovernanceStandards": ["csa_ccm"]},
        {"securityGovernanceAccreditation": "answer_required"},
    ),
    (
        ["securityGovernanceStandards"],
        {"securityGovernanceStandardsOther": "some other standards"},
        {"securityGovernanceStandards": "answer_required"},
    ),
))
def test_g9_followup_questions(schema_name, required_fields, input_data, expected_errors):
    """Follow-up question answers must be present iff their parent answer requires them."""
    assert get_validation_errors(
        schema_name,
        input_data,
        enforce_required=False,
        required_fields=required_fields,
    ) == expected_errors
@pytest.mark.parametrize("required_fields,input_data,expected_errors", (
    (
        ["openStandardsPrinciples", "dataProtocols"],
        {
            "openStandardsPrinciples": True,
            "dataProtocols": True,
            "designerPriceMax": "900",
        },
        {},
    ),
    # by including designerPriceMax in the required_fields we're stating that its dependencies must also be satisfied
    (
        ["openStandardsPrinciples", "dataProtocols", "designerPriceMax"],
        {
            "openStandardsPrinciples": True,
            "dataProtocols": True,
            "designerPriceMax": "900",
        },
        {
            'designerAccessibleApplications': 'answer_required',
            'designerLocations': 'answer_required',
            'designerPriceMin': 'answer_required',
        },
    ),
))
def test_dos4_dependent_questions(required_fields, input_data, expected_errors):
    """Requiring a field with schema "dependencies" pulls in its dependent questions too."""
    # specifically dos4 services because they have "dependencies"
    assert get_validation_errors(
        "services-digital-outcomes-and-specialists-4-digital-specialists",
        input_data,
        enforce_required=False,
        required_fields=required_fields,
    ) == expected_errors
def test_api_type_is_optional():
    """Removing apiType from a G6 PaaS listing must not produce an apiType error."""
    listing = load_example_listing("G6-PaaS")
    listing.pop("apiType")
    errors = get_validation_errors("services-g-cloud-7-paas", listing)
    assert not errors.get('apiType', None)
def check_schema_file(file_path):
    """Assert that the JSON document at `file_path` is a well-formed JSON schema."""
    with open(file_path) as schema_file:
        assert check_schema(json.load(schema_file))
def check_schema(schema):
    """Return True when `schema` is a well-formed JSON schema, False otherwise.

    Validating an empty instance forces the schema itself to be compiled;
    a ValidationError therefore still means the schema is structurally fine.
    """
    try:
        validate({}, schema)
    except SchemaError as ex:
        print('Invalid JSON schema: %s' % ex.message)
        return False
    except ValidationError:
        pass
    return True
def api_error(errors):
    """Rebuild ValidationError objects from serialized error dicts and translate them.

    Each dict's 'context' entries are first turned into nested ValidationErrors,
    then the dict itself becomes a ValidationError, mirroring what jsonschema
    would have produced.
    """
    schema_errors = []
    for error_data in errors:
        error_data['context'] = [
            ValidationError(message=ctx['message'], validator=ctx['validator'])
            for ctx in error_data.get('context', [])
        ]
        schema_errors.append(ValidationError(**error_data))
    return translate_json_schema_errors(schema_errors, {})
def test_translate_oneof_errors():
    """A oneOf failure whose context is a 'required' error becomes answer_required."""
    raw_errors = [{
        'validator': 'oneOf',
        'message': "failed",
        'path': ['example', 0],
        'context': [
            {'message': "'example-field' required", 'validator': 'required'}
        ],
    }]
    expected = {'example': [{'error': 'answer_required', 'field': 'example-field', 'index': 0}]}
    assert api_error(raw_errors) == expected
def test_translate_unknown_oneoff_eerror():  # (sic) name typo kept to preserve the test id
    """A oneOf failure with an unrecognised context falls back to the raw message."""
    raw_errors = [{
        'validator': 'oneOf',
        'message': "failed",
        'path': ['example', 0],
        'context': [
            {'message': "Unknown type", 'validator': 'type'}
        ],
    }]
    expected = {'example': [{'error': 'failed', 'index': 0}]}
    assert api_error(raw_errors) == expected
@pytest.mark.parametrize(
    'email, expected_result', [
        ('hurray@cool.gov', True), ('hurray@very.cool.gov', True), ('hurray@notcool.gov', False)
    ]
)
def test_buyer_email_address_has_approved_domain(email, expected_result):
    """Approval covers the registered domain and its subdomains, nothing else."""
    approved_domains = [mock.Mock(domain_name='cool.gov')]
    assert buyer_email_address_has_approved_domain(approved_domains, email) == expected_result
@pytest.mark.parametrize(
    'domain, expected_result', [
        ('cool.gov', True), ('very.cool.gov', True), ('notcool.gov', False)
    ]
)
def test_is_approved_buyer_domain(domain, expected_result):
    """A domain is approved if it is, or is a subdomain of, a registered domain."""
    approved_domains = [mock.Mock(domain_name='cool.gov')]
    assert is_approved_buyer_domain(approved_domains, domain) == expected_result
@pytest.mark.parametrize(
    ("email_address", "is_valid"),
    (
        ("me@example.com", True),
        ("very.common@example.com", True),
        ("disposable.style.email.with+symbol@example.com", True),
        ("", False),
        ("Abc.example.com", False),  # no @ separator at all
        ("email-address-with-NUL\x00@example.com", False),  # embedded NUL byte
        (r'a"b(c)d,e:f;g<h>i[j\k]l@example.com', False),  # unquoted special characters
    ))
def test_is_valid_email_address(email_address, is_valid):
    """is_valid_email_address accepts common addresses and rejects malformed ones."""
    assert is_valid_email_address(email_address) is is_valid
| |
"""DHCPv4 options part1"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
import srv_msg
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_subnet_mask():
    """Server returns the configured subnet-mask (option 1) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('subnet-mask', '255.255.255.0')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    # Client explicitly requests option 1 in its parameter request list.
    srv_msg.client_requests_option(1)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(1)
    srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
    # References: v4.options, v4.prl, RFC2131
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_time_offset():
    """Server returns the configured time-offset (option 2) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('time-offset', '50')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(2)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(2)
    srv_msg.response_check_option_content(2, 'value', 50)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_routers():
    """Server returns both configured router addresses (option 3) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('routers', '100.100.100.10,50.50.50.5')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(3)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'value', '100.100.100.10')
    srv_msg.response_check_option_content(3, 'value', '50.50.50.5')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_time_servers():
    """Server returns both configured time servers (option 4) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('time-servers', '199.199.199.1,199.199.199.2')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(4)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(4)
    srv_msg.response_check_option_content(4, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(4, 'value', '199.199.199.2')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_name_servers():
    """Server returns both configured IEN-116 name servers (option 5) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('name-servers', '199.199.199.1,100.100.100.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(5)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(5)
    srv_msg.response_check_option_content(5, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(5, 'value', '100.100.100.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_domain_name_servers():
    """Server returns both configured DNS servers (option 6) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('domain-name-servers', '199.199.199.1,100.100.100.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(6)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(6)
    srv_msg.response_check_option_content(6, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(6, 'value', '100.100.100.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_domain_name_servers_csv_correct():
    """Raw hex option data (csv-format False) is decoded and served as an address.

    0xC0000201 is 192.0.2.1, which the client must receive in option 6.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.add_line({"option-data": [{"code": 6, "data": "C0000201", "csv-format": False}]})
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(6)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(6)
    srv_msg.response_check_option_content(6, 'value', '192.0.2.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_domain_name_servers_csv_incorrect_hex():
    """Non-hex option data ('Z' in the string) is exercised at the configure stage.

    NOTE(review): start_srv_during_process('DHCP', 'configure') appears to
    expect the failure to occur while configuring — confirm against the harness.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.add_line({"option-data": [{"code": 6, "data": "C000020Z1", "csv-format": False}]})
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configure')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_domain_name_servers_csv_incorrect_address():
    """Dotted-quad data with csv-format False (raw hex expected) is exercised at configure.

    NOTE(review): as with the hex case above, the harness call suggests a
    configuration-stage failure is expected — confirm.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.add_line({"option-data": [{"code": 6, "data": "199.0.2.1", "csv-format": False}]})
    srv_control.build_and_send_config_files()
    srv_control.start_srv_during_process('DHCP', 'configure')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_log_servers():
    """Server returns both configured log servers (option 7) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('log-servers', '199.199.199.1,100.100.100.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(7)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(7)
    srv_msg.response_check_option_content(7, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(7, 'value', '100.100.100.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_cookie_servers():
    """Server returns both configured cookie servers (option 8) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('cookie-servers', '199.199.199.1,100.100.100.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(8)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(8)
    srv_msg.response_check_option_content(8, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(8, 'value', '100.100.100.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_lpr_servers():
    """Server returns both configured LPR servers (option 9) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('lpr-servers', '199.199.199.1,150.150.150.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(9)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(9)
    srv_msg.response_check_option_content(9, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(9, 'value', '150.150.150.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_impress_servers():
    """Server returns both configured Impress servers (option 10) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('impress-servers', '199.199.199.1,150.150.150.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(10)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(10)
    srv_msg.response_check_option_content(10, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(10, 'value', '150.150.150.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_resource_location_servers():
    """Server returns both configured RLP servers (option 11) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('resource-location-servers', '199.199.199.1,150.150.150.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(11)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(11)
    srv_msg.response_check_option_content(11, 'value', '199.199.199.1')
    srv_msg.response_check_option_content(11, 'value', '150.150.150.1')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_host_name():
    """Server returns the configured host-name (option 12) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('host-name', 'isc.example.com')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(12)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(12)
    srv_msg.response_check_option_content(12, 'value', 'isc.example.com')
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_boot_size():
    """Server returns the configured boot-size (option 13) in its OFFER."""
    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('boot-size', '55')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_requests_option(13)
    srv_msg.client_send_msg('DISCOVER')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(13)
    srv_msg.response_check_option_content(13, 'value', 55)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_merit_dump():
    """DISCOVER requesting option 14 (merit-dump) must yield an OFFER echoing
    the configured value."""
    option_code = 14
    configured = 'some-string'

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('merit-dump', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', configured)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_swap_server():
    """DISCOVER requesting option 16 (swap-server) must yield an OFFER echoing
    the configured address."""
    option_code = 16
    configured = '199.199.199.1'

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('swap-server', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', configured)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_root_path():
    """DISCOVER requesting option 17 (root-path) must yield an OFFER echoing
    the configured path."""
    option_code = 17
    configured = '/some/location/example/'

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('root-path', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', configured)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_extensions_path():
    """DISCOVER requesting option 18 (extensions-path) must yield an OFFER
    echoing the configured path."""
    option_code = 18
    configured = '/some/location/example/'

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('extensions-path', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', configured)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_policy_filter():
    """DISCOVER requesting option 21 (policy-filter) must yield an OFFER
    carrying both configured addresses."""
    option_code = 21

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('policy-filter', '199.199.199.1,50.50.50.1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    for address in ('199.199.199.1', '50.50.50.1'):
        srv_msg.response_check_option_content(option_code, 'value', address)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_max_dgram_reassembly():
    """DISCOVER requesting option 22 (max-dgram-reassembly) must yield an
    OFFER with the configured numeric value."""
    option_code = 22
    configured = '600'  # server config takes a string
    expected = 600      # response check compares the parsed integer

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('max-dgram-reassembly', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', expected)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_default_ip_ttl():
    """DISCOVER requesting option 23 (default-ip-ttl) must yield an OFFER
    with the configured numeric value."""
    option_code = 23
    configured = '86'  # server config takes a string
    expected = 86      # response check compares the parsed integer

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('default-ip-ttl', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', expected)
@pytest.mark.v4
@pytest.mark.options
@pytest.mark.subnet
def test_v4_options_path_mtu_aging_timeout():
    """DISCOVER requesting option 24 (path-mtu-aging-timeout) must yield an
    OFFER with the configured numeric value."""
    option_code = 24
    configured = '85'  # server config takes a string
    expected = 85      # response check compares the parsed integer

    misc.test_setup()
    srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.10')
    srv_control.config_srv_opt('path-mtu-aging-timeout', configured)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_requests_option(option_code)
    srv_msg.client_send_msg('DISCOVER')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'OFFER')
    srv_msg.response_check_include_option(option_code)
    srv_msg.response_check_option_content(option_code, 'value', expected)
| |
import tensorflow as tf
import numpy as np
import pickle
import time
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from dnc_v3 import DNC
from recurrent_controller import StatelessRecurrentController
def exact_acc(target_batch, predict_batch, stop_S=0, pprint=1.0):
    """Mean positional-match accuracy between target and predicted batches.

    For each row, every occurrence of the stop symbol ``stop_S`` is removed
    from both sequences, then accuracy is the number of positions where the
    trimmed sequences agree, divided by the longer trimmed length (so extra
    or missing tokens count as errors).

    :param target_batch: 2-D indexable batch of target token ids.
    :param predict_batch: 2-D indexable batch of predicted token ids.
    :param stop_S: stop symbol stripped from both sequences before scoring.
    :param pprint: a random sample of rows with rand() > pprint is printed
        for debugging (default 1.0 prints nothing).
    :returns: float, mean per-row accuracy over the batch.
    """
    acc = []
    for b in range(target_batch.shape[0]):
        trim_target = [t for t in target_batch[b] if t != stop_S]
        trim_predict = [t for t in predict_batch[b] if t != stop_S]
        # Keep the RNG draw unconditional so the global numpy RNG stream is
        # unchanged relative to the historical behavior.
        if np.random.rand() > pprint:
            print('{} vs {}'.format(trim_target, trim_predict))
        matches = 0
        for n1, n2 in zip(trim_predict, trim_target):
            if n1 == n2:
                matches += 1
        denom = max(len(trim_target), len(trim_predict))
        # Bug fix: previously this divided by zero when both trimmed
        # sequences were empty; treat "nothing expected, nothing predicted"
        # as a perfect match.
        acc.append(float(matches) / denom if denom else 1.0)
    return np.mean(acc)
def llprint(message):
    """Write *message* to stdout immediately (no newline, no buffering)."""
    stream = sys.stdout
    stream.write(message)
    stream.flush()
def load(path):
    """Unpickle and return the object stored at *path*.

    Bug fix: the previous one-liner ``pickle.load(open(path, 'rb'))`` never
    closed the file handle; use a context manager so it is always released.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)
def onehot(index, size):
    """Return a float32 vector of length *size* that is 1.0 at *index*
    and 0.0 everywhere else."""
    encoded = np.zeros(size, dtype=np.float32)
    encoded[int(index)] = 1.0
    return encoded
def copy_sample(vocab_lower, vocab_upper, length_from, length_to):
    """Draw one copy-task example: a random token sequence terminated by the
    stop symbol 0; the target is an identical sequence.

    Tokens are sampled uniformly (with replacement) from
    [vocab_lower, vocab_upper); the length is length_from when the bounds
    coincide, otherwise uniform in [length_from, length_to].
    """
    if length_from == length_to:
        sample_len = length_from
    else:
        sample_len = np.random.randint(length_from, length_to + 1)
    tokens = np.random.choice(list(range(int(vocab_lower), int(vocab_upper))),
                              int(sample_len), replace=True)
    sequence = tokens.tolist() + [0]
    # Input and output are equal but distinct list objects.
    return sequence, list(sequence)
def sort_sample(vocab_lower, vocab_upper, length_from, length_to):
    """Draw one sort-task example: a random token sequence as input and the
    same tokens sorted ascending as target, each terminated by stop symbol 0.

    Tokens are sampled uniformly (with replacement) from
    [vocab_lower, vocab_upper); the length is length_from when the bounds
    coincide, otherwise uniform in [length_from, length_to].
    """
    if length_from == length_to:
        sample_len = length_from
    else:
        sample_len = np.random.randint(length_from, length_to + 1)
    tokens = np.random.choice(list(range(int(vocab_lower), int(vocab_upper))),
                              int(sample_len), replace=True)
    return tokens.tolist() + [0], sorted(tokens) + [0]
def sumfr_sample(vocab_lower, vocab_upper, length_from, length_to):
    """Draw one sum-front/rear example: for input x of length L the target's
    i-th token is (x[i] + x[L-1-i]) // 2 for the first L // 2 positions; both
    sequences are terminated by the stop symbol 0.

    Tokens are sampled uniformly (with replacement) from
    [vocab_lower, vocab_upper); the length is length_from when the bounds
    coincide, otherwise uniform in [length_from, length_to].
    """
    if length_from == length_to:
        sample_len = length_from
    else:
        sample_len = np.random.randint(length_from, length_to + 1)
    tokens = np.random.choice(list(range(int(vocab_lower), int(vocab_upper))),
                              int(sample_len), replace=True)
    inp = tokens.tolist()
    half_means = [(inp[i] + inp[-1 - i]) // 2 for i in range(len(inp) // 2)]
    return inp + [0], half_means + [0]
def prepare_batch(bs, vocab_size, length_from, length_to, args):
    """Sample and one-hot encode a batch for the task selected by args.task.

    :param bs: batch size.
    :param vocab_size: one-hot dimensionality (token ids in [0, vocab_size)).
    :param length_from, length_to: inclusive bounds for the sequence length
        drawn once per batch (all rows share the same length).
    :param args: parsed CLI namespace; fields read here: task, mem_size,
        max_remmember, mode, memo_type.
    :returns: (inps, oups, oups2, enc_len, dec_len, lin, lou, hold_mem) where
        inps/oups are one-hot tensors of shape [bs, len+1, vocab_size],
        oups2 is an all-zeros placeholder of the output shape (never filled
        here; the caller feeds it as the decoder input), lin/lou are the raw
        integer sequences, and hold_mem is a bool vector of length enc_len
        that is False at the time steps where memory writing is released.

    NOTE(review): if args.task matches none of copy/sort/sumfr, the loop
    body raises NameError on the unset i/o — callers must pass a known task.
    """
    length=np.random.randint(length_from, length_to + 1)
    inps=np.zeros(shape= [bs,length+1, vocab_size])
    lout=length
    # sumfr targets are only half as long as the input.
    if "sumfr" in args.task:
        lout=length//2
    oups=np.zeros(shape=[bs,lout+1, vocab_size])
    oups2=np.zeros(shape=[bs,lout+1, vocab_size])
    hold_mem = np.ones(length+1, dtype=bool)
    # print(hold_mem)
    # Release memory writes every `holdstep` steps, capped at max_remmember
    # release points over the encoding.
    holdstep=(length+1)//(args.mem_size+1)
    holdstep=min(holdstep, args.max_remmember)
    if args.mode=='train':
        # With no explicit memo_type, jitter the release period per batch.
        if args.memo_type=="":
            holdstep=np.random.poisson(lam=holdstep, size=1)[0]
    if holdstep>0:
        for iii in range(holdstep, int(length+1), holdstep):
            hold_mem[iii] = False
    else:
        # Degenerate period: release exactly once, at the midpoint.
        hold_mem[(length+1)//2] = False
    # print(hold_mem)
    lin=[]
    lou=[]
    for b in range(bs):
        if "copy" in args.task:
            i,o=copy_sample(1,vocab_size,length, length)
        elif "sort" in args.task:
            i,o=sort_sample(1,vocab_size,length, length)
        elif "sumfr" in args.task:
            i,o=sumfr_sample(1,vocab_size,length, length)
        #print(i)
        #print(o)
        lin.append(i)
        lou.append(o)
        c=0
        for c1 in i :
            inps[b, c, :]=onehot(c1, vocab_size)
            c+=1
        c = 0
        for c2 in o:
            oups[b, c, :] = onehot(c2, vocab_size)
            c += 1
    #print(inps)
    #print(oups)
    return inps, oups, oups2, length+1, lout+1, lin, lou, hold_mem
def copy_long_task(args):
    """Build, then train and/or evaluate a DNC with a stateless recurrent
    controller on the synthetic copy/sort/sumfr task selected by args.task.

    Side effects: builds a TF1 graph and session, writes TensorBoard
    summaries under ./data/summary (train mode), and saves a checkpoint to
    data/save/checkpoints_<task>_task whenever validation accuracy improves.
    """
    dirname = os.path.dirname(os.path.abspath(__file__)) + '/data/save/'
    print(dirname)
    ckpts_dir = os.path.join(dirname, 'checkpoints_{}_task'.format(args.task))
    llprint("Loading Data ... ")
    batch_size = args.batch_size
    input_size = args.number_range
    output_size = args.number_range
    print('dim out {}'.format(output_size))
    words_count = args.mem_size
    word_size = args.word_size
    read_heads = args.read_heads
    learning_rate = args.learning_rate
    momentum = 0.9  # only used by the commented-out RMSProp optimizer below
    iterations = args.iterations
    start_step = 0
    # Session config: pin CPU parallelism, let GPU memory grow on demand.
    config = tf.ConfigProto(device_count={'CPU': args.cpu_num})
    config.intra_op_parallelism_threads = args.cpu_num
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = args.gpu_ratio
    graph = tf.Graph()
    with graph.as_default():
        tf.contrib.framework.get_or_create_global_step()
    with tf.Session(graph=graph, config=config) as session:
        llprint("Building Computational Graph ... ")
        ncomputer = DNC(
            StatelessRecurrentController,
            input_size,
            output_size,
            output_size,
            words_count,
            word_size,
            read_heads,
            batch_size,
            use_mem=args.use_mem,
            controller_cell_type=args.cell_type,
            use_emb_encoder=False,
            use_emb_decoder=False,
            dual_controller=True,
            decoder_mode=False,
            write_protect=True,
            incremental_read=args.iread,
            memory_read_heads_decode=args.read_heads_decode,
            hold_mem_mode=args.hold_mem_mode,
            hidden_controller_dim=args.hidden_dim,
            attend_dim=args.attend,
            cache_attend_dim=args.cache_attend_dim,
            nlayer=args.nlayer,
            batch_norm=args.batch_norm,
            pass_encoder_state=True,
            name='dnc'+str(args.cache_attend_dim)+str(args.hold_mem_mode)+args.memo_type+str(args.max_remmember)
        )
        # optimizer = tf.train.RMSPropOptimizer(learning_rate, momentum=momentum)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        _, prob, loss, apply_gradients = ncomputer.build_loss_function(optimizer, clip_s=[-5, 5])
        llprint("Done!\n")
        llprint("Done!\n")
        llprint("Initializing Variables ... ")
        session.run(tf.global_variables_initializer())
        llprint("Done!\n")
        variables_names = [v.name for v in tf.trainable_variables()]
        values = session.run(variables_names)
        print("SHOW VARIABLES")
        for k, v in zip(variables_names, values):
            print("Variable: {} shape {} ".format(k, v.shape))
            # print (v)
        print("*************")
        # NOTE(review): 'is not' is an identity comparison; this should be
        # args.from_checkpoint != ''. It happens to work on CPython thanks
        # to small-string interning, but is fragile.
        if args.from_checkpoint is not '':
            if args.from_checkpoint == 'default':
                from_checkpoint = ncomputer.print_config()
            else:
                from_checkpoint = args.from_checkpoint
            llprint("Restoring Checkpoint %s ... " % from_checkpoint)
            ncomputer.restore(session, ckpts_dir, from_checkpoint)
            llprint("Done!\n")
        last_100_losses = []
        print('no param {}'.format(ncomputer.get_size_model()))
        start = 1 if start_step == 0 else start_step + 1
        end = start_step + iterations + 1
        if args.mode == 'test':
            # Test mode: a single pass through the loop at i == 0.
            start = 0
            end = start
        start_time_100 = time.time()
        avg_100_time = 0.
        avg_counter = 0
        if args.mode == 'train':
            log_dir = './data/summary/log_{}/'.format(args.task)
            if not os.path.isdir(log_dir):
                os.mkdir(log_dir)
            log_dir = '{}/{}/'.format(log_dir, ncomputer.print_config())
            if not os.path.isdir(log_dir):
                os.mkdir(log_dir)
            train_writer = tf.summary.FileWriter(log_dir, session.graph)
        min_tloss = 0
        for i in range(start, end + 1):
            try:
                llprint("\rIteration %d/%d" % (i, end))
                input_data, target_output, itarget, seq_len, decoder_length, _, _, hold = \
                    prepare_batch(batch_size,args.number_range, args.length_from, args.length_to,args)
                fd={
                    ncomputer.input_encoder: input_data,
                    ncomputer.input_decoder: itarget,
                    ncomputer.target_output: target_output,
                    ncomputer.sequence_length: seq_len,
                    ncomputer.decode_length: decoder_length,
                }
                if args.hold_mem_mode>0:
                    fd[ncomputer.hold_mem]=hold
                summerize = (i % args.valid_time == 0)
                if args.mode == 'train':
                    loss_value, _ = session.run([
                        loss,
                        apply_gradients
                    ], feed_dict=fd)
                    last_100_losses.append(loss_value)
                if summerize:
                    llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
                    trscores_acc = []
                    summary = tf.Summary()
                    summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
                    ncomputer.clear_current_mem(session)
                    # Validation: 5 freshly sampled batches, greedy argmax decode.
                    for ii in range(5):
                        input_data, target_output, itarget, seq_len, decoder_length, brin, brout, hold = \
                            prepare_batch(batch_size, args.number_range, args.length_from, args.length_to, args)
                        fd = {
                            ncomputer.input_encoder: input_data,
                            ncomputer.input_decoder: itarget,
                            ncomputer.target_output: target_output,
                            ncomputer.sequence_length: seq_len,
                            ncomputer.decode_length: decoder_length,
                        }
                        if args.hold_mem_mode > 0:
                            fd[ncomputer.hold_mem] = hold
                        out = session.run([prob], feed_dict=fd)
                        out = np.reshape(np.asarray(out), [-1, decoder_length, output_size])
                        out = np.argmax(out, axis=-1)
                        bout_list = []
                        # Trim each predicted row at the first stop token (0).
                        for b in range(out.shape[0]):
                            out_list = []
                            for io in range(out.shape[1]):
                                if out[b][io] == 0:
                                    break
                                out_list.append(out[b][io])
                            bout_list.append(out_list)
                        trscores_acc.append(exact_acc(np.asarray(brout), np.asarray(bout_list), pprint=0.95))
                    tpre=np.mean(trscores_acc)
                    print('acc {}'.format(tpre))
                    if args.mode == 'train':
                        summary.value.add(tag='train_acc', simple_value=tpre)
                        train_writer.add_summary(summary, i)
                        train_writer.flush()
                    end_time_100 = time.time()
                    elapsed_time = (end_time_100 - start_time_100) / 60
                    avg_counter += 1
                    # Running mean of the per-validation-window elapsed time.
                    avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
                    estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
                    print("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
                    print("\tApprox. time to completion: %.2f hours" % (estimated_time))
                    start_time_100 = time.time()
                    last_100_losses = []
                    # Checkpoint only when validation accuracy improves.
                    if args.mode == 'train' and tpre > min_tloss:
                        min_tloss = tpre
                        llprint("\nSaving Checkpoint ... "),
                        ncomputer.save(session, ckpts_dir, ncomputer.print_config())
                        llprint("Done!\n")
            except KeyboardInterrupt:
                sys.exit(0)
def str2bool(v):
    """Parse a CLI boolean flag (for argparse ``type=``).

    Accepts yes/true/t/y/1 and no/false/f/n/0 (case-insensitive).

    :raises argparse.ArgumentTypeError: for any other value.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    # Bug fix: 'argparse' was previously only imported inside the __main__
    # guard, so reaching this branch from an importing module raised
    # NameError instead of the intended error. Import locally instead.
    import argparse
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
    # Script entry point: parse CLI flags, then hard-code a specific
    # experiment configuration before launching training/evaluation.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default="train")
    parser.add_argument('--label_other', default=False, type=str2bool)
    parser.add_argument('--use_mem', default=True, type=str2bool)
    parser.add_argument('--cell_type', default="lstm")
    parser.add_argument('--task', default="copy")
    parser.add_argument('--data_dir', default="./data/cornell20_20000_10/trim_20qa_single.pkl")
    parser.add_argument('--from_checkpoint', default="")
    parser.add_argument('--hidden_dim', default=128, type=int)
    parser.add_argument('--attend', default=0, type=int)
    parser.add_argument('--cache_attend_dim', default=0, type=int)
    parser.add_argument('--mem_size', default=5, type=int)
    parser.add_argument('--word_size', default=64, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--read_heads', default=1, type=int)
    parser.add_argument('--read_heads_decode', default=1, type=int)
    parser.add_argument('--iread', default=False, type=str2bool)
    parser.add_argument('--batch_norm', default=False, type=str2bool)
    parser.add_argument('--number_range', default=100, type=int)
    parser.add_argument('--length_from', default=100, type=int)
    parser.add_argument('--length_to', default=100, type=int)
    parser.add_argument('--memo_type', default="", type=str)
    parser.add_argument('--hold_mem_mode', default=0, type=int)
    parser.add_argument('--max_remmember', default=2, type=int)
    parser.add_argument('--nlayer', default=1, type=int)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--lr_decay_step', default=20000, type=float)
    parser.add_argument('--lr_decay_rate', default=0.9, type=float)
    parser.add_argument('--iterations', default=10000, type=int)
    parser.add_argument('--valid_time', default=100, type=int)
    parser.add_argument('--gpu_ratio', default=0.4, type=float)
    parser.add_argument('--cpu_num', default=10, type=int)
    parser.add_argument('--gpu_device', default="1,2,3", type=str)
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_device
    # NOTE(review): the assignments below override whatever was passed on
    # the command line for these flags — remove them to honor CLI values.
    args.task="sort100"
    args.cell_type="lstm"
    args.hold_mem_mode = 2
    args.cache_attend_dim=32
    args.read_heads_decode = 2
    # args.hidden_dim = 48
    args.memo_type = "uniform"
    # args.use_mem = False
    args.hidden_dim = 120
    # args.attend = 64
    # args.mode='test'
    # args.from_checkpoint = 'default'
    #args.mem_size=64
    #args.hidden_dim=256
    #args.max_len=10000
    #args.top_word=300000
    #args.prob_thresehold=0.5
    #args.batch_size=1
    #args.min_count=0
    #args.valid_time=10000
    print(args)
    copy_long_task(args)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Related Flags**
:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
Platform (default: root).
:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
Platform.
:target_host: the iSCSI Target Host IP address, i.e. the IP
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import contextlib
import cPickle as pickle
import urlparse
import xmlrpclib
from eventlet import queue
from eventlet import timeout
from oslo.config import cfg
from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
# Configuration options consumed by XenAPIDriver / XenAPISession; registered
# on the global CONF object below.
xenapi_opts = [
    cfg.StrOpt('xenapi_connection_url',
               default=None,
               help='URL for connection to XenServer/Xen Cloud Platform. '
                    'Required if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_username',
               default='root',
               help='Username for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_connection_password',
               default=None,
               help='Password for connection to XenServer/Xen Cloud Platform. '
                    'Used only if compute_driver=xenapi.XenAPIDriver',
               secret=True),
    cfg.IntOpt('xenapi_connection_concurrent',
               default=5,
               help='Maximum number of concurrent XenAPI connections. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.FloatOpt('xenapi_vhd_coalesce_poll_interval',
                 default=5.0,
                 help='The interval used for polling of coalescing vhds. '
                      'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.BoolOpt('xenapi_check_host',
                default=True,
                help='Ensure compute service is running on host XenAPI '
                     'connects to.'),
    cfg.IntOpt('xenapi_vhd_coalesce_max_attempts',
               default=5,
               help='Max number of times to poll for VHD to coalesce. '
                    'Used only if compute_driver=xenapi.XenAPIDriver'),
    cfg.StrOpt('xenapi_sr_base_path',
               default='/var/run/sr-mount',
               help='Base path to the storage repository'),
    # iSCSI volume attachment settings.
    cfg.StrOpt('target_host',
               default=None,
               help='iSCSI Target Host'),
    cfg.StrOpt('target_port',
               default='3260',
               help='iSCSI Target Port, 3260 Default'),
    cfg.StrOpt('iqn_prefix',
               default='iqn.2010-10.org.openstack',
               help='IQN Prefix'),
    # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick,
    # when we pull support for it, we should remove this
    cfg.BoolOpt('xenapi_remap_vbd_dev',
                default=False,
                help='Used to enable the remapping of VBD dev '
                     '(Works around an issue in Ubuntu Maverick)'),
    cfg.StrOpt('xenapi_remap_vbd_dev_prefix',
               default='sd',
               help='Specify prefix to remap VBD dev to '
                    '(ex. /dev/xvdb -> /dev/sdb)'),
    cfg.IntOpt('xenapi_login_timeout',
               default=10,
               help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
if not self._host_state:
self._host_state = host.HostState(self._session)
return self._host_state
    def init_host(self, host):
        """Host bring-up hook: optionally verify this service runs on the
        hypervisor it connects to, then best-effort clean up leftover
        attached VDIs."""
        if CONF.xenapi_check_host:
            vm_utils.ensure_correct_host(self._session)
        try:
            vm_utils.cleanup_attached_vdis(self._session)
        except Exception:
            # Cleanup is best-effort; log and continue starting up.
            LOG.exception(_('Failure while cleaning up attached VDIs'))
    def list_instances(self):
        """List VM instances (delegated to VMOps)."""
        return self._vmops.list_instances()
    def list_instance_uuids(self):
        """Get the list of nova instance uuids for VMs found on the
        hypervisor.
        """
        # Thin pass-through to VMOps.
        return self._vmops.list_instance_uuids()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance."""
        # All arguments are forwarded unchanged to VMOps.
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM (delegated to VMOps)."""
        # TODO(Vek): Need to pass context in for access to auth_token
        self._vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, instance, network_info,
                                block_device_info=None):
        """Finish reverting a resize, powering back on the instance."""
        # NOTE(vish): Xen currently does not use network info.
        self._vmops.finish_revert_migration(instance, block_device_info)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance=False,
                         block_device_info=None):
        """Completes a resize, turning on the migrated instance."""
        # All arguments are forwarded unchanged to VMOps.
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance (delegated to VMOps)."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        # context, network_info and block_device_info are accepted for the
        # driver interface but are not used by the XenAPI reboot path.
        self._vmops.reboot(instance, reboot_type,
                           bad_volumes_callback=bad_volumes_callback)
    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance (delegated to
        VMOps)."""
        self._vmops.set_admin_password(instance, new_pass)
    def inject_file(self, instance, b64_path, b64_contents):
        """Create a file on the VM instance. The file path and contents
        should be base64-encoded.
        """
        # Thin pass-through to VMOps.
        self._vmops.inject_file(instance, b64_path, b64_contents)
    def change_instance_metadata(self, context, instance, diff):
        """Apply a diff to the instance metadata."""
        # context is accepted for the driver interface but not used here.
        self._vmops.change_instance_metadata(instance, diff)
    def destroy(self, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy VM instance (delegated to VMOps)."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def pause(self, instance):
        """Pause VM instance (delegated to VMOps)."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance (delegated to VMOps)."""
        self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
# NOTE(vish): Xen currently does not use network info.
rv = self._vmops.migrate_disk_and_power_off(context, instance,
dest, instance_type)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
name_label = self._vmops._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info,
name_label, mount_device)
return rv
    def suspend(self, instance):
        """suspend the specified instance (delegated to VMOps)."""
        self._vmops.suspend(instance)
    def resume(self, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        # network_info / block_device_info are not used by the XenAPI path.
        self._vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance (delegated to VMOps)."""
        self._vmops.rescue(context, instance, network_info, image_meta,
                           rescue_password)
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        # network_info is accepted for the driver interface but unused here.
        self._vmops.unrescue(instance)
    def power_off(self, instance):
        """Power off the specified instance (delegated to VMOps)."""
        self._vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        # Only the instance is needed by the XenAPI implementation.
        self._vmops.power_on(instance)
    def soft_delete(self, instance):
        """Soft delete the specified instance (delegated to VMOps)."""
        self._vmops.soft_delete(instance)
    def restore(self, instance):
        """Restore the specified instance (delegated to VMOps)."""
        self._vmops.restore(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances (delegated to VMOps)."""
        self._vmops.poll_rebooting_instances(timeout, instances)
    def reset_network(self, instance):
        """reset networking for specified instance (delegated to VMOps)."""
        self._vmops.reset_network(instance)
    def inject_network_info(self, instance, network_info):
        """inject network info for specified instance (delegated to VMOps)."""
        self._vmops.inject_network_info(instance, network_info)
    def plug_vifs(self, instance_ref, network_info):
        """Plug VIFs into networks (delegated to VMOps)."""
        self._vmops.plug_vifs(instance_ref, network_info)
    def unplug_vifs(self, instance_ref, network_info):
        """Unplug VIFs from networks (delegated to VMOps)."""
        self._vmops.unplug_vifs(instance_ref, network_info)
    def get_info(self, instance):
        """Return data about VM instance (delegated to VMOps)."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics (delegated to VMOps)."""
        return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
# e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.iteritems():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
    def get_console_output(self, instance):
        """Return snapshot of console (delegated to VMOps)."""
        return self._vmops.get_console_output(instance)
    def get_vnc_console(self, instance):
        """Return link to instance's VNC console (delegated to VMOps)."""
        return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
"""Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
self._initiator = stats['host_other-config']['iscsi_iqn']
self._hypervisor_hostname = stats['host_hostname']
except (TypeError, KeyError) as err:
LOG.warn(_('Could not determine key: %s') % err,
instance=instance)
self._initiator = None
return {
'ip': self.get_host_ip_addr(),
'initiator': self._initiator,
'host': self._hypervisor_hostname
}
@staticmethod
def get_host_ip_addr():
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
    def attach_volume(self, connection_info, instance, mountpoint):
        """Attach volume storage to VM instance (delegated to VolumeOps)."""
        return self._volumeops.attach_volume(connection_info,
                                             instance['name'],
                                             mountpoint)
    def detach_volume(self, connection_info, instance, mountpoint):
        """Detach volume storage from VM instance (delegated to VolumeOps)."""
        return self._volumeops.detach_volume(connection_info,
                                             instance['name'],
                                             mountpoint)
def get_console_pool_info(self, console_type):
xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
'username': CONF.xenapi_connection_username,
'password': CONF.xenapi_connection_password}
def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:param nodename: ignored in this driver
:returns: dictionary describing resources
"""
host_stats = self.get_host_stats(refresh=True)
# Updating host information
total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
# NOTE(belliott) memory-free-computed is a value provided by XenServer
# for gauging free memory more conservatively than memory-free.
free_ram_mb = host_stats['host_memory_free_computed'] / (1024 * 1024)
total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
dic = {'vcpus': 0,
'memory_mb': total_ram_mb,
'local_gb': total_disk_gb,
'vcpus_used': 0,
'memory_mb_used': total_ram_mb - free_ram_mb,
'local_gb_used': used_disk_gb,
'hypervisor_type': 'xen',
'hypervisor_version': 0,
'hypervisor_hostname': host_stats['host_hostname'],
'cpu_info': host_stats['host_cpu_info']['cpu_count']}
return dic
    def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
        """Intentional no-op for XenAPI."""
        # NOTE(salvatore-orlando): it enforces security groups on
        # host initialization and live migration.
        # In XenAPI we do not assume instances running upon host initialization
        return
    def check_can_live_migrate_destination(self, ctxt, instance_ref,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False, disk_over_commit=False):
        """Check if it is possible to execute live migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        """
        # src_compute_info / dst_compute_info are not forwarded; the XenAPI
        # check only needs the instance and migration flags.
        return self._vmops.check_can_live_migrate_destination(ctxt,
                                                              instance_ref,
                                                              block_migration,
                                                              disk_over_commit)
    def check_can_live_migrate_destination_cleanup(self, ctxt,
                                                   dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls

        :param ctxt: security context
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        # Nothing to clean up for XenAPI.
        pass
    def check_can_live_migrate_source(self, ctxt, instance_ref,
                                      dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param ctxt: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
            includes the block_migration flag
        """
        return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
                                                         dest_check_data)
    def get_instance_disk_info(self, instance_name):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us.  Intentionally returns nothing."""
        pass
    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us. May be used in the future to
        populate the vdi/vif maps.  Intentionally a no-op here."""
        pass
    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Performs the live migration of the specified instance.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, migrate VM disk.
        :params migrate_data: implementation specific params
        """
        # All of the actual work happens in the vmops layer.
        self._vmops.live_migrate(ctxt, instance_ref, dest, post_method,
                                 recover_method, block_migration, migrate_data)
    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info, migrate_data=None):
        """Preparation live migration.  Currently a no-op for XenAPI.

        :params block_device_info:
            It must be the result of _get_instance_volume_bdms()
            at compute manager.
        """
        # TODO(JohnGarbutt) look again when boot-from-volume hits trunk
        pass
    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration,
                                           block_device_info=None):
        """Post operation of live migration at destination host.
        Currently a no-op for XenAPI.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params network_info: instance network information
        :params block_migration: if true, post operation of block_migration.
        """
        # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
        pass
    def unfilter_instance(self, instance_ref, network_info):
        """Removes security groups configured for an instance.
        Delegates to the vmops firewall layer."""
        return self._vmops.unfilter_instance(instance_ref, network_info)
    def refresh_security_group_rules(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.

        Invoked when security group rules are updated."""
        return self._vmops.refresh_security_group_rules(security_group_id)
    def refresh_security_group_members(self, security_group_id):
        """Updates security group rules for all instances associated with a
        given security group.

        Invoked when instances are added/removed to a security group."""
        return self._vmops.refresh_security_group_members(security_group_id)
    def refresh_instance_security_rules(self, instance):
        """Updates security group rules for specified instance.

        Invoked when instances are added/removed to a security group
        or when a rule is added/removed to a security group."""
        return self._vmops.refresh_instance_security_rules(instance)
    def refresh_provider_fw_rules(self):
        """Re-applies provider-level firewall rules via the vmops layer."""
        return self._vmops.refresh_provider_fw_rules()
    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
        True, run the update first."""
        # Delegates to the cached HostState object.
        return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""The only valid values for 'action' on XenServer are 'reboot' or
'shutdown', even though the API also accepts 'startup'. As this is
not technically possible on XenServer, since the host is the same
physical machine as the hypervisor, if this is requested, we need to
raise an exception.
"""
if action in ("reboot", "shutdown"):
return self._host.host_power_action(host, action)
else:
msg = _("Host startup on XenServer is not supported.")
raise NotImplementedError(msg)
    def set_host_enabled(self, host, enabled):
        """Sets the specified host's ability to accept new instances."""
        return self._host.set_host_enabled(host, enabled)
    def get_host_uptime(self, host):
        """Returns the result of calling "uptime" on the target host."""
        return self._host.get_host_uptime(host)
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        return self._host.host_maintenance_mode(host, mode)
    def add_to_aggregate(self, context, aggregate, host, **kwargs):
        """Add a compute host to an aggregate (delegated to pool ops)."""
        return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
    def remove_from_aggregate(self, context, aggregate, host, **kwargs):
        """Remove a compute host from an aggregate (delegated to pool ops)."""
        return self._pool.remove_from_aggregate(context,
                                                aggregate, host, **kwargs)
    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error=True):
        """Undo aggregate operation when pool error raised."""
        return self._pool.undo_aggregate_operation(context, op,
                                                   aggregate, host, set_error)
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
"""
# TODO(tr3buchet): remove this function once all virts return false
return False
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted.

        Simply powers the instance back on; network_info and
        block_device_info are accepted for interface compatibility.
        """
        self._vmops.power_on(instance)
    def get_per_instance_usage(self):
        """Get information about instance resource usage.

        :returns: dict of nova uuid => dict of usage info
        """
        return self._vmops.get_per_instance_usage()
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
def __init__(self, url, user, pw, virtapi):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._virtapi = virtapi
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure, e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in xrange(CONF.xenapi_connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = self._virtapi.aggregate_get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
product_brand = software_version.get('product_brand')
if None in (product_version_str, product_brand):
return (None, None)
product_version = tuple(int(part) for part in
product_version_str.split('.'))
return product_version, product_brand
def _get_software_version(self):
host = self.get_xenapi_host()
return self.call_xenapi('host.get_software_version', host)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def get_xenapi_host(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(johannes): Fetch host before we acquire a session. Since
# get_xenapi_host() acquires a session too, it can result in a
# deadlock if multiple greenthreads race with each other. See
# bug 924918
host = self.get_xenapi_host()
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
host, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
LOG.debug(_("Got exception: %s"), exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
# FIXME(comstud): eval is evil.
params = eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError, exc:
LOG.debug(_("Got exception: %s"), exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure, e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
for ref in self.call_xenapi('%s.get_all' % record_type):
rec = self.get_rec(record_type, ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_record call
if rec:
yield ref, rec
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import platform
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
# Fail fast on unsupported interpreters before any packaging work begins.
if sys.version_info < (3, 6):
    print("Python versions prior to 3.6 are not supported for PyFlink.",
          file=sys.stderr)
    sys.exit(-1)
def remove_if_exists(file_path):
    """Delete *file_path* if it exists.

    Regular files and symlinks are removed with os.remove; anything else
    present must be a directory and is removed recursively.
    """
    if not os.path.exists(file_path):
        return
    if os.path.islink(file_path) or os.path.isfile(file_path):
        os.remove(file_path)
        return
    assert os.path.isdir(file_path)
    rmtree(file_path)
def copy_files(src_paths, output_directory):
    """Copy each (path, mode) pair in *src_paths* into *output_directory*.

    A directory entry has its immediate children copied (one level deep,
    like the original assembly behaviour); a file entry is copied under its
    own basename.  Every copied file gets the given permission mode.
    """
    for src_path, file_mode in src_paths:
        if not os.path.isdir(src_path):
            target = os.path.join(output_directory, os.path.basename(src_path))
            os.chmod(copy(src_path, target), file_mode)
            continue
        for child_file in os.listdir(src_path):
            copied = copy(os.path.join(src_path, child_file), output_directory)
            os.chmod(copied, file_mode)
def has_unsupported_tag(file_element):
    """Return True if *file_element* contains an assembly tag we cannot handle.

    :param file_element: a minidom element for a <file> or <fileSet> entry
    :returns: True (after printing a diagnostic) when an <includes> or
        <excludes> child is present, False otherwise
    """
    # BUG FIX: 'exclueds' was a typo, so <excludes> tags were never detected.
    unsupported_tags = ['includes', 'excludes']
    for unsupported_tag in unsupported_tags:
        if file_element.getElementsByTagName(unsupported_tag):
            print('Unsupported <{0}></{1}> tag'.format(unsupported_tag, unsupported_tag))
            return True
    return False
def _paths_from_elements(base_dir, file_elements, source_tag, output_directory):
    """Collect (absolute source path, file mode) pairs from assembly entries.

    :param file_elements: <file> or <fileSet> minidom elements
    :param source_tag: child tag holding the path ('source' or 'directory')
    :param output_directory: only entries whose <outputDirectory> equals
        this value are kept; entries without one are skipped
    """
    extracted = []
    for file_element in file_elements:
        source = ((file_element.getElementsByTagName(source_tag)[0]).childNodes[0]).data
        file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
        try:
            dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
            if dst == output_directory:
                if has_unsupported_tag(file_element):
                    sys.exit(-1)
                extracted.append((os.path.join(base_dir, source), file_mode))
        except IndexError:
            # Entries with no <outputDirectory> are not copied anywhere.
            pass
    return extracted


def extracted_output_files(base_dir, file_path, output_directory):
    """Parse the Maven assembly descriptor at *file_path* and return the
    (source path, file mode) pairs destined for *output_directory*.

    Both <files><file> (path in <source>) and <fileSets><fileSet> (path in
    <directory>) entries are considered; the previously duplicated parsing
    loops are now shared via _paths_from_elements().
    """
    from xml.dom.minidom import parse
    dom = parse(file_path)
    root_data = dom.documentElement
    extracted_file_paths = []
    # extracted <files><file></file></files>
    file_elements = (root_data.getElementsByTagName("files")[0]).getElementsByTagName("file")
    extracted_file_paths.extend(
        _paths_from_elements(base_dir, file_elements, 'source', output_directory))
    # extracted <fileSets><fileSet></fileSet></fileSets>
    file_elements = (root_data.getElementsByTagName("fileSets")[0]).getElementsByTagName("fileSet")
    extracted_file_paths.extend(
        _paths_from_elements(base_dir, file_elements, 'directory', output_directory))
    return extracted_file_paths
# The Cython-accelerated modules and their include directories.  One table
# drives both builds: the .pyx sources when Cython is available, and the
# pre-generated .c fallback otherwise (previously two copy-pasted lists).
_FAST_MODULES = [
    ("pyflink.fn_execution.coder_impl_fast", "pyflink/fn_execution/"),
    ("pyflink.fn_execution.table.aggregate_fast", "pyflink/fn_execution/table/"),
    ("pyflink.fn_execution.table.window_aggregate_fast", "pyflink/fn_execution/table/"),
    ("pyflink.fn_execution.stream_fast", "pyflink/fn_execution/"),
    ("pyflink.fn_execution.beam.beam_stream_fast", "pyflink/fn_execution/beam"),
    ("pyflink.fn_execution.beam.beam_coder_impl_fast", "pyflink/fn_execution/beam"),
    ("pyflink.fn_execution.beam.beam_operations_fast", "pyflink/fn_execution/beam"),
]


def _fast_extensions(suffix):
    """Build the Extension list for the fast modules with the given source
    suffix ('.pyx' or '.c').  The source path mirrors the module path."""
    return [
        Extension(
            name=module,
            sources=[module.replace(".", "/") + suffix],
            include_dirs=[include_dir])
        for module, include_dir in _FAST_MODULES]


# Currently Cython optimizing doesn't support Windows.
if platform.system() == 'Windows':
    extensions = ([])
else:
    try:
        from Cython.Build import cythonize
        extensions = cythonize(_fast_extensions(".pyx"))
    except ImportError:
        # Without Cython, fall back to the checked-in generated C sources,
        # if present; otherwise build no extensions at all.
        if os.path.exists("pyflink/fn_execution/coder_impl_fast.c"):
            extensions = _fast_extensions(".c")
        else:
            extensions = ([])
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')

# Execute version.py to pick up __version__ without importing the package.
try:
    exec(open(version_file).read())
except IOError:
    print("Failed to load PyFlink version file for packaging. " +
          "'%s' not found!" % version_file,
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # noqa

# README.md becomes the long_description shown on PyPI.
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
    long_description = f.read()
# Staging area: flink-dist artifacts are symlinked/copied under TEMP_PATH
# so setuptools can package them, then cleaned up in the final `finally`.
TEMP_PATH = "deps"
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")

PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"

# Heuristic: this file exists only when building from a Flink source tree.
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
                                 "ExecutionEnvironment.java")
# Stage the flink-dist files, run setup(), then always clean the staging
# area (the `finally` below) so repeated builds start fresh.
try:
    if in_flink_source:
        try:
            os.mkdir(TEMP_PATH)
        except OSError:
            # BUG FIX: narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; os.mkdir failures raise OSError.
            print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
                  file=sys.stderr)
            sys.exit(-1)
        flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
        FLINK_HOME = os.path.abspath(
            "../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
        FLINK_ROOT = os.path.abspath("..")
        FLINK_DIST = os.path.join(FLINK_ROOT, "flink-dist")
        FLINK_BIN = os.path.join(FLINK_DIST, "src/main/flink-bin")
        EXAMPLES_PATH = os.path.join(this_directory, "pyflink/examples")
        LICENSE_FILE_PATH = os.path.join(FLINK_ROOT, "LICENSE")
        README_FILE_PATH = os.path.join(FLINK_BIN, "README.txt")
        FLINK_BIN_XML_FILE = os.path.join(FLINK_BIN, '../assemblies/bin.xml')

        # copy conf files
        os.mkdir(CONF_TEMP_PATH)
        conf_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'conf')
        copy_files(conf_paths, CONF_TEMP_PATH)

        # copy bin files
        os.mkdir(SCRIPTS_TEMP_PATH)
        script_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'bin')
        copy_files(script_paths, SCRIPTS_TEMP_PATH)
        copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_SH),
             os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
        copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_BAT),
             os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))

        try:
            # Prefer symlinks (cheap, no duplication); fall back to real
            # copies on platforms/filesystems without symlink support.
            os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
            os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
            os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
        except BaseException:  # pylint: disable=broad-except
            copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
            copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
            copy(README_FILE_PATH, README_FILE_TEMP_PATH)
        os.mkdir(LOG_TEMP_PATH)
        with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
            f.write("This file is used to force setuptools to include the log directory. "
                    "You can delete it at any time after installation.")
    else:
        if not os.path.isdir(SCRIPTS_TEMP_PATH):
            print("The flink core files are not found. Please make sure your installation package "
                  "is complete, or do this in the flink-python directory of the flink source "
                  "directory.")
            sys.exit(-1)

    # Snapshot builds pin the libraries version exactly; releases accept a
    # range up to (but excluding) the next patch version.
    if VERSION.find('dev0') != -1:
        apache_flink_libraries_dependency = 'apache-flink-libraries==%s' % VERSION
    else:
        split_versions = VERSION.split('.')
        split_versions[-1] = str(int(split_versions[-1]) + 1)
        NEXT_VERSION = '.'.join(split_versions)
        apache_flink_libraries_dependency = 'apache-flink-libraries>=%s,<%s' % \
            (VERSION, NEXT_VERSION)

    script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
    scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
    scripts.append("pyflink/find_flink_home.py")

    PACKAGES = ['pyflink',
                'pyflink.table',
                'pyflink.util',
                'pyflink.datastream',
                'pyflink.common',
                'pyflink.fn_execution',
                'pyflink.fn_execution.beam',
                'pyflink.fn_execution.datastream',
                'pyflink.fn_execution.datastream.window',
                'pyflink.fn_execution.table',
                'pyflink.fn_execution.utils',
                'pyflink.metrics',
                'pyflink.conf',
                'pyflink.log',
                'pyflink.examples',
                'pyflink.bin']

    PACKAGE_DIR = {
        'pyflink.conf': TEMP_PATH + '/conf',
        'pyflink.log': TEMP_PATH + '/log',
        'pyflink.examples': TEMP_PATH + '/examples',
        'pyflink.bin': TEMP_PATH + '/bin'}

    PACKAGE_DATA = {
        'pyflink': ['README.txt'],
        'pyflink.conf': ['*'],
        'pyflink.log': ['*'],
        'pyflink.examples': ['*.py', '*/*.py'],
        'pyflink.bin': ['*']}

    setup(
        name='apache-flink',
        version=VERSION,
        packages=PACKAGES,
        include_package_data=True,
        package_dir=PACKAGE_DIR,
        package_data=PACKAGE_DATA,
        scripts=scripts,
        url='https://flink.apache.org',
        license='https://www.apache.org/licenses/LICENSE-2.0',
        author='Apache Software Foundation',
        author_email='dev@flink.apache.org',
        python_requires='>=3.6',
        install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.27.0',
                          'cloudpickle==1.2.2', 'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
                          'pandas>=1.0,<1.2.0', 'pyarrow>=0.15.1,<3.0.0',
                          'pytz>=2018.3', 'numpy>=1.14.3,<1.20', 'fastavro>=0.21.4,<0.24',
                          'requests>=2.26.0', 'protobuf<3.18',
                          apache_flink_libraries_dependency],
        cmdclass={'build_ext': build_ext},
        tests_require=['pytest==4.4.1'],
        description='Apache Flink Python API',
        long_description=long_description,
        long_description_content_type='text/markdown',
        zip_safe=False,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8'],
        ext_modules=extensions
    )
finally:
    if in_flink_source:
        remove_if_exists(TEMP_PATH)
        remove_if_exists(LICENSE_FILE_TEMP_PATH)
        remove_if_exists(README_FILE_TEMP_PATH)
| |
import string
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
import seaborn as sns
from sklearn.decomposition import PCA
class TestSmushPlotterBase(object):
    # Shared fixture data for every test in this class.
    nrow = 10
    ncol = 20

    # Overdispersed random count data, reshaped into a samples x features matrix.
    vector = np.random.negative_binomial(n=1000, p=0.2, size=nrow * ncol)
    matrix = vector.reshape(nrow, ncol)
    data = pd.DataFrame(matrix, index=list(string.ascii_lowercase[:nrow]),
                        columns=list(string.ascii_uppercase[:ncol]))
    # NOTE(review): the next line immediately rebinds ``data``, discarding
    # the letter-labelled frame above (dead assignment) -- confirm intent.
    data = pd.DataFrame(matrix.copy())
    data.index.name = 'Samples'
    data.columns.name = 'Features'

    # Grouping fixtures used by the symbol/color tests below.
    half = int(nrow / 2.)
    groupby = pd.Series((['B'] * half) + (['A'] * half))
    order = ['A', 'B']
    palette = 'PRGn'
    color = 'DarkTeal'

    # Default keyword arguments for establish_symbols()/establish_colors().
    symbol_kws = dict(marker='o', marker_order=None, text=False,
                      text_order=None, linewidth=1, linewidth_order=None,
                      edgecolor='k', edgecolor_order=None)
    color_kws = dict(color=None, palette=None, hue=None, hue_order=None,
                     saturation=None)
    def test_establish_reducer_make_new(self):
        # Given a reducer *class*, establish_reducer should instantiate it
        # with the provided n_components and keyword arguments.
        from cupcake.smush.base import SmushPlotterBase

        pca_kws = {}
        n_components = 2
        reducer = PCA(n_components=n_components, **pca_kws)

        p = SmushPlotterBase()
        p.establish_reducer(PCA, n_components, {})
        assert isinstance(p.reducer, type(reducer))
        pdt.assert_dict_equal(p.reducer.get_params(), reducer.get_params())
    def test_establish_reducer_use_existing(self):
        # Given an already-constructed reducer instance, establish_reducer
        # should adopt it as-is (same type and parameters).
        from cupcake.smush.base import SmushPlotterBase

        pca_kws = {}
        n_components = 2
        reducer = PCA(n_components=n_components, **pca_kws)

        p = SmushPlotterBase()
        p.establish_reducer(reducer)
        assert isinstance(p.reducer, type(reducer))
        pdt.assert_dict_equal(p.reducer.get_params(), reducer.get_params())
# --- Test figuring out what to reduce --- #
    def test_establish_variables_matrix(self):
        # A bare numpy matrix should be wrapped in a DataFrame with no
        # axis labels.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.matrix)
        assert isinstance(p.high_dimensional_data, pd.DataFrame)
        pdt.assert_frame_equal(p.high_dimensional_data,
                               pd.DataFrame(self.matrix))
        assert p.sample_label is None
        assert p.feature_label is None
    def test_establish_variables_dataframe_named_axes(self):
        # A DataFrame with named axes should keep the axis names as
        # sample/feature labels.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        pdt.assert_frame_equal(p.high_dimensional_data, self.data)
        assert p.sample_label == 'Samples'
        assert p.feature_label == "Features"
    @pytest.mark.xfail(reason='High dimensional data provided is too small')
    def test_establish_variables_too_few_axes(self):
        # A 1 x N matrix has too few samples to reduce; expected to fail.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        matrix = self.vector.reshape(1, self.nrow * self.ncol)
        p.establish_variables(matrix)
    @pytest.mark.xfail(reason='High dimensional data provided has too many '
                              'axes')
    def test_establish_variables_too_many_axes(self):
        # A 3-dimensional array is not tabular data; expected to fail.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        matrix = self.vector.reshape((1, self.nrow, self.ncol))
        p.establish_variables(matrix)
# --- Test internal series making function --- #
    def test__maybe_make_grouper_single_groupby(self):
        # A scalar groupby value should be broadcast into a constant Series
        # aligned with the data index.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        test_grouper = p._maybe_make_grouper('o', None, str)
        true_grouper = pd.Series(['o']*self.matrix.shape[0],
                                 index=self.data.index)
        pdt.assert_series_equal(test_grouper, true_grouper)
    def test__maybe_make_grouper_multiple_groupbys(self):
        # A Series groupby plus explicit order should yield an ordered
        # Categorical Series aligned with the data index.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        half = int(self.nrow/2.)
        groupby = pd.Series((['B'] * half) + (['A'] * half))
        order = ['A', 'B']
        test_grouper = p._maybe_make_grouper(groupby, order, str)
        true_grouper = pd.Series(pd.Categorical(groupby, categories=order,
                                                ordered=True), index=self.data.index)
        pdt.assert_series_equal(test_grouper, true_grouper)
# --- Test assigning plotting markers --- #
    def test_establish_symbols_defaults(self):
        # With default kwargs every sample gets marker 'o', linewidth 1 and
        # edgecolor 'k', and text mode is off.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_symbols(**self.symbol_kws)
        pdt.assert_series_equal(p.symbol, pd.Series(['o']*self.nrow,
                                                    index=self.data.index))
        pdt.assert_series_equal(p.linewidth, pd.Series([1]*self.nrow,
                                                       index=self.data.index))
        pdt.assert_series_equal(p.edgecolor, pd.Series(['k']*self.nrow,
                                                       index=self.data.index))
        assert not p.text
    def test_establish_symbols_text_true(self):
        # text=True should label each sample with its positional index
        # rendered as a string.
        from cupcake.smush.base import SmushPlotterBase

        symbol_kws = self.symbol_kws.copy()
        symbol_kws['text'] = True
        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_symbols(**symbol_kws)
        pdt.assert_series_equal(p.symbol,
                                pd.Series(map(str, list(range(self.nrow))),
                                          index=self.data.index))
        assert p.text
    def test_establish_symbols_text_series(self):
        # Passing a Series as text should produce an ordered Categorical
        # using seaborn's inferred category order.
        from cupcake.smush.base import SmushPlotterBase

        symbol_kws = self.symbol_kws.copy()
        symbol_kws['text'] = self.groupby
        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_symbols(**symbol_kws)
        order = sns.utils.categorical_order(self.groupby)
        symbol = pd.Series(pd.Categorical(self.groupby, categories=order,
                                          ordered=True),
                           index=self.data.index)
        pdt.assert_series_equal(p.symbol, symbol)
        assert p.text
    def test_establish_symbols_text_series_ordered(self):
        # An explicit text_order should override the inferred category order.
        from cupcake.smush.base import SmushPlotterBase

        symbol_kws = self.symbol_kws.copy()
        symbol_kws['text'] = self.groupby
        symbol_kws['text_order'] = self.order
        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_symbols(**symbol_kws)
        symbol = pd.Series(pd.Categorical(self.groupby, ordered=True,
                                          categories=self.order), index=self.data.index)
        pdt.assert_series_equal(p.symbol, symbol)
        assert p.text
    def test_establish_symbols_text_series_not_str(self):
        # Non-string text labels are expected to be coerced to strings
        # before being turned into an ordered Categorical.
        from cupcake.smush.base import SmushPlotterBase

        half = int(self.nrow/2.)
        text = pd.Series(([1] * half) + ([2] * half)).map(str)
        symbol_kws = self.symbol_kws.copy()
        symbol_kws['text'] = text
        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_symbols(**symbol_kws)
        symbol = pd.Series(pd.Categorical(text, ordered=True,
                                          categories=['1', '2']), index=self.data.index)
        pdt.assert_series_equal(p.symbol, symbol)
        assert p.text
# --- Test assigning colors --- #
    def test_establish_colors_all_none(self):
        # Option 1. All parameters are set to default values
        # Everything defaulted -> a single color group covering all samples.
        from cupcake.smush.base import SmushPlotterBase

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**self.color_kws)

        assert p.n_colors == 1
        assert len(p.high_dimensional_data.groupby(p.color)) == p.n_colors
    @pytest.mark.xfail
    def test_establish_colors_hue_order(self):
        # Option 2. hue_order is specified but nothing else is
        # (expected to fail: hue_order without hue is inconsistent input)
        from cupcake.smush.base import SmushPlotterBase

        color_kws = self.color_kws.copy()
        color_kws['hue_order'] = self.order

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)
    def test_establish_colors_hue(self):
        # Option 3. "hue" is specified but nothing else is
        # Two hue levels ('A'/'B') should yield two color groups.
        from cupcake.smush.base import SmushPlotterBase

        color_kws = self.color_kws.copy()
        color_kws['hue'] = self.groupby

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)

        assert p.n_colors == 2
        assert len(p.high_dimensional_data.groupby(p.color)) == p.n_colors
    def establish_colors_hue_hue_order(self):
        # Option 4. Both "hue" and "hue_order" are specified
        # NOTE(review): missing the ``test_`` prefix, so pytest will not
        # collect this method -- confirm whether it is intentionally disabled.
        from cupcake.smush.base import SmushPlotterBase

        color_kws = self.color_kws.copy()
        color_kws['hue'] = self.groupby
        color_kws['hue_order'] = self.order

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)

        test_grouped = p.high_dimensional_data.groupby(p.color)
        assert p.n_colors == 2
        assert len(test_grouped) == p.n_colors
    def establish_colors_palette(self):
        # Option 5. "palette" is specified but nothing else is
        # NOTE(review): missing the ``test_`` prefix, so pytest will not
        # collect this method -- confirm whether it is intentionally disabled.
        from cupcake.smush.base import SmushPlotterBase

        color_kws = self.color_kws.copy()
        color_kws['palette'] = self.palette

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)

        assert p.n_colors == self.nrow
        assert len(p.high_dimensional_data.groupby(p.color)) == p.n_colors
    def establish_colors_palette_hue_order(self):
        # Option 6a. "palette" and "hue_order" are specified
        # NOTE(review): missing the ``test_`` prefix, so pytest will not
        # collect this method -- confirm whether it is intentionally disabled.
        from cupcake.smush.base import SmushPlotterBase

        # Reverse the index order
        hue_order = self.data.index[::-1]

        color_kws = self.color_kws.copy()
        color_kws['palette'] = self.palette
        color_kws['hue_order'] = hue_order

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)

        assert p.n_colors == self.nrow
        assert len(p.high_dimensional_data.groupby(p.color)) == p.n_colors
    @pytest.mark.xfail
    def establish_colors_palette_hue_order_incorrect_length(self):
        # Option 6b. "palette" and "hue_order" are specified, but hue_order
        # isn't correct length
        # NOTE(review): missing the ``test_`` prefix, so pytest will not
        # collect this method (the xfail mark is inert) -- confirm intent.
        from cupcake.smush.base import SmushPlotterBase

        # Reverse the index order
        hue_order = self.data.index[::-1]
        hue_order = hue_order[:self.half]

        color_kws = self.color_kws.copy()
        color_kws['palette'] = self.palette
        color_kws['hue_order'] = hue_order

        p = SmushPlotterBase()
        p.establish_variables(self.data)
        p.establish_colors(**color_kws)
def establish_colors_palette_hue(self):
    """Option 7: "palette" and "hue" are specified."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, palette=self.palette, hue=self.groupby)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == 2
    assert len(grouped) == plotter.n_colors
def establish_colors_palette_hue_hue_order(self):
    """Option 8: "palette", "hue", and "hue_order" are all specified."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, palette=self.palette, hue=self.groupby,
               hue_order=self.order)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == 2
    assert len(grouped) == plotter.n_colors
def establish_colors_color(self):
    """Option 9: only "color" is given, so a single color is used."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, color=self.color)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == 1
    assert len(grouped) == plotter.n_colors
def establish_colors_color_hue_order(self):
    """Option 10: "color" together with an explicit "hue_order"."""
    from cupcake.smush.base import SmushPlotterBase
    # Reverse the index so hue_order differs from the original order
    reversed_order = self.data.index[::-1]
    kws = dict(self.color_kws, color=self.color, hue_order=reversed_order)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == self.nrow
    assert len(grouped) == plotter.n_colors
@pytest.mark.xfail
def establish_colors_color_hue_order_incorrect_length(self):
    """Option 10 (failure): "hue_order" has the wrong length."""
    from cupcake.smush.base import SmushPlotterBase
    # Reverse the index, then truncate it to an invalid length
    truncated_order = self.data.index[::-1][:self.half]
    kws = dict(self.color_kws, color=self.color, hue_order=truncated_order)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
def establish_colors_color_hue(self):
    """Option 11: "color" and "hue" are specified."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, color=self.color, hue=self.groupby)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == 2
    assert len(grouped) == plotter.n_colors
def establish_colors_color_hue_hue_order(self):
    """Option 12: "color", "hue", and "hue_order" are all specified."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, color=self.color, hue=self.groupby,
               hue_order=self.order)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
    grouped = plotter.high_dimensional_data.groupby(plotter.color)
    assert plotter.n_colors == 2
    assert len(grouped) == plotter.n_colors
@pytest.fixture(params=[None, 'hue'])
def hue(self, request):
    """Parameterized fixture: either no hue (None) or the groupby column."""
    return None if request.param is None else self.groupby
@pytest.fixture(params=[None, 'hue_order'])
def hue_order(self, request):
    """Parameterized fixture: either no ordering (None) or the test order."""
    return None if request.param is None else self.order
@pytest.mark.xfail
def establish_colors_color_palette(self, hue, hue_order):
    """Options 13-16: "color" plus "palette" are incompatible -- must fail."""
    from cupcake.smush.base import SmushPlotterBase
    kws = dict(self.color_kws, color=self.color, palette=self.palette,
               hue=hue, hue_order=hue_order)
    plotter = SmushPlotterBase()
    plotter.establish_variables(self.data)
    plotter.establish_colors(**kws)
| |
import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
    """
    Convert a blob proto to an array. In default, we will just return the data,
    unless return_diff is True, in which case we will return the diff.
    """
    # Pick the payload: gradients when requested, raw data otherwise.
    data = np.array(blob.diff if return_diff else blob.data)
    # Legacy blobs describe their geometry with explicit num/channels/
    # height/width fields; newer ones use the repeated shape.dim field.
    legacy_fields = ('num', 'channels', 'height', 'width')
    if any(blob.HasField(f) for f in legacy_fields):
        return data.reshape(blob.num, blob.channels, blob.height, blob.width)
    return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
    """Serialize an N-dimensional array (and optionally its diff) into a
    BlobProto.

    No sanity checking is performed: the caller must ensure that ``arr``
    and ``diff`` have the same shape.
    """
    proto = caffe_pb2.BlobProto()
    proto.shape.dim.extend(arr.shape)
    # Flatten in C order and store as repeated float fields.
    proto.data.extend(arr.astype(float).flat)
    if diff is not None:
        proto.diff.extend(diff.astype(float).flat)
    return proto
def arraylist_to_blobprotovector_str(arraylist):
    """Serialize a list of arrays into a BlobProtoVector string that can
    be passed to a network for processing.
    """
    vector = caffe_pb2.BlobProtoVector()
    vector.blobs.extend(map(array_to_blobproto, arraylist))
    return vector.SerializeToString()
def blobprotovector_str_to_arraylist(str):
    """Converts a serialized blobprotovec to a list of arrays.
    """
    # NOTE(review): the parameter name shadows the builtin ``str``; renaming
    # it (e.g. to ``data``) would be clearer but would change the keyword API.
    vec = caffe_pb2.BlobProtoVector()
    vec.ParseFromString(str)
    # One ndarray per blob, reshaped according to each blob's own shape.
    return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
    """Converts a 3-dimensional array to datum. If the array has dtype uint8,
    the output data will be encoded as a string. Otherwise, the output data
    will be stored in float format.

    arr : (channels x height x width) ndarray
    label : optional integer class label stored on the datum

    Raises ValueError if the array is not 3-dimensional.
    """
    if arr.ndim != 3:
        raise ValueError('Incorrect array shape.')
    datum = caffe_pb2.Datum()
    datum.channels, datum.height, datum.width = arr.shape
    if arr.dtype == np.uint8:
        # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0)
        # and returns the identical raw byte string.
        datum.data = arr.tobytes()
    else:
        datum.float_data.extend(arr.flat)
    if label is not None:
        datum.label = label
    return datum
def datum_to_array(datum):
    """Converts a datum to an array. Note that the label is not returned,
    as one can easily get it by calling datum.label.

    uint8-encoded data takes priority; otherwise the float_data field
    is used. The result has shape (channels, height, width).
    """
    if len(datum.data):
        # np.frombuffer replaces the deprecated np.fromstring for binary
        # input; it returns a read-only view, so copy to keep the result
        # writable like the original behavior.
        return np.frombuffer(datum.data, dtype=np.uint8).copy().reshape(
            datum.channels, datum.height, datum.width)
    else:
        return np.array(datum.float_data).astype(float).reshape(
            datum.channels, datum.height, datum.width)
## Pre-processing
class Transformer:
    """
    Transform input for feeding into a Net.

    Note: this is mostly for illustrative purposes and it is likely better
    to define your own input preprocessing routine for your needs.

    Parameters
    ----------
    inputs : dict mapping input blob name -> blob shape tuple, indexed as
        (num, channels, height, width).
    """
    def __init__(self, inputs):
        self.inputs = inputs
        # Per-input preprocessing settings, each keyed by input blob name.
        self.transpose = {}
        self.channel_swap = {}
        self.raw_scale = {}
        self.mean = {}
        self.input_scale = {}
    def __check_input(self, in_):
        # Guard used by every public method: reject unknown blob names.
        if in_ not in self.inputs:
            raise Exception('{} is not one of the net inputs: {}'.format(
                in_, self.inputs))
    def preprocess(self, in_, data):
        """
        Format input for Caffe:
        - convert to single
        - resize to input dimensions (preserving number of channels)
        - transpose dimensions to K x H x W
        - reorder channels (for instance color to BGR)
        - scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
        - subtract mean
        - scale feature

        Parameters
        ----------
        in_ : name of input blob to preprocess for
        data : (H' x W' x K) ndarray

        Returns
        -------
        caffe_in : (K x H x W) ndarray for input to a Net
        """
        self.__check_input(in_)
        caffe_in = data.astype(np.float32, copy=False)
        # Unset steps come back as None and are simply skipped below.
        transpose = self.transpose.get(in_)
        channel_swap = self.channel_swap.get(in_)
        raw_scale = self.raw_scale.get(in_)
        mean = self.mean.get(in_)
        input_scale = self.input_scale.get(in_)
        in_dims = self.inputs[in_][2:]
        # Resize while still in (H, W, K) layout, before any transpose.
        if caffe_in.shape[:2] != in_dims:
            caffe_in = resize_image(caffe_in, in_dims)
        if transpose is not None:
            caffe_in = caffe_in.transpose(transpose)
        if channel_swap is not None:
            # assumes channels are the first axis after the transpose above
            caffe_in = caffe_in[channel_swap, :, :]
        if raw_scale is not None:
            caffe_in *= raw_scale
        if mean is not None:
            caffe_in -= mean
        if input_scale is not None:
            caffe_in *= input_scale
        return caffe_in
    def deprocess(self, in_, data):
        """
        Invert Caffe formatting; see preprocess().
        """
        self.__check_input(in_)
        decaf_in = data.copy().squeeze()
        transpose = self.transpose.get(in_)
        channel_swap = self.channel_swap.get(in_)
        raw_scale = self.raw_scale.get(in_)
        mean = self.mean.get(in_)
        input_scale = self.input_scale.get(in_)
        # Undo each preprocess() step in the reverse order it was applied.
        if input_scale is not None:
            decaf_in /= input_scale
        if mean is not None:
            decaf_in += mean
        if raw_scale is not None:
            decaf_in /= raw_scale
        if channel_swap is not None:
            # argsort of a permutation yields its inverse permutation
            decaf_in = decaf_in[np.argsort(channel_swap), :, :]
        if transpose is not None:
            decaf_in = decaf_in.transpose(np.argsort(transpose))
        return decaf_in
    def set_transpose(self, in_, order):
        """
        Set the order of dimensions, e.g. to convert an H x W x K image
        into the K x H x W layout expected by Caffe blobs.

        Parameters
        ----------
        in_ : which input to assign this dimension order
        order : the order to transpose the dimensions
        """
        self.__check_input(in_)
        if len(order) != len(self.inputs[in_]) - 1:
            raise Exception('Transpose order needs to have the same number of '
                            'dimensions as the input.')
        self.transpose[in_] = order
    def set_channel_swap(self, in_, order):
        """
        Set the input channel order for e.g. RGB to BGR conversion
        as needed for the reference ImageNet model.
        N.B. this assumes the channels are the first dimension AFTER transpose.

        Parameters
        ----------
        in_ : which input to assign this channel order
        order : the order to take the channels.
            (2,1,0) maps RGB to BGR for example.
        """
        self.__check_input(in_)
        if len(order) != self.inputs[in_][1]:
            raise Exception('Channel swap needs to have the same number of '
                            'dimensions as the input channels.')
        self.channel_swap[in_] = order
    def set_raw_scale(self, in_, scale):
        """
        Set the scale of raw features s.t. the input blob = input * scale.
        While Python represents images in [0, 1], certain Caffe models
        like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
        of these models must be 255.

        Parameters
        ----------
        in_ : which input to assign this scale factor
        scale : scale coefficient
        """
        self.__check_input(in_)
        self.raw_scale[in_] = scale
    def set_mean(self, in_, mean):
        """
        Set the mean to subtract for centering the data.

        Parameters
        ----------
        in_ : which input to assign this mean.
        mean : mean ndarray (input dimensional or broadcastable)
        """
        self.__check_input(in_)
        ms = mean.shape
        if mean.ndim == 1:
            # broadcast channels
            if ms[0] != self.inputs[in_][1]:
                raise ValueError('Mean channels incompatible with input.')
            mean = mean[:, np.newaxis, np.newaxis]
        else:
            # elementwise mean
            if len(ms) == 2:
                ms = (1,) + ms
            if len(ms) != 3:
                raise ValueError('Mean shape invalid')
            if ms != self.inputs[in_][1:]:
                #raise ValueError('Mean shape incompatible with input shape.')
                # NOTE(review): instead of rejecting a mismatched mean, this
                # normalizes it to [0, 1], resizes it to the input's H x W,
                # and restores the original value range -- confirm this
                # interpolated mean is acceptable for the model in use.
                print(self.inputs[in_])
                in_shape = self.inputs[in_][1:]
                m_min, m_max = mean.min(), mean.max()
                normal_mean = (mean - m_min) / (m_max - m_min)
                mean = resize_image(
                    normal_mean.transpose((1, 2, 0)),
                    in_shape[1:]).transpose((2, 0, 1)) * (m_max - m_min) + m_min
        self.mean[in_] = mean
    def set_input_scale(self, in_, scale):
        """
        Set the scale of preprocessed inputs s.t. the blob = blob * scale.
        N.B. input_scale is done AFTER mean subtraction and other preprocessing
        while raw_scale is done BEFORE.

        Parameters
        ----------
        in_ : which input to assign this scale factor
        scale : scale coefficient
        """
        self.__check_input(in_)
        self.input_scale[in_] = scale
## Image IO
def load_image(filename, color=True):
    """
    Load an image converting from grayscale or alpha as needed.

    Parameters
    ----------
    filename : string
    color : boolean
        flag for color format. True (default) loads as RGB while False
        loads as intensity (if image is already grayscale).

    Returns
    -------
    image : an image with type np.float32 in range [0, 1]
        of size (H x W x 3) in RGB or
        of size (H x W x 1) in grayscale.
    """
    # NOTE(review): 'as_grey' was renamed 'as_gray' in scikit-image >= 0.19;
    # this call only works on older scikit-image releases -- confirm the pin.
    img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
    if img.ndim == 2:
        # Grayscale: add a trailing channel axis so the shape is H x W x 1.
        img = img[:, :, np.newaxis]
        if color:
            # Replicate the single channel to produce an RGB-shaped image.
            img = np.tile(img, (1, 1, 3))
    elif img.shape[2] == 4:
        # Drop the alpha channel of RGBA input.
        img = img[:, :, :3]
    return img
def resize_image(im, new_dims, interp_order=1):
    """
    Resize an image array with interpolation.

    Parameters
    ----------
    im : (H x W x K) ndarray
    new_dims : (height, width) tuple of new dimensions.
    interp_order : interpolation order, default is linear.

    Returns
    -------
    im : resized ndarray with shape (new_dims[0], new_dims[1], K)
    """
    if im.shape[-1] == 1 or im.shape[-1] == 3:
        im_min, im_max = im.min(), im.max()
        if im_max > im_min:
            # skimage is fast but only understands {1,3} channel images
            # in [0, 1], so normalize, resize, then restore the range.
            im_std = (im - im_min) / (im_max - im_min)
            resized_std = resize(im_std, new_dims, order=interp_order)
            resized_im = resized_std * (im_max - im_min) + im_min
        else:
            # the image is a constant -- avoid divide by 0
            ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
                           dtype=np.float32)
            ret.fill(im_min)
            return ret
    else:
        # ndimage interpolates anything but more slowly; the (1,) keeps the
        # channel axis unscaled.
        scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
        resized_im = zoom(im, scale + (1,), order=interp_order)
    return resized_im.astype(np.float32)
def oversample(images, crop_dims):
    """
    Crop images into the four corners, center, and their mirrored versions.

    Parameters
    ----------
    images : iterable of (H x W x K) ndarrays
    crop_dims : (height, width) tuple for the crops.

    Returns
    -------
    crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
    """
    # Dimensions and center.
    im_shape = np.array(images[0].shape)
    crop_dims = np.array(crop_dims)
    im_center = im_shape[:2] / 2.0
    # Make crop coordinates
    h_indices = (0, im_shape[0] - crop_dims[0])
    w_indices = (0, im_shape[1] - crop_dims[1])
    # Each row is (y0, x0, y1, x1); the first four rows are the corner crops.
    crops_ix = np.empty((5, 4), dtype=int)
    curr = 0
    for i in h_indices:
        for j in w_indices:
            crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
            curr += 1
    # Fifth row is the center crop; float coordinates are truncated when
    # assigned into the int-typed crops_ix.
    crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
        -crop_dims / 2.0,
        crop_dims / 2.0
    ])
    # Duplicate the five windows; the second half becomes the mirrors.
    crops_ix = np.tile(crops_ix, (2, 1))
    # Extract crops
    crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
                      im_shape[-1]), dtype=np.float32)
    ix = 0
    for im in images:
        for crop in crops_ix:
            crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
            ix += 1
        crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :]  # flip for mirrors
    return crops
| |
# Derived from nova/network/linux_net.py
#
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import os
from os_vif.internal.ip.api import ip as ip_lib
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import log as logging
from vif_plug_linux_bridge import privsep
LOG = logging.getLogger(__name__)
# Module-level iptables manager; None until configure() injects one.
_IPTABLES_MANAGER = None
def _set_device_mtu(dev, mtu):
    """Set the device MTU."""
    if not mtu:
        # No MTU requested; leave the interface untouched.
        LOG.debug("MTU not set on %(interface_name)s interface",
                  {'interface_name': dev})
        return
    ip_lib.set(dev, mtu=mtu, check_exit_code=[0, 2, 254])
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
@privsep.vif_plug.entrypoint
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface,
                       net_attrs=None, mac_address=None,
                       mtu=None):
    """Create a vlan and bridge unless they already exist.

    :param vlan_num: VLAN id; the vlan interface is named 'vlan<vlan_num>'
    :param bridge: name of the bridge to attach the vlan interface to
    :param bridge_interface: physical interface the vlan rides on
    :param net_attrs: optional dict of network attributes for the bridge
    :param mac_address: optional MAC to assign to the vlan interface
    :param mtu: optional MTU to apply to the vlan interface
    :returns: the vlan interface name
    """
    interface = _ensure_vlan_privileged(vlan_num, bridge_interface,
                                        mac_address, mtu=mtu)
    # Fix: _ensure_bridge_privileged requires a ``gateway`` argument; the
    # previous call omitted it and raised TypeError. None matches the
    # gateway value passed to _ensure_bridge_filtering below.
    _ensure_bridge_privileged(bridge, interface, net_attrs, None)
    _ensure_bridge_filtering(bridge, None)
    return interface
@lockutils.synchronized('nova-lock_vlan', external=True)
def _ensure_vlan_privileged(vlan_num, bridge_interface, mac_address, mtu):
    """Create a vlan unless it already exists.

    This assumes the caller is already annotated to run
    with elevated privileges.

    :param vlan_num: VLAN id; the interface is named 'vlan<vlan_num>'
    :param bridge_interface: link device the vlan interface is layered on
    :param mac_address: optional MAC to assign to the new interface
    :param mtu: optional MTU, re-applied on every call
    :returns: the vlan interface name
    """
    interface = 'vlan%s' % vlan_num
    if not ip_lib.exists(interface):
        LOG.debug('Starting VLAN interface %s', interface)
        ip_lib.add(interface, 'vlan', link=bridge_interface,
                   vlan_id=vlan_num, check_exit_code=[0, 2, 254])
        # (danwent) the bridge will inherit this address, so we want to
        # make sure it is the value set from the NetworkManager
        if mac_address:
            ip_lib.set(interface, address=mac_address,
                       check_exit_code=[0, 2, 254])
        ip_lib.set(interface, state='up', check_exit_code=[0, 2, 254])
    # NOTE(vish): set mtu every time to ensure that changes to mtu get
    # propagated
    _set_device_mtu(interface, mtu)
    return interface
@lockutils.synchronized('nova-lock_bridge', external=True)
def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
                  filtering=True, mtu=None):
    """Create a bridge (and optionally its iptables filtering) if needed.

    Serialized by an external lock so concurrent plug operations do not
    race on bridge creation.
    """
    _ensure_bridge_privileged(bridge, interface, net_attrs, gateway,
                              filtering=filtering, mtu=mtu)
    # Filtering runs outside the privileged helper; the iptables manager
    # handles its own privilege escalation.
    if filtering:
        _ensure_bridge_filtering(bridge, gateway)
# TODO(sean-k-mooney): extract into common module
def _disable_ipv6(bridge):
"""disable ipv6 for bridge if available, must be called from
privsep context.
:param bridge: string bridge name
"""
# NOTE(sean-k-mooney): os-vif disables ipv6 to ensure the Bridge
# does not aquire an ipv6 auto config or link local adress.
# This is required to prevent bug 1302080.
# https://bugs.launchpad.net/neutron/+bug/1302080
disv6 = ('/proc/sys/net/ipv6/conf/%s/disable_ipv6' % bridge)
if os.path.exists(disv6):
with open(disv6, 'w') as f:
f.write('1')
# TODO(ralonsoh): extract into common module
def _arp_filtering(bridge):
"""Prevent the bridge from replying to ARP messages with machine local IPs
1. Reply only if the target IP address is local address configured on the
incoming interface.
2. Always use the best local address.
"""
arp_params = [('/proc/sys/net/ipv4/conf/%s/arp_ignore' % bridge, '1'),
('/proc/sys/net/ipv4/conf/%s/arp_announce' % bridge, '2')]
for parameter, value in arp_params:
if os.path.exists(parameter):
with open(parameter, 'w') as f:
f.write(value)
def _update_bridge_routes(interface, bridge):
    """Updates routing table for a given bridge and interface.

    :param interface: string interface name
    :param bridge: string bridge name
    """
    # TODO(sean-k-mooney): investigate deleting all this route
    # handling code. The vm tap devices should never have an ip,
    # this is old nova networks code and i dont think it will ever
    # be needed in os-vif.
    # NOTE(vish): This will break if there is already an ip on the
    # interface, so we move any ips to the bridge
    # NOTE(danms): We also need to copy routes to the bridge so as
    # not to break existing connectivity on the interface
    old_routes = []
    out, _ = processutils.execute('ip', 'route', 'show', 'dev',
                                  interface)
    for line in out.split('\n'):
        fields = line.split()
        if fields and 'via' in fields:
            # Remember the gateway route, then delete it from the interface
            # so it can be replayed on the bridge at the end.
            old_routes.append(fields)
            processutils.execute('ip', 'route', 'del', *fields)
    out, _ = processutils.execute('ip', 'addr', 'show', 'dev',
                                  interface, 'scope', 'global')
    for line in out.split('\n'):
        fields = line.split()
        if fields and fields[0] == 'inet':
            # Strip trailing flag tokens so the address arguments can be
            # replayed verbatim by the 'ip addr add' command.
            if fields[-2] in ('secondary', 'dynamic', ):
                params = fields[1:-2]
            else:
                params = fields[1:-1]
            # Move the address: delete it from the interface, add to bridge.
            processutils.execute(*_ip_bridge_cmd('del', params,
                                                 fields[-1]),
                                 check_exit_code=[0, 2, 254])
            processutils.execute(*_ip_bridge_cmd('add', params,
                                                 bridge),
                                 check_exit_code=[0, 2, 254])
    for fields in old_routes:
        # Re-add the saved routes; they now resolve via the bridge.
        processutils.execute('ip', 'route', 'add', *fields)
@privsep.vif_plug.entrypoint
def _ensure_bridge_privileged(bridge, interface, net_attrs, gateway,
                              filtering=True, mtu=None):
    """Create a bridge unless it already exists.

    :param interface: the interface to create the bridge on.
    :param net_attrs: dictionary with attributes used to create bridge.
    :param gateway: whether or not the bridge is a gateway.
    :param filtering: whether or not to create filters on the bridge.
    :param mtu: MTU of bridge.

    If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
    using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
    the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.

    The code will attempt to move any ips that already exist on the
    interface onto the bridge and reset the default gateway if necessary.
    """
    if not ip_lib.exists(bridge):
        LOG.debug('Starting Bridge %s', bridge)
        ip_lib.add(bridge, 'bridge')
        _disable_ipv6(bridge)
        _arp_filtering(bridge)
        ip_lib.set(bridge, state='up')
    if interface and ip_lib.exists(interface):
        LOG.debug('Adding interface %(interface)s to bridge %(bridge)s',
                  {'interface': interface, 'bridge': bridge})
        # Enslave the interface to the bridge and bring it up.
        ip_lib.set(interface, master=bridge, state='up',
                   check_exit_code=[0, 2, 254])
        _set_device_mtu(interface, mtu)
        # Migrate any addresses/routes from the interface onto the bridge.
        _update_bridge_routes(interface, bridge)
    # NOTE(sean-k-mooney):
    # The bridge mtu cannot be set until after an
    # interface is added due to bug:
    # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1399064
    _set_device_mtu(bridge, mtu)
def _ensure_bridge_filtering(bridge, gateway):
    """Install iptables FORWARD rules for the bridge.

    Gateway bridges receive the manager's gateway rule set; otherwise
    traffic through the bridge is explicitly dropped in both directions.
    Requires configure() to have injected the iptables manager first.
    """
    # This method leaves privsep usage to iptables manager
    # Don't forward traffic unless we were told to be a gateway
    LOG.debug("Ensuring filtering %s to %s", bridge, gateway)
    global _IPTABLES_MANAGER
    ipv4_filter = _IPTABLES_MANAGER.ipv4['filter']
    if gateway:
        for rule in _IPTABLES_MANAGER.get_gateway_rules(bridge):
            ipv4_filter.add_rule(*rule)
    else:
        # Drop forwarded traffic entering or leaving via this bridge.
        ipv4_filter.add_rule('FORWARD',
                             ('--in-interface %s -j %s'
                              % (bridge,
                                 _IPTABLES_MANAGER.iptables_drop_action)))
        ipv4_filter.add_rule('FORWARD',
                             ('--out-interface %s -j %s'
                              % (bridge,
                                 _IPTABLES_MANAGER.iptables_drop_action)))
    _IPTABLES_MANAGER.apply()
def configure(iptables_mgr):
    """Configure the iptables manager impl.

    :param iptables_mgr: the iptables manager instance
    """
    global _IPTABLES_MANAGER
    # Stored module-globally; _ensure_bridge_filtering() reads it later.
    _IPTABLES_MANAGER = iptables_mgr
| |
import datetime
import ftplib
import logging
import os.path
from airflow.hooks.base_hook import BaseHook
from past.builtins import basestring
def mlsd(conn, path="", facts=None):
    '''
    BACKPORT FROM PYTHON3 FTPLIB

    List a directory in a standardized format by using MLSD
    command (RFC-3659). If path is omitted the current directory
    is assumed. "facts" is a list of strings representing the type
    of information desired (e.g. ["type", "size", "perm"]).

    Yields a (name, facts_dict) tuple for every entry found in path,
    where facts_dict maps lowercased fact names to their values.
    '''
    facts = facts or []
    if facts:
        conn.sendcmd("OPTS MLST " + ";".join(facts) + ";")
    cmd = "MLSD %s" % path if path else "MLSD"
    lines = []
    conn.retrlines(cmd, lines.append)
    for line in lines:
        # Each line is '<fact>=<value>;...; <name>' terminated by CRLF.
        facts_found, _, name = line.rstrip(ftplib.CRLF).partition(' ')
        entry = {
            key.lower(): value
            for key, _, value in
            (fact.partition("=") for fact in facts_found[:-1].split(";"))
        }
        yield (name, entry)
class FTPHook(BaseHook):
    """
    Interact with FTP.

    Errors that may occur throughout but should be handled
    downstream.
    """

    def __init__(self, ftp_conn_id='ftp_default'):
        # The underlying ftplib connection is created lazily by get_conn().
        self.ftp_conn_id = ftp_conn_id
        self.conn = None

    def get_conn(self):
        """
        Returns a FTP connection object, creating one on first use.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            self.conn = ftplib.FTP(params.host, params.login, params.password)
        return self.conn

    def close_conn(self):
        """
        Closes the connection. An error will occur if the
        connection wasn't ever opened.
        """
        conn = self.conn
        conn.quit()
        # Drop the reference so a subsequent get_conn() opens a fresh
        # connection instead of returning the closed one.
        self.conn = None

    def describe_directory(self, path):
        """
        Returns a dictionary of {filename: {attributes}} for all files
        on the remote system (where the MLSD command is supported).

        :param path: full path to the remote directory
        :type path: str
        """
        conn = self.get_conn()
        conn.cwd(path)
        try:
            # only works in Python 3
            files = dict(conn.mlsd())
        except AttributeError:
            # Python 2: fall back to the module-level mlsd() backport
            files = dict(mlsd(conn))
        return files

    def list_directory(self, path, nlst=False):
        """
        Returns a list of files on the remote system.

        :param path: full path to the remote directory to list
        :type path: str
        :param nlst: unused; kept for backwards compatibility
        """
        conn = self.get_conn()
        conn.cwd(path)
        return conn.nlst()

    def create_directory(self, path):
        """
        Creates a directory on the remote system.

        :param path: full path to the remote directory to create
        :type path: str
        """
        self.get_conn().mkd(path)

    def delete_directory(self, path):
        """
        Deletes a directory on the remote system.

        :param path: full path to the remote directory to delete
        :type path: str
        """
        self.get_conn().rmd(path)

    def retrieve_file(self, remote_full_path, local_full_path_or_buffer):
        """
        Transfers the remote file to a local location.

        If local_full_path_or_buffer is a string path, the file will be put
        at that location; if it is a file-like buffer, the file will
        be written to the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path_or_buffer: str or file-like buffer
        """
        conn = self.get_conn()
        is_path = isinstance(local_full_path_or_buffer, basestring)
        if is_path:
            output_handle = open(local_full_path_or_buffer, 'wb')
        else:
            output_handle = local_full_path_or_buffer
        try:
            remote_path, remote_file_name = os.path.split(remote_full_path)
            conn.cwd(remote_path)
            logging.info('Retrieving file from FTP: {}'.format(
                remote_full_path))
            conn.retrbinary('RETR %s' % remote_file_name, output_handle.write)
            # Fixed typo in the log message: "etrieving" -> "retrieving".
            logging.info('Finished retrieving file from FTP: {}'.format(
                remote_full_path))
        finally:
            # Close the handle even when the transfer fails, but only when
            # we opened it ourselves; caller-supplied buffers stay open.
            if is_path:
                output_handle.close()

    def store_file(self, remote_full_path, local_full_path_or_buffer):
        """
        Transfers a local file to the remote location.

        If local_full_path_or_buffer is a string path, the file will be read
        from that location; if it is a file-like buffer, the file will
        be read from the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path_or_buffer: str or file-like buffer
        """
        conn = self.get_conn()
        is_path = isinstance(local_full_path_or_buffer, basestring)
        if is_path:
            input_handle = open(local_full_path_or_buffer, 'rb')
        else:
            input_handle = local_full_path_or_buffer
        try:
            remote_path, remote_file_name = os.path.split(remote_full_path)
            conn.cwd(remote_path)
            conn.storbinary('STOR %s' % remote_file_name, input_handle)
        finally:
            # Mirror retrieve_file(): only close handles we opened.
            if is_path:
                input_handle.close()

    def delete_file(self, path):
        """
        Removes a file on the FTP Server.

        :param path: full path to the remote file
        :type path: str
        """
        self.get_conn().delete(path)

    def get_mod_time(self, path):
        """
        Returns the last modification time of the remote file as a naive
        ``datetime.datetime`` parsed from the FTP MDTM response.

        :param path: full path to the remote file
        :type path: str
        """
        conn = self.get_conn()
        ftp_mdtm = conn.sendcmd('MDTM ' + path)
        # Response looks like '213 YYYYMMDDHHMMSS'; strip the status code.
        return datetime.datetime.strptime(ftp_mdtm[4:], '%Y%m%d%H%M%S')
class FTPSHook(FTPHook):
    """FTP-over-TLS variant of FTPHook."""

    def get_conn(self):
        """
        Returns a FTPS connection object, creating one on first use.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            self.conn = ftplib.FTP_TLS(
                params.host, params.login, params.password)
        return self.conn
| |
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Install pybind11, cereal, and eigen."""
import os
import sys
import tempfile
import pathlib
import subprocess
import copy
import logging
import argparse
import urllib.request
import tarfile
# Module-level logger; verbosity is adjusted by the -q/-v CLI flags below.
log = logging.getLogger(__name__)
def find_cmake_package(name,
                       version,
                       location_variable=None,
                       ignore_system=False):
    """Find a package with cmake.

    Return True if the package is found

    :param name: cmake package name passed to find_package()
    :param version: minimum version string ('' accepts any version)
    :param location_variable: CMakeCache variable that records where the
        package was found; defaults to '<name>_DIR'
    :param ignore_system: when True, tell cmake to skip system locations
    """
    if location_variable is None:
        location_variable = name + "_DIR"
    find_package_options = ''
    if ignore_system:
        find_package_options += 'NO_SYSTEM_ENVIRONMENT_PATH ' \
            'NO_CMAKE_PACKAGE_REGISTRY NO_CMAKE_SYSTEM_PATH ' \
            'NO_CMAKE_SYSTEM_PACKAGE_REGISTRY'
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_path = pathlib.Path(tmpdirname)
        # write the cmakelists file
        # NOTE(review): cmake_minimum_required() conventionally comes before
        # project(); cmake accepts this order with a warning -- confirm.
        with open(tmp_path / 'CMakeLists.txt', 'w') as f:
            f.write(f"""
project(test)
set(PYBIND11_PYTHON_VERSION 3)
cmake_minimum_required(VERSION 3.9)
find_package({name} {version} CONFIG REQUIRED {find_package_options})
""")
        # add the python prefix to the cmake prefix path
        env = copy.copy(os.environ)
        env['CMAKE_PREFIX_PATH'] = sys.prefix
        os.mkdir(tmp_path / 'build')
        cmake_out = subprocess.run(['cmake', tmpdirname],
                                   cwd=tmp_path / 'build',
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   timeout=120,
                                   env=env,
                                   encoding='UTF-8')
        log.debug(cmake_out.stdout.strip())
        # if cmake completed correctly, the package was found
        if cmake_out.returncode == 0:
            location = ''
            # Scan the cache for the variable recording the package location.
            with open(tmp_path / 'build' / 'CMakeCache.txt', 'r') as f:
                for line in f.readlines():
                    if line.startswith(location_variable):
                        location = line.strip()
            log.info(f"Found {name}: {location}")
            return True
        else:
            log.debug(cmake_out.stdout.strip())
            return False
def install_cmake_package(url, cmake_options):
    """Install a cmake package.

    Downloads the tarball at *url*, extracts it, configures it with cmake
    into sys.prefix, and runs the install target.

    :param url: URL of a .tar.gz source archive
    :param cmake_options: extra '-D...' flags passed to the configure step
    :raises RuntimeError: when the configure or install step fails
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_path = pathlib.Path(tmpdirname)
        log.info(f"Fetching {url}")
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        with open(tmp_path / 'file.tar.gz', 'wb') as f:
            f.write(urllib.request.urlopen(req).read())
        # NOTE(review): tarfile.extractall() on an untrusted archive can
        # write outside the target directory (path traversal); acceptable
        # here only because the URLs are pinned to known releases.
        with tarfile.open(tmp_path / 'file.tar.gz') as tar:
            tar.extractall(path=tmp_path)
            # Derive the archive's top-level directory name.
            root = tar.getnames()[0]
            if '/' in root:
                root = os.path.dirname(root)
        # add the python prefix to the cmake prefix path
        env = copy.copy(os.environ)
        env['CMAKE_PREFIX_PATH'] = sys.prefix
        log.info(f"Configuring {root}")
        os.mkdir(tmp_path / 'build')
        cmake_out = subprocess.run(
            ['cmake', tmp_path / root, f'-DCMAKE_INSTALL_PREFIX={sys.prefix}']
            + cmake_options,
            cwd=tmp_path / 'build',
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            timeout=120,
            env=env,
            encoding='UTF-8')
        log.debug(cmake_out.stdout.strip())
        if cmake_out.returncode != 0:
            log.error(f"Error configuring {root} (run with -v to see detailed "
                      "error messages)")
            raise RuntimeError('Failed to configure package')
        log.info(f"Installing {root}")
        cmake_out = subprocess.run(
            ['cmake', '--build', tmp_path / 'build', '--', 'install'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            timeout=120,
            env=env,
            encoding='UTF-8')
        log.debug(cmake_out.stdout.strip())
        if cmake_out.returncode != 0:
            log.error(f"Error installing {root} (run with -v to see detailed "
                      "error messages)")
            raise RuntimeError('Failed to install package')
if __name__ == "__main__":
    # CLI entry point: search for the three header-only dependencies and
    # offer to install any that are missing into sys.prefix.
    parser = argparse.ArgumentParser(
        description='Install header-only libraries needed to build HOOMD-blue.')
    parser.add_argument('-q',
                        action='store_true',
                        default=False,
                        help='Suppress info messages.')
    parser.add_argument('-v',
                        action='store_true',
                        default=False,
                        help='Show debug messages (overrides -q).')
    parser.add_argument('-y',
                        action='store_true',
                        default=False,
                        help='Skip user input and force installation.')
    parser.add_argument('--ignore-system',
                        action='store_true',
                        default=False,
                        help='Ignore packages installed at the system level.')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    # -v overrides -q because it is applied afterwards.
    if args.q:
        log.setLevel(level=logging.WARNING)
    if args.v:
        log.setLevel(level=logging.DEBUG)
    log.info(f"Searching for packages in: {sys.prefix}")
    pybind = find_cmake_package('pybind11',
                                '2.0',
                                ignore_system=args.ignore_system)
    cereal = find_cmake_package('cereal', '', ignore_system=args.ignore_system)
    eigen = find_cmake_package('Eigen3',
                               '3.2',
                               ignore_system=args.ignore_system)
    all_found = all([pybind, cereal, eigen])
    if all_found:
        log.info("Done. Found all packages.")
    else:
        # Build a human-readable comma-separated list of missing packages.
        missing_packages = ''
        if not pybind:
            missing_packages += 'pybind11, '
        if not cereal:
            missing_packages += 'cereal, '
        if not eigen:
            missing_packages += 'Eigen, '
        missing_packages = missing_packages[:-2]
        if args.y:
            proceed = 'y'
        else:
            print(f"*** About to install {missing_packages} into {sys.prefix}")
            proceed = input('Proceed (y/n)? ')
        if proceed == 'y':
            log.info(f"Installing packages in: {sys.prefix}")
            if not pybind:
                install_cmake_package(
                    'https://github.com/pybind/pybind11/archive/v2.6.0.tar.gz',
                    cmake_options=[
                        '-DPYBIND11_INSTALL=on', '-DPYBIND11_TEST=off'
                    ])
            if not cereal:
                install_cmake_package(
                    'https://github.com/USCiLab/cereal/archive/v1.3.0.tar.gz',
                    cmake_options=['-DJUST_INSTALL_CEREAL=on'])
            if not eigen:
                install_cmake_package(
                    'https://gitlab.com/libeigen/eigen/-/archive/3.3.8/'
                    'eigen-3.3.8.tar.gz',
                    cmake_options=[
                        '-DBUILD_TESTING=off', '-DEIGEN_TEST_NOQT=on'
                    ])
            log.info('Done.')
        else:
            print('Cancelled')
| |
"""
Define a set of scopes to be used by COS Internal OAuth implementation, specifically tailored to work with APIv2.
List of scopes, nomenclature, and rationale can be found in the relevant "Login as OSF- phase 2" proposal document
"""
from collections import namedtuple

from website import settings

# Public scopes are described with 3 pieces of information: list of constituent scopes, a description, and whether or
# not this scope is available to be requested by the general public.
# `parts` is an iterable of CoreScopes strings (wrapped in frozenset() where instances are built below).
scope = namedtuple('scope', ['parts', 'description', 'is_public'])
class CoreScopes(object):
    """
    The smallest units of permission that can be granted- all other scopes are built out of these.
    Each named constant is a single string."""

    # User profile data
    USERS_READ = 'users_read'
    USERS_WRITE = 'users_write'

    # Node metadata and relationships
    NODE_BASE_READ = 'nodes.base_read'
    NODE_BASE_WRITE = 'nodes.base_write'
    NODE_CHILDREN_READ = 'nodes.children_read'
    NODE_CHILDREN_WRITE = 'nodes.children_write'
    NODE_CONTRIBUTORS_READ = 'nodes.contributors_read'
    NODE_CONTRIBUTORS_WRITE = 'nodes.contributors_write'
    NODE_FILE_READ = 'nodes.files_read'
    NODE_FILE_WRITE = 'nodes.files_write'
    NODE_LINKS_READ = 'nodes.links_read'
    NODE_LINKS_WRITE = 'nodes.links_write'
    NODE_REGISTRATIONS_READ = 'nodes.registrations_read'
    NODE_REGISTRATIONS_WRITE = 'nodes.registrations_write'

    # Comments and abuse reports filed against comments
    NODE_COMMENTS_READ = 'comments.data_read'
    NODE_COMMENTS_WRITE = 'comments.data_write'
    COMMENT_REPORTS_READ = 'comments.reports_read'
    COMMENT_REPORTS_WRITE = 'comments.reports_write'

    # OAuth applications and personal access tokens
    APPLICATIONS_READ = 'applications_read'
    APPLICATIONS_WRITE = 'applications_write'
    NODE_LOG_READ = 'nodes.logs_read'
    TOKENS_READ = 'tokens_read'
    TOKENS_WRITE = 'tokens_write'

    # NOTE(review): sentinel scope; presumably grants no access — confirm against consumers
    NULL = 'null'

    # Organizer collections (folders of node links)
    ORGANIZER_COLLECTIONS_BASE_READ = 'collections.base_read'
    ORGANIZER_COLLECTIONS_BASE_WRITE = 'collections.base_write'
class ComposedScopes(object):
    """
    Composed scopes, listed in increasing order of access (most restrictive first). Each named constant is a tuple.
    """
    # Users collection
    USERS_READ = (CoreScopes.USERS_READ,)
    USERS_WRITE = USERS_READ + (CoreScopes.USERS_WRITE,)

    # Applications collection
    APPLICATIONS_READ = (CoreScopes.APPLICATIONS_READ,)
    APPLICATIONS_WRITE = APPLICATIONS_READ + (CoreScopes.APPLICATIONS_WRITE,)

    # Tokens collection
    TOKENS_READ = (CoreScopes.TOKENS_READ,)
    TOKENS_WRITE = TOKENS_READ + (CoreScopes.TOKENS_WRITE,)

    # Comment reports collection
    COMMENT_REPORTS_READ = (CoreScopes.COMMENT_REPORTS_READ,)
    COMMENT_REPORTS_WRITE = COMMENT_REPORTS_READ + (CoreScopes.COMMENT_REPORTS_WRITE,)

    # Nodes collection.
    # Base node data includes node metadata, links, and children.
    NODE_METADATA_READ = (CoreScopes.NODE_BASE_READ, CoreScopes.NODE_CHILDREN_READ, CoreScopes.NODE_LINKS_READ,
                          CoreScopes.NODE_COMMENTS_READ)
    NODE_METADATA_WRITE = NODE_METADATA_READ + \
        (CoreScopes.NODE_BASE_WRITE, CoreScopes.NODE_CHILDREN_WRITE, CoreScopes.NODE_LINKS_WRITE,
         CoreScopes.NODE_COMMENTS_WRITE)

    # Organizer Collections collection
    # Using Organizer Collections and the node links they collect. Reads Node Metadata.
    # NOTE(review): NODE_METADATA_READ appears here as a nested tuple element rather than
    # being concatenated with `+` as everywhere else — confirm this is intentional.
    ORGANIZER_READ = (CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ, NODE_METADATA_READ)
    ORGANIZER_WRITE = ORGANIZER_READ + (CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE, CoreScopes.NODE_LINKS_WRITE)

    # Privileges relating to editing content uploaded under that node # TODO: Add wiki etc when implemented
    NODE_DATA_READ = (CoreScopes.NODE_FILE_READ, )
    NODE_DATA_WRITE = NODE_DATA_READ + \
        (CoreScopes.NODE_FILE_WRITE, )

    # Privileges relating to who can access a node (via contributors or registrations)
    NODE_ACCESS_READ = (CoreScopes.NODE_CONTRIBUTORS_READ, CoreScopes.NODE_REGISTRATIONS_READ)
    NODE_ACCESS_WRITE = NODE_ACCESS_READ + \
        (CoreScopes.NODE_CONTRIBUTORS_WRITE, CoreScopes.NODE_REGISTRATIONS_WRITE)

    # Combine all sets of node permissions into one convenience level.
    # The *_WRITE tuples already include their *_READ counterparts, so these
    # concatenations contain duplicates; consumers below wrap them in frozenset().
    NODE_ALL_READ = NODE_METADATA_READ + NODE_DATA_READ + NODE_ACCESS_READ
    NODE_ALL_WRITE = NODE_ALL_READ + NODE_METADATA_WRITE + NODE_DATA_WRITE + NODE_ACCESS_WRITE

    # Full permissions: all routes intended to be exposed to third party API users
    FULL_READ = NODE_ALL_READ + USERS_READ + ORGANIZER_READ
    FULL_WRITE = NODE_ALL_WRITE + USERS_WRITE + ORGANIZER_WRITE

    # Admin permissions- includes functionality not intended for third-party use
    ADMIN_LEVEL = FULL_WRITE + APPLICATIONS_WRITE + TOKENS_WRITE + COMMENT_REPORTS_WRITE
# List of all publicly documented scopes, mapped to composed scopes defined above.
# Return as sets to enable fast comparisons of provided scopes vs those required by a given node
# These are the ***only*** scopes that will be recognized from CAS
public_scopes = {
    'osf.full_read': scope(parts=frozenset(ComposedScopes.FULL_READ),
                           description='View all information associated with this account, including for '
                                       'private projects.',
                           is_public=True),
    'osf.full_write': scope(parts=frozenset(ComposedScopes.FULL_WRITE),
                            description='View and edit all information associated with this account, including for '
                                        'private projects.',
                            is_public=True),
}

# Finer-grained scopes are only registered in development mode; in production
# only the two full_read/full_write scopes above are recognized.
if settings.DEV_MODE:
    public_scopes.update({
        'osf.users.all_read': scope(parts=frozenset(ComposedScopes.USERS_READ),
                                    description='Read your profile data',
                                    is_public=True),
        'osf.users.all_write': scope(parts=frozenset(ComposedScopes.USERS_WRITE),
                                     description='Read and edit your profile data',
                                     is_public=True),
        'osf.nodes.metadata_read': scope(parts=frozenset(ComposedScopes.NODE_METADATA_READ),
                                         description='Read a list of all public and private nodes accessible to this '
                                                     'account, and view associated metadata such as project descriptions '
                                                     'and titles',
                                         is_public=True),
        'osf.nodes.metadata_write': scope(parts=frozenset(ComposedScopes.NODE_METADATA_WRITE),
                                          description='Read a list of all public and private nodes accessible to this '
                                                      'account, and view and edit associated metadata such as project '
                                                      'descriptions and titles',
                                          is_public=True),
        'osf.nodes.data_read': scope(parts=frozenset(ComposedScopes.NODE_DATA_READ),
                                     description='List and view files associated with any public or private projects '
                                                 'accessible to this account.',
                                     is_public=True),
        'osf.nodes.data_write': scope(parts=frozenset(ComposedScopes.NODE_DATA_WRITE),
                                      description='List, view, and update files associated with any public or private '
                                                  'projects accessible to this account.',
                                      is_public=True),
        'osf.nodes.access_read': scope(parts=frozenset(ComposedScopes.NODE_ACCESS_READ),
                                       description='View the contributors list and any established registrations '
                                                   'associated with public or private projects',
                                       is_public=True),
        'osf.nodes.access_write': scope(parts=frozenset(ComposedScopes.NODE_ACCESS_WRITE),
                                        description='View and edit the contributors list associated with public or '
                                                    'private projects accessible to this account. Also view and create '
                                                    'registrations.',
                                        is_public=True),  # TODO: Language: Does registrations endpoint allow creation of registrations? Is that planned?
        'osf.nodes.all_read': scope(parts=frozenset(ComposedScopes.NODE_ALL_READ),
                                    description='View all metadata, files, and access rights associated with all public '
                                                'and private projects accessible to this account.',
                                    is_public=True),
        'osf.nodes.all_write': scope(parts=frozenset(ComposedScopes.NODE_ALL_WRITE),
                                     description='View and edit all metadata, files, and access rights associated with '
                                                 'all public and private projects accessible to this account.',
                                     is_public=True),
        # Undocumented scopes that can not be requested by third parties (per CAS restriction)
        'osf.admin': scope(parts=frozenset(ComposedScopes.ADMIN_LEVEL),
                           description='This permission should only be granted to OSF administrators. Allows a site to '
                                       'create, read, edit, and delete all information associated with this account.',
                           is_public=False),
    })
def normalize_scopes(scopes):
    """
    Given a list of public-facing scope names from a CAS token, return the set of internal scopes.

    This is useful for converting a single broad scope name (from CAS) into the small constituent parts
    (as used by views). Unrecognized scope names are silently ignored.

    :param list scopes: a list of public facing scopes
    """
    normalized = set()
    for name in scopes:
        # Unknown names simply contribute nothing to the result.
        matched = public_scopes.get(name)
        if matched is not None:
            normalized |= matched.parts
    return normalized
if __name__ == '__main__':
    # Print some data to console, to help audit what views/core scopes map to a given public/composed scope
    # Although represented internally as a set, print as a sorted list for readability.
    from pprint import pprint as pp
    # dict.iteritems() was removed in Python 3; items() behaves identically here
    # on both Python 2 and Python 3.
    pp({k: sorted(v.parts)
        for k, v in public_scopes.items()})
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functions and classes for tfdbg command-line interface."""
import math
import numpy as np
import six
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.debug.lib import common
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
# Shorthand for constructing styled CLI lines.
RL = debugger_cli_common.RichLine

# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
DEFAULT_NDARRAY_DISPLAY_THRESHOLD = 2000

# Color names recognized by the CLI front-ends as font attributes.
COLOR_BLACK = "black"
COLOR_BLUE = "blue"
COLOR_CYAN = "cyan"
COLOR_GRAY = "gray"
COLOR_GREEN = "green"
COLOR_MAGENTA = "magenta"
COLOR_RED = "red"
COLOR_WHITE = "white"
COLOR_YELLOW = "yellow"

# Time units, ordered smallest to largest; the list index doubles as the
# power-of-1000 divisor in time_to_readable_str().
TIME_UNIT_US = "us"
TIME_UNIT_MS = "ms"
TIME_UNIT_S = "s"
TIME_UNITS = [TIME_UNIT_US, TIME_UNIT_MS, TIME_UNIT_S]
def bytes_to_readable_str(num_bytes, include_b=False):
  """Generate a human-readable string representing number of bytes.

  The units B, kB, MB and GB are used.

  Args:
    num_bytes: (`int` or None) Number of bytes.
    include_b: (`bool`) Include the letter B at the end of the unit.

  Returns:
    (`str`) A string representing the number of bytes in a human-readable way,
    including a unit at the end.
  """
  if num_bytes is None:
    return str(num_bytes)

  # Pick the largest unit whose threshold the value stays under; values below
  # 1 kiB are printed as a bare integer.
  if num_bytes < 1 << 10:
    readable = "%d" % num_bytes
  elif num_bytes < 1 << 20:
    readable = "%.2fk" % (num_bytes / 1024.0)
  elif num_bytes < 1 << 30:
    readable = "%.2fM" % (num_bytes / 1048576.0)
  else:
    readable = "%.2fG" % (num_bytes / 1073741824.0)

  return readable + "B" if include_b else readable
def time_to_readable_str(value_us, force_time_unit=None):
  """Convert time value to human-readable string.

  Args:
    value_us: time value in microseconds.
    force_time_unit: force the output to use the specified time unit. Must be
      in TIME_UNITS.

  Returns:
    Human-readable string representation of the time value.

  Raises:
    ValueError: if force_time_unit value is not in TIME_UNITS.
  """
  # Zero (or None) is displayed without any unit.
  if not value_us:
    return "0"

  if force_time_unit:
    if force_time_unit not in TIME_UNITS:
      raise ValueError("Invalid time unit: %s" % force_time_unit)
    unit_index = TIME_UNITS.index(force_time_unit)
    return "{:.10g}{}".format(value_us / math.pow(10.0, 3 * unit_index),
                              force_time_unit)

  # Choose the largest unit that keeps the printed magnitude at or above one:
  # each step up in TIME_UNITS corresponds to a factor of 1000.
  unit_index = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))
  return "{:.3g}{}".format(value_us / math.pow(10.0, 3 * unit_index),
                           TIME_UNITS[unit_index])
def parse_ranges_highlight(ranges_string):
  """Process ranges highlight string.

  Args:
    ranges_string: (str) A string representing a numerical range of a list of
      numerical ranges. See the help info of the -r flag of the print_tensor
      command for more details.

  Returns:
    An instance of tensor_format.HighlightOptions, if range_string is a valid
    representation of a range or a list of ranges.
  """
  if not ranges_string:
    return None

  ranges = command_parser.parse_ranges(ranges_string)

  def ranges_filter(x):
    # Mark every element of x that falls inside at least one [start, end]
    # closed interval.
    mask = np.zeros(x.shape, dtype=bool)
    for range_start, range_end in ranges:
      mask |= (x >= range_start) & (x <= range_end)
    return mask

  return tensor_format.HighlightOptions(
      ranges_filter, description=ranges_string)
def numpy_printoptions_from_screen_info(screen_info):
  """Derive numpy print options from screen geometry.

  Args:
    screen_info: (dict or None) Screen information, possibly containing the
      key "cols" (screen width in columns).

  Returns:
    (dict) Keyword arguments suitable for numpy printing configuration: the
    line width is matched to the screen width when known, otherwise empty.
  """
  if not screen_info or "cols" not in screen_info:
    return {}
  return {"linewidth": screen_info["cols"]}
def format_tensor(tensor,
                  tensor_name,
                  np_printoptions,
                  print_all=False,
                  tensor_slicing=None,
                  highlight_options=None,
                  include_numeric_summary=False,
                  write_path=None):
  """Generate formatted str to represent a tensor or its slices.

  NOTE: mutates the caller's `np_printoptions` dict (sets the "threshold" key).

  Args:
    tensor: (numpy ndarray) The tensor value.
    tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key.
    np_printoptions: (dict) Numpy tensor formatting options.
    print_all: (bool) Whether the tensor is to be displayed in its entirety,
      instead of printing ellipses, even if its number of elements exceeds
      the default numpy display threshold.
      (Note: Even if this is set to true, the screen output can still be cut
      off by the UI frontend if it consist of more lines than the frontend
      can handle.)
    tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
      None, no slicing will be performed on the tensor.
    highlight_options: (tensor_format.HighlightOptions) options to highlight
      elements of the tensor. See the doc of tensor_format.format_tensor()
      for more details.
    include_numeric_summary: Whether a text summary of the numeric values (if
      applicable) will be included.
    write_path: A path to save the tensor value (after any slicing) to
      (optional). `numpy.save()` is used to save the value.

  Returns:
    An instance of `debugger_cli_common.RichTextLines` representing the
    (potentially sliced) tensor.
  """
  if tensor_slicing:
    # Validate the indexing.
    value = command_parser.evaluate_tensor_slice(tensor, tensor_slicing)
    sliced_name = tensor_name + tensor_slicing
  else:
    value = tensor
    sliced_name = tensor_name

  auxiliary_message = None
  if write_path:
    # Persist the (sliced) value to disk and prepend a confirmation line,
    # including the on-disk size, above the tensor printout.
    with gfile.Open(write_path, "wb") as output_file:
      np.save(output_file, value)
    line = debugger_cli_common.RichLine("Saved value to: ")
    line += debugger_cli_common.RichLine(write_path, font_attr="bold")
    line += " (%sB)" % bytes_to_readable_str(gfile.Stat(write_path).length)
    auxiliary_message = debugger_cli_common.rich_text_lines_from_rich_line_list(
        [line, debugger_cli_common.RichLine("")])

  if print_all:
    # Raising the threshold to the element count disables ellipses entirely.
    np_printoptions["threshold"] = value.size
  else:
    np_printoptions["threshold"] = DEFAULT_NDARRAY_DISPLAY_THRESHOLD

  return tensor_format.format_tensor(
      value,
      sliced_name,
      include_metadata=True,
      include_numeric_summary=include_numeric_summary,
      auxiliary_message=auxiliary_message,
      np_printoptions=np_printoptions,
      highlight_options=highlight_options)
def error(msg):
  """Generate a RichTextLines output for error.

  Args:
    msg: (str) The error message.

  Returns:
    (debugger_cli_common.RichTextLines) A representation of the error message
    for screen output.
  """
  # Errors are rendered as a single red line with an "ERROR: " prefix.
  error_line = RL("ERROR: " + msg, COLOR_RED)
  return debugger_cli_common.rich_text_lines_from_rich_line_list([error_line])
def _recommend_command(command, description, indent=2, create_link=False):
  """Generate a RichTextLines object that describes a recommended command.

  Args:
    command: (str) The command to recommend.
    description: (str) A description of what the command does.
    indent: (int) How many spaces to indent in the beginning.
    create_link: (bool) Whether a command link is to be applied to the command
      string.

  Returns:
    (RichTextLines) Formatted text (with font attributes) for recommending the
    command.
  """
  pad = " " * indent
  if create_link:
    # A linked command carries a clickable MenuItem attribute besides bold.
    command_attrs = [debugger_cli_common.MenuItem("", command), "bold"]
  else:
    command_attrs = "bold"
  rendered = [
      RL(pad) + RL(command, command_attrs) + ":",
      pad + " " + description,
  ]
  return debugger_cli_common.rich_text_lines_from_rich_line_list(rendered)
def get_tfdbg_logo():
  """Make an ASCII representation of the tfdbg logo."""
  # Literal ASCII-art lines, wrapped in RichTextLines for CLI display.
  lines = [
      "",
      "TTTTTT FFFF DDD BBBB GGG ",
      " TT F D D B B G ",
      " TT FFF D D BBBB G GG",
      " TT F D D B B G G",
      " TT F DDD BBBB GGG ",
      "",
  ]
  return debugger_cli_common.RichTextLines(lines)
# Separator line used to frame the run-start intro screen.
_HORIZONTAL_BAR = "======================================"


def get_run_start_intro(run_call_count,
                        fetches,
                        feed_dict,
                        tensor_filters,
                        is_callable_runner=False):
  """Generate formatted intro for run-start UI.

  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
      callable.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.

  Returns:
    (RichTextLines) Formatted intro message about the `Session.run()` call.
  """
  fetch_lines = common.get_flattened_names(fetches)

  if not feed_dict:
    feed_dict_lines = [debugger_cli_common.RichLine(" (Empty)")]
  else:
    feed_dict_lines = []
    for feed_key in feed_dict:
      feed_key_name = common.get_graph_element_name(feed_key)
      feed_dict_line = debugger_cli_common.RichLine(" ")
      # Each feed is rendered as a clickable link to "pf '<name>'".
      feed_dict_line += debugger_cli_common.RichLine(
          feed_key_name,
          debugger_cli_common.MenuItem(None, "pf '%s'" % feed_key_name))
      # Surround the name string with quotes, because feed_key_name may contain
      # spaces in some cases, e.g., SparseTensors.
      feed_dict_lines.append(feed_dict_line)
  feed_dict_lines = debugger_cli_common.rich_text_lines_from_rich_line_list(
      feed_dict_lines)

  out = debugger_cli_common.RichTextLines(_HORIZONTAL_BAR)
  if is_callable_runner:
    out.append("Running a runner returned by Session.make_callable()")
  else:
    out.append("Session.run() call #%d:" % run_call_count)
    out.append("")
    out.append("Fetch(es):")
    out.extend(debugger_cli_common.RichTextLines(
        [" " + line for line in fetch_lines]))
    out.append("")
    out.append("Feed dict:")
    out.extend(feed_dict_lines)
  out.append(_HORIZONTAL_BAR)
  out.append("")
  out.append("Select one of the following commands to proceed ---->")

  # Recommend the commands the user may issue next; "run" and "run -n" are
  # rendered as clickable links.
  out.extend(
      _recommend_command(
          "run",
          "Execute the run() call with debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -n",
          "Execute the run() call without debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -t <T>",
          "Execute run() calls (T - 1) times without debugging, then "
          "execute run() once more with debugging and drop back to the CLI"))
  out.extend(
      _recommend_command(
          "run -f <filter_name>",
          "Keep executing run() calls until a dumped tensor passes a given, "
          "registered filter (conditional breakpoint mode)"))

  more_lines = [" Registered filter(s):"]
  if tensor_filters:
    filter_names = []
    for filter_name in tensor_filters:
      filter_names.append(filter_name)
      # Each filter name links to the corresponding "run -f" command.
      command_menu_node = debugger_cli_common.MenuItem(
          "", "run -f %s" % filter_name)
      more_lines.append(RL(" * ") + RL(filter_name, command_menu_node))
  else:
    more_lines.append(" (None)")
  out.extend(
      debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))

  out.append("")
  out.append_rich_line(RL("For more details, see ") +
                       RL("help.", debugger_cli_common.MenuItem("", "help")) +
                       ".")
  out.append("")

  # Make main menu for the run-start intro.
  menu = debugger_cli_common.Menu()
  menu.append(debugger_cli_common.MenuItem("run", "run"))
  menu.append(debugger_cli_common.MenuItem("exit", "exit"))
  out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu

  return out
def get_run_short_description(run_call_count,
                              fetches,
                              feed_dict,
                              is_callable_runner=False):
  """Get a short description of the run() call.

  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.

  Returns:
    (str) A short description of the run() call, including information about
    the fetche(s) and feed(s).
  """
  if is_callable_runner:
    return "runner from make_callable()"

  description = "run #%d: " % run_call_count

  if isinstance(fetches, (ops.Tensor, ops.Operation, variables.Variable)):
    # A single graph element: report its name directly.
    description += "1 fetch (%s); " % common.get_graph_element_name(fetches)
  else:
    # Could be (nested) list, tuple, dict or namedtuple.
    num_fetches = len(common.get_flattened_names(fetches))
    if num_fetches > 1:
      description += "%d fetches; " % num_fetches
    else:
      description += "%d fetch; " % num_fetches

  if not feed_dict:
    description += "0 feeds"
  else:
    if len(feed_dict) == 1:
      for key in feed_dict:
        # Keys may be plain strings or graph elements carrying a .name.
        description += "1 feed (%s)" % (
            key if isinstance(key, six.string_types) or not hasattr(key, "name")
            else key.name)
    else:
      description += "%d feeds" % len(feed_dict)

  return description
def get_error_intro(tf_error):
  """Generate formatted intro for TensorFlow run-time error.

  Args:
    tf_error: (errors.OpError) TensorFlow run-time error object.

  Returns:
    (RichTextLines) Formatted intro message about the run-time OpError, with
    sample commands for debugging.
  """
  # The failing op's name may be unavailable, e.g., if the error did not
  # originate from a graph op.
  if hasattr(tf_error, "op") and hasattr(tf_error.op, "name"):
    op_name = tf_error.op.name
  else:
    op_name = None

  intro_lines = [
      "--------------------------------------",
      RL("!!! An error occurred during the run !!!", "blink"),
      "",
  ]

  out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)

  if op_name is not None:
    # Recommend op-specific debugging commands only when the op is known.
    out.extend(debugger_cli_common.RichTextLines(
        ["You may use the following commands to debug:"]))
    out.extend(
        _recommend_command("ni -a -d -t %s" % op_name,
                           "Inspect information about the failing op.",
                           create_link=True))
    out.extend(
        _recommend_command("li -r %s" % op_name,
                           "List inputs to the failing op, recursively.",
                           create_link=True))
    out.extend(
        _recommend_command(
            "lt",
            "List all tensors dumped during the failing run() call.",
            create_link=True))
  else:
    out.extend(debugger_cli_common.RichTextLines([
        "WARNING: Cannot determine the name of the op that caused the error."]))

  more_lines = [
      "",
      "Op name: %s" % op_name,
      "Error type: " + str(type(tf_error)),
      "",
      "Details:",
      str(tf_error),
      "",
      "--------------------------------------",
      "",
  ]

  out.extend(debugger_cli_common.RichTextLines(more_lines))

  return out
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_policy import policy
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from wsme import types as wtypes
from magnum.api.controllers.v1 import pod as api_pod
from magnum.common import utils
from magnum.conductor import api as rpcapi
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestPodObject(base.TestCase):
    """Unit tests for the API-layer Pod object."""

    def test_pod_init(self):
        # A field omitted from the constructor kwargs should surface as
        # wtypes.Unset on the resulting Pod.
        pod_dict = apiutils.pod_post_data(bay_uuid=None)
        del pod_dict['desc']
        pod = api_pod.Pod(**pod_dict)
        self.assertEqual(wtypes.Unset, pod.desc)
class TestListPod(api_base.FunctionalTest):
    """Functional tests for the GET endpoints of /pods.

    All assertEqual calls use the (expected, actual) argument order
    consistently; two tests previously had the arguments inverted.
    """

    def setUp(self):
        super(TestListPod, self).setUp()
        # Pods require an existing bay to be associated with.
        obj_utils.create_test_bay(self.context)

    def test_empty(self):
        response = self.get_json('/pods')
        self.assertEqual([], response['pods'])

    def _assert_pod_fields(self, pod):
        # Fields every serialized pod is expected to expose.
        pod_fields = ['name', 'bay_uuid', 'desc', 'images', 'labels',
                      'status', 'host']
        for field in pod_fields:
            self.assertIn(field, pod)

    def test_one(self):
        pod = obj_utils.create_test_pod(self.context)
        response = self.get_json('/pods')
        self.assertEqual(pod.uuid, response['pods'][0]["uuid"])
        self._assert_pod_fields(response['pods'][0])

    def test_get_one(self):
        pod = obj_utils.create_test_pod(self.context)
        response = self.get_json('/pods/%s' % pod['uuid'])
        self.assertEqual(pod.uuid, response['uuid'])
        self._assert_pod_fields(response)

    def test_get_one_by_name(self):
        pod = obj_utils.create_test_pod(self.context)
        response = self.get_json('/pods/%s' % pod['name'])
        self.assertEqual(pod.uuid, response['uuid'])
        self._assert_pod_fields(response)

    def test_get_one_by_name_not_found(self):
        response = self.get_json('/pods/not_found', expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_get_one_by_name_multiple_pod(self):
        obj_utils.create_test_pod(self.context, name='test_pod',
                                  uuid=utils.generate_uuid())
        obj_utils.create_test_pod(self.context, name='test_pod',
                                  uuid=utils.generate_uuid())
        # An ambiguous name lookup must be rejected with 409 Conflict.
        response = self.get_json('/pods/test_pod', expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_get_all_with_pagination_marker(self):
        pod_list = []
        for id_ in range(4):
            pod = obj_utils.create_test_pod(self.context, id=id_,
                                            uuid=utils.generate_uuid())
            pod_list.append(pod.uuid)

        response = self.get_json('/pods?limit=3&marker=%s' % pod_list[2])
        self.assertEqual(1, len(response['pods']))
        self.assertEqual(pod_list[-1], response['pods'][0]['uuid'])

    def test_detail(self):
        pod = obj_utils.create_test_pod(self.context)
        response = self.get_json('/pods/detail')
        self.assertEqual(pod.uuid, response['pods'][0]["uuid"])
        self._assert_pod_fields(response['pods'][0])

    def test_detail_with_pagination_marker(self):
        pod_list = []
        for id_ in range(4):
            pod = obj_utils.create_test_pod(self.context, id=id_,
                                            uuid=utils.generate_uuid())
            pod_list.append(pod.uuid)

        response = self.get_json('/pods/detail?limit=3&marker=%s'
                                 % pod_list[2])
        self.assertEqual(1, len(response['pods']))
        self.assertEqual(pod_list[-1], response['pods'][0]['uuid'])
        self._assert_pod_fields(response['pods'][0])

    def test_detail_against_single(self):
        # /pods/<uuid>/detail is not a valid route.
        pod = obj_utils.create_test_pod(self.context)
        response = self.get_json('/pods/%s/detail' % pod['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        pod_list = []
        for id_ in range(5):
            pod = obj_utils.create_test_pod(self.context, id=id_,
                                            uuid=utils.generate_uuid())
            pod_list.append(pod.uuid)

        response = self.get_json('/pods')
        self.assertEqual(len(pod_list), len(response['pods']))
        uuids = [p['uuid'] for p in response['pods']]
        self.assertEqual(sorted(pod_list), sorted(uuids))

    def test_links(self):
        uuid = utils.generate_uuid()
        obj_utils.create_test_pod(self.context, id=1, uuid=uuid)
        response = self.get_json('/pods/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        # Both the "self" and "bookmark" links must validate.
        for link in response['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))

    def test_collection_links(self):
        for id_ in range(5):
            obj_utils.create_test_pod(self.context, id=id_,
                                      uuid=utils.generate_uuid())
        response = self.get_json('/pods/?limit=3')
        self.assertEqual(3, len(response['pods']))

        next_marker = response['pods'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        # With no explicit limit, the api.max_limit config caps the page size.
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_pod(self.context, id=id_,
                                      uuid=utils.generate_uuid())
        response = self.get_json('/pods')
        self.assertEqual(3, len(response['pods']))

        next_marker = response['pods'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
obj_utils.create_test_bay(self.context)
self.pod = obj_utils.create_test_pod(self.context,
desc='pod_example_A_desc',
status='Running')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
new_desc = 'pod_example_B_desc'
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertNotEqual(new_desc, response['desc'])
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/desc', 'value': new_desc,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertEqual(new_desc, response['desc'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
def test_replace_bay_uuid(self):
another_bay = obj_utils.create_test_bay(self.context,
uuid=utils.generate_uuid())
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/bay_uuid',
'value': another_bay.uuid,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
def test_replace_non_existent_bay_uuid(self):
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/bay_uuid',
'value': utils.generate_uuid(),
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
def test_replace_internal_field(self):
response = self.patch_json(
'/pods/%s' % self.pod.uuid,
[{'path': '/labels', 'value': {}, 'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['error_message'])
def test_replace_non_existent_pod(self):
response = self.patch_json('/pods/%s' % utils.generate_uuid(),
[{'path': '/desc',
'value': 'pod_example_B_desc',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch.object(rpcapi.API, 'pod_update')
@mock.patch.object(api_pod.Pod, 'parse_manifest')
def test_replace_with_manifest(self, parse_manifest, pod_update):
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/manifest',
'value': '{}',
'op': 'replace'}])
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
parse_manifest.assert_called_once_with()
self.assertTrue(pod_update.is_called)
def test_add_ok(self):
new_desc = 'pod_example_B_desc'
response = self.patch_json(
'/pods/%s' % self.pod.uuid,
[{'path': '/desc', 'value': new_desc, 'op': 'add'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_int)
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertEqual(new_desc, response['desc'])
def test_add_multi(self):
new_status = 'Stopped'
new_desc = 'pod_example_B_desc'
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertNotEqual(new_status, response['status'])
self.assertNotEqual(new_desc, response['desc'])
json = [
{
'path': '/status',
'value': new_status,
'op': 'add'
},
{
'path': '/desc',
'value': new_desc,
'op': 'add'
}
]
response = self.patch_json('/pods/%s' % self.pod.uuid, json)
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertEqual(new_status, response['status'])
self.assertEqual(new_desc, response['desc'])
def test_add_non_existent_property(self):
response = self.patch_json(
'/pods/%s' % self.pod.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_remove_ok(self):
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertIsNotNone(response['desc'])
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/desc', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertIsNone(response['desc'])
def test_remove_uuid(self):
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_bay_uuid(self):
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/bay_uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_internal_field(self):
response = self.patch_json('/pods/%s' % self.pod.uuid,
[{'path': '/labels', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_remove_non_existent_property(self):
response = self.patch_json(
'/pods/%s' % self.pod.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_code)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/pods/%s' % self.pod.name,
[{'path': '/desc', 'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/pods/%s' % self.pod.uuid)
self.assertEqual('pod1', response['name'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_not_found(self, mock_utcnow):
name = 'not_found'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/pods/%s' % name,
[{'path': '/desc', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(404, response.status_code)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_multiple_pod(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
obj_utils.create_test_pod(self.context, name='test_pod',
uuid=utils.generate_uuid())
obj_utils.create_test_pod(self.context, name='test_pod',
uuid=utils.generate_uuid())
response = self.patch_json('/pods/test_pod',
[{'path': '/desc', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
class TestPost(api_base.FunctionalTest):
    """POST /pods tests with the conductor RPC and baymodel lookup mocked."""

    def setUp(self):
        super(TestPost, self).setUp()
        obj_utils.create_test_bay(self.context)
        p = mock.patch.object(rpcapi.API, 'pod_create')
        self.mock_pod_create = p.start()
        self.mock_pod_create.side_effect = self._simulate_rpc_pod_create
        self.addCleanup(p.stop)
        p = mock.patch('magnum.objects.BayModel.get_by_uuid')
        self.mock_baymodel_get_by_uuid = p.start()
        # The pod endpoints only apply to kubernetes bays.
        self.mock_baymodel_get_by_uuid.return_value.coe = 'kubernetes'
        self.addCleanup(p.stop)

    def _simulate_rpc_pod_create(self, pod):
        # Stand-in for the conductor: persist the pod directly.
        pod.create()
        return pod

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_create_pod(self, mock_utcnow):
        """A valid POST returns 201, a Location header and created_at."""
        pdict = apiutils.pod_post_data()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        response = self.post_json('/pods', pdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/pods/%s' % pdict['uuid']
        self.assertEqual(urlparse.urlparse(response.location).path,
                         expected_location)
        self.assertEqual(pdict['uuid'], response.json['uuid'])
        # Fix: the original passed the bound method ``response.json.keys``
        # (no call) to assertNotIn, which makes the membership test raise
        # TypeError instead of checking anything. Call it.
        self.assertNotIn('updated_at', response.json.keys())
        return_created_at = timeutils.parse_isotime(
            response.json['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)

    def test_create_pod_set_project_id_and_user_id(self):
        """The server stamps project_id/user_id from the request context."""
        pdict = apiutils.pod_post_data()

        def _simulate_rpc_pod_create(pod):
            self.assertEqual(pod.project_id, self.context.project_id)
            self.assertEqual(pod.user_id, self.context.user_id)
            pod.create()
            return pod
        self.mock_pod_create.side_effect = _simulate_rpc_pod_create
        self.post_json('/pods', pdict)

    def test_create_pod_doesnt_contain_id(self):
        """The DB layer must never receive a client-supplied 'id'."""
        with mock.patch.object(self.dbapi, 'create_pod',
                               wraps=self.dbapi.create_pod) as cc_mock:
            pdict = apiutils.pod_post_data(desc='pod_example_A_desc')
            response = self.post_json('/pods', pdict)
            self.assertEqual(pdict['desc'], response.json['desc'])
            cc_mock.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', cc_mock.call_args[0][0])

    def test_create_pod_generate_uuid(self):
        """Omitting uuid is fine: the server generates one."""
        pdict = apiutils.pod_post_data()
        del pdict['uuid']
        response = self.post_json('/pods', pdict)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(201, response.status_int)
        self.assertEqual(pdict['desc'], response.json['desc'])
        self.assertTrue(utils.is_uuid_like(response.json['uuid']))

    def test_create_pod_no_bay_uuid(self):
        """bay_uuid is mandatory."""
        pdict = apiutils.pod_post_data()
        del pdict['bay_uuid']
        response = self.post_json('/pods', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)

    def test_create_pod_with_non_existent_bay_uuid(self):
        """An unknown bay_uuid is rejected with 400."""
        pdict = apiutils.pod_post_data(bay_uuid=utils.generate_uuid())
        response = self.post_json('/pods', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_create_pod_with_invalid_manifest(self):
        """A manifest that fails to parse is rejected with 400."""
        pdict = apiutils.pod_post_data()
        pdict['manifest'] = 'wrong manifest'
        response = self.post_json('/pods', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_create_pod_no_manifest(self):
        """A manifest is mandatory."""
        pdict = apiutils.pod_post_data()
        del pdict['manifest']
        response = self.post_json('/pods', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])

    def test_create_pod_no_id_in_manifest(self):
        """A manifest without an id section is rejected with 400."""
        pdict = apiutils.pod_post_data()
        pdict['manifest'] = {}
        response = self.post_json('/pods', pdict, expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_int)
        self.assertTrue(response.json['error_message'])
class TestDelete(api_base.FunctionalTest):
    """DELETE /pods tests with the conductor RPC layer stubbed out."""

    def setUp(self):
        super(TestDelete, self).setUp()
        obj_utils.create_test_bay(self.context)
        self.pod = obj_utils.create_test_pod(self.context)
        patcher = mock.patch.object(rpcapi.API, 'pod_delete')
        self.mock_pod_delete = patcher.start()
        self.mock_pod_delete.side_effect = self._simulate_rpc_pod_delete
        self.addCleanup(patcher.stop)

    def _simulate_rpc_pod_delete(self, pod_uuid):
        # Stand-in for the conductor: destroy the DB record directly.
        objects.Pod.get_by_uuid(self.context, pod_uuid).destroy()

    def _assert_error_response(self, response, status):
        """Common checks for a JSON error reply with the given status."""
        self.assertEqual(status, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_delete_pod(self):
        """Deleting by uuid makes subsequent GETs return 404."""
        self.delete('/pods/%s' % self.pod.uuid)
        resp = self.get_json('/pods/%s' % self.pod.uuid,
                             expect_errors=True)
        self._assert_error_response(resp, 404)

    def test_delete_pod_by_name(self):
        """Deleting by name works when the name is unique."""
        self.delete('/pods/%s' % self.pod.name)
        resp = self.get_json('/pods/%s' % self.pod.name,
                             expect_errors=True)
        self._assert_error_response(resp, 404)

    def test_delete_pod_by_name_not_found(self):
        """Deleting an unknown name yields 404."""
        resp = self.delete('/pods/not_found', expect_errors=True)
        self._assert_error_response(resp, 404)

    def test_delete_multiple_pod_by_name(self):
        """Deleting by an ambiguous (duplicated) name yields 409."""
        for _ in range(2):
            obj_utils.create_test_pod(self.context, name='test_pod',
                                      uuid=utils.generate_uuid())
        resp = self.delete('/pods/test_pod', expect_errors=True)
        self._assert_error_response(resp, 409)

    def test_delete_pod_not_found(self):
        """Deleting an unknown uuid yields 404."""
        resp = self.delete('/pods/%s' % utils.generate_uuid(),
                           expect_errors=True)
        self._assert_error_response(resp, 404)
class TestPodPolicyEnforcement(api_base.FunctionalTest):
    """Each pod endpoint must reject callers that fail its policy rule."""

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        # Rewrite the rule so the fake request context can never satisfy
        # it, then verify the endpoint raises and names the offending rule.
        self.policy.set_rules({rule: 'project:non_fake'})
        exc = self.assertRaises(policy.PolicyNotAuthorized,
                                func, *arg, **kwarg)
        self.assertTrue(exc.message.startswith(rule))
        self.assertTrue(exc.message.endswith('disallowed by policy'))

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            'pod:get_all', self.get_json, '/pods')

    def test_policy_disallow_get_one(self):
        self._common_policy_check(
            'pod:get', self.get_json, '/pods/111-222-333')

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            'pod:detail', self.get_json, '/pods/111-222-333/detail')

    def test_policy_disallow_update(self):
        pod = obj_utils.create_test_pod(self.context,
                                        desc='test pod',
                                        uuid=utils.generate_uuid())
        self._common_policy_check(
            'pod:update', self.patch_json,
            '/pods/%s' % pod.uuid,
            [{'path': '/desc', 'value': 'new test pod', 'op': 'replace'}])

    def test_policy_disallow_create(self):
        self._common_policy_check(
            'pod:create', self.post_json, '/pods', apiutils.pod_post_data())

    def test_policy_disallow_delete(self):
        pod = obj_utils.create_test_pod(self.context,
                                        name='test_pod',
                                        uuid=utils.generate_uuid())
        self._common_policy_check(
            'pod:delete', self.delete,
            '/pods/%s' % pod.uuid)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow as tf
from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg
from object_detection.utils import test_case
class MultiscaleGridAnchorGeneratorTest(test_case.TestCase):
  """Tests for mg.MultiscaleGridAnchorGenerator.

  Expected corners are absolute [ymin, xmin, ymax, xmax] boxes, except when
  normalize_coordinates=True, in which case they are divided by the image
  height/width.
  """

  def test_construct_single_anchor(self):
    # One level (stride 2**5 = 32), one scale, square aspect: each of the
    # 2x2 feature cells gets one anchor centered on the cell.
    min_level = 5
    max_level = 5
    anchor_scale = 4.0
    aspect_ratios = [1.0]
    scales_per_octave = 1
    im_height = 64
    im_width = 64
    feature_map_shape_list = [(2, 2)]
    exp_anchor_corners = [[-48, -48, 80, 80],
                          [-48, -16, 80, 112],
                          [-16, -48, 112, 80],
                          [-16, -16, 112, 112]]
    anchor_generator = mg.MultiscaleGridAnchorGenerator(
        min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
        normalize_coordinates=False)
    anchors_list = anchor_generator.generate(
        feature_map_shape_list, im_height=im_height, im_width=im_width)
    anchor_corners = anchors_list[0].get()
    with self.test_session():
      anchor_corners_out = anchor_corners.eval()
      self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_single_anchor_in_normalized_coordinates(self):
    # Same anchors as above, but divided by im_height / im_width.
    min_level = 5
    max_level = 5
    anchor_scale = 4.0
    aspect_ratios = [1.0]
    scales_per_octave = 1
    im_height = 64
    im_width = 128
    feature_map_shape_list = [(2, 2)]
    exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
                          [-48./64, -16./128, 80./64, 112./128],
                          [-16./64, -48./128, 112./64, 80./128],
                          [-16./64, -16./128, 112./64, 112./128]]
    anchor_generator = mg.MultiscaleGridAnchorGenerator(
        min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
        normalize_coordinates=True)
    anchors_list = anchor_generator.generate(
        feature_map_shape_list, im_height=im_height, im_width=im_width)
    anchor_corners = anchors_list[0].get()
    with self.test_session():
      anchor_corners_out = anchor_corners.eval()
      self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_num_anchors_per_location(self):
    # 2 aspect ratios x 3 scales per octave = 6 anchors per cell per level.
    min_level = 5
    max_level = 6
    anchor_scale = 4.0
    aspect_ratios = [1.0, 2.0]
    scales_per_octave = 3
    anchor_generator = mg.MultiscaleGridAnchorGenerator(
        min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
        normalize_coordinates=False)
    self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])

  def test_construct_single_anchor_fails_with_tensor_image_size(self):
    # generate() requires static (python int) image sizes; tensors raise.
    min_level = 5
    max_level = 5
    anchor_scale = 4.0
    aspect_ratios = [1.0]
    scales_per_octave = 1
    im_height = tf.constant(64)
    im_width = tf.constant(64)
    feature_map_shape_list = [(2, 2)]
    anchor_generator = mg.MultiscaleGridAnchorGenerator(
        min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
        normalize_coordinates=False)
    with self.assertRaises(ValueError):
      anchor_generator.generate(
          feature_map_shape_list, im_height=im_height, im_width=im_width)

  def test_construct_single_anchor_with_odd_input_dimension(self):
    # 65x65 input with a 3x3 feature map: anchor centers fall on a grid
    # that is symmetric about the image center.
    def graph_fn():
      min_level = 5
      max_level = 5
      anchor_scale = 4.0
      aspect_ratios = [1.0]
      scales_per_octave = 1
      im_height = 65
      im_width = 65
      feature_map_shape_list = [(3, 3)]
      anchor_generator = mg.MultiscaleGridAnchorGenerator(
          min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
          normalize_coordinates=False)
      anchors_list = anchor_generator.generate(
          feature_map_shape_list, im_height=im_height, im_width=im_width)
      anchor_corners = anchors_list[0].get()
      return (anchor_corners,)
    anchor_corners_out = self.execute(graph_fn, [])
    exp_anchor_corners = [[-64, -64, 64, 64],
                          [-64, -32, 64, 96],
                          [-64, 0, 64, 128],
                          [-32, -64, 96, 64],
                          [-32, -32, 96, 96],
                          [-32, 0, 96, 128],
                          [0, -64, 128, 64],
                          [0, -32, 128, 96],
                          [0, 0, 128, 128]]
    self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_single_anchor_on_two_feature_maps(self):
    # Levels 5 and 6 together: four anchors from the 2x2 map plus one
    # larger anchor from the 1x1 map.
    def graph_fn():
      min_level = 5
      max_level = 6
      anchor_scale = 4.0
      aspect_ratios = [1.0]
      scales_per_octave = 1
      im_height = 64
      im_width = 64
      feature_map_shape_list = [(2, 2), (1, 1)]
      anchor_generator = mg.MultiscaleGridAnchorGenerator(
          min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
          normalize_coordinates=False)
      anchors_list = anchor_generator.generate(feature_map_shape_list,
                                               im_height=im_height,
                                               im_width=im_width)
      anchor_corners = [anchors.get() for anchors in anchors_list]
      return anchor_corners
    anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
    exp_anchor_corners = [[-48, -48, 80, 80],
                          [-48, -16, 80, 112],
                          [-16, -48, 112, 80],
                          [-16, -16, 112, 112],
                          [-96, -96, 160, 160]]
    self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_single_anchor_with_two_scales_per_octave(self):
    def graph_fn():
      min_level = 6
      max_level = 6
      anchor_scale = 4.0
      aspect_ratios = [1.0]
      scales_per_octave = 2
      im_height = 64
      im_width = 64
      feature_map_shape_list = [(1, 1)]
      anchor_generator = mg.MultiscaleGridAnchorGenerator(
          min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
          normalize_coordinates=False)
      anchors_list = anchor_generator.generate(feature_map_shape_list,
                                               im_height=im_height,
                                               im_width=im_width)
      anchor_corners = [anchors.get() for anchors in anchors_list]
      return anchor_corners
    # There are 4 set of anchors in this configuration. The order is:
    # [[2**0.0 intermediate scale + 1.0 aspect],
    #  [2**0.5 intermediate scale + 1.0 aspect]]
    exp_anchor_corners = [[-96., -96., 160., 160.],
                          [-149.0193, -149.0193, 213.0193, 213.0193]]
    anchor_corners_out = self.execute(graph_fn, [])
    self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
    def graph_fn():
      min_level = 6
      max_level = 6
      anchor_scale = 4.0
      aspect_ratios = [1.0, 2.0]
      scales_per_octave = 2
      im_height = 64
      im_width = 64
      feature_map_shape_list = [(1, 1)]
      anchor_generator = mg.MultiscaleGridAnchorGenerator(
          min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
          normalize_coordinates=False)
      anchors_list = anchor_generator.generate(feature_map_shape_list,
                                               im_height=im_height,
                                               im_width=im_width)
      anchor_corners = [anchors.get() for anchors in anchors_list]
      return anchor_corners
    # There are 4 set of anchors in this configuration. The order is:
    # [[2**0.0 intermediate scale + 1.0 aspect],
    #  [2**0.5 intermediate scale + 1.0 aspect],
    #  [2**0.0 intermediate scale + 2.0 aspect],
    #  [2**0.5 intermediate scale + 2.0 aspect]]
    exp_anchor_corners = [[-96., -96., 160., 160.],
                          [-149.0193, -149.0193, 213.0193, 213.0193],
                          [-58.50967, -149.0193, 122.50967, 213.0193],
                          [-96., -224., 160., 288.]]
    anchor_corners_out = self.execute(graph_fn, [])
    self.assertAllClose(anchor_corners_out, exp_anchor_corners)

  def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
    # Feature map sizes arrive as int32 tensors; run on CPU since dynamic
    # shapes cannot be compiled for TPU/XLA in this test harness.
    def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
                 feature_map2_width):
      min_level = 5
      max_level = 6
      anchor_scale = 4.0
      aspect_ratios = [1.0]
      scales_per_octave = 1
      im_height = 64
      im_width = 64
      feature_map_shape_list = [(feature_map1_height, feature_map1_width),
                                (feature_map2_height, feature_map2_width)]
      anchor_generator = mg.MultiscaleGridAnchorGenerator(
          min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
          normalize_coordinates=False)
      anchors_list = anchor_generator.generate(feature_map_shape_list,
                                               im_height=im_height,
                                               im_width=im_width)
      anchor_corners = [anchors.get() for anchors in anchors_list]
      return anchor_corners
    anchor_corners_out = np.concatenate(
        self.execute_cpu(graph_fn, [
            np.array(2, dtype=np.int32),
            np.array(2, dtype=np.int32),
            np.array(1, dtype=np.int32),
            np.array(1, dtype=np.int32)
        ]),
        axis=0)
    exp_anchor_corners = [[-48, -48, 80, 80],
                          [-48, -16, 80, 112],
                          [-16, -48, 112, 80],
                          [-16, -16, 112, 112],
                          [-96, -96, 160, 160]]
    self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  tf.test.main()
| |
#!/usr/bin/env python
#@file runner.py
import os
import sys
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..\..\Python_functions'))
#sys.path.insert(0,'C:\Users\ArminAskari\Desktop\Berkeley 2013-2017\Research\CVT\ArminModify\Python_functions')
#from getTrajectory import *
import optparse
import subprocess
import random
import pdb
import math
import matplotlib.pyplot as plt
import numpy as np
import settings
settings.init()
from platoon_functions import *
# import python modules from $SUMO_HOME/tools directory
try:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(
__file__)), '..', "tools"))
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(os.path.realpath(
__file__)), "..")), "tools"))
from sumolib import checkBinary
except ImportError:
sys.exit("please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
import traci
PORT = 8873 # the port used for communicating with your sumo instance
# designates the phases definitions, one letter for each direction and turn type, this is for intersection 13
NSGREEN = "GGGgrrrrGGGgrrrr"
WEGREEN = "rrrrGGGgrrrrGGGg"
ALLRED = "rrrrrrrrrrrrrrrr"
# Fixed-time plan: one full cycle of cycle_length seconds split half/half
# between the two green phases, each followed by an all-red clearance.
cycle_length = 2*60
redClearTime = 3
# NOTE(review): cycle_length/2 relies on Python 2 integer division; under
# Python 3 it yields a float and list multiplication would raise.
PROGRAM = [WEGREEN]*(cycle_length/2-redClearTime)
PROGRAM.extend([ALLRED]*redClearTime)
PROGRAM.extend([NSGREEN]*(cycle_length/2-redClearTime))
PROGRAM.extend([ALLRED]*redClearTime)
# Second controller runs the same plan in reversed phase order.
PROGRAM2 = PROGRAM[::-1]
#PROGRAM.reverse()
step_length = 0.05
# Ticks to hold all-red at the start so a queue can accumulate.
light_delay = 2*60/step_length
run_time = 1*60*60+light_delay*step_length# seconds*minutes
leaving_times = [[]]
flow_array = []
flow_array_ss = []
# Steady-state reference line: one vehicle per 2.5 s after the hold period.
ssValue = lambda x: 1/2.5*(x-light_delay*step_length)
flowTime_array = []
tau_cars = []
time_cars = []
# Number of simulation ticks per one second of simulated time.
binRate = 1/step_length
## Platoons Settings
platoon_check = 50; # how often platoons update and are checked, every X ticks
platoon_comm = 20; # how often platoons communicate, every X ticks
numplatoons = 0;
start_range = 1; end_range = 120;
targetTau = 0.1; targetMinGap = 2.0;
accTau = 0.1; accMinGap = 2.0;
def flowCount(sensor_data, sensor_str, prev_veh_id):
    """Count new induction-loop passages since the previous poll.

    sensor_data -- list with one entry per sensor; each entry is the list
                   returned by traci.inductionloop.getVehicleData (first
                   tuple is (veh_id, length, entry_time, leave_time)).
    sensor_str  -- sensor names (unused, kept for interface compatibility).
    prev_veh_id -- per-sensor vehicle id seen at the previous poll.

    Returns (count_of_new_vehicles, current_vehicle_ids).
    """
    carCount = 0
    car_ids = []
    for idx, readings in enumerate(sensor_data):
        if not readings:
            continue
        veh_id = readings[0][0]
        car_ids.append(veh_id)
        # A different id than last poll means a new vehicle crossed the loop.
        if veh_id != prev_veh_id[idx]:
            carCount += 1
        # leave_time != -1 means the vehicle is exiting: record when it left.
        if readings[0][3] != -1:
            leaving_times[idx].extend([readings[0][2]])
    return carCount, car_ids
# Runs the simulation, and allows you to change traffic phase
def run(run_time):
    """Drive the TraCI control loop for run_time simulated seconds.

    Holds all lights red for the first light_delay ticks, then replays the
    fixed PROGRAM/PROGRAM2 plans, sampling the 'sensor' induction loop each
    tick. Returns the flow extrapolated to one hour.
    """
    ## execute the TraCI control loop
    traci.init(PORT)
    programPointer = 0 # initiates at start # len(PROGRAM) - 1 # initiates at end
    step = 0
    flow_count = 0
    first_car = True
    prev_veh_id = ' '
    # NOTE(review): prev_veh_id2 is never used below.
    prev_veh_id2 = ' '
    pointer_offset = 0
    car_speeds = []
    while traci.simulation.getMinExpectedNumber() > 0 and step <= run_time*(1/step_length):
        traci.simulationStep() # advance a simulation step
        # Phase index advances once per simulated second.
        programPointer = int(math.floor(step/(int(1/step_length))))%len(PROGRAM) - pointer_offset
        sensor_data = traci.inductionloop.getVehicleData("sensor")
        if len(sensor_data) != 0:
            flow_increment,prev_veh_id = flowCount([sensor_data],["sensor"],prev_veh_id)
            car_speeds.append(traci.vehicle.getSpeed(sensor_data[0][0]))
            flow_count += flow_increment
            #print (step*step_length,flow_count)
            if first_car: #if its the first car, record the time that it comes in
                first_time = sensor_data[0][2]
                first_car = False
        if step < light_delay: #24960, let queue accumulate
            traci.trafficlights.setRedYellowGreenState("0", ALLRED)
        else:
            traci.trafficlights.setRedYellowGreenState("0",PROGRAM[programPointer])
            traci.trafficlights.setRedYellowGreenState("2",PROGRAM2[programPointer])
        # Sample cumulative flow once per simulated second.
        if step % binRate == 0:
            flow_array.append(flow_count)
            flowTime_array.append(step*step_length)
            flow_array_ss.append(ssValue(step*step_length))
            #print flow_count
        step += 1
        #print str(step)
    print "\n \n"
    print "-------------------------------------------------------- \n"
    print "Total number of cars that have passed: " + str(flow_count)
    # Headways between successive loop departures.
    tau = np.diff(leaving_times)
    tau_cars.extend(tau)
    time_cars.extend(leaving_times[0][1:])
    # NOTE(review): first_time is unbound if no vehicle ever crossed the loop.
    extrap_flow = flow_count*(3600/(run_time-first_time))
    print "Total throughput extrapolated to 1hr: " + str(extrap_flow)
    print "Average car speed: " + str(np.mean(car_speeds))
    print "Max Theoretical throughput: " + str(3600/min(min(tau)))
    print "Min Theoretical throughput: " + str(3600/max(max(tau)))
    print tau
    print "Mean tau: " + str(np.mean(tau)) + "\n"
    print "Var tau: " + str(np.var(tau)) + "\n"
    print "Standard Dev tau: " + str(np.std(tau)) +"\n"
    print "Min Tau:" + str(np.min(tau))
    print "Max Tau:" + str(np.max(tau))
    traci.close()
    sys.stdout.flush()
    return extrap_flow #[np.mean(tau),np.var(tau),np.std(tau)]
# Command-line option parsing for the SUMO launcher.
def get_options():
    """Return the parsed command-line options (only --nogui is defined)."""
    parser = optparse.OptionParser()
    parser.add_option("--nogui", action="store_true",
                      default=True, help="run the commandline version of sumo")
    opts, _args = parser.parse_args()
    return opts
# this is the main entry point of this script
if __name__ == "__main__":
    options = get_options()
    sumoBinary = checkBinary('sumo')
    output = []
    file_name = "cross2ltl"+"_"+str(0)+".sumocfg"
    path = "cross2ltl/"+file_name
    print path
    # Launch SUMO listening on PORT; run() then attaches via traci.init.
    sumoProcess = subprocess.Popen([sumoBinary, "-c", path,"--step-length", str(step_length), "--remote-port", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
    run(run_time)
    #output.append(run(run_times[0]))
    print output
    # Persist the per-vehicle headways and their timestamps.
    filestr = '2min'+str(redClearTime)+'RCT_taus'
    filestrTime = filestr+'_time'
    np.savetxt(filestr,np.divide(3600,tau_cars[0]),fmt='%d')
    np.savetxt(filestrTime,time_cars,fmt='%d')
    ##########################################
    ##
    ## Plot instantenous flow
    ##
    ###########################################
    plt.figure(1)
    m1, = plt.plot(time_cars,np.divide(3600,tau_cars[0]))
    #plt.legend(handles=[m1],loc='upper left')
    plt.xlabel("Time (s)")
    plt.ylabel("Throughput per hr")
    plt.title("Total throughput vs time for 2 min cycle")
    # NOTE(review): plt.axes([240,580,400,2000]) looks like it was meant to be
    # plt.axis([xmin, xmax, ymin, ymax]) — plt.axes takes figure-fraction
    # coordinates; confirm intent.
    plt.axes([240,580,400,2000])
    plt.show()
| |
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from struct import pack, unpack
from twisted.internet import reactor
from twisted.internet.defer import DeferredQueue, inlineCallbacks, returnValue
from scapy.automaton import ATMT
import structlog
from voltha.adapters.microsemi_olt.BaseOltAutomaton import BaseOltAutomaton
from voltha.adapters.microsemi_olt.PAS5211 import PAS5211EventOnuActivation, PAS5211MsgGetActivationAuthMode, \
PAS5211MsgGetActivationAuthModeResponse, PAS5211MsgSetOnuOmciPortId, \
PAS5211MsgSetOnuOmciPortIdResponse, PAS5211MsgSendFrame, PAS5211MsgSendFrameResponse, \
PAS5211MsgGetLogicalObjectStatus, PAS5211MsgGetLogicalObjectStatusResponse, PAS5211MsgSetOnuAllocId, \
PAS5211MsgGetDbaMode, PAS5211MsgGetDbaModeResponse, PAS5211MsgSendDbaAlgorithmMsg, \
PAS5211MsgSendDbaAlgorithmMsgResponse, PAS5211EventDbaAlgorithm, PAS5211MsgSetPortIdConfig, \
PAS5211MsgSetPortIdConfigResponse, PAS5211MsgGetOnuIdByPortId, PAS5211MsgGetOnuIdByPortIdResponse, \
PAS5211SetVlanUplinkConfiguration, PAS5211SetVlanUplinkConfigurationResponse, PAS5211MsgSetOnuAllocIdResponse, \
PAS5211MsgHeader, PAS5211MsgGetOltVersionResponse, PAS5211EventOnuDeactivation, PAS5211EventAlarmNotification
#PAS5211EventAlarmNotification, PAS5211EventOnuDeactivation
from voltha.adapters.microsemi_olt.PAS5211_constants import PON_ACTIVATION_AUTH_AUTO, PON_ENABLE, PON_PORT_PON, \
PON_LOGICAL_OBJECT_TYPE_ALLOC_ID, PON_LOGICAL_OBJECT_TYPE_ONU_ID_BY_ALLOC_ID, PON_TRUE, \
PMC_OFAL_MAX_BI_DIRECTIONAL_FLOW_PER_ONU, PMC_OFAL_START_FLOW_ID_BASE, PON_DBA_MODE_RUNNING, \
PYTHAGORAS_UPDATE_AID_SLA, SLA_be_bw_gros, SLA_gr_bw_gros, SLA_gr_bw_fine, SLA_be_bw_fine, PYTHAGORAS_DBA_DATA_COS, \
PYTHAGORAS_DBA_STATUS_REPORT_NSR, PYTHAGORAS_SET_SLA_RESP_SIZE, PON_PORT_TYPE_GEM, PON_PORT_DESTINATION_CNI0, \
PON_FALSE, PON_DISABLE, PON_ALARM_LOS, PASCOMM_RETRIES, \
PON_ALARM_LOSI, PON_ALARM_DOWI, PON_ALARM_LOFI, PON_ALARM_RDII, PON_ALARM_LOAMI, PON_ALARM_LCDGI, \
PON_ALARM_LOAI, PON_ALARM_SDI, PON_ALARM_SFI, PON_ALARM_PEE, PON_ALARM_DGI, PON_ALARM_LOKI, PON_ALARM_TIWI, \
PON_ALARM_TIA, PON_ALARM_AUTH_FAILED_IN_REGISTRATION_ID_MODE, PON_ALARM_SUFI,\
PON_DOWNSTREAM_PLOAM_MESSAGE_ENCRYPTED_PORT_ID, PON_DOWNSTREAM_PLOAM_MESSAGE_ASSIGN_ALLOC_ID, \
PON_DOWNSTREAM_PLOAM_MESSAGE_CONFIGURE_PORT_ID, PON_DOWNSTREAM_PLOAM_MESSAGE_BER_INTERVAL, \
PON_DOWNSTREAM_PLOAM_MESSAGE_KEY_SWITCHING, PON_ALARM_SDI_RAISE, PON_ALARM_SDI_CLEAR, \
PON_ALARM_RAISE, PON_ALARM_CLEAR
from voltha.extensions.omci.omci_entities import CircuitPack
from voltha.extensions.omci.omci_frame import OmciFrame
from voltha.extensions.omci.omci_messages import OmciGet, OmciGetResponse, OmciAlarmNotification
from twisted.internet import reactor
from voltha.protos.events_pb2 import AlarmEvent, AlarmEventType, \
AlarmEventSeverity, AlarmEventState, AlarmEventCategory, AlarmEventCategory
log = structlog.get_logger()
_verbose = False
MAX_RETRIES = 10
def alloc_id(onu_id):
    """Yield the block of flow/alloc ids reserved for the given ONU.

    Each ONU owns PMC_OFAL_MAX_BI_DIRECTIONAL_FLOW_PER_ONU consecutive ids
    starting at PMC_OFAL_START_FLOW_ID_BASE plus its per-ONU offset.
    """
    base = PMC_OFAL_START_FLOW_ID_BASE + \
        onu_id * PMC_OFAL_MAX_BI_DIRECTIONAL_FLOW_PER_ONU
    for offset in range(0, PMC_OFAL_MAX_BI_DIRECTIONAL_FLOW_PER_ONU):
        yield base + offset
def hexstring(string):
    """Render a raw byte string as colon-separated lowercase hex pairs."""
    octets = ["{:02x}".format(ord(ch)) for ch in string]
    return ":".join(octets)
class ActivationManager(BaseOltAutomaton):
    """scapy ATMT automaton that walks a newly activated ONU through the
    PAS5211 handshake (activation auth mode, OMCI port id, ...).
    """
    # Identity of the ONU being activated (populated by parse_args).
    onu_id = None
    serial_number = None
    onu_session_id = None
    port_id = None
    channel_id = None
    # Generator of flow/alloc ids for this ONU (see module-level alloc_id()).
    alloc_id = None
    vendor = None
    olt_adapter = None
    # Retransmission count for the in-flight request (bounded by MAX_RETRIES).
    retries = 0
    def parse_args(self, debug=0, store=0,**kwargs):
        """Extract ONU identity kwargs before delegating to the base automaton.

        Pops onu_id, serial_number, onu_session_id, channel_id,
        activation_watcher and olt_adapter from kwargs; whatever remains is
        forwarded to BaseOltAutomaton.parse_args.

        Raises ValueError when any required identity field is None.
        """
        self.onu_id = kwargs.pop('onu_id')
        self.serial_number = kwargs.pop('serial_number')
        self.onu_session_id = kwargs.pop('onu_session_id')
        # The OMCI port id mirrors the onu id on this platform.
        self.port_id = self.onu_id
        self.channel_id = kwargs.pop('channel_id')
        # Lazily yields the flow/alloc ids reserved for this ONU.
        self.alloc_id = alloc_id(self.onu_id)
        self.activation_watcher = kwargs.pop('activation_watcher')
        self.olt_adapter = kwargs.pop('olt_adapter')
        if self.onu_id is None or self.serial_number is None or \
            self.onu_session_id is None or self.channel_id is None:
            raise ValueError('ONU is not well defined')
        BaseOltAutomaton.parse_args(self, debug=debug, store=store, **kwargs)
"""
Master filter: Do not allow PAS5211MsgGetOltVersionResponse
"""
def master_filter(self, pkt):
if not super(ActivationManager, self).master_filter(pkt):
return False
if OmciFrame in pkt:
if pkt[OmciFrame].message_type in (16, 17):
return False
if PAS5211MsgGetOltVersionResponse not in pkt:
if PAS5211MsgHeader in pkt:
if pkt[PAS5211MsgHeader].channel_id == self.channel_id:
return True
return False
    def create_default_data_flow_olt_config(self):
        """Placeholder for default data-flow OLT configuration (not implemented).

        Intended call sequence per the PAS/PYTHAGORAS API:
        """
        # PAS_set_onu_alloc_id
        # PYTHAGORAS_set_SLA
        # PAS_map_port_id_to_alloc_id
        # PAS_set_vlan_uplink_configuration
        pass
    # def register_activation_watcher(self, activation_watcher):
    #     self.activation_watcher = activation_watcher
"""
States
"""
@ATMT.state(initial=1)
def got_activation_event(self):
pass
@ATMT.state()
def wait_get_auth_mode(self):
pass
@ATMT.state()
def got_auth_mode(self):
pass
@ATMT.state()
def wait_omci_port_id(self):
pass
@ATMT.state()
def got_omci_port_id(self):
pass
@ATMT.state()
def wait_send_frame(self):
pass
@ATMT.state()
def wait_omci_get(self):
pass
@ATMT.state()
def wait_logical_object_status(self):
pass
@ATMT.state()
def wait_set_alloc_id(self):
pass
@ATMT.state()
def wait_dba_mode(self):
pass
@ATMT.state(final=1)
def end(self):
log.debug("activation-manager-end")
self.activation_watcher.next_activation()
@ATMT.state(error=1)
def error(self, msg):
log.error(msg)
raise self.end()
"""
Utility Methods
"""
def px(self, pkt):
return self.p(pkt, channel_id=self.channel_id,
onu_id=self.onu_id, onu_session_id=self.onu_session_id)
    def detect_onu(self):
        """Report the freshly activated ONU to the adapter/core (best effort)."""
        try:
            log.info("Activated {} ONT, channel_id={}, onu_id={}, session_id={}, serial={} ".format(
                self.vendor, self.channel_id, self.onu_id, self.onu_session_id, hexstring(self.serial_number)))
            # 32 ONUs per PON channel; +1 keeps parent port numbers 1-based.
            parent_port = self.channel_id * 32 + (self.onu_id + 1)
            self.olt_adapter.add_onu_info(parent_port, self.onu_id, self.onu_session_id)
            self.device.onu_detected(
                parent_port_no=parent_port,
                child_device_type='%s_onu' % self.vendor.lower(),
                onu_id=self.onu_id,
                serial_number=hexstring(self.serial_number),
                onu_session_id=self.onu_session_id,
                channel_id=self.channel_id
            )
        except Exception as e:
            # Deliberate broad catch: a reporting failure must not kill the
            # automaton — the ONU activation itself already succeeded.
            log.exception('detect-onu-failed', e=e)
            # raise e
"""
Transitions
"""
# Transition from got_activation_event
@ATMT.condition(got_activation_event)
def send_get_activation_auth_mode(self):
log.debug('PAS5211MsgGetActivationAuthMode, channel_id={}'.format(self.channel_id))
auth_mode = PAS5211MsgGetActivationAuthMode()
self.send(self.p(auth_mode, channel_id=self.channel_id))
raise self.wait_get_auth_mode()
# Transitions from wait_get_auth_mode
@ATMT.timeout(wait_get_auth_mode, 3)
def timeout_get_auth_mode(self):
if self.retries < MAX_RETRIES:
self.retries += 1
self.send_get_activation_auth_mode()
else:
raise self.error('Could not get auth mode for OLT {}; dropping activation event for {}'
.format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_get_auth_mode)
    def wait_for_get_auth_mode(self, pkt):
        # Only the AUTO activation-auth mode is supported; anything else
        # aborts this activation attempt.
        log.debug('wait_for_get_auth_mode')
        if PAS5211MsgGetActivationAuthModeResponse in pkt:
            log.debug('PAS5211MsgGetActivationAuthModeResponse')
            pkt = pkt[PAS5211MsgGetActivationAuthModeResponse]
            if pkt.mode == PON_ACTIVATION_AUTH_AUTO:
                raise self.got_auth_mode()
            else:
                # TODO There may be something that can be done here.
                # See line 2497 of PAS_onu_mode_change_thread.c
                log.error(
                    'Got unknown auth mode {}; dropping activation event'.format(pkt.mode))
                raise self.end()
    # Transitions from got auth_mode
    @ATMT.condition(got_auth_mode)
    def send_omci_port_id(self):
        # Enable the OMCI port for this ONU so management frames can flow.
        log.debug('send_omci_port_id')
        omci_port_id = PAS5211MsgSetOnuOmciPortId(
            port_id=self.port_id, activate=PON_ENABLE)
        self.send(self.px(omci_port_id))
        raise self.wait_omci_port_id()
    # Transitions from wait_omci_port_id
    @ATMT.timeout(wait_omci_port_id, 3)
    def timeout_omci_port_id(self):
        # Retry setting the OMCI port id up to MAX_RETRIES before erroring out.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            self.send_omci_port_id()
        else:
            raise self.error('Could not set omci port id for OLT {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_omci_port_id)
    def wait_for_omci_port_id(self, pkt):
        # Advance only when the response addresses *this* ONU: opcode,
        # onu_id, onu_session_id and channel_id must all match.
        log.debug('wait_for_omci_port_id')
        if PAS5211MsgSetOnuOmciPortIdResponse in pkt:
            log.debug('PAS5211MsgSetOnuOmciPortIdResponse')
            msg_header = pkt[PAS5211MsgHeader]
            if msg_header.opcode == PAS5211MsgSetOnuOmciPortIdResponse.opcode and \
                    msg_header.onu_id == self.onu_id and msg_header.onu_session_id == self.onu_session_id and \
                    msg_header.channel_id == self.channel_id:
                raise self.got_omci_port_id()
    # Transitions from got_omci_port_id
    @ATMT.condition(got_omci_port_id)
    def send_omci_identity_frame(self):
        # Query the CircuitPack managed entity (instance 257, attribute mask
        # 2048) over OMCI to learn the ONU's vendor id.
        log.debug('send_omci_identity_frame')
        message = OmciGet(entity_class=CircuitPack.class_id, entity_id=257,
                          attributes_mask=2048)
        # TODO fix transaction id
        frame = OmciFrame(transaction_id=0, message_type=OmciGet.message_id,
                          omci_message=message)
        omci_frame = PAS5211MsgSendFrame(port_type=PON_PORT_PON, port_id=self.port_id,
                                         management_frame=PON_ENABLE, frame=frame)
        self.send(self.px(omci_frame))
        raise self.wait_send_frame()
    # Transitions from wait_send_frame
    @ATMT.timeout(wait_send_frame, 3)
    def timeout_send_frame(self):
        # Re-send the OMCI identity frame up to MAX_RETRIES.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            self.send_omci_identity_frame()
        else:
            raise self.error('Could not send omci to OLT {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_send_frame)
    def wait_for_send_frame(self, pkt):
        # The OLT accepted the frame; now wait for the actual OMCI reply.
        log.debug('wait_for_send_frame')
        if PAS5211MsgSendFrameResponse in pkt:
            log.debug('PAS5211MsgSendFrameResponse')
            raise self.wait_omci_get()
    # Transitions from wait_omci_get
    @ATMT.timeout(wait_omci_get, 3)
    def timeout_omci_get(self):
        # No OMCI reply yet: re-send the identity query up to MAX_RETRIES.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            self.send_omci_identity_frame()
        else:
            raise self.error('Did not receive omci get event from OLT {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_omci_get)
    def wait_for_omci_get(self, pkt):
        # On the OMCI reply: record the ONU vendor, draw the next alloc-id
        # from the generator and ask the OLT whether it is already in use.
        log.debug('wait_for_omci_get')
        if OmciGetResponse in pkt:
            log.debug('OmciGetResponse')
            self.allocId = self.alloc_id.next()
            #self.vendor = pkt['OmciGetResponse'].data['vendor_id']
            self.vendor = pkt[OmciGetResponse].data['vendor_id']
            log.debug('wait_for_omci_get vendor_id:' + self.vendor)
            l_obj_status = PAS5211MsgGetLogicalObjectStatus(
                type=PON_LOGICAL_OBJECT_TYPE_ALLOC_ID,
                value=self.allocId)
            self.send(self.p(l_obj_status, channel_id=self.channel_id))
            raise self.wait_logical_object_status()
    # Transitions from wait_logical_object_status
    @ATMT.timeout(wait_logical_object_status, 3)
    def timeout_logical_object_status(self):
        # Re-issue the alloc-id status query up to MAX_RETRIES.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            l_obj_status = PAS5211MsgGetLogicalObjectStatus(
                type=PON_LOGICAL_OBJECT_TYPE_ALLOC_ID,
                value=self.allocId)
            self.send(self.p(l_obj_status, channel_id=self.channel_id))
        else:
            raise self.error('Did not receive info about alloc id status for {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
@ATMT.receive_condition(wait_logical_object_status)
def wait_for_logical_object_status(self, pkt):
log.debug('wait_for_logical_object_status')
if PAS5211MsgGetLogicalObjectStatusResponse in pkt:
pkt = pkt[PAS5211MsgGetLogicalObjectStatusResponse]
log.debug('PAS5211MsgGetLogicalObjectStatusResponse pkt.type=' + str(pkt.type) + ' pkt.return_value=' + str(
pkt.return_value))
if pkt.type == PON_LOGICAL_OBJECT_TYPE_ALLOC_ID: # PASCOMM_GPON_api_parser.c line:11994
if pkt.return_value == 0:
log.debug(
'PAS5211MsgGetLogicalObjectStatusResponse (pkt.return_value == 0)')
# alloc-id not set
set_alloc_id = PAS5211MsgSetOnuAllocId(
alloc_id=self.allocId,
allocate=PON_ENABLE
)
# self.onu_id = -1
self.port_id = self.allocId
self.send(self.px(set_alloc_id))
raise self.wait_set_alloc_id()
else:
log.debug(
'PAS5211MsgGetLogicalObjectStatusResponse (pkt.return_value != 0)')
l_obj_status = PAS5211MsgGetLogicalObjectStatus(
type=PON_LOGICAL_OBJECT_TYPE_ONU_ID_BY_ALLOC_ID,
value=self.allocId)
self.send(self.px(l_obj_status))
raise self.wait_logical_object_status()
elif pkt.type == PON_LOGICAL_OBJECT_TYPE_ONU_ID_BY_ALLOC_ID:
log.debug(
'PAS5211MsgGetLogicalObjectStatusResponse (pkt.type == PON_LOGICAL_OBJECT_TYPE_ALLOC_ID)')
# That's your onu id.
self.onu_id = pkt.return_value
# FIXME Need to iterate to get the port id as
# in PMC_OFAL_flow_db.c line 656
set_alloc_id = PAS5211MsgSetOnuAllocId(
alloc_id=self.allocId,
allocate=PON_ENABLE
)
self.send(self.px(set_alloc_id))
raise self.wait_set_alloc_id() # are we done? probably not but check
    # Transitions from wait_set_alloc_id
    @ATMT.timeout(wait_set_alloc_id, 3)
    def timeout_set_alloc_id(self):
        # Retry assigning the alloc-id up to MAX_RETRIES.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            set_alloc_id = PAS5211MsgSetOnuAllocId(
                alloc_id=self.allocId,
                allocate=PON_ENABLE
            )
            self.send(self.px(set_alloc_id))
        else:
            raise self.error('Was not able to set alloc id for {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_set_alloc_id)
    def wait_for_set_alloc_id(self, pkt):
        # Alloc-id accepted; query the channel's DBA mode next.
        log.debug('wait_for_set_alloc_id')
        if PAS5211MsgSetOnuAllocIdResponse in pkt:
            self.send(self.p(PAS5211MsgGetDbaMode(),
                             channel_id=self.channel_id))
            raise self.wait_dba_mode()
    # Transitions from wait for dba mode (See Pythagoras_api.c line 344 &
    # PMC_OFAL.c 2062)
    @ATMT.timeout(wait_dba_mode, 3)
    def timeout_wait_dba_mode(self):
        # Re-query the DBA mode up to MAX_RETRIES.
        if self.retries < MAX_RETRIES:
            self.retries += 1
            self.send(self.p(PAS5211MsgGetDbaMode(),
                             channel_id=self.channel_id))
        else:
            raise self.error('Did not get DBA mode for {}; dropping activation event for {}'
                             .format(self.target, hexstring(self.serial_number)))
    @ATMT.receive_condition(wait_dba_mode)
    def wait_for_dba_mode(self, pkt):
        # DBA must be running before the ONU is announced upstream;
        # otherwise the whole activation is abandoned.
        if PAS5211MsgGetDbaModeResponse in pkt:
            pkt = pkt[PAS5211MsgGetDbaModeResponse]
            if pkt.dba_mode != PON_DBA_MODE_RUNNING:
                raise self.error('DBA is not running; dropping activation event for {}'
                                 .format(hexstring(self.serial_number)))
            self.detect_onu()
            raise self.end()
class ActivationWatcher(BaseOltAutomaton):
    """Long-lived automaton that serialises ONU activations and forwards
    deactivation and alarm events to the device layer.

    master_filter admits only ONU activation/deactivation events and
    (OMCI) alarm notifications; everything else is rejected.
    """
    # Queue of activation packets awaiting processing. NOTE(review): these are
    # class-level attributes, shared across instances -- confirm a single
    # watcher exists per OLT.
    pending_activation_events = []
    # True while an ActivationManager run is in flight.
    activation_lock = False
    olt_adapter = None
    def master_filter(self, pkt):
        # Defer to the base filter first, then whitelist the event types
        # this watcher handles.
        if not super(ActivationWatcher, self).master_filter(pkt):
            return False
        if PAS5211EventOnuActivation in pkt:
            return True
        elif PAS5211EventOnuDeactivation in pkt:
            return True
        elif PAS5211EventAlarmNotification in pkt:
            return True
        elif OmciAlarmNotification in pkt:
            return True
        return False
    # Callback from activation manager
    def next_activation(self):
        # Pop the most recent pending event (LIFO) and activate it; clear
        # the lock once the queue is drained.
        log.debug("next-activation")
        if self.pending_activation_events:
            self.activation_lock=True
            # Retrieve last element from list
            pkt = self.pending_activation_events.pop()
            self.activate_onu(pkt)
        else:
            self.activation_lock = False
    def parse_args(self, debug=0, store=0,**kwargs):
        # Extract our extra constructor argument before the base class
        # validates the remaining kwargs.
        self.olt_adapter = kwargs.pop('olt_adapter')
        BaseOltAutomaton.parse_args(self, **kwargs)
    def activate_onu(self, pkt):
        # Spawn a background ActivationManager for the ONU described in pkt.
        log.debug("activate-onu")
        msg_header = pkt[PAS5211MsgHeader]
        msg = pkt[PAS5211EventOnuActivation]
        log.debug('{} activated'.format(hexstring(msg.serial_number)))
        onu_activation = ActivationManager(iface=self.iface, target=self.target, comm=self.comm,
                                           onu_id=msg_header.onu_id, serial_number=msg.serial_number,
                                           onu_session_id=msg_header.onu_session_id,
                                           channel_id=msg_header.channel_id, device=self.device, activation_watcher=self, olt_adapter=self.olt_adapter)
        onu_activation.runbg()
    def deactivate_onu(self, pkt):
        # Forward a deactivation event to the device layer (best effort).
        log.debug("deactivate-onu")
        msg_header = pkt[PAS5211MsgHeader]
        try:
            log.debug("Deactivating ONT, channel_id={}, onu_id={}, session_id={}".format(
                msg_header.channel_id, msg_header.onu_id, msg_header.onu_session_id))
            self.device.deactivate_onu(channel_id=msg_header.channel_id,
                                       onu_id=msg_header.onu_id,
                                       onu_session_id=msg_header.onu_session_id)
            log.debug("Deactivated ONT, channel_id={}, onu_id={}, session_id={}".format(
                msg_header.channel_id, msg_header.onu_id, msg_header.onu_session_id))
        except Exception as e:
            log.exception('deactivate-onu failed', e=e)
    """
    States
    """
    @ATMT.state(initial=1)
    def wait_onu_activation_event(self):
        # Initial (and only working) state: loop here forever handling events.
        log.debug('activation-watcher-start')
    @ATMT.state(final=1)
    def end(self):
        log.debug('activation-watcher-end')
    """
    Transitions
    """
    # Transitions from wait_onu_activation_event
    @ATMT.receive_condition(wait_onu_activation_event)
    def wait_for_onu_activation_event(self, pkt):
        # Dispatch on event type; alarm-processing failures are contained so
        # the watcher keeps running. Always loops back into the same state.
        if PAS5211EventOnuActivation in pkt:
            log.debug('PAS5211EventOnuActivation Received')
            # Queue the event; start it immediately only if no activation is
            # currently in flight.
            self.pending_activation_events.append(pkt)
            if not self.activation_lock:
                self.next_activation()
        elif PAS5211EventOnuDeactivation in pkt:
            log.debug('PAS5211EventOnuDeactivation Received')
            self.deactivate_onu(pkt)
        elif PAS5211EventAlarmNotification in pkt:
            msg = pkt[PAS5211EventAlarmNotification]
            log.debug('PAS5211EventAlarmNotification Received', code=msg.code, parameter1= msg.parameter1, parameter2= msg.parameter2,
                      parameter3= msg.parameter3, parameter4= msg.parameter4)
            try:
                self.process_alarm(pkt)
            except Exception as e:
                log.exception('wait-for-onu-activation-alarm-event-error', e=e)
        elif OmciAlarmNotification in pkt:
            log.debug('OmciAlarmNotification Received')
            try:
                self.process_omci_alarm(pkt)
            except Exception as e:
                log.exception('wait-for-onu-activation-omci-alarm-event-error', e=e)
        else:
            pass
        raise self.wait_onu_activation_event()
    #Method to parse alarm and send it to DeviceManager
    def process_alarm(self, pkt):
        """Translate a PAS5211 alarm notification into a Voltha alarm dict
        and publish it via the device manager.

        Unknown alarm codes (or invalid parameters for LOAI/SDI) are logged
        and dropped without publishing.
        """
        log.debug('proccess-alarm-start')
        msg_header = pkt[PAS5211MsgHeader]
        msg = pkt[PAS5211EventAlarmNotification]
        code = msg.code
        ctx = {
            'alarm_code': str(code),
        }
        alarm = dict(
            id='voltha.{}.{}.olt'.format(self.device.adapter_agent.adapter_name, self.device.device.id),
            resource_id=self.device.device.id,
            type=AlarmEventType.EQUIPMENT,
            category=AlarmEventCategory.OLT,
            severity=AlarmEventSeverity.MAJOR,
            context=ctx
        )
        # Negative header fields mean "not applicable"; only record valid ones.
        if msg_header.onu_id >= 0:
            ctx['onu_id'] = str(msg_header.onu_id)
        if msg_header.channel_id >= 0:
            ctx['channel_id'] = str(msg_header.channel_id)
        if msg_header.onu_session_id >= 0:
            ctx['onu_session_id'] = str(msg_header.onu_session_id)
        # Per-code description and state. For most codes parameter2 carries
        # the raise/clear state -- NOTE(review): confirm against the PAS5211
        # alarm parameter documentation.
        if code == PON_ALARM_LOS:
            alarm['description'] = 'Loss of signal: OLT does not receive transmissions in the upstream'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LOSI:
            alarm['description'] = 'Loss of signal for ONUi: no signal from the ONU when expected'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_DOWI:
            alarm['description'] = 'Loss of signal for ONUi: no signal from the ONU when expected'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LOFI:
            alarm['description'] = 'Loss of frame of ONUi: no valid optical signal is received from the ONU'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_RDII:
            alarm['description'] = 'Remote Defect Indication of ONUi: OLT transmissions is received with defect at the ONUi'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LOAMI:
            alarm['description'] = 'Loss of PLOAM for ONUi: 3 messages of ONU are missing after OLT sends PLOAMu request'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LCDGI:
            alarm['description'] = 'Loss of GEM channel delineation: GEM fragment delineation of ONUi is lost'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LOAI:
            alarm['description'] = 'Loss of acknowledge with ONUi: OLT does not receive ack from ONUi'
            # parameter1 identifies the downstream PLOAM message that went
            # unacknowledged; only a known subset is accepted.
            if msg.parameter1 in (PON_DOWNSTREAM_PLOAM_MESSAGE_ENCRYPTED_PORT_ID, PON_DOWNSTREAM_PLOAM_MESSAGE_ASSIGN_ALLOC_ID,
                                  PON_DOWNSTREAM_PLOAM_MESSAGE_CONFIGURE_PORT_ID, PON_DOWNSTREAM_PLOAM_MESSAGE_BER_INTERVAL,
                                  PON_DOWNSTREAM_PLOAM_MESSAGE_KEY_SWITCHING):
                ctx['downstream_ploam_message_id'] = str(msg.parameter1)
                alarm['state'] = PON_ALARM_RAISE
            else:
                log.error('Error, ignored OLT Alarm {} from OLT device {} because Invalid PLOAM message id in OLT device'.format(code, self.device))
                return
        elif code == PON_ALARM_SDI:
            alarm['description'] = 'Signal Degraded of ONUi: raised when the upstream BER of ONUi goes below certain level'
            if msg.parameter1 in (PON_ALARM_SDI_RAISE, PON_ALARM_SDI_CLEAR):
                ctx['onu_id'] = str(msg_header.onu_id)
                ctx['parameter'] = str(msg.parameter1)
                alarm['state'] = PON_ALARM_RAISE
            else:
                log.error('Error, ignored OLT Alarm {} from OLT device {} because Invalid parameter of alarm SDI'.format(code, self.device))
                return
        elif code == PON_ALARM_SFI:
            alarm['description'] = 'Signal Fail of ONUi: raised when the upstream of ONUi becomes greater than some level'
            alarm['state'] = msg.parameter1
        elif code == PON_ALARM_PEE:
            alarm['description'] = 'Physical Equipment Error of ONUi: raised when the OLT receives a PEE message from the ONU'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_DGI:
            alarm['description'] = 'Dying Gasp of ONUi: raised when the OLT receives DG message from ONUi'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_LOKI:
            alarm['description'] = 'Loss of key synch with ONUi: Key transmission from ONU fails 3 times'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_TIWI:
            alarm['description'] = 'Transmission interference warning: raised when the drift of ONU transmissions exceeds specified threshold'
            alarm['state'] = msg.parameter2
        elif code == PON_ALARM_TIA:
            alarm['description'] = 'Transmission Interference Alarm: an ONU turns on its laser at another ONUs time'
            alarm['state'] = msg.parameter2
        else:
            log.error('Error, unsupported OLT Alarm {} received from OLT device {}'.format(code, self.device))
            return
        log.warn('Alarm', alarm=alarm)
        self.device.publish_alarm(alarm)
        log.debug('proccess-alarm-stop')
    def process_omci_alarm(self, pkt):
        """Translate an OMCI alarm notification into a Voltha alarm dict
        and publish it via the device manager."""
        log.debug('proccess-omci-alarm-start')
        msg_header = pkt[PAS5211MsgHeader]
        msg_omci_alarm = pkt[OmciAlarmNotification]
        ctx = {
            'entity_class': str(msg_omci_alarm.entity_class),
            'entity_id': str(msg_omci_alarm.entity_id),
            'alarm_bit_map': str(msg_omci_alarm.alarm_bit_map),
            'alarm_sequence_number': str(msg_omci_alarm.alarm_sequence_number)
        }
        # Negative header fields mean "not applicable"; only record valid ones.
        if msg_header.onu_id >= 0:
            ctx['onu_id'] = str(msg_header.onu_id)
        if msg_header.channel_id >= 0:
            ctx['channel_id'] = str(msg_header.channel_id)
        if msg_header.onu_session_id >= 0:
            ctx['onu_session_id'] = str(msg_header.onu_session_id)
        alarm = dict(
            id='voltha.{}.{}.ont'.format(self.device.adapter_agent.adapter_name, self.device.device.id),
            resource_id=self.device.device.id,
            type=AlarmEventType.EQUIPMENT,
            category=AlarmEventCategory.OLT,
            context=ctx
        )
        self.device.publish_alarm(alarm)
        log.warn('Alarm', alarm=alarm)
        log.debug('proccess-alarm-stop')
| |
"""
File: shell.py
Description: shell command source
Author: Oleg Khalidov <brooth@gmail.com>
License: MIT
"""
from pprint import pprint
from locale import getpreferredencoding
from sys import platform
from .far_glob import load_ignore_rules,far_glob,GlobError,IgnoreFileError,rg_rules_glob,rg_ignore_globs
import logging
import subprocess
import re
import os
import tempfile
import pathlib
import json
from json import JSONDecodeError
logger = logging.getLogger('far')
def search(ctx, args, cmdargs):
    """Glob candidate files, run the configured search command over them and
    collect matches.

    ctx     -- search context: 'source', 'pattern', 'regex', 'case_sensitive',
               'file_mask', 'cwd', 'limit', 'range'.
    args    -- command options: 'cmd' (template list, required), 'submatch',
               'max_columns', 'ignore_files', 'glob_mode' ('far'|'rg'|'native'),
               'expand_cmdargs', 'items_file_min'.
    cmdargs -- extra command-line args appended when 'expand_cmdargs' is set.

    Returns {'items': [...]} (or {'items_file': path} for big result sets)
    plus accumulated 'warning' text, or {'error': ...} on failure.

    Fixes vs. previous revision:
    * JSONDecodeError was concatenated directly into a log string (TypeError
      that killed the read loop) -- now str(err).
    * A missing 'bytes' key raised an uncaught KeyError inside the KeyError
      handler (an except clause of a try cannot catch exceptions raised in a
      sibling handler) -- now handled with a nested try.
    """
    logger.debug('search(%s, %s, %s)', str(ctx), str(args), str(cmdargs))
    final_result = {'warning': ''}
    if not args.get('cmd'):
        return {'error': 'no cmd in args'}
    source = ctx['source']
    pattern = ctx['pattern']
    regex = ctx['regex']
    case_sensitive = ctx['case_sensitive']
    file_mask = ctx['file_mask']
    submatch_type = args.get('submatch')
    root = ctx['cwd']
    limit = int(ctx['limit'])
    # NOTE(review): max_columns is used in int comparisons below; assumes the
    # caller always supplies it as an int -- confirm.
    max_columns = args.get('max_columns')
    ignore_files = args.get('ignore_files')
    glob_mode = args.get('glob_mode', 'far')
    rules = file_mask.split(',')
    native_glob_args = None
    is_win32 = (platform == 'win32')
    preferred_encoding = getpreferredencoding()
    # Perform file globbing if non-native
    if glob_mode == 'far':
        # Use built-in globbing strategy
        ignore_rules = []
        for ignore_file in ignore_files:
            try:
                ignore_rules.extend(
                    load_ignore_rules(ignore_file)
                )
            except IgnoreFileError as e:
                # A bad ignore file is non-fatal: warn and keep globbing.
                final_result['warning'] += ' | Invalid ignore-rule files. '+str(e)
        try:
            files = far_glob(root, rules, ignore_rules)
        except GlobError as e:
            return {'error': 'Invalid glob expression. '+str(e)}
        if len(files) == 0:
            return {'error': 'No files matching the glob expression'}
    elif glob_mode == 'rg':
        # Use ripgrep to glob
        logger.debug(f'Globbing with ripgrep: rg --files {rg_rules_glob(rules)} {rg_ignore_globs(ignore_files)}')
        files = os.popen(f'rg --files {rg_rules_glob(rules)} {rg_ignore_globs(ignore_files)}').read().split('\n')
        # Drop the empty trailing entry produced by the final newline.
        if len(files) and files[-1] == '':
            files.pop()
        if len(files) == 0:
            return {'error': 'No files matching the glob expression'}
    elif glob_mode == 'native':
        # Pass the mask directly to the search tool.
        # For rg, the file mask is converted into -g option glob rules. For everything else,
        # the mask is passed directly as an argument (and typically treated as a directory).
        if source in ('rg', 'rgnvim'):
            native_glob_args = rg_rules_glob(rules, False) + rg_ignore_globs(ignore_files, False)
    else:
        return {'error': 'Invalid glob_mode'}
    # Build search command
    cmd = []
    if glob_mode != 'native':
        # Run each for each globbed file: the NUL-separated file list is fed
        # to xargs on stdin below.
        cmd.append('xargs')
        cmd.append('-0')
    for c in args['cmd']:
        # The {file_mask} placeholder is only meaningful in native mode (and
        # only when rg-style -g args were not generated instead).
        if c != '{file_mask}' or (glob_mode == 'native' and file_mask and not native_glob_args):
            cmd.append(c.format(limit=limit, pattern=pattern, file_mask=file_mask))
    if args.get('expand_cmdargs', '0') != '0':
        cmd += cmdargs
    if native_glob_args:
        cmd += native_glob_args
    logger.debug('cmd:' + str(cmd))
    # Determine how to handle stdin for the command
    if glob_mode != 'native':
        proc_stdin = subprocess.PIPE
    else:
        proc_stdin = subprocess.DEVNULL
    # Execute search command
    try:
        proc = subprocess.Popen(cmd, cwd=ctx['cwd'], stdin=proc_stdin,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        return {'error': str(e)}
    # If non-native glob, pipe the file list to stdin for xargs to handle
    if glob_mode != 'native':
        sep = '\0'
        if is_win32:
            # Normalize backslashes so the spawned tool sees forward slashes.
            proc.stdin.write((sep.join(files).replace("\\", "/") + sep).encode(preferred_encoding))
        else:
            proc.stdin.write((sep.join(files) + sep).encode(preferred_encoding))
        proc.stdin.close()
    logger.debug('type(proc) = ' + str(type(proc)))
    range_ = tuple(ctx['range'])
    result = {}
    if source == 'rg' or source == 'rgnvim' :
        # ripgrep --json mode: one JSON object per line on stdout.
        while limit > 0:
            line = proc.stdout.readline()
            try:
                line = line.decode('utf-8').rstrip()
            except UnicodeDecodeError:
                logger.debug("UnicodeDecodeError: line = line.decode('utf-8').rstrip() failed, line:")
                continue
            if not line:
                # Empty read: either the process ended, or output is pending.
                if len(result) == 0:
                    err = proc.stderr.readline()
                    if err:
                        err = err.decode('utf-8')
                        logger.debug('error:' + err)
                        return {'error': err}
                if proc.poll() is not None:
                    logger.debug('end of proc. break')
                    break
                continue
            logger.debug('proc readline: ' + line)
            try:
                item = json.loads(line)
            except JSONDecodeError as err:
                # str(err): concatenating the exception object itself raises
                # TypeError and would abort the whole read loop.
                logger.debug('json error: ' + str(err))
                continue
            if type(item) != dict or 'type' not in item:
                logger.debug('json error: item is not dict or item has no key "type". item =' + str(item))
                continue
            if item['type'] == 'match':
                data = item['data']
                file_name = data['path']['text']
                lnum = data['line_number']
                # rg emits 'text' for valid UTF-8 lines, 'bytes' otherwise.
                # The nested try is required: an except clause cannot catch a
                # KeyError raised inside a sibling handler of the same try.
                try:
                    text = data['lines']['text']
                except KeyError:
                    try:
                        text = data['lines']['bytes']
                    except KeyError:
                        logger.debug("item['data']['lines'] has neither key 'text' nor key 'bytes'. item =" + str(item))
                        continue
                if len(text) > max_columns:
                    logger.debug(
                        "File '{file_name}' line {lnum} is too long, longer than max_column {max_columns}."
                        .format(file_name=file_name, lnum=lnum, max_columns=max_columns))
                    continue
                text = text.split('\n')[0]
                text = text.rstrip()
                for submatch in data['submatches']:
                    match = submatch['match']['text']
                    cnum = submatch['start'] + 1
                    item_idx = (file_name, lnum, cnum)
                    # NOTE(review): 'one_file_result' is never defined in this
                    # function, so this dedup branch is dead unless a module
                    # global provides it -- confirm and remove if vestigial.
                    if 'one_file_result' in locals() or 'one_file_result' in globals():
                        if item_idx in one_file_result:
                            continue
                        else:
                            one_file_result.append(item_idx)
                    if (range_[0] != -1 and range_[0] > lnum) or \
                            (range_[1] != -1 and range_[1] < lnum):
                        continue
                    if not file_name in result:
                        result[file_name] = {
                            'fname': file_name,
                            'items': []
                        }
                    item_ctx = {
                        'lnum': lnum,
                        'cnum': cnum,
                        'text': text,
                        'match': match
                    }
                    result[file_name]['items'].append(item_ctx)
                    limit -= 1
    else:
        # Plain grep-style mode: parse "file:line:col:text" records.
        if submatch_type == 'first':
            # Pre-compile the pattern once for the per-line submatch scan.
            if regex != '0':
                try:
                    if case_sensitive == '0':
                        cpat = re.compile(pattern, re.IGNORECASE)
                    else:
                        cpat = re.compile(pattern)
                except Exception as e:
                    return {'error': 'invalid pattern: ' + str(e) }
        while limit > 0:
            line = proc.stdout.readline()
            try:
                line = line.decode('utf-8').rstrip()
            except UnicodeDecodeError:
                logger.debug("UnicodeDecodeError: line = line.decode('utf-8').rstrip() failed, line:")
                continue
            if not line:
                if len(result) == 0:
                    err = proc.stderr.readline()
                    if err:
                        err = err.decode('utf-8')
                        logger.debug('error:' + err)
                        return {'error': err}
                if proc.poll() is not None:
                    logger.debug('end of proc. break')
                    continue
                continue
            # NOTE(review): splitting on ':' breaks for Windows drive-letter
            # paths (C:\...) -- confirm grep output format on win32.
            items = re.split(':', line, 3)
            if len(items) != 4:
                logger.error('broken line:' + line)
                continue
            file_name = items[0]
            lnum = int(items[1])
            cnum = int(items[2])
            text = items[3]
            if (range_[0] != -1 and range_[0] > lnum) or \
                    (range_[1] != -1 and range_[1] < lnum):
                continue
            if len(text) > max_columns:
                logger.debug(
                    "File '{file_name}' line {lnum} is too long, longer than max_column {max_columns}."
                    .format(file_name=file_name, lnum=lnum, max_columns=max_columns))
                continue
            item_idx = (file_name, lnum, cnum)
            # NOTE(review): same dead 'one_file_result' check as above.
            if 'one_file_result' in locals() or 'one_file_result' in globals():
                if item_idx in one_file_result:
                    continue
                else:
                    one_file_result.append(item_idx)
            if not file_name in result:
                file_ctx = {
                    'fname': file_name,
                    'items': []
                }
                result[file_name] = file_ctx
            file_ctx = result[file_name]
            item_ctx = {}
            item_ctx['text'] = text
            item_ctx['lnum'] = lnum
            item_ctx['cnum'] = cnum
            file_ctx['items'].append(item_ctx)
            limit -= 1
            if submatch_type == 'first':
                # The tool reports only the first match per line; scan the
                # rest of the line ourselves. cnum is a byte offset, so map
                # it to a character offset before searching.
                byte_num = item_ctx['cnum']
                char_num = len( text.encode('utf-8')[:byte_num-1].decode('utf-8') )
                move_cnum = char_num + 1
                if regex == '0':
                    while True:
                        next_item_ctx = {}
                        next_item_ctx['text'] = text
                        next_item_ctx['lnum'] = int(lnum)
                        if case_sensitive == '0':
                            next_char_num = text.lower().find(pattern.lower(), move_cnum)
                        else:
                            next_char_num = text.find(pattern, move_cnum)
                        if next_char_num == -1:
                            break
                        move_cnum = next_char_num + 1
                        # Report cnum as a 1-based byte offset, matching the tool.
                        prefix = text[:next_char_num]
                        next_item_ctx['cnum'] = len(prefix.encode('utf-8')) + 1
                        file_ctx['items'].append(next_item_ctx)
                        limit -= 1
                        if limit <= 0:
                            break
                else:
                    for cp in cpat.finditer(text, move_cnum):
                        next_item_ctx = {}
                        next_item_ctx['text'] = text
                        next_item_ctx['lnum'] = int(lnum)
                        prefix = text[:cp.span()[0]]
                        next_item_ctx['cnum'] = len(prefix.encode('utf-8')) + 1
                        file_ctx['items'].append(next_item_ctx)
                        limit -= 1
                        if limit <= 0:
                            break
    # The tool may still be running (limit reached); stop it explicitly.
    try:
        proc.terminate()
    except Exception as e:
        logger.error('failed to terminate proc: ' + str(e))
    # Large result sets are written to a temp file instead of being returned
    # inline, to keep the response payload small.
    if int(ctx['limit']) - limit >= args.get('items_file_min', 250):
        with tempfile.NamedTemporaryFile(mode='w', delete=False, encoding='utf-8') as fp:
            for file_ctx in result.values():
                json.dump(file_ctx, fp, ensure_ascii=False)
                fp.write('\n')
            logger.debug('items_file:' + fp.name)
            final_result['items_file'] = fp.name
    else:
        final_result['items'] = list(result.values())
    return final_result
| |
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, CustomPaginationAdmin,
CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, admin.site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
ia.list_filter, ia.date_hierarchy, ia.search_fields,
ia.list_select_related, ia.list_per_page,
ia.list_max_show_all, ia.list_editable, ia)
self.assertEqual(cl.queryset.query.select_related, False)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">(None)</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Basic ManyToMany.
    """
    genre = Genre.objects.create(name='Blues')
    revue = Band.objects.create(name='B.B. King Review', nr_of_members=11)
    # Adding the same genre twice must not duplicate the band in results.
    revue.genres.add(genre)
    revue.genres.add(genre)
    model_admin = BandAdmin(Band, admin.site)
    request = self.factory.get('/band/', data={'genres': genre.pk})
    changelist = ChangeList(request, Band, model_admin.list_display,
                            model_admin.list_display_links,
                            model_admin.list_filter,
                            model_admin.date_hierarchy,
                            model_admin.search_fields,
                            model_admin.list_select_related,
                            model_admin.list_per_page,
                            model_admin.list_max_show_all,
                            model_admin.list_editable, model_admin)
    changelist.get_results(request)
    # The band must show up exactly once despite the duplicate m2m rows.
    self.assertEqual(changelist.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. With an intermediate model.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    # Two memberships link the same musician to the same group; without
    # distinct() the join over the through table would yield the group twice.
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = GroupAdmin(Group, admin.site)
    request = self.factory.get('/group/', data={'members': lead.pk})
    cl = ChangeList(request, Group, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    # The same musician appears twice through the intermediate model.
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, admin.site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    cl = ChangeList(request, Quartet, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    # Two invitations for the same player would duplicate the band row
    # in the filtered queryset without distinct().
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, admin.site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    cl = ChangeList(request, ChordsBand, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regressions tests for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    mother = Parent.objects.create(name='Mary')
    # Two children sharing a name: filtering on child__name would match
    # the parent twice without distinct().
    Child.objects.create(parent=mother, name='Daniel')
    Child.objects.create(parent=mother, name='Daniel')
    model_admin = ParentAdmin(Parent, admin.site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    changelist = ChangeList(request, Parent, model_admin.list_display,
                            model_admin.list_display_links,
                            model_admin.list_filter,
                            model_admin.date_hierarchy,
                            model_admin.search_fields,
                            model_admin.list_select_related,
                            model_admin.list_per_page,
                            model_admin.list_max_show_all,
                            model_admin.list_editable, model_admin)
    # distinct() must have collapsed the duplicate join rows.
    self.assertEqual(changelist.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regressions tests for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both children match a search for 'daniel', so the parent would be
    # joined twice without distinct().
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, admin.site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    # 60 children total: 30 plain and 30 'filtered' ones.
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')
    # Test default queryset
    m = ChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, m.list_max_show_all,
                    m.list_editable, m)
    # The stock admin sees all 60 children, split into 6 pages.
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
    # Test custom queryset
    m = FilteredChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, m.list_max_show_all,
                    m.list_editable, m)
    # The filtering admin exposes only 30 children; the paginator must be
    # built from that restricted queryset, not the full one.
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    User.objects.create_superuser(
        username='super', email='super@localhost', password='secret')
    self.client.login(username='super', password='secret')
    event = Event.objects.create(date=datetime.date.today())
    url = reverse('admin:admin_changelist_event_changelist')
    response = self.client.get(url)
    # The localized rendering must appear; the raw text form must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # Test with user 'noparents'
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    # For this user the parent column must be hidden entirely.
    self.assertNotContains(response, 'Parent object')
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])
    # Test with user 'parents'
    m = DynamicListDisplayChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
    # Free Child on the custom site so it can be re-registered with the
    # stock ChildAdmin for the default-implementation check below.
    custom_site.unregister(Child)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])
    # Test default implementation
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """The "show all" link is honored only below the show-all maximum."""
    parent = Parent.objects.create(name='anything')
    # 60 children in total.
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})
    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, admin.site)
    # 200 is the max we'll pass to ChangeList
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, 200, m.list_editable, m)
    cl.get_results(request)
    # 60 <= 200, so every row is returned on one page.
    self.assertEqual(len(cl.result_list), 60)
    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, admin.site)
    # 30 is the max we'll pass to ChangeList for this test
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, 30, m.list_editable, m)
    cl.get_results(request)
    # 60 > 30, so the view falls back to the admin's regular page size
    # (10 per page here, per the assertion).
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for num in range(1, 10):
        Child.objects.create(id=num, name='child %s' % num,
                             parent=parent, age=num)
    model_admin = DynamicListDisplayLinksChildAdmin(Child, admin.site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = model_admin.changelist_view(request)
    # Every row's "age" cell must link to that child's change form.
    for num in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(num,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, num))
    list_display = model_admin.get_list_display(request)
    links = model_admin.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    parent = Parent.objects.create(name='parent')
    model_admin = NoListDisplayLinksParentAdmin(Parent, admin.site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/parent/', superuser)
    response = model_admin.changelist_view(request)
    change_url = reverse('admin:admin_changelist_parent_change',
                         args=(parent.pk,))
    # No cell in the grid should link to the change form.
    self.assertNotContains(response, '<a href="%s">' % change_url)
def test_tuple_list_display(self):
    """
    Regression test for #17128
    (ChangeList failing under Python 2.5 after r16319)
    """
    swallow = Swallow.objects.create(
        origin='Africa', load='12.34', speed='22.2')
    model_admin = SwallowAdmin(Swallow, admin.site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering;
    # each field value must survive into the rendered table.
    self.assertContains(response, six.text_type(swallow.origin))
    self.assertContains(response, six.text_type(swallow.load))
    self.assertContains(response, six.text_type(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model doesn't have any default ordering defined.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')
    # 50 objects with explicit ids 1..50 (5 pages of 10 below).
    for counter in range(1, 51):
        UnorderedObject.objects.create(id=counter, bool=True)

    class UnorderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all 5 pages and verify ids run 1..50 when ascending,
        # 50..1 otherwise.
        admin.site.register(UnorderedObject, UnorderedObjectAdmin)
        model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        # Unregister so the next invocation can register a fresh ordering.
        admin.site.unregister(UnorderedObject)

    # When no order is defined at all, everything is ordered by '-pk'.
    check_results_order()
    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    UnorderedObjectAdmin.ordering = ['bool']
    check_results_order()
    # When order fields are defined, including the pk itself, use them.
    UnorderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    UnorderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model has a manager that defines a default ordering.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')
    # 50 objects with id == number == 1..50 (5 pages of 10 below).
    for counter in range(1, 51):
        OrderedObject.objects.create(id=counter, bool=True, number=counter)

    class OrderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walk all 5 pages and verify ids run 1..50 when ascending,
        # 50..1 otherwise.
        admin.site.register(OrderedObject, OrderedObjectAdmin)
        model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        # Unregister so the next invocation can register a fresh ordering.
        admin.site.unregister(OrderedObject)

    # When no order is defined at all, use the model's default ordering (i.e. 'number')
    check_results_order(ascending=True)
    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    OrderedObjectAdmin.ordering = ['bool']
    check_results_order()
    # When order fields are defined, including the pk itself, use them.
    OrderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    OrderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    OrderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    OrderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_dynamic_list_filter(self):
    """
    Regression tests for ticket #17646: dynamic list_filter support.
    """
    parent = Parent.objects.create(name='parent')
    for num in range(10):
        Child.objects.create(name='child %s' % num, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # The 'noparents' user gets the filter list without 'parent'.
    model_admin = DynamicListFilterChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = model_admin.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter,
                     ['name', 'age'])
    # The 'parents' user additionally gets the 'parent' filter.
    model_admin = DynamicListFilterChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = model_admin.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter,
                     ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
    """The admin may vary search_fields per request (get_search_fields)."""
    user = self._create_superuser('child')
    model_admin = DynamicSearchFieldsChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user)
    response = model_admin.changelist_view(request)
    self.assertEqual(response.context_data['cl'].search_fields,
                     ('name', 'age'))
def test_pagination_page_range(self):
    """
    Regression tests for ticket #15653: ensure the number of pages
    generated for changelist views are correct.
    """
    # instantiating and setting up ChangeList object
    m = GroupAdmin(Group, admin.site)
    request = self.factory.get('/group/')
    cl = ChangeList(request, Group, m.list_display,
                    m.list_display_links, m.list_filter, m.date_hierarchy,
                    m.search_fields, m.list_select_related, m.list_per_page,
                    m.list_max_show_all, m.list_editable, m)
    per_page = cl.list_per_page = 10
    # Each row: (current page number, total object count, expected page
    # range); '.' marks the gap placeholder for elided page numbers.
    for page_num, objects_count, expected_page_range in [
        (0, per_page, []),
        (0, per_page * 2, list(range(2))),
        (5, per_page * 11, list(range(11))),
        (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
        (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
    ]:
        # assuming we have exactly `objects_count` objects
        Group.objects.all().delete()
        for i in range(objects_count):
            Group.objects.create(name='test band')
        # setting page number and calculating page range
        cl.page_num = page_num
        cl.get_results(request)
        real_page_range = pagination(cl)['page_range']
        self.assertListEqual(
            expected_page_range,
            list(real_page_range),
        )
class AdminLogNodeTestCase(TestCase):

    def test_get_admin_log_templatetag_custom_user(self):
        """
        Regression test for ticket #20088: admin log depends on User model
        having id field as primary key.
        The old implementation raised an AttributeError when trying to use
        the id field.
        """
        template = Template(
            '{% load log %}{% get_admin_log 10 as admin_log for_user user %}')
        context = Context({'user': CustomIdUser()})
        # The tag only populates a context variable; the rendered output
        # must therefore be the empty string.
        self.assertEqual(template.render(context), '')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
    # Browser-driven tests for the changelist; subclasses only swap the
    # webdriver_class to re-run the same tests in other browsers.
    available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
    fixtures = ['users.json']
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def test_add_row_selection(self):
        """
        Ensure that the status line for selected rows gets updated correctly (#22038)
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
                                    reverse('admin:auth_user_changelist')))
        form_id = '#changelist-form'
        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)
        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")
        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
    # Re-run the inherited browser tests against Chrome.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
    # Re-run the inherited browser tests against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| |
"""
raven.contrib.flask
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
try:
from flask_login import current_user
except ImportError:
has_flask_login = False
else:
has_flask_login = True
import sys
import os
import logging
from flask import request, current_app, g
from flask.signals import got_request_exception, request_finished
from raven.conf import setup_logging
from raven.base import Client
from raven.middleware import Sentry as SentryMiddleware
from raven.handlers.logging import SentryHandler
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
from werkzeug.exceptions import ClientDisconnected
def make_client(client_cls, app, dsn=None):
    """Instantiate a Raven client for *app* from its Flask configuration.

    The DSN is resolved in priority order: the explicit ``dsn`` argument,
    the ``SENTRY_DSN`` config key, then the ``SENTRY_DSN`` environment
    variable. The app's import name is always part of include_paths.
    """
    config = app.config.get
    include_paths = set(config('SENTRY_INCLUDE_PATHS', []))
    include_paths.add(app.import_name)
    return client_cls(
        dsn=dsn or config('SENTRY_DSN') or os.environ.get('SENTRY_DSN'),
        include_paths=include_paths,
        exclude_paths=config('SENTRY_EXCLUDE_PATHS'),
        servers=config('SENTRY_SERVERS'),
        name=config('SENTRY_NAME'),
        public_key=config('SENTRY_PUBLIC_KEY'),
        secret_key=config('SENTRY_SECRET_KEY'),
        project=config('SENTRY_PROJECT'),
        site=config('SENTRY_SITE_NAME'),
        processors=config('SENTRY_PROCESSORS'),
        string_max_length=config('SENTRY_MAX_LENGTH_STRING'),
        list_max_length=config('SENTRY_MAX_LENGTH_LIST'),
        auto_log_stacks=config('SENTRY_AUTO_LOG_STACKS'),
        extra={'app': app},
    )
class Sentry(object):
    """
    Flask application for Sentry.

    Look up configuration from ``os.environ['SENTRY_DSN']``::

    >>> sentry = Sentry(app)

    Pass an arbitrary DSN::

    >>> sentry = Sentry(app, dsn='http://public:secret@example.com/1')

    Pass an explicit client::

    >>> sentry = Sentry(app, client=client)

    Automatically configure logging::

    >>> sentry = Sentry(app, logging=True, level=logging.ERROR)

    Capture an exception::

    >>> try:
    >>>     1 / 0
    >>> except ZeroDivisionError:
    >>>     sentry.captureException()

    Capture a message::

    >>> sentry.captureMessage('hello, world!')

    By default, the Flask integration will do the following:

    - Hook into the `got_request_exception` signal. This can be disabled by
      passing `register_signal=False`.
    - Wrap the WSGI application. This can be disabled by passing
      `wrap_wsgi=False`.
    - Capture information from Flask-Login (if available).
    """
    # TODO(dcramer): the client isn't using local context and therefore
    # gets shared by every app that does init on it

    # NOTE: the ``logging`` parameter shadows the stdlib ``logging`` module
    # inside the method body; ``logging.NOTSET`` in the signature still
    # refers to the module because defaults are evaluated at def time.
    def __init__(self, app=None, client=None, client_cls=Client, dsn=None,
                 logging=False, level=logging.NOTSET, wrap_wsgi=True,
                 register_signal=True):
        self.dsn = dsn
        self.logging = logging          # whether to attach a SentryHandler
        self.client_cls = client_cls    # factory used when no client given
        self.client = client
        self.level = level
        self.wrap_wsgi = wrap_wsgi
        self.register_signal = register_signal
        # Defer all app wiring to init_app() (app-factory pattern).
        if app:
            self.init_app(app)

    @property
    def last_event_id(self):
        # ID of the most recently captured event, or None before any capture.
        return getattr(self, '_last_event_id', None)

    @last_event_id.setter
    def last_event_id(self, value):
        self._last_event_id = value
        try:
            # Mirror the id into flask.g so templates/views can reach it.
            g.sentry_event_id = value
        except Exception:
            # Outside an application/request context ``g`` is unusable.
            pass

    def handle_exception(self, *args, **kwargs):
        # Receiver for the ``got_request_exception`` signal.
        if not self.client:
            return
        ignored_exc_type_list = current_app.config.get('RAVEN_IGNORE_EXCEPTIONS', [])
        exc = sys.exc_info()[1]
        # Skip exception types the app opted out of via RAVEN_IGNORE_EXCEPTIONS.
        if any((isinstance(exc, ignored_exc_type) for ignored_exc_type in ignored_exc_type_list)):
            return
        self.captureException(exc_info=kwargs.get('exc_info'))

    def get_user_info(self, request):
        """
        Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed
        and setup
        """
        if not has_flask_login:
            return
        if not hasattr(current_app, 'login_manager'):
            return
        try:
            is_authenticated = current_user.is_authenticated
        except AttributeError:
            # HACK: catch the attribute error thrown by flask-login is not attached
            # > current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
            # E AttributeError: 'RequestContext' object has no attribute 'user'
            return {}
        if is_authenticated:
            user_info = {
                'is_authenticated': True,
                'is_anonymous': current_user.is_anonymous,
                'id': current_user.get_id(),
            }
            # Optionally forward whitelisted user attributes to Sentry.
            if 'SENTRY_USER_ATTRS' in current_app.config:
                for attr in current_app.config['SENTRY_USER_ATTRS']:
                    if hasattr(current_user, attr):
                        user_info[attr] = getattr(current_user, attr)
        else:
            user_info = {
                'is_authenticated': False,
                'is_anonymous': current_user.is_anonymous,
            }
        return user_info

    def get_http_info(self, request):
        # Build the Sentry HTTP interface payload from a Flask request.
        urlparts = _urlparse.urlsplit(request.url)
        try:
            formdata = request.form
        except ClientDisconnected:
            # The client went away mid-request; treat the form as empty.
            formdata = {}
        return {
            'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
            'query_string': urlparts.query,
            'method': request.method,
            'data': formdata,
            'headers': dict(get_headers(request.environ)),
            'env': dict(get_environ(request.environ)),
        }

    def before_request(self, *args, **kwargs):
        # Reset the event id and attach per-request context to the client.
        self.last_event_id = None
        self.client.http_context(self.get_http_info(request))
        self.client.user_context(self.get_user_info(request))

    def add_sentry_id_header(self, sender, response, *args, **kwargs):
        # NOTE(review): this sets the header even when last_event_id is
        # None (no event captured during the request) — confirm desired.
        response.headers['X-Sentry-ID'] = self.last_event_id
        return response

    def init_app(self, app, dsn=None, logging=None, level=None, wrap_wsgi=None,
                 register_signal=None):
        # Keyword arguments given here override the values stored by
        # __init__; None means "keep the constructor's setting".
        if dsn is not None:
            self.dsn = dsn
        if level is not None:
            self.level = level
        if wrap_wsgi is not None:
            self.wrap_wsgi = wrap_wsgi
        if register_signal is not None:
            self.register_signal = register_signal
        if logging is not None:
            self.logging = logging
        if not self.client:
            self.client = make_client(self.client_cls, app, self.dsn)
        if self.logging:
            # Route stdlib logging records at or above self.level to Sentry.
            setup_logging(SentryHandler(self.client, level=self.level))
        if self.wrap_wsgi:
            app.wsgi_app = SentryMiddleware(app.wsgi_app, self.client)
        app.before_request(self.before_request)
        if self.register_signal:
            got_request_exception.connect(self.handle_exception, sender=app)
            request_finished.connect(self.add_sentry_id_header, sender=app)
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['sentry'] = self

    def captureException(self, *args, **kwargs):
        # Proxy to the client, remembering the resulting event id.
        assert self.client, 'captureException called before application configured'
        result = self.client.captureException(*args, **kwargs)
        if result:
            self.last_event_id = self.client.get_ident(result)
        else:
            self.last_event_id = None
        return result

    def captureMessage(self, *args, **kwargs):
        # Proxy to the client, remembering the resulting event id.
        assert self.client, 'captureMessage called before application configured'
        result = self.client.captureMessage(*args, **kwargs)
        if result:
            self.last_event_id = self.client.get_ident(result)
        else:
            self.last_event_id = None
        return result
| |
'''ssh computer cluster and run job'''
import ConfigParser
import paramiko
import os
import time
import misc
import subprocess
class RunCommandRemotely:
'''Run remote command on server'''
def __init__(self, server, subdir=''):
    """Open SFTP and SSH connections to *server* and pick a work dir.

    server -- section name in hosts.ini describing the remote host.
    subdir -- optional existing remote sub-directory to reuse; when
              given, it is kept rather than overwritten.
    """
    self.config = ConfigParser.RawConfigParser()
    # hosts.ini lives in the same directory as this module.
    hostsFile = __file__[:__file__.rfind("/")] + "/hosts.ini"
    self.config.read(hostsFile)
    self.locdir = os.getcwd()
    self.server = server
    self.hostname = self.config.get(server, 'hostname')
    self.username = self.config.get(server, 'user')
    self.workdir = self.config.get(server, 'workdir')
    self.subdir = subdir
    self.remdir = ''        # full remote working dir; set by chooseDir()
    self.subsubdir = ''     # optional nested dir; set by setSubSubDir()
    self.pkeyfile = os.path.expanduser('~/.ssh/id_rsa_fw')
    self.mykey = paramiko.RSAKey.from_private_key_file(
        self.pkeyfile)
    # SFTP runs over its own transport; commands use self.ssh separately.
    self.trnsprt = paramiko.Transport((self.hostname, 22))
    self.trnsprt.connect(username=self.username, pkey=self.mykey)
    self.sftp = paramiko.SFTPClient.from_transport(self.trnsprt)
    self.ssh = paramiko.SSHClient()
    self.stdin = ''         # streams of the last execCmd() call
    self.stdout = ''
    self.stderr = ''
    self.maxtrials = 20     # retry budget for SFTP transfers and qsub
    self.numprocflag = self.config.get(server, 'numprocflag')
    self.queuespec = self.config.get(server, 'queuespec' )
    self.queuespecn = self.config.get(server, 'queuespecn')
    self.quejobidcol = self.config.get(server, 'quejobidcol')
    self.joblog = "logdir"  # remote directory receiving qsub job logs
    self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Check that md5sum is present on both local and remote machines.
    misc.assertProgramIsReachable('md5sum')
    self.assertRemoteProgramIsReachable('md5sum')
    # Connect to remote server, choose directory, and create working directory
    self.overwrite = True
    if self.subdir != "":
        self.overwrite = False
    self.chooseDir(overwrite=self.overwrite)
def getDir(self):
    """Return the remote working directory chosen for this session."""
    return self.remdir
def die(self):
    # Dump stdout/stderr of the last remote command, then abort the program.
    for line in self.stdout.readlines():
        print line
    for line in self.stderr.readlines():
        print line
    exit(1)
def setSubSubDir(self, subSubDir):
    """Create and select a nested directory below the remote work dir."""
    # Only allow relative paths. Die if subDir starts with "/"
    if subSubDir[0] == "/":
        print "Error. createSubSubDir only allows relative paths:",subSubDir
        exit(1)
    self.execCmd("mkdir -p " + self.remdir + "/" + subSubDir)
    self.subsubdir = subSubDir
    return
def connectSSH(self):
    """Open the SSH command channel unless it already looks connected."""
    # NOTE(review): a fresh SSHClient has no "is_active" attribute, so this
    # hasattr check is used as a crude "not yet connected" probe — confirm
    # it behaves as intended with the paramiko version in use.
    if hasattr(self.ssh,"is_active") is False:
        self.ssh.connect(self.hostname, username=self.username)
        print "SSH connection to",self.hostname,"established"
def disconnectSSH(self):
    """Close the SSH command channel if it appears to be open."""
    if hasattr(self.ssh,"is_active"):
        self.ssh.close()
def chooseDir(self, overwrite=False):
    '''Determine unused directory on remote server'''
    self.connectSSH()
    if self.subdir == "":
        # No directory requested: probe dir000..dir999 on the remote side
        # until one that does not yet exist is found.
        freeDir = False
        dirCounter = 0
        dirName = ''
        while freeDir is False:
            dirName = "dir%03d" % dirCounter
            sin, sout, serr = self.ssh.exec_command( "find " +
                self.workdir + " -maxdepth 1 -name " + dirName + " | wc -l")
            # NOTE(review): `stat` is only read to block until the remote
            # find completes; its value is never inspected.
            stat = sout.channel.recv_exit_status()
            # "0" from `wc -l` means no directory of that name exists.
            if sout.readlines()[0].strip() == "0":
                freeDir = True
            dirCounter += 1
            if dirCounter > 999:
                print "No free directory in",self.workdir
                exit(1)
        self.subdir = dirName
    else:
        if overwrite is True:
            # If folder already exists, move it to .bak
            self.execCmd( "rm -rf " + self.workdir
                + "/" + self.subdir + ".bak")
            self.execCmd( "mv " + self.workdir + "/" + self.subdir
                + " " + self.workdir + "/" + self.subdir + ".bak")
    self.remdir = self.workdir + "/" + self.subdir
    self.execCmd("mkdir -p " + self.remdir)
    self.execCmd("mkdir -p " + self.joblog)
    self.disconnectSSH()
def remoteFileExists(self, myFile):
    """Return True if the basename of *myFile* exists in the remote
    (sub)directory, False otherwise."""
    baseName = myFile.rsplit("/", 1)[-1]
    target = self.remdir + "/" + self.subsubdir + "/" + baseName
    try:
        # stat() raises when the remote path does not exist.
        self.sftp.stat(target)
    except:
        return False
    return True
def localFileExists(self, myFile):
    """Return True if *myFile* exists on the local machine.

    The path is resolved with os.path.abspath, so relative paths are
    interpreted against the current working directory — note the check
    uses the path as given, not the basename.
    """
    # The original computed an unused basename here; dead code removed.
    return os.path.exists(os.path.abspath(myFile))
def putFile(self, myFile):
    """Upload *myFile* into the current remote (sub)directory via SFTP.

    The transfer is skipped when local and remote copies already match
    (MD5); otherwise it is retried up to self.maxtrials times, after
    which the program exits.
    """
    baseFile = myFile
    if baseFile.find("/") != -1:
        baseFile = myFile[myFile.rfind("/")+1:]
    destFile = self.remdir + "/" + self.subsubdir + "/" + baseFile
    oriFile = os.path.abspath(myFile)
    if self.bothFilesIdentical(myFile):
        return
    trials = 0
    putSuccess = False
    while trials < self.maxtrials and putSuccess == False:
        try:
            self.sftp.put(oriFile, destFile)
            putSuccess = True
        except:
            # Transient SFTP failure: wait a second and retry.
            trials += 1
            time.sleep(1)
            continue
        break
    if trials == self.maxtrials:
        print "Error. Can't copy", myFile
        exit(1)
    # print "copied",baseFile,"to the remote server"
    return
def getFile(self, myFile):
    """Download *myFile* from the remote (sub)directory into self.locdir.

    The transfer is skipped when both copies already match (MD5);
    otherwise it is retried up to self.maxtrials times, after which the
    program exits.
    """
    baseFile = myFile
    if baseFile.find("/") != -1:
        baseFile = myFile[myFile.rfind("/")+1:]
    remFile = self.remdir + "/" + self.subsubdir + "/" + baseFile
    # We first check whether the file exists on the local machine. If it is,
    # no need to copy.
    if self.bothFilesIdentical(myFile):
        return
    trials = 0
    getSuccess = False
    while trials < self.maxtrials and getSuccess == False:
        try:
            self.sftp.get(remFile, self.locdir + "/" + baseFile)
            getSuccess = True
        except:
            # Transient SFTP failure: wait a second and retry.
            trials += 1
            time.sleep(1)
            continue
        break
    if trials == self.maxtrials:
        print "Error. Can't copy", myFile
        exit(1)
    # print "copied",baseFile,"from the remote server"
    return
def bothFilesIdentical(self, myFile):
    """Return True when *myFile* exists both locally and remotely and the
    two copies have the same MD5 checksum."""
    # Checks whether myFile is present on both the local and remote machines
    # and if they're identical using a MD5 checksum.
    baseFile = myFile
    if baseFile.find("/") != -1:
        baseFile = myFile[myFile.rfind("/")+1:]
    remFile = self.remdir + "/" + self.subsubdir + "/" + baseFile
    locFile = self.locdir + "/" + baseFile
    if self.remoteFileExists(myFile) is False:
        return False
    if self.localFileExists(myFile) is False:
        return False
    # Both files exist. Check the MD5 checksum
    md5Loc = subprocess.check_output("md5sum " + locFile,
        shell=True).split()
    if len(md5Loc) != 2:
        # Should be of the form
        # <MD5SUM> file.name
        return False
    md5Loc = md5Loc[0]
    if self.execCmd("md5sum " + remFile) != 0:
        # Non-zero exit status
        return False
    # First whitespace-separated token of md5sum output is the digest.
    md5Rem = self.stdout.readlines()[0].split()[0]
    if md5Loc == md5Rem:
        return True
    else:
        return False
def delFile(self, myFile, force=False):
    """Delete *myFile* from the remote (sub)directory.

    NOTE(review): the `force` semantics look inverted relative to the
    name — with force=False a single failure prints a warning and is
    treated as success (loop exits, True returned); with force=True the
    removal is retried up to self.maxtrials times and False is returned
    on exhaustion. Confirm this is intended.
    """
    baseFile = myFile
    if baseFile.find("/") != -1:
        baseFile = myFile[myFile.rfind("/")+1:]
    remFile = self.remdir + "/" + self.subsubdir + "/" + baseFile
    trials = 0
    delSuccess = False
    while trials < self.maxtrials and delSuccess == False:
        try:
            self.sftp.remove(remFile)
            delSuccess = True
        except:
            trials += 1
            time.sleep(1)
            if force == False:
                print "Warning: couldn't delete remote file",remFile
                delSuccess = True
            continue
        break
    if trials == self.maxtrials:
        return False
    return True
def execCmd(self, cmd):
    """Run *cmd* on the remote host and block until it finishes.

    Leaves the command's stdin/stdout/stderr on self and returns the
    remote exit status.
    """
    streams = self.ssh.exec_command(cmd)
    self.stdin, self.stdout, self.stderr = streams
    # recv_exit_status() blocks until the remote command terminates.
    return self.stdout.channel.recv_exit_status()
    def submitJob(self, jobName, numProc, inpCmd, dependID=0):
        """Submit inpCmd to the queueing system via qsub; return the job ID.

        jobName  - name passed to qsub -N and used to look the job up in
                   qstat afterwards.
        numProc  - number of slots; > 1 adds the parallel-environment flag,
                   and >= self.queuespecn additionally selects the special
                   queue self.queuespec.
        inpCmd   - script/command handed to qsub.
        dependID - if > 0, the job is held until this job ID finishes
                   (-hold_jid).

        Submission is retried up to self.maxtrials times (1 s apart); on
        repeated failure the run is aborted via self.die().
        """
        queuesub = ""
        numprocsub = ""
        depend = ""
        if int(numProc) > 1:
            numprocsub = self.numprocflag + " " + str(numProc)
            if int(numProc) >= int(self.queuespecn):
                queuesub = "-q " + self.queuespec
        if dependID > 0:
            depend = "-hold_jid " + str(dependID)
        trials = 0
        subSuccess = False
        while trials < self.maxtrials and subSuccess == False:
            status = self.execCmd("qsub -S /bin/sh -cwd -N " + jobName \
                + " -j y " + numprocsub + " " + queuesub + " " + depend \
                + " -o " + self.joblog + "/" + jobName + ".log "
                + inpCmd)
            if status == 0:
                subSuccess = True
            else:
                trials += 1
                time.sleep(1)
        if subSuccess is False:
            print "Error: qsub submission failed. Error code", status
            self.die()
        # Return job ID
        # qstat truncates job names, hence matching on jobName[:10] only.
        self.execCmd("qstat | grep " + self.username + " | grep " \
            + jobName[:10] + " | awk '{print $" + self.quejobidcol + "}'")
        # NOTE(review): assumes the job appears in qstat within the 2 s
        # sleep and that the awk column is a numeric ID; an empty result
        # would raise IndexError here - confirm against the scheduler.
        time.sleep(2)
        return int(self.stdout.readlines()[0].split()[0])
def jobIsRunning(self, jobName):
if jobName == "":
return False
# Has the job experienced an error when running the script? Look for "Eqw"
# status.
self.execCmd("qstat | grep " + self.username + " | grep " \
+ str(jobName) + " | grep Eqw")
numLines = int(len(self.stdout.readlines()))
if numLines > 0:
# Delete this job
self.execCmd("qdel " + str(jobName))
# We return true in case another instance of this job is currently
# running. This will be checked during the next call to this routine.
return True
self.execCmd("qstat | grep " + self.username + " | grep " \
+ str(jobName))
numLines = int(len(self.stdout.readlines()))
if numLines > 0:
return True
else:
return False
    def assertRemoteProgramIsReachable(self, progName):
        # Assert that progName is reachable on remote machine.  Uses the
        # shell builtin "type" over a fresh SSH connection; aborts the run
        # via self.die() when the program cannot be found.
        self.connectSSH()
        if self.execCmd("type " + progName + " > /dev/null") != 0:
            print "Error. Can't find " + progName + " on remote machine."
            self.die()
        self.disconnectSSH()
    def delRemoteSubDir(self):
        # Recursively remove the whole remote working directory.
        self.execCmd("rm -rf " + self.remdir)
    def delRemoteSubSubDir(self):
        # Recursively remove only the per-run sub-directory on the remote.
        self.execCmd("rm -rf " + self.remdir + "/" + self.subsubdir)
    def getStdin(self):
        # Lines of the stdin stream kept from the last execCmd() call.
        return self.stdin.readlines()
    def getStdout(self):
        # Lines of standard output from the last execCmd() call.
        return self.stdout.readlines()
    def getStderr(self):
        # Lines of standard error from the last execCmd() call.
        return self.stderr.readlines()
    def __del__(self):
        # Make sure the SSH connection is closed when this object is
        # garbage-collected.
        self.disconnectSSH()
if __name__ == "__main__":
    # Testing with verdi
    # Smoke test against the 'verdi' host: run a command, copy a file to
    # and from the remote working directory, then print the captured
    # stdout of the last remote command.
    server = 'verdi'
    cmd = "hostname; which charmmsub"
    remoteCmd = RunCommandRemotely(server, 'dir000000')
    remoteCmd.execCmd(cmd)
    remoteCmd.putFile('hosts.ini')
    remoteCmd.getFile('hosts.ini')
    print remoteCmd.getStdout()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
from ironic.tests.unit.objects import utils as obj_utils
class NodeSetBootDeviceTestCase(base.DbTestCase):
    """Tests for conductor_utils.node_set_boot_device."""

    def _ipmi_task(self):
        # Build a fake_ipmitool node and return an exclusive task for it.
        mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
        self.driver = driver_factory.get_driver("fake_ipmitool")
        ipmi_info = utils.get_test_ipmi_info()
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake_ipmitool',
                                          driver_info=ipmi_info)
        return task_manager.TaskManager(self.context, node.uuid)

    def test_node_set_boot_device_non_existent_device(self):
        """An unknown boot device name is rejected."""
        task = self._ipmi_task()
        self.assertRaises(exception.InvalidParameterValue,
                          conductor_utils.node_set_boot_device,
                          task,
                          device='fake')

    def test_node_set_boot_device_valid(self):
        """A known device is forwarded to the management interface."""
        task = self._ipmi_task()
        with mock.patch.object(self.driver.management,
                               'set_boot_device') as mock_sbd:
            conductor_utils.node_set_boot_device(task, device='pxe')
            mock_sbd.assert_called_once_with(task,
                                             device='pxe',
                                             persistent=False)
class NodePowerActionTestCase(base.DbTestCase):
    """Tests for conductor_utils.node_power_action using the fake driver."""

    def setUp(self):
        super(NodePowerActionTestCase, self).setUp()
        # All tests below use the generic "fake" driver.
        mgr_utils.mock_the_extension_manager()
        self.driver = driver_factory.get_driver("fake")

    def test_node_power_action_power_on(self):
        """Test node_power_action to turn node power on."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_OFF
            conductor_utils.node_power_action(task, states.POWER_ON)
            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_power_off(self):
        """Test node_power_action to turn node power off."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON
            conductor_utils.node_power_action(task, states.POWER_OFF)
            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_OFF, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_power_reboot(self):
        """Test for reboot a node."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power, 'reboot') as reboot_mock:
            conductor_utils.node_power_action(task, states.REBOOT)
            node.refresh()
            reboot_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_invalid_state(self):
        """Test for exception when changing to an invalid power state."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON
            self.assertRaises(exception.InvalidParameterValue,
                              conductor_utils.node_power_action,
                              task,
                              "INVALID_POWER_STATE")
            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNotNone(node['last_error'])
            # last_error is cleared when a new transaction happens
            conductor_utils.node_power_action(task, states.POWER_OFF)
            node.refresh()
            self.assertEqual(states.POWER_OFF, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_already_being_processed(self):
        """Test node power action after aborted power action.
        The target_power_state is expected to be None so it isn't
        checked in the code. This is what happens if it is not None.
        (Eg, if a conductor had died during a previous power-off
        attempt and left the target_power_state set to states.POWER_OFF,
        and the user is attempting to power-off again.)
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON,
                                          target_power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)
        conductor_utils.node_power_action(task, states.POWER_OFF)
        node.refresh()
        self.assertEqual(states.POWER_OFF, node['power_state'])
        self.assertEqual(states.NOSTATE, node['target_power_state'])
        self.assertIsNone(node['last_error'])

    @mock.patch.object(conductor_utils, 'LOG', autospec=True)
    def test_node_power_action_in_same_state(self, log_mock):
        """Test setting node state to its present state.
        Test that we don't try to set the power state if the requested
        state is the same as the current state.
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          last_error='anything but None',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON
            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                conductor_utils.node_power_action(task, states.POWER_ON)
                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                self.assertFalse(set_power_mock.called,
                                 "set_power_state unexpectedly called")
                self.assertEqual(states.POWER_ON, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNone(node['last_error'])
                log_mock.warning.assert_called_once_with(
                    u"Not going to change node %(node)s power state because "
                    u"current state = requested state = '%(state)s'.",
                    {'state': states.POWER_ON, 'node': node.uuid})

    def test_node_power_action_in_same_state_db_not_in_sync(self):
        """Test setting node state to its present state if DB is out of sync.
        Under rare conditions (see bug #1403106) database might contain stale
        information, make sure we fix it.
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          last_error='anything but None',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            # Driver reports OFF although the DB says ON: DB must be fixed.
            get_power_mock.return_value = states.POWER_OFF
            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                conductor_utils.node_power_action(task, states.POWER_OFF)
                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                self.assertFalse(set_power_mock.called,
                                 "set_power_state unexpectedly called")
                self.assertEqual(states.POWER_OFF, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNone(node['last_error'])

    def test_node_power_action_failed_getting_state(self):
        """Test for exception when we can't get the current power state."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_state_mock:
            get_power_state_mock.side_effect = (
                exception.InvalidParameterValue('failed getting power state'))
            self.assertRaises(exception.InvalidParameterValue,
                              conductor_utils.node_power_action,
                              task,
                              states.POWER_ON)
            node.refresh()
            get_power_state_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNotNone(node['last_error'])

    def test_node_power_action_set_power_failure(self):
        """Test if an exception is thrown when the set_power call fails."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                get_power_mock.return_value = states.POWER_OFF
                set_power_mock.side_effect = exception.IronicException()
                self.assertRaises(
                    exception.IronicException,
                    conductor_utils.node_power_action,
                    task,
                    states.POWER_ON)
                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                set_power_mock.assert_called_once_with(mock.ANY,
                                                       states.POWER_ON)
                # Power state must be unchanged and the error recorded.
                self.assertEqual(states.POWER_OFF, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNotNone(node['last_error'])
class CleanupAfterTimeoutTestCase(tests_base.TestCase):
    """Tests for conductor_utils.cleanup_after_timeout."""

    def setUp(self):
        super(CleanupAfterTimeoutTestCase, self).setUp()
        # Fully mocked task: cleanup_after_timeout only touches the
        # 'deploy' driver interface and the node object.
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.shared = False
        self.task.context = self.context
        self.task.node = mock.Mock(spec_set=objects.Node)
        self.task.driver = mock.Mock(spec_set=['deploy'])
        self.node = self.task.node

    def test_cleanup_after_timeout(self):
        """Happy path: node saved once and deploy.clean_up invoked."""
        conductor_utils.cleanup_after_timeout(self.task)
        self.node.save.assert_called_once_with()
        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
        self.assertIn('Timeout reached', self.node.last_error)

    def test_cleanup_after_timeout_shared_lock(self):
        """A shared lock is not sufficient to run the cleanup."""
        self.task.shared = True
        self.assertRaises(exception.ExclusiveLockRequired,
                          conductor_utils.cleanup_after_timeout,
                          self.task)

    def test_cleanup_after_timeout_cleanup_ironic_exception(self):
        """An IronicException from clean_up lands in last_error."""
        deploy = self.task.driver.deploy
        deploy.clean_up.side_effect = exception.IronicException('moocow')
        conductor_utils.cleanup_after_timeout(self.task)
        deploy.clean_up.assert_called_once_with(self.task)
        # The node is saved twice: once for the timeout, once for the error.
        self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertIn('moocow', self.node.last_error)

    def test_cleanup_after_timeout_cleanup_random_exception(self):
        """Unexpected exceptions are reported with a generic message."""
        deploy = self.task.driver.deploy
        deploy.clean_up.side_effect = Exception('moocow')
        conductor_utils.cleanup_after_timeout(self.task)
        deploy.clean_up.assert_called_once_with(self.task)
        self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertIn('Deploy timed out', self.node.last_error)
class NodeCleaningStepsTestCase(base.DbTestCase):
    """Tests for the cleaning-step discovery and validation helpers."""

    def setUp(self):
        super(NodeCleaningStepsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager()
        # Sample steps as reported by the fake power/deploy interfaces.
        self.power_update = {
            'step': 'update_firmware', 'priority': 10, 'interface': 'power'}
        self.deploy_update = {
            'step': 'update_firmware', 'priority': 10, 'interface': 'deploy'}
        self.deploy_erase = {
            'step': 'erase_disks', 'priority': 20, 'interface': 'deploy'}
        # Automated cleaning should be executed in this order
        self.clean_steps = [self.deploy_erase, self.power_update,
                            self.deploy_update]
        # Manual clean step
        self.deploy_raid = {
            'step': 'build_raid', 'priority': 0, 'interface': 'deploy',
            'argsinfo': {'arg1': {'description': 'desc1', 'required': True},
                         'arg2': {'description': 'desc2'}}}

    @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
    @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
    def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps):
        # Test getting cleaning steps, with one driver returning None, two
        # conflicting priorities, and asserting they are ordered properly.
        node = obj_utils.create_test_node(
            self.context, driver='fake',
            provision_state=states.CLEANING,
            target_provision_state=states.AVAILABLE)
        mock_power_steps.return_value = [self.power_update]
        mock_deploy_steps.return_value = [self.deploy_erase,
                                          self.deploy_update]
        with task_manager.acquire(
                self.context, node.uuid, shared=False) as task:
            steps = conductor_utils._get_cleaning_steps(task, enabled=False)
        self.assertEqual(self.clean_steps, steps)

    @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
    @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
    def test__get_cleaning_steps_unsorted(self, mock_power_steps,
                                          mock_deploy_steps):
        """With sort=False the driver-reported order is preserved."""
        node = obj_utils.create_test_node(
            self.context, driver='fake',
            provision_state=states.CLEANING,
            target_provision_state=states.MANAGEABLE)
        mock_deploy_steps.return_value = [self.deploy_raid,
                                          self.deploy_update,
                                          self.deploy_erase]
        with task_manager.acquire(
                self.context, node.uuid, shared=False) as task:
            steps = conductor_utils._get_cleaning_steps(task, enabled=False,
                                                        sort=False)
        self.assertEqual(mock_deploy_steps.return_value, steps)

    @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
    @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
    def test__get_cleaning_steps_only_enabled(self, mock_power_steps,
                                              mock_deploy_steps):
        # Test getting only cleaning steps, with one driver returning None, two
        # conflicting priorities, and asserting they are ordered properly.
        # Should discard zero-priority (manual) clean step
        node = obj_utils.create_test_node(
            self.context, driver='fake',
            provision_state=states.CLEANING,
            target_provision_state=states.AVAILABLE)
        mock_power_steps.return_value = [self.power_update]
        mock_deploy_steps.return_value = [self.deploy_erase,
                                          self.deploy_update,
                                          self.deploy_raid]
        with task_manager.acquire(
                self.context, node.uuid, shared=True) as task:
            steps = conductor_utils._get_cleaning_steps(task, enabled=True)
        self.assertEqual(self.clean_steps, steps)

    @mock.patch.object(conductor_utils, '_validate_user_clean_steps')
    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test_set_node_cleaning_steps_automated(self, mock_steps,
                                               mock_validate_user_steps):
        """Automated cleaning stores the enabled driver steps on the node."""
        mock_steps.return_value = self.clean_steps
        node = obj_utils.create_test_node(
            self.context, driver='fake',
            provision_state=states.CLEANING,
            target_provision_state=states.AVAILABLE,
            last_error=None,
            clean_step=None)
        with task_manager.acquire(
                self.context, node.uuid, shared=False) as task:
            conductor_utils.set_node_cleaning_steps(task)
            node.refresh()
            self.assertEqual(self.clean_steps,
                             node.driver_internal_info['clean_steps'])
            self.assertEqual({}, node.clean_step)
            mock_steps.assert_called_once_with(task, enabled=True)
            # User-step validation only applies to manual cleaning.
            self.assertFalse(mock_validate_user_steps.called)

    @mock.patch.object(conductor_utils, '_validate_user_clean_steps')
    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test_set_node_cleaning_steps_manual(self, mock_steps,
                                            mock_validate_user_steps):
        """Manual cleaning validates and keeps the user-provided steps."""
        clean_steps = [self.deploy_raid]
        mock_steps.return_value = self.clean_steps
        node = obj_utils.create_test_node(
            self.context, driver='fake',
            provision_state=states.CLEANING,
            target_provision_state=states.MANAGEABLE,
            last_error=None,
            clean_step=None,
            driver_internal_info={'clean_steps': clean_steps})
        with task_manager.acquire(
                self.context, node.uuid, shared=False) as task:
            conductor_utils.set_node_cleaning_steps(task)
            node.refresh()
            self.assertEqual(clean_steps,
                             node.driver_internal_info['clean_steps'])
            self.assertEqual({}, node.clean_step)
            # Driver discovery is skipped; the user steps are validated.
            self.assertFalse(mock_steps.called)
            mock_validate_user_steps.assert_called_once_with(task, clean_steps)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps(self, mock_steps):
        """Valid user steps pass validation without raising."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.return_value = self.clean_steps
        user_steps = [{'step': 'update_firmware', 'interface': 'power'},
                      {'step': 'erase_disks', 'interface': 'deploy'}]
        with task_manager.acquire(self.context, node.uuid) as task:
            conductor_utils._validate_user_clean_steps(task, user_steps)
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps_no_steps(self, mock_steps):
        """An empty user step list is accepted."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.return_value = self.clean_steps
        with task_manager.acquire(self.context, node.uuid) as task:
            conductor_utils._validate_user_clean_steps(task, [])
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps_get_steps_exception(self, mock_steps):
        """Failures while fetching driver steps propagate to the caller."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.side_effect = exception.NodeCleaningFailure('bad')
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaises(exception.NodeCleaningFailure,
                              conductor_utils._validate_user_clean_steps,
                              task, [])
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps_not_supported(self, mock_steps):
        """A step the driver does not offer is rejected."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.return_value = [self.power_update, self.deploy_raid]
        user_steps = [{'step': 'update_firmware', 'interface': 'power'},
                      {'step': 'bad_step', 'interface': 'deploy'}]
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaisesRegexp(exception.InvalidParameterValue,
                                    "does not support.*bad_step",
                                    conductor_utils._validate_user_clean_steps,
                                    task, user_steps)
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps_invalid_arg(self, mock_steps):
        """Arguments not declared in the step's argsinfo are rejected."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.return_value = self.clean_steps
        user_steps = [{'step': 'update_firmware', 'interface': 'power',
                       'args': {'arg1': 'val1', 'arg2': 'val2'}},
                      {'step': 'erase_disks', 'interface': 'deploy'}]
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaisesRegexp(exception.InvalidParameterValue,
                                    "update_firmware.*invalid.*arg1",
                                    conductor_utils._validate_user_clean_steps,
                                    task, user_steps)
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)

    @mock.patch.object(conductor_utils, '_get_cleaning_steps')
    def test__validate_user_clean_steps_missing_required_arg(self, mock_steps):
        """Omitting an argument marked required=True is rejected."""
        node = obj_utils.create_test_node(self.context)
        mock_steps.return_value = [self.power_update, self.deploy_raid]
        user_steps = [{'step': 'update_firmware', 'interface': 'power'},
                      {'step': 'build_raid', 'interface': 'deploy'}]
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaisesRegexp(exception.InvalidParameterValue,
                                    "build_raid.*missing.*arg1",
                                    conductor_utils._validate_user_clean_steps,
                                    task, user_steps)
            mock_steps.assert_called_once_with(task, enabled=False, sort=False)
class ErrorHandlersTestCase(tests_base.TestCase):
    """Tests for the conductor_utils error-handler functions."""

    def setUp(self):
        super(ErrorHandlersTestCase, self).setUp()
        # The handlers only touch the node and the 'deploy' interface,
        # so a fully mocked task is sufficient.
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.driver = mock.Mock(spec_set=['deploy'])
        self.task.node = mock.Mock(spec_set=objects.Node)
        self.node = self.task.node

    @mock.patch.object(conductor_utils, 'LOG')
    def test_provision_error_handler_no_worker(self, log_mock):
        """NoFreeConductorWorker restores states and records last_error."""
        exc = exception.NoFreeConductorWorker()
        conductor_utils.provisioning_error_handler(exc, self.node, 'state-one',
                                                   'state-two')
        self.node.save.assert_called_once_with()
        self.assertEqual('state-one', self.node.provision_state)
        self.assertEqual('state-two', self.node.target_provision_state)
        self.assertIn('No free conductor workers', self.node.last_error)
        self.assertTrue(log_mock.warning.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_provision_error_handler_other_error(self, log_mock):
        """Other exceptions are ignored by the provisioning handler."""
        exc = Exception('foo')
        conductor_utils.provisioning_error_handler(exc, self.node, 'state-one',
                                                   'state-two')
        self.assertFalse(self.node.save.called)
        self.assertFalse(log_mock.warning.called)

    def test_cleaning_error_handler(self):
        """Automated cleaning failure puts the node into maintenance."""
        self.node.provision_state = states.CLEANING
        target = 'baz'
        self.node.target_provision_state = target
        self.node.driver_internal_info = {}
        msg = 'error bar'
        conductor_utils.cleaning_error_handler(self.task, msg)
        self.node.save.assert_called_once_with()
        self.assertEqual({}, self.node.clean_step)
        self.assertFalse('clean_step_index' in self.node.driver_internal_info)
        self.assertEqual(msg, self.node.last_error)
        self.assertTrue(self.node.maintenance)
        self.assertEqual(msg, self.node.maintenance_reason)
        driver = self.task.driver.deploy
        driver.tear_down_cleaning.assert_called_once_with(self.task)
        self.task.process_event.assert_called_once_with('fail',
                                                        target_state=None)

    def test_cleaning_error_handler_manual(self):
        """Manual cleaning failure targets the MANAGEABLE state."""
        target = states.MANAGEABLE
        self.node.target_provision_state = target
        conductor_utils.cleaning_error_handler(self.task, 'foo')
        self.task.process_event.assert_called_once_with('fail',
                                                        target_state=target)

    def test_cleaning_error_handler_no_teardown(self):
        """tear_down_cleaning=False skips the driver teardown."""
        target = states.MANAGEABLE
        self.node.target_provision_state = target
        conductor_utils.cleaning_error_handler(self.task, 'foo',
                                               tear_down_cleaning=False)
        self.assertFalse(self.task.driver.deploy.tear_down_cleaning.called)
        self.task.process_event.assert_called_once_with('fail',
                                                        target_state=target)

    def test_cleaning_error_handler_no_fail(self):
        """set_fail_state=False avoids the 'fail' state transition."""
        conductor_utils.cleaning_error_handler(self.task, 'foo',
                                               set_fail_state=False)
        driver = self.task.driver.deploy
        driver.tear_down_cleaning.assert_called_once_with(self.task)
        self.assertFalse(self.task.process_event.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_cleaning_error_handler_tear_down_error(self, log_mock):
        """Teardown failures are logged instead of propagating."""
        driver = self.task.driver.deploy
        driver.tear_down_cleaning.side_effect = Exception('bar')
        conductor_utils.cleaning_error_handler(self.task, 'foo')
        self.assertTrue(log_mock.exception.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_spawn_cleaning_error_handler_no_worker(self, log_mock):
        """NoFreeConductorWorker during spawn is saved on the node."""
        exc = exception.NoFreeConductorWorker()
        conductor_utils.spawn_cleaning_error_handler(exc, self.node)
        self.node.save.assert_called_once_with()
        self.assertIn('No free conductor workers', self.node.last_error)
        self.assertTrue(log_mock.warning.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_spawn_cleaning_error_handler_other_error(self, log_mock):
        """Other exceptions during spawn are ignored."""
        exc = Exception('foo')
        conductor_utils.spawn_cleaning_error_handler(exc, self.node)
        self.assertFalse(self.node.save.called)
        self.assertFalse(log_mock.warning.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_power_state_error_handler_no_worker(self, log_mock):
        """NoFreeConductorWorker resets the power state fields."""
        exc = exception.NoFreeConductorWorker()
        conductor_utils.power_state_error_handler(exc, self.node, 'newstate')
        self.node.save.assert_called_once_with()
        self.assertEqual('newstate', self.node.power_state)
        self.assertEqual(states.NOSTATE, self.node.target_power_state)
        self.assertIn('No free conductor workers', self.node.last_error)
        self.assertTrue(log_mock.warning.called)

    @mock.patch.object(conductor_utils, 'LOG')
    def test_power_state_error_handler_other_error(self, log_mock):
        """Other exceptions are ignored by the power-state handler."""
        exc = Exception('foo')
        conductor_utils.power_state_error_handler(exc, self.node, 'foo')
        self.assertFalse(self.node.save.called)
        self.assertFalse(log_mock.warning.called)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.