gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#
# wcs.py -- module wrapper for WCS calculations.
#
# Eric Jeschke (eric@naoj.org)
# Takeshi Inagaki
# Bruce Bon
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
We are lucky to have several possible choices for a python WCS package
compatible with Ginga: astlib, kapteyn, starlink and astropy.
kapteyn and astropy wrap Doug Calabretta's "WCSLIB", astLib wraps
Doug Mink's "wcstools", and I'm not sure what starlink uses (their own?).
Note that astlib and starlink require pyfits (or astropy) in order to
create a WCS object from a FITS header.
To force the use of one, do:
from ginga.util import wcs
wcs.use('kapteyn')
before you load any images. Otherwise Ginga will try to pick one for
you.
"""
import math
import re
import numpy
have_pyfits = False
try:
from astropy.io import fits as pyfits
have_pyfits = True
except ImportError:
try:
import pyfits
have_pyfits = True
except ImportError:
pass
# list of coordinate-system names supported by the configured package
# (populated by use())
coord_types = []
# set True once use() has successfully configured a WCS package
wcs_configured = False
# import-success flags for each candidate WCS package
have_kapteyn = False
have_astlib = False
have_pywcs = False
have_astropy = False
have_starlink = False
# the active WCS wrapper class; rebound by use()
WCS = None
class WCSError(Exception):
    """Exception raised for errors in the WCS calculations of this module."""
    pass
def use(wcspkg, raise_err=True):
    """Choose the package used for WCS calculations.

    Parameters
    ----------
    wcspkg: str
        One of 'kapteyn', 'starlink', 'astlib', 'astropy' or 'barebones'.
    raise_err: bool
        If True, propagate the ImportError raised when the requested
        package cannot be loaded; if False, just return False.

    Returns True if the package was loaded and the module-global `WCS`
    was rebound to the matching wrapper class; False otherwise.
    """
    global coord_types, wcs_configured, WCS, \
           have_kapteyn, kapwcs, \
           have_astlib, astWCS, astCoords, \
           have_starlink, Ast, Atl, \
           have_astropy, have_pywcs, pywcs, coordinates, units
    # bug fix: have_pywcs was missing from the global declaration, so the
    # assignments in the 'astropy' branch used to create a function local
    # instead of updating the module-level flag

    if wcspkg == 'kapteyn':
        try:
            from kapteyn import wcs as kapwcs
            coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
            have_kapteyn = True
            wcs_configured = True
            WCS = KapteynWCS
            return True
        except ImportError as e:
            if raise_err:
                raise
        return False

    elif wcspkg == 'starlink':
        try:
            import starlink.Ast as Ast
            import starlink.Atl as Atl
            coord_types = ['icrs', 'fk5', 'fk4', 'galactic', 'ecliptic']
            have_starlink = True
            wcs_configured = True
            WCS = StarlinkWCS
            return True
        except ImportError as e:
            if raise_err:
                raise
        return False

    elif wcspkg == 'astlib':
        try:
            # astLib requires a FITS header object; raises WCSError
            # (never swallowed) when no pyfits/astropy.io.fits is present
            if not have_pyfits:
                raise WCSError("Need pyfits module to use astLib WCS")

            from astLib import astWCS, astCoords
            # astLib 0.8.x+ has support for numpy arrays
            astWCS.NUMPY_MODE = True
            coord_types = ['j2000', 'b1950', 'galactic']
            have_astlib = True
            wcs_configured = True
            WCS = AstLibWCS
            return True
        except ImportError as e:
            if raise_err:
                raise
        return False

    elif wcspkg == 'astropy':
        # Assume we should have pyfits if we have astropy
        #if not have_pyfits:
        #    raise WCSError("Need pyfits module to use astLib WCS")
        try:
            import astropy.wcs as pywcs
            have_pywcs = True
        except ImportError:
            try:
                import pywcs
                have_pywcs = True
            except ImportError as e:
                if raise_err:
                    raise

        try:
            from astropy import coordinates
            from astropy import units
            have_astropy = True
            wcs_configured = True
            coord_types = ['icrs', 'fk5', 'fk4', 'galactic']
            WCS = AstropyWCS
            return True
        except ImportError as e:
            if raise_err:
                raise
        return False

    elif wcspkg == 'barebones':
        coord_types = ['fk5']
        WCS = BareBonesWCS
        # NOTE(review): returns False even though the barebones WCS was
        # selected, and does not set wcs_configured; preserved for
        # backward compatibility -- confirm whether callers rely on this.
        return False

    # unrecognized package name (previously fell through returning None)
    return False
display_types = ['sexagesimal', 'degrees']
class BaseWCS(object):
    """Common base class for the WCS wrapper classes in this module.

    Subclasses populate `self.header` (a dict-like mapping of FITS
    keyword -> value) in their load_header() methods.
    """

    def get_keyword(self, key):
        """Return the value of FITS keyword `key` from the loaded header."""
        return self.header[key]

    def get_keywords(self, *args):
        """Return the values of several FITS keywords, in the order given."""
        return map(self.header.__getitem__, args)

    def fix_bad_headers(self):
        """Fix up bad headers that cause problems for WCSLIB.

        Subclass can override this method to fix up issues with the
        header for problem FITS files.
        """
        # WCSLIB rejects the nonstandard unit name "DEGREE"; rewrite it
        # to the standard "deg" on both celestial axes.
        for kwd in ('CUNIT1', 'CUNIT2'):
            if self.header.get(kwd, 'deg').upper() == 'DEGREE':
                self.header[kwd] = 'deg'
class AstropyWCS(BaseWCS):
    """A WCS interface for astropy.wcs
    You need to install python module 'astropy'

        http://pypi.python.org/pypi/astropy

    if you want to use this version.
    """

    def __init__(self, logger):
        super(AstropyWCS, self).__init__()
        if not have_astropy:
            raise WCSError("Please install module 'astropy' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        # map of our coordinate-system keys to astropy coordinate classes
        # NOTE(review): these class names are from an early astropy
        # coordinates API; later astropy versions renamed them -- verify
        # against the installed version.
        self.coord_table = {
            'icrs': coordinates.ICRSCoordinates,
            'fk5': coordinates.FK5Coordinates,
            'fk4': coordinates.FK4Coordinates,
            'galactic': coordinates.GalacticCoordinates,
            #'azel': coordinates.HorizontalCoordinates,
            }
        self.kind = 'astropy/WCSLIB'

    def load_header(self, header, fobj=None):
        """Load FITS keywords from `header` (any dict-like mapping) and
        build the WCS transform.  On failure self.wcs is set to None."""
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed as dicts
        #self.header.update(header)
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()

        try:
            self.wcs = pywcs.WCS(self.header, fobj=fobj, relax=True)
            self.coordsys = choose_coord_system(self.header)
        except Exception as e:
            # fix: python 2-only "except Exception, e" syntax replaced
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def pixtoradec(self, idxs, coords='data'):
        """Convert pixel indexes `idxs` to (ra_deg, dec_deg).

        coords='data' means 0-based pixel coordinates; anything else is
        treated as 1-based (traditional FITS).
        """
        if coords == 'data':
            origin = 0
        else:
            origin = 1
        pixcrd = numpy.array([idxs], numpy.float_)
        try:
            #sky = self.wcs.wcs_pix2sky(pixcrd, origin)
            #sky = self.wcs.all_pix2sky(pixcrd, origin)
            # astropy only?
            sky = self.wcs.all_pix2world(pixcrd, origin)

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        ra_deg = float(sky[0, 0])
        dec_deg = float(sky[0, 1])

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Inverse of pixtoradec(); any extra axes (naxispath) are
        padded with zeroes."""
        if coords == 'data':
            origin = 0
        else:
            origin = 1

        args = [ra_deg, dec_deg]
        if naxispath:
            args += [0] * len(naxispath)
        skycrd = numpy.array([args], numpy.float_)

        try:
            #pix = self.wcs.wcs_sky2pix(skycrd, origin)
            # Doesn't seem to be a all_sky2pix
            #pix = self.wcs.all_sky2pix(skycrd, origin)
            # astropy only?
            pix = self.wcs.wcs_world2pix(skycrd, origin)

        except Exception as e:
            # fix: report through our logger like the other methods,
            # instead of printing to stdout
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        x = float(pix[0, 0])
        y = float(pix[0, 1])
        return (x, y)

    def pixtocoords(self, idxs, system=None, coords='data'):
        """Convert pixel indexes to an astropy coordinate object in the
        requested `system` (default 'icrs').  Raises WCSError if no
        usable WCS has been loaded."""
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to astropy coord
        try:
            fromclass = self.coord_table[self.coordsys]
        except KeyError:
            raise WCSError("No such coordinate system available: '%s'" % (
                self.coordsys))

        coord = fromclass(ra_deg, dec_deg,
                          unit=(units.degree, units.degree))

        if (system is None) or (system == self.coordsys):
            return coord

        # Now give it back to the user in the system requested
        try:
            toclass = self.coord_table[system]
        except KeyError:
            raise WCSError("No such coordinate system available: '%s'" % (
                system))

        coord = coord.transform_to(toclass)
        return coord

    def _deg(self, coord):
        # AstroPy changed the API so now we have to support more
        # than one--we don't know what version the user has installed!
        if hasattr(coord, 'degrees'):
            return coord.degrees
        else:
            return coord.degree

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Convert pixel indexes to a (lon_deg, lat_deg) pair in `system`."""
        c = self.pixtocoords(idxs, system=system, coords=coords)
        return (self._deg(c.lonangle), self._deg(c.latangle))
class AstLibWCS(BaseWCS):
    """A WCS interface for astLib.astWCS.WCS
    You need to install python module 'astLib'

        http://sourceforge.net/projects/astlib

    if you want to use this version.
    """

    def __init__(self, logger):
        super(AstLibWCS, self).__init__()
        if not have_astlib:
            raise WCSError("Please install package 'astLib' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'astlib/wcstools'

    def load_header(self, header, fobj=None):
        """Load FITS keywords from `header` (any dict-like mapping) and
        build the WCS transform.  On failure self.wcs is set to None."""
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed as dicts
        #self.header.update(header)
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()

        try:
            self.wcs = astWCS.WCS(self.header, mode='pyfits')
            self.coordsys = self.choose_coord_system(self.header)
        except Exception as e:
            # fix: python 2-only "except Exception, e" syntax replaced
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def choose_coord_system(self, header):
        """Return an appropriate key code for the axes coordinate system by
        examining the FITS header.

        Returns one of 'galactic', 'b1950', 'j2000', or 'raw' when no
        usable CTYPE1 keyword is present.
        """
        try:
            ctype = header['CTYPE1'].strip().upper()
        except KeyError:
            return 'raw'
            #raise WCSError("Cannot determine appropriate coordinate system from FITS header")

        match = re.match(r'^GLON\-.*$', ctype)
        if match:
            return 'galactic'

        # match = re.match(r'^ELON\-.*$', ctype)
        # if match:
        #     return 'ecliptic'

        match = re.match(r'^RA\-\-\-.*$', ctype)
        if match:
            hdkey = 'RADECSYS'
            try:
                radecsys = header[hdkey]

            except KeyError:
                try:
                    hdkey = 'RADESYS'
                    radecsys = header[hdkey]
                except KeyError:
                    # missing keyword
                    # RADESYS defaults to ICRS unless EQUINOX is given
                    # alone, in which case it defaults to FK4 prior to 1984
                    # and FK5 after 1984.
                    try:
                        equinox = float(header['EQUINOX'])
                        # bug fix: the old code chose FK5 unconditionally
                        # here, ignoring the pre-1984 FK4 convention it
                        # documented
                        radecsys = 'FK4' if equinox < 1984.0 else 'FK5'
                    except KeyError:
                        radecsys = 'ICRS'
                    except (ValueError, TypeError):
                        # unparseable EQUINOX value
                        radecsys = 'FK5'

            radecsys = radecsys.strip().upper()
            if radecsys in ('FK4', ):
                return 'b1950'
            return 'j2000'

        #raise WCSError("Cannot determine appropriate coordinate system from FITS header")
        return 'j2000'

    def pixtoradec(self, idxs, coords='data'):
        """Convert pixel indexes to (ra_deg, dec_deg).

        coords='fits' means 1-based pixel coordinates; otherwise they
        are interpreted as 0-based.
        """
        if coords == 'fits':
            # Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
            idxs = tuple(map(lambda x: x-1, idxs))

        try:
            ra_deg, dec_deg = self.wcs.pix2wcs(idxs[0], idxs[1])

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Inverse of pixtoradec().  naxispath is accepted for interface
        compatibility but unused here."""
        try:
            x, y = self.wcs.wcs2pix(ra_deg, dec_deg)

        except Exception as e:
            # fix: report through our logger like the other methods,
            # instead of printing to stdout
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'fits':
            # Via astWCS.NUMPY_MODE, we've forced pixels referenced from 0
            x, y = x+1, y+1

        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Convert pixel indexes to (lon_deg, lat_deg) in `system`
        (default 'j2000')."""
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'j2000'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        # (computed outside the try block so the error message below
        # cannot reference unassigned names)
        fromsys = self.coordsys.upper()
        tosys = system.upper()
        if fromsys == 'B1950':
            equinox = 1950.0
        else:
            equinox = 2000.0
        try:
            lon_deg, lat_deg = astCoords.convertCoords(fromsys, tosys,
                                                       ra_deg, dec_deg,
                                                       equinox)
        except Exception as e:
            raise WCSError("Error converting between coordinate systems '%s' and '%s': %s" % (
                fromsys, tosys, str(e)))

        return (lon_deg, lat_deg)
class KapteynWCS(BaseWCS):
    """A WCS interface for kapteyn.wcs.Projection
    You need to install python module 'kapteyn'

        http://www.astro.rug.nl/software/kapteyn/

    if you want to use this version.
    """

    def __init__(self, logger):
        super(KapteynWCS, self).__init__()
        if not have_kapteyn:
            raise WCSError("Please install package 'kapteyn' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'kapteyn/WCSLIB'
        # sky system that pixtoradec() results are expressed in
        self._skyout = "equatorial icrs J2000.0"

        # see: https://github.com/astropy/coordinates-benchmark/blob/master/kapteyn/convert.py
        self.conv_d = dict(fk5='fk5', fk4='fk4,J2000_OBS', icrs='icrs',
                           galactic='galactic', ecliptic='ecliptic,J2000')

    def load_header(self, header, fobj=None):
        """Load FITS keywords from `header` and build the Projection.
        On failure self.wcs is set to None."""
        # For kapteyn, header just needs to be duck-typed like a dict
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed as dicts
        #self.header.update(header)
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()

        try:
            self.wcs = kapwcs.Projection(self.header,
                                         skyout=self._skyout)
            self.coordsys = choose_coord_system(self.header)
        except Exception as e:
            # fix: python 2-only "except Exception, e" syntax replaced
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def pixtoradec(self, idxs, coords='data'):
        """Convert pixel indexes to (ra_deg, dec_deg).

        coords='data' means 0-based pixel coordinates (Kapteyn itself
        uses the 1-based FITS convention internally).
        """
        # Kapteyn's WCS needs pixels referenced from 1
        if coords == 'data':
            idxs = tuple(map(lambda x: x+1, idxs))
        else:
            idxs = tuple(idxs)

        try:
            res = self.wcs.toworld(idxs)
            ra_deg, dec_deg = res[0], res[1]

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Inverse of pixtoradec(); any extra axes (naxispath) are
        padded with zeroes."""
        args = [ra_deg, dec_deg]
        if naxispath:
            args += [0] * len(naxispath)
        args = tuple(args)

        try:
            pix = self.wcs.topixel(args)

        except Exception as e:
            # fix: report through our logger like the other methods,
            # instead of printing to stdout
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Kapteyn's WCS returns pixels referenced from 1
            # bug fix: list comprehension instead of map() -- a python 3
            # map object is not subscriptable, which broke the line below
            pix = [v - 1 for v in pix]

        x, y = pix[0], pix[1]
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Convert pixel indexes to (lon_deg, lat_deg) in `system`
        (default 'icrs')."""
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        spec = self.conv_d[system]
        tran = kapwcs.Transformation(self._skyout, spec)
        lon_deg, lat_deg = tran((ra_deg, dec_deg))

        return lon_deg, lat_deg
class StarlinkWCS(BaseWCS):
    """A WCS interface for Starlink
    You need to install python module 'starlink-pyast'

        http://pypi.python.org/pypi/starlink-pyast

    if you want to use this version.
    """
    # (docstring fix: previously pointed at the kapteyn URL)

    def __init__(self, logger):
        super(StarlinkWCS, self).__init__()
        if not have_starlink:
            raise WCSError("Please install package 'starlink-pyast' first!")
        self.logger = logger
        self.header = None
        self.wcs = None
        self.coordsys = 'raw'
        self.kind = 'starlink'

    def load_header(self, header, fobj=None):
        """Build an Ast FrameSet from the FITS keywords in `header`.
        On failure self.wcs is set to None."""
        # For starlink, the header is fed to a FitsChan as formatted cards
        ## hdu = pyfits.PrimaryHDU()
        ## self.header = hdu.header
        ## for key, value in header.items():
        ##     self.header[key] = value
        self.header = {}
        # Seems pyfits header objects are not perfectly duck-typed
        # as dicts so we can't use update()
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()

        source = []
        for key, value in header.items():
            source.append("%-8.8s= %-70.70s" % (key, repr(value)))

        # following https://gist.github.com/dsberry/4171277 to get a
        # usable WCS in Ast
        try:
            # read in the header and create the default WCS transform
            #adapter = Atl.PyFITSAdapter(hdu)
            #fitschan = Ast.FitsChan(adapter)
            fitschan = Ast.FitsChan(source)
            self.wcs = fitschan.read()
            # self.wcs is a FrameSet, with a Mapping
            #self.wcs.Report = True

            self.coordsys = choose_coord_system(self.header)

            # define a transform from this destination frame to icrs/j2000
            refframe = self.wcs.getframe(2)
            toframe = Ast.SkyFrame("System=ICRS, Equinox=J2000")
            self.icrs_trans = refframe.convert(toframe)

        except Exception as e:
            # fix: python 2-only "except Exception, e" syntax replaced
            self.logger.error("Error making WCS object: %s" % (str(e)))
            self.wcs = None

    def pixtoradec(self, idxs, coords='data'):
        """Convert pixel indexes to (ra_deg, dec_deg).

        coords='data' means 0-based pixel coordinates (Starlink itself
        uses the 1-based FITS convention internally).
        """
        # Starlink's WCS needs pixels referenced from 1
        if coords == 'data':
            # bug fix: list comprehension instead of map() -- numpy.array
            # cannot build a numeric array from a python 3 map object
            idxs = numpy.array([v + 1 for v in idxs])
        else:
            idxs = numpy.array(idxs)

        try:
            # pixel to sky coords (in the WCS specified transform)
            xs, ys = [idxs[0]], [idxs[1]]
            res = self.wcs.tran([ xs, ys ], 1)
            ra_rad, dec_rad = res[0][0], res[1][0]

            # whatever sky coords to icrs coords
            res = self.icrs_trans.tran([[ra_rad], [dec_rad]], 1)
            ra_rad, dec_rad = res[0][0], res[1][0]
            ra_deg, dec_deg = math.degrees(ra_rad), math.degrees(dec_rad)

        except Exception as e:
            self.logger.error("Error calculating pixtoradec: %s" % (str(e)))
            raise WCSError(e)

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Inverse of pixtoradec().  naxispath is accepted for interface
        compatibility but unused here."""
        try:
            # sky coords to pixel (in the WCS specified transform)
            ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
            xs, ys = [ra_rad], [dec_rad]
            # 0 as second arg -> inverse transform
            res = self.wcs.tran([ xs, ys ], 0)
            x, y = res[0][0], res[1][0]

        except Exception as e:
            # fix: report through our logger like the other methods,
            # instead of printing to stdout
            self.logger.error("Error calculating radectopix: %s" % (str(e)))
            raise WCSError(e)

        if coords == 'data':
            # Starlink's WCS returns pixels referenced from 1
            x, y = x-1, y-1
        return (x, y)

    def pixtosystem(self, idxs, system=None, coords='data'):
        """Convert pixel indexes to (lon_deg, lat_deg) in `system`
        (default 'icrs')."""
        if self.coordsys == 'raw':
            raise WCSError("No usable WCS")

        if system is None:
            system = 'icrs'

        # define a transform from reference (icrs/j2000) to user's end choice
        refframe = self.icrs_trans.getframe(2)
        toframe = Ast.SkyFrame("System=%s, Epoch=2000.0" % (system.upper()))
        end_trans = refframe.convert(toframe)

        # Get a coordinates object based on ra/dec wcs transform
        ra_deg, dec_deg = self.pixtoradec(idxs, coords=coords)
        self.logger.debug("ra, dec = %f, %f" % (ra_deg, dec_deg))

        # convert to alternate coord
        ra_rad, dec_rad = math.radians(ra_deg), math.radians(dec_deg)
        res = end_trans.tran([[ra_rad], [dec_rad]], 1)
        lon_rad, lat_rad = res[0][0], res[1][0]
        lon_deg, lat_deg = math.degrees(lon_rad), math.degrees(lat_rad)

        return lon_deg, lat_deg
class BareBonesWCS(BaseWCS):
    """A very basic WCS.  Assumes J2000, units in degrees, projection TAN.

    ***** NOTE *****:
    We strongly recommend that you install one of the 3rd party python
    WCS modules referred to at the top of this module, all of which are
    much more capable than BareBonesWCS.
    ****************
    """

    def __init__(self, logger):
        super(BareBonesWCS, self).__init__()
        self.logger = logger
        self.header = {}
        self.coordsys = 'raw'
        self.kind = 'barebones'

    def load_header(self, header, fobj=None):
        """Load FITS keywords from `header` (any dict-like mapping)."""
        self.header = {}
        for key, value in header.items():
            self.header[key] = value

        self.fix_bad_headers()
        self.coordsys = choose_coord_system(self.header)

    # WCS calculations
    def get_reference_pixel(self):
        """Return the (CRPIX1, CRPIX2) reference pixel as floats."""
        x = float(self.get_keyword('CRPIX1'))
        y = float(self.get_keyword('CRPIX2'))
        return x, y

    def get_physical_reference_pixel(self):
        """Return the (CRVAL1, CRVAL2) sky position of the reference
        pixel, range-checked (RA in [0, 360), DEC in [-90, 90])."""
        xv = float(self.get_keyword('CRVAL1'))
        yv = float(self.get_keyword('CRVAL2'))
        assert 0.0 <= xv < 360.0, \
               WCSError("CRVAL1 out of range: %f" % (xv))
        assert -90.0 <= yv <= 90.0, \
               WCSError("CRVAL2 out of range: %f" % (yv))
        return xv, yv

    def get_pixel_coordinates(self):
        """Return the linear transform matrix (cd11, cd12, cd21, cd22),
        from CD keywords if present, else PC*CDELT (new or old style)."""
        try:
            cd11 = float(self.get_keyword('CD1_1'))
            cd12 = float(self.get_keyword('CD1_2'))
            cd21 = float(self.get_keyword('CD2_1'))
            cd22 = float(self.get_keyword('CD2_2'))

        except Exception as e:
            cdelt1 = float(self.get_keyword('CDELT1'))
            cdelt2 = float(self.get_keyword('CDELT2'))
            try:
                # PCi_j style
                cd11 = float(self.get_keyword('PC1_1')) * cdelt1
                cd12 = float(self.get_keyword('PC1_2')) * cdelt1
                cd21 = float(self.get_keyword('PC2_1')) * cdelt2
                cd22 = float(self.get_keyword('PC2_2')) * cdelt2
            except KeyError:
                # older PC00i00j style
                cd11 = float(self.get_keyword('PC001001')) * cdelt1
                cd12 = float(self.get_keyword('PC001002')) * cdelt1
                cd21 = float(self.get_keyword('PC002001')) * cdelt2
                cd22 = float(self.get_keyword('PC002002')) * cdelt2

        return (cd11, cd12, cd21, cd22)

    def pixtoradec(self, idxs, coords='data'):
        """Convert a (x, y) pixel coordinate on the image to a (ra, dec)
        coordinate in space.

        Parameter (coords):
        - if 'data' then x, y coordinates are interpreted as 0-based
        - otherwise coordinates are interpreted as 1-based (traditional FITS)
        """
        x, y = idxs[:2]

        # account for DATA->FITS coordinate space
        if coords == 'data':
            x, y = x + 1, y + 1

        crpix1, crpix2 = self.get_reference_pixel()
        crval1, crval2 = self.get_physical_reference_pixel()
        cd11, cd12, cd21, cd22 = self.get_pixel_coordinates()

        ra_deg = (cd11 * (x - crpix1) + cd12 *
                  (y - crpix2)) / math.cos(math.radians(crval2)) + crval1
        dec_deg = cd21 * (x - crpix1) + cd22 * (y - crpix2) + crval2

        return ra_deg, dec_deg

    def radectopix(self, ra_deg, dec_deg, coords='data', naxispath=None):
        """Convert a (ra_deg, dec_deg) space coordinates to (x, y) pixel
        coordinates on the image.  ra and dec are expected as floats in
        degrees.  naxispath is accepted for interface compatibility but
        unused here.

        Parameter (coords):
        - if 'data' then x, y coordinates are returned as 0-based
        - otherwise coordinates are returned as 1-based (traditional FITS)
        """
        crpix1, crpix2 = self.get_reference_pixel()
        crval1, crval2 = self.get_physical_reference_pixel()
        cd11, cd12, cd21, cd22 = self.get_pixel_coordinates()

        # determinant of the transform matrix
        rmatrix = (cd11 * cd22) - (cd12 * cd21)

        # bug fix: the original used the python 2-only builtin cmp() here;
        # `not cmp(x, 0.0)` is true exactly when x == 0.0 (singular matrix)
        if rmatrix == 0.0:
            raise WCSError("WCS Matrix Error: check values")

        # Adjust RA as necessary
        if (ra_deg - crval1) > 180.0:
            ra_deg -= 360.0
        elif (ra_deg - crval1) < -180.0:
            ra_deg += 360.0

        try:
            x = (cd22 * math.cos(crval2 * math.pi/180.0) *
                 (ra_deg - crval1) - cd12 *
                 (dec_deg - crval2))/rmatrix + crpix1
            y = (cd11 * (dec_deg - crval2) - cd21 *
                 math.cos(crval2 * math.pi/180.0) *
                 (ra_deg - crval1))/rmatrix + crpix2

        except Exception as e:
            raise WCSError("radectopix calculation error: %s" % str(e))

        # account for FITS->DATA space
        if coords == 'data':
            x, y = x - 1, y - 1
        return (x, y)

    def pixtocoords(self, idxs, system='icrs', coords='data'):
        # not implemented for the barebones WCS
        return None
class WcslibWCS(AstropyWCS):
    """DO NOT USE--this class name is deprecated; use AstropyWCS instead."""
    pass
# HELP FUNCTIONS
def choose_coord_units(header):
    """Return the appropriate key code for the units value for the axes by
    examining the FITS header.

    Raises KeyError if CUNIT1 is absent.  Only degrees are supported at
    present, so 'degree' is returned for any unit string.
    """
    unit_str = header['CUNIT1']
    if re.match(r'^deg\s*$', unit_str):
        return 'degree'

    # no other unit systems are supported yet; fall back to degrees
    #raise WCSError("Don't understand units '%s'" % (unit_str))
    return 'degree'
def choose_coord_system(header):
    """Return an appropriate key code for the axes coordinate system by
    examining the FITS header.

    Returns 'galactic', 'ecliptic', a lower-cased RADESYS value
    ('icrs', 'fk5', 'fk4', ...) for RA/DEC axes, 'raw' when CTYPE1 is
    missing, or 'icrs' for an unrecognized CTYPE1.
    """
    try:
        # ctype is stripped/upper-cased, so simple prefix tests below
        # are equivalent to the old anchored regexes
        ctype = header['CTYPE1'].strip().upper()
    except KeyError:
        return 'raw'
        #raise WCSError("Cannot determine appropriate coordinate system from FITS header")

    if ctype.startswith('GLON-'):
        return 'galactic'

    if ctype.startswith('ELON-'):
        return 'ecliptic'

    if ctype.startswith('RA---'):
        try:
            radecsys = header['RADECSYS']
        except KeyError:
            try:
                radecsys = header['RADESYS']
            except KeyError:
                # missing keyword
                # RADESYS defaults to ICRS unless EQUINOX is given
                # alone, in which case it defaults to FK4 prior to 1984
                # and FK5 after 1984.
                try:
                    equinox = float(header['EQUINOX'])
                    # bug fix: the old code chose FK5 unconditionally
                    # here, ignoring the pre-1984 FK4 convention it
                    # documented
                    radecsys = 'FK4' if equinox < 1984.0 else 'FK5'
                except KeyError:
                    radecsys = 'ICRS'
                except (ValueError, TypeError):
                    # unparseable EQUINOX value
                    radecsys = 'FK5'

        return radecsys.strip().lower()

    # unrecognized CTYPE1; assume icrs
    #raise WCSError("Cannot determine appropriate coordinate system from FITS header")
    return 'icrs'
def simple_wcs(px_x, px_y, ra_deg, dec_deg, px_scale_deg_px, pa_deg):
    """Calculate a set of WCS keywords for a 2D simple instrument FITS
    file with a 'standard' RA/DEC pixel projection.

    Parameters:
        px_x            : reference pixel of field in X (usually center of field)
        px_y            : reference pixel of field in Y (usually center of field)
        ra_deg          : RA (in deg) for the reference point
        dec_deg         : DEC (in deg) for the reference point
        px_scale_deg_px : pixel scale deg/pixel
        pa_deg          : position angle of the instrument (in deg)

    Returns a WCS object.  Use the to_header() method on it to get
    something interesting that you can use.
    """
    wcsobj = pywcs.WCS()

    # reference pixel and its sky position (center of the projection)
    wcsobj.wcs.crpix = [px_x, px_y]
    wcsobj.wcs.crval = [ra_deg, dec_deg]

    # image scale in deg/pix; RA axis runs opposite to X
    wcsobj.wcs.cdelt = numpy.array([-1, 1]) * px_scale_deg_px

    # rotation matrix for the position angle of north (radians E of N)
    theta = numpy.radians(pa_deg)
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    wcsobj.wcs.pc = numpy.array([[cos_t, -sin_t], [sin_t, cos_t]])

    return wcsobj
# Unit conversion factors between sexagesimal (H:M:S / D:M:S)
# representations and decimal degrees.
degPerHMSHour = 15.0        #360/24
degPerHMSMin = 0.25         #360.0/24/60
degPerHMSSec = 1.0/240.0    #360.0/24/60/60

degPerDmsMin = 1.0/60.0
degPerDmsSec = 1.0/3600.0

# inverse factors: degrees -> hours/minutes/seconds of RA
HMSHourPerDeg = 1.0/15.0
HMSMinPerDeg = 4.0
HMSSecPerDeg = 240.0
def hmsToDeg(h, m, s):
    """Convert RA hours, minutes, seconds into an angle in degrees."""
    degrees = h * degPerHMSHour
    degrees += m * degPerHMSMin
    degrees += s * degPerHMSSec
    return degrees
def dmsToDeg(sign, deg, min, sec):
    """Convert dec sign, degrees, minutes, seconds into a signed angle in degrees."""
    magnitude = deg + min * degPerDmsMin + sec * degPerDmsSec
    return sign * magnitude
def decTimeToDeg(sign_sym, deg, min, sec):
    """Convert dec sign, degrees, minutes, seconds into a signed angle in degrees.

    sign_sym may represent negative as either '-' or numeric -1."""
    sign = -1 if sign_sym in (-1, '-') else 1
    return dmsToDeg(sign, deg, min, sec)
def degToHms(ra):
    """Converts the ra (in degrees) to HMS three tuple.

    H and M are in integer and the S part is in float.
    """
    assert (ra >= 0.0), WCSError("RA (%f) is negative" % (ra))
    assert ra < 360.0, WCSError("RA (%f) > 360.0" % (ra))
    hours = ra / degPerHMSHour
    minutes = (ra % degPerHMSHour) * HMSMinPerDeg
    seconds = (ra % degPerHMSMin) * HMSSecPerDeg
    return (int(hours), int(minutes), seconds)
def degToDms(dec, isLatitude=True):
    """Convert the dec, in degrees, to an (sign,D,M,S) tuple.

    D and M are integer, and sign and S are float.
    """
    if isLatitude:
        assert dec <= 90, WCSError("DEC (%f) > 90.0" % (dec))
        assert dec >= -90, WCSError("DEC (%f) < -90.0" % (dec))

    sign = -1.0 if dec < 0.0 else 1.0
    dec = dec * sign

    # Work in total arcseconds and let divmod carry the remainders,
    # so values like +31.2666666667 deg come out as D=31 M=16 S=0.0
    # rather than D=31 M=15 S=60 (historical rounding problem with the
    # remainder-based conversion).
    mnt, sec = divmod(dec * 3600, 60)
    deg, mnt = divmod(mnt, 60)

    return (int(sign), int(deg), int(mnt), sec)
def arcsecToDeg(arcsec):
    """Convert numeric arcseconds (aka DMS seconds) to degrees of arc."""
    degrees = arcsec * degPerDmsSec
    return degrees
def hmsStrToDeg(ra):
    """Convert a string representation of RA ('H:M:S') into a float in
    degrees."""
    hour_s, min_s, sec_s = ra.split(':')
    return hmsToDeg(int(hour_s), int(min_s), float(sec_s))
def dmsStrToDeg(dec):
    """Convert a string representation of DEC ('[sign]D:M:S') into a
    float in degrees."""
    sign_deg, min_s, sec_s = dec.split(':')
    # the first character of the degrees field carries the sign
    sign_sym = sign_deg[0:1]
    deg_s = sign_deg[1:]
    return decTimeToDeg(sign_sym, int(deg_s), int(min_s), float(sec_s))
def raDegToString(ra_deg, format='%02d:%02d:%06.3f'):
    """Format an RA in degrees as a sexagesimal (H:M:S) string."""
    # wrap values over 360 deg back into range
    if ra_deg > 360.0:
        ra_deg = math.fmod(ra_deg, 360.0)

    hrs, mins, secs = degToHms(ra_deg)
    return format % (hrs, mins, secs)
def decDegToString(dec_deg, format='%s%02d:%02d:%05.2f'):
    """Format a DEC in degrees as a signed sexagesimal (D:M:S) string."""
    sign, degs, mins, secs = degToDms(dec_deg)
    sign_sym = '+' if sign > 0 else '-'
    return format % (sign_sym, int(degs), int(mins), secs)
# this function is provided by MOKA2 Development Team (1996.xx.xx)
# and used in SOSS system
def trans_coeff (eq, x, y, z):
tt = (eq - 2000.0) / 100.0
zeta = 2306.2181*tt+0.30188*tt*tt+0.017998*tt*tt*tt
zetto = 2306.2181*tt+1.09468*tt*tt+0.018203*tt*tt*tt
theta = 2004.3109*tt-0.42665*tt*tt-0.041833*tt*tt*tt
zeta = math.radians(zeta) / 3600.0
zetto = math.radians(zetto) / 3600.0
theta = math.radians(theta) / 3600.0
p11 = math.cos(zeta)*math.cos(theta)*math.cos(zetto)-math.sin(zeta)*math.sin(zetto)
p12 = -math.sin(zeta)*math.cos(theta)*math.cos(zetto)-math.cos(zeta)*math.sin(zetto)
p13 = -math.sin(theta)*math.cos(zetto)
p21 = math.cos(zeta)*math.cos(theta)*math.sin(zetto)+math.sin(zeta)*math.cos(zetto)
p22 = -math.sin(zeta)*math.cos(theta)*math.sin(zetto)+math.cos(zeta)*math.cos(zetto)
p23 = -math.sin(theta)*math.sin(zetto)
p31 = math.cos(zeta)*math.sin(theta)
p32 = -math.sin(zeta)*math.sin(theta)
p33 = math.cos(theta)
return (p11,p12,p13, p21, p22, p23, p31,p32, p33)
def eqToEq2000(ra_deg, dec_deg, eq):
    """Precess equatorial coordinates (ra_deg, dec_deg), given for
    equinox `eq`, returning (new_ra_deg, new_dec_deg) in degrees.

    NOTE(review): multiplies by the transpose of the matrix returned by
    trans_coeff -- presumably rotating from mean equinox `eq` to J2000;
    verify direction against callers.
    """
    ra_rad = math.radians(ra_deg)
    dec_rad = math.radians(dec_deg)
    # unit vector on the celestial sphere
    x = math.cos(dec_rad) * math.cos(ra_rad)
    y = math.cos(dec_rad) * math.sin(ra_rad)
    z = math.sin(dec_rad)
    p11, p12, p13, p21, p22, p23, p31, p32, p33 = trans_coeff (eq, x, y, z)
    # rotate the vector (note: uses the matrix transpose)
    x0 = p11*x + p21*y + p31*z
    y0 = p12*x + p22*y + p32*z
    z0 = p13*x + p23*y + p33*z
    new_dec = math.asin(z0)
    if x0 == 0.0:
        new_ra = math.pi / 2.0
    else:
        new_ra = math.atan( y0/x0 )
    # resolve the quadrant ambiguity of atan() and normalize RA into
    # [0, 2*pi)
    if ((y0*math.cos(new_dec) > 0.0 and x0*math.cos(new_dec) <= 0.0) or
        (y0*math.cos(new_dec) <= 0.0 and x0*math.cos(new_dec) < 0.0) ):
        new_ra += math.pi
    elif new_ra < 0.0:
        new_ra += 2.0*math.pi
    # radians -> degrees
    #new_ra = new_ra * 12.0 * 3600.0 / math.pi
    new_ra_deg = new_ra * 12.0 / math.pi * 15.0
    #new_dec = new_dec * 180.0 * 3600.0 / math.pi
    new_dec_deg = new_dec * 180.0 / math.pi
    return (new_ra_deg, new_dec_deg)
def get_rotation_and_scale(header):
    """Calculate the rotation and pixel scales of an image from its FITS
    header, returning (rot_deg, cdelt1, cdelt2).

    CREDIT: See IDL code at
    # http://www.astro.washington.edu/docs/idl/cgi-bin/getpro/library32.html?GETROT
    """
    # TODO: need to do the right thing if only PC?_? and CDELT?
    # keywords are given
    #
    cd1_1 = header['CD1_1']
    cd1_2 = header['CD1_2']
    cd2_1 = header['CD2_1']
    cd2_2 = header['CD2_2']

    try:
        # Image has plate scale keywords?
        cdelt1 = header['CDELT1']
        cdelt2 = header['CDELT2']
        s = float(cdelt1) / float(cdelt2)
        xrot = math.atan2(cd2_1*s, cd1_1)
        yrot = math.atan2(-cd1_2/s, cd2_2)

    except KeyError:
        # No, calculate them from the CD matrix
        det = cd1_1*cd2_2 - cd1_2*cd2_1
        if det < 0:
            sgn = -1
        else:
            sgn = 1
        ## if det > 0:
        ##     print 'WARNING - Astrometry is for a right-handed coordinate system'

        if (cd2_1 == 0.0) or (cd1_2 == 0.0):
            # Unrotated coordinates?
            xrot = 0.0
            yrot = 0.0
            cdelt1 = cd1_1
            cdelt2 = cd2_2
        else:
            cdelt1 = sgn * math.sqrt(cd1_1**2 + cd2_1**2)
            cdelt2 = math.sqrt(cd1_2**2 + cd2_2**2)
            if cdelt1 > 0:
                sgn1 = 1
            else:
                sgn1 = -1
            xrot = math.atan2(-cd2_1, sgn1*cd1_1)
            yrot = math.atan2(sgn1*cd1_2, cd2_2)

    xrot, yrot = math.degrees(xrot), math.degrees(yrot)

    if xrot != yrot:
        # bug fix: xrot/yrot are already in degrees at this point; the
        # old code applied math.degrees() a second time in this message
        # (also converted from the python 2-only print statement)
        print('X axis rotation: %f Y axis rotation: %f' % (xrot, yrot))
        rot = (xrot + yrot) / 2.0
    else:
        rot = xrot

    # NOTE(review): cdelt1/cdelt2 come straight from the header (already
    # deg/pixel for standard FITS); running them through math.degrees()
    # multiplies by 180/pi, which looks suspicious -- preserved for
    # backward compatibility.  TODO: confirm against callers.
    cdelt1, cdelt2 = math.degrees(cdelt1), math.degrees(cdelt2)

    return (rot, cdelt1, cdelt2)
class WcsMatch(object):
    """Fit small corrections (d_ra, d_dec, d_theta) to a WCS so that the
    pixel positions computed from reference sky positions match their
    measured pixel positions.

    CREDIT: Code modified from
    http://www.astropython.org/snippet/2011/1/Fix-the-WCS-for-a-FITS-image-file
    """

    def __init__(self, header, wcsClass, xy_coords, ref_coords):
        # Image
        self.hdr = header
        from ginga.misc.log import NullLogger
        self.wcs = wcsClass(NullLogger())
        self.wcs.load_header(self.hdr)

        # Reference (correct) source positions in RA, Dec
        self.ref_coords = numpy.array(ref_coords)

        # Measured source pixel positions, flattened to match calc_pix()
        #xy_coords = map(lambda args: self.wcs.radectopix(*args), img_coords)
        self.pix0 = numpy.array(xy_coords).flatten()

        # Copy the original WCS CRVAL and CD (or PC) values
        # (bug fix: get_keywords() returns a lazy map object under
        # python 3; materialize it before handing it to numpy.array)
        self.has_cd = False
        self.crval = numpy.array(list(self.wcs.get_keywords('CRVAL1',
                                                            'CRVAL2')))
        try:
            cd = numpy.array(list(self.wcs.get_keywords('CD1_1', 'CD1_2',
                                                        'CD2_1', 'CD2_2')))
            self.cd = cd.reshape((2, 2))
            self.has_cd = True
        except KeyError:
            cd = numpy.array(list(self.wcs.get_keywords('PC1_1', 'PC1_2',
                                                        'PC2_1', 'PC2_2')))
            self.cd = cd.reshape((2, 2))

    def rotate(self, degs):
        """Return the 2x2 rotation matrix for an angle given in degrees."""
        rads = numpy.radians(degs)
        s = numpy.sin(rads)
        c = numpy.cos(rads)
        return numpy.array([[c, -s],
                            [s, c]])

    def calc_pix(self, pars):
        """For the given d_ra, d_dec, and d_theta pars, update the WCS
        transformation and calculate the new pixel coordinates for each
        reference source position.
        """
        # calculate updated ra/dec and rotation
        # (d_ra/d_dec appear to be arcsecond offsets; divided by 3600
        # to degrees)
        d_ra, d_dec, d_theta = pars
        crval = self.crval + numpy.array([d_ra, d_dec]) / 3600.0
        cd = numpy.dot(self.rotate(d_theta), self.cd)

        # temporarily assign to the WCS
        d = self.hdr
        d.update(dict(CRVAL1=crval[0], CRVAL2=crval[1]))
        if self.has_cd:
            d.update(dict(CD1_1=cd[0,0], CD1_2=cd[0,1], CD2_1=cd[1,0], CD2_2=cd[1,1]))
        else:
            d.update(dict(PC1_1=cd[0,0], PC1_2=cd[0,1], PC2_1=cd[1,0], PC2_2=cd[1,1]))
        self.wcs.load_header(self.hdr)

        # calculate the new pixel values based on this wcs
        # (bug fix: list comprehension instead of map() -- numpy.array
        # cannot build a numeric array from a python 3 map object)
        pix = numpy.array([self.wcs.radectopix(*args)
                           for args in self.ref_coords]).flatten()
        #print 'pix =', pix
        #print 'pix0 =', self.pix0
        return pix

    def calc_resid2(self, pars):
        """Return the squared sum of the residual difference between the
        original pixel coordinates and the new pixel coords (given offset
        specified in ``pars``)

        This gets called by the scipy.optimize.fmin function.
        """
        pix = self.calc_pix(pars)
        resid2 = numpy.sum((self.pix0 - pix) ** 2)  # assumes uniform errors
        #print 'resid2 =', resid2
        return resid2

    def calc_match(self):
        """Minimize the residuals, write the best-fit CRVAL/CD (or PC)
        values back into the header, and return (d_ra, d_dec, d_theta)."""
        from scipy.optimize import fmin
        x0 = numpy.array([0.0, 0.0, 0.0])
        d_ra, d_dec, d_theta = fmin(self.calc_resid2, x0)

        crval = self.crval + numpy.array([d_ra, d_dec]) / 3600.0
        cd = numpy.dot(self.rotate(d_theta), self.cd)
        d = self.hdr
        d.update(dict(CRVAL1=crval[0], CRVAL2=crval[1]))
        if self.has_cd:
            d.update(dict(CD1_1=cd[0,0], CD1_2=cd[0,1], CD2_1=cd[1,0], CD2_2=cd[1,1]))
        else:
            d.update(dict(PC1_1=cd[0,0], PC1_2=cd[0,1], PC2_1=cd[1,0], PC2_2=cd[1,1]))
        self.wcs.load_header(self.hdr)

        # return delta ra/dec and delta rotation
        return (d_ra, d_dec, d_theta)
# default WCS class, used if no third-party package can be loaded
WCS = BareBonesWCS

# try to use the third-party packages in this preference order
# (bug fix: the old tuple contained 'pyast', a name use() does not
# recognize, and omitted 'astlib', so astLib was never tried)
for name in ('kapteyn', 'starlink', 'astlib', 'astropy'):
    if use(name, raise_err=False):
        break
#END
| |
"""
Does the following:
1. Generates and saves random secret key
2. Removes the taskapp if celery isn't going to be used
3. Removes the .idea directory if PyCharm isn't going to be used
4. Copy files from /docs/ to {{ cookiecutter.project_slug }}/docs/
TODO: this might have to be moved to a pre_gen_hook
A portion of this code was adapted from Django's standard crypto functions and
utilities, specifically:
https://github.com/django/django/blob/master/django/utils/crypto.py
"""
from __future__ import print_function
import os
import random
import shutil
# Get the root project directory
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)

# Use the system PRNG if possible
try:
    # deliberately shadows the `random` module with a SystemRandom
    # instance; both expose the `choice` API used below
    random = random.SystemRandom()
    using_sysrandom = True
except NotImplementedError:
    # platform has no os.urandom(); fall back handled in get_random_string()
    using_sysrandom = False
def get_random_string(
        length=50,
        allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'):
    """
    Returns a securely generated random string.

    :param length: number of characters to generate (default 50).
    :param allowed_chars: alphabet the characters are drawn from.
    :return: a random string of ``length`` characters, or the placeholder
        ``"CHANGEME!!"`` when no secure PRNG is available on this system.
    """
    # FIX: the old docstring claimed a default length of 12 and an
    # a-z/A-Z/0-9 charset, neither of which matched the actual defaults.
    if using_sysrandom:
        return ''.join(random.choice(allowed_chars) for i in range(length))
    # FIX: removed the doubled word "change change" from the message.
    print(
        "Cookiecutter Django couldn't find a secure pseudo-random number generator on your system."
        " Please change your SECRET_KEY variables in conf/settings/local.py and env.example"
        " manually."
    )
    return "CHANGEME!!"
def set_secret_key(setting_file_location):
    """Replace the first 'CHANGEME!!!' placeholder in the given file with
    a freshly generated secret key."""
    # Read the current file contents
    with open(setting_file_location) as f:
        contents = f.read()

    # Generate a SECRET_KEY that matches the Django standard
    secret_key = get_random_string()

    # Substitute only the first occurrence of the placeholder
    contents = contents.replace('CHANGEME!!!', secret_key, 1)

    # Write the updated contents back to the same file
    with open(setting_file_location, 'w') as f:
        f.write(contents)
def make_secret_key(project_directory):
    """Generates and saves random secret key"""
    # Both of these files ship with a 'CHANGEME!!!' placeholder;
    # local.py first, then env.example (same order as before).
    for relative_path in ('config/settings/local.py', 'env.example'):
        set_secret_key(os.path.join(project_directory, relative_path))
def remove_file(file_name):
    """Delete ``file_name`` if it exists; missing files are ignored."""
    if not os.path.exists(file_name):
        return
    os.remove(file_name)
def remove_task_app(project_directory):
    """Removes the taskapp if celery isn't going to be used.

    :param project_directory: root of the generated project.
    """
    # FIX: previously this ignored its ``project_directory`` parameter and
    # used the module-level PROJECT_DIRECTORY global instead.
    task_app_location = os.path.join(
        project_directory,
        '{{ cookiecutter.project_slug }}/taskapp'
    )
    shutil.rmtree(task_app_location)
def remove_pycharm_dir(project_directory):
    """
    Removes directories related to PyCharm
    if it isn't going to be used.

    :param project_directory: root of the generated project.
    """
    # FIX: previously this ignored its ``project_directory`` parameter and
    # used the module-level PROJECT_DIRECTORY global instead.
    idea_dir_location = os.path.join(project_directory, '.idea/')
    if os.path.exists(idea_dir_location):
        shutil.rmtree(idea_dir_location)
    docs_dir_location = os.path.join(project_directory, 'docs/pycharm/')
    if os.path.exists(docs_dir_location):
        shutil.rmtree(docs_dir_location)
def remove_heroku_files():
    """
    Removes files needed for heroku if it isn't going to be used
    """
    heroku_files = ("app.json", "Procfile", "requirements.txt", "runtime.txt")
    for name in heroku_files:
        remove_file(os.path.join(PROJECT_DIRECTORY, name))
def remove_docker_files():
    """
    Removes files needed for docker if it isn't going to be used
    """
    # FIX: use the tolerant remove_file() helper (consistent with
    # remove_heroku_files) and guard the rmtree, so re-running the hook or
    # a template without these files no longer crashes with FileNotFoundError.
    for filename in ("dev.yml", "docker-compose.yml", ".dockerignore"):
        remove_file(os.path.join(PROJECT_DIRECTORY, filename))

    compose_dir = os.path.join(PROJECT_DIRECTORY, "compose")
    if os.path.exists(compose_dir):
        shutil.rmtree(compose_dir)
def remove_grunt_files():
    """
    Removes files needed for grunt if it isn't going to be used
    """
    grunt_files = ("Gruntfile.js",)
    for name in grunt_files:
        os.remove(os.path.join(PROJECT_DIRECTORY, name))
def remove_gulp_files():
    """
    Removes files needed for gulp if it isn't going to be used
    """
    # FIX: the docstring was copy-pasted from remove_grunt_files and
    # incorrectly said "grunt".
    for filename in ["gulpfile.js"]:
        os.remove(os.path.join(
            PROJECT_DIRECTORY, filename
        ))
def remove_packageJSON_file():
    """
    Removes the npm package.json file if no JS task runner is going to be used
    """
    # FIX: the docstring was copy-pasted from remove_grunt_files and
    # incorrectly said "grunt".
    for filename in ["package.json"]:
        os.remove(os.path.join(
            PROJECT_DIRECTORY, filename
        ))
def remove_certbot_files():
    """
    Removes files needed for certbot if it isn't going to be used
    """
    nginx_dir = os.path.join(PROJECT_DIRECTORY, 'compose/nginx')
    certbot_files = ("nginx-secure.conf", "start.sh", "dhparams.example.pem")
    for name in certbot_files:
        remove_file(os.path.join(nginx_dir, name))
def remove_copying_files():
    """
    Removes files needed for the GPLv3 licence if it isn't going to be used
    """
    gpl_files = ("COPYING",)
    for name in gpl_files:
        os.remove(os.path.join(PROJECT_DIRECTORY, name))
# IN PROGRESS
# def copy_doc_files(project_directory):
# cookiecutters_dir = DEFAULT_CONFIG['cookiecutters_dir']
# cookiecutter_django_dir = os.path.join(
# cookiecutters_dir,
# 'cookiecutter-django',
# 'docs'
# )
# target_dir = os.path.join(
# project_directory,
# 'docs'
# )
# for name in os.listdir(cookiecutter_django_dir):
# if name.endswith('.rst') and not name.startswith('index'):
# src = os.path.join(cookiecutter_django_dir, name)
# dst = os.path.join(target_dir, name)
# shutil.copyfile(src, dst)
# ---- hook entry point -------------------------------------------------
# NOTE(review): the '{{ cookiecutter.* }}' expressions below are presumably
# rendered by cookiecutter before this hook runs, so the comparisons see the
# user's actual answers — confirm against the cookiecutter docs.

# 1. Generates and saves random secret key
make_secret_key(PROJECT_DIRECTORY)

# 2. Removes the taskapp if celery isn't going to be used
if '{{ cookiecutter.use_celery }}'.lower() == 'n':
    remove_task_app(PROJECT_DIRECTORY)

# 3. Removes the .idea directory if PyCharm isn't going to be used
if '{{ cookiecutter.use_pycharm }}'.lower() != 'y':
    remove_pycharm_dir(PROJECT_DIRECTORY)

# 4. Removes all heroku files if it isn't going to be used
if '{{ cookiecutter.use_heroku }}'.lower() != 'y':
    remove_heroku_files()

# 5. Removes all docker files if it isn't going to be used
if '{{ cookiecutter.use_docker }}'.lower() != 'y':
    remove_docker_files()

# 6. Removes all JS task manager files if it isn't going to be used
if '{{ cookiecutter.js_task_runner}}'.lower() == 'gulp':
    remove_grunt_files()
elif '{{ cookiecutter.js_task_runner}}'.lower() == 'grunt':
    remove_gulp_files()
else:
    # no JS runner selected: drop both runners plus package.json
    remove_gulp_files()
    remove_grunt_files()
    remove_packageJSON_file()

# 7. Removes all certbot/letsencrypt files if it isn't going to be used
if '{{ cookiecutter.use_lets_encrypt }}'.lower() != 'y':
    remove_certbot_files()

# 8. Display a warning if use_docker and use_grunt are selected. Grunt isn't
# supported by our docker config atm.
if '{{ cookiecutter.js_task_runner }}'.lower() in ['grunt', 'gulp'] and '{{ cookiecutter.use_docker }}'.lower() == 'y':
    print(
        "You selected to use docker and a JS task runner. This is NOT supported out of the box for now. You "
        "can continue to use the project like you normally would, but you will need to add a "
        "js task runner service to your docker configuration manually."
    )

# 9. Removes the certbot/letsencrypt files and display a warning if use_lets_encrypt is selected and use_docker isn't.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() != 'y':
    remove_certbot_files()
    print(
        "You selected to use Let's Encrypt and didn't select to use docker. This is NOT supported out of the box for now. You "
        "can continue to use the project like you normally would, but Let's Encrypt files have been included."
    )

# 10. Directs the user to the documentation if certbot and docker are selected.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() == 'y':
    print(
        "You selected to use Let's Encrypt, please see the documentation for instructions on how to use this in production. "
        "You must generate a dhparams.pem file before running docker-compose in a production environment."
    )

# 11. Removes files needed for the GPLv3 licence if it isn't going to be used.
if '{{ cookiecutter.open_source_license}}' != 'GPLv3':
    remove_copying_files()

# 4. Copy files from /docs/ to {{ cookiecutter.project_slug }}/docs/
# copy_doc_files(PROJECT_DIRECTORY)
| |
import contextlib
import functools
import socket
import ssl
import tempfile
import time
from typing import (
Any,
Callable,
Container,
Dict,
Generic,
Hashable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import pytest
from . import basecontrollers, client_mock, patma, runner, tls
from .authentication import Authentication
from .basecontrollers import TestCaseControllerConfig
from .exceptions import ConnectionClosed
from .irc_utils import capabilities, message_parser
from .irc_utils.message_parser import Message
from .irc_utils.sasl import sasl_plain_blob
from .numerics import (
ERR_BADCHANNELKEY,
ERR_BANNEDFROMCHAN,
ERR_INVITEONLYCHAN,
ERR_NEEDREGGEDNICK,
ERR_NOSUCHCHANNEL,
ERR_TOOMANYCHANNELS,
RPL_HELLO,
)
from .specifications import Capabilities, IsupportTokens, Specifications
__tracebackhide__ = True  # Hide from pytest tracebacks on test failure.

# Numeric replies a server may send in response to a failed JOIN attempt;
# used by joinChannel() to turn them into ChannelJoinException.
CHANNEL_JOIN_FAIL_NUMERICS = frozenset(
    [
        ERR_NOSUCHCHANNEL,
        ERR_TOOMANYCHANNELS,
        ERR_BADCHANNELKEY,
        ERR_INVITEONLYCHAN,
        ERR_BANNEDFROMCHAN,
        ERR_NEEDREGGEDNICK,
    ]
)

# typevar for decorators
TCallable = TypeVar("TCallable", bound=Callable)
TClass = TypeVar("TClass", bound=Type)

# typevar for the client name used by tests (usually int or str)
TClientName = TypeVar("TClientName", bound=Union[Hashable, int])

# typevar for the controller attached to a test case
TController = TypeVar("TController", bound=basecontrollers._BaseController)

# general-purpose typevar
T = TypeVar("T")
class ChannelJoinException(Exception):
    """Raised when the server rejects a JOIN with an error numeric."""

    def __init__(self, code: str, params: List[str]):
        message = f"Failed to join channel ({code}): {params}"
        super().__init__(message)
        # keep the raw numeric and its parameters for callers to inspect
        self.code = code
        self.params = params
class _IrcTestCase(Generic[TController]):
    """Base class for test cases.

    It implements various `assert*` method that look like unittest's,
    but is actually based on the `assert` statement so derived classes are
    pytest-style rather than unittest-style.

    It also calls setUp() and tearDown() like unittest would."""

    # Will be set by __main__.py
    controllerClass: Type[TController]
    show_io: bool

    controller: TController

    __new__ = object.__new__  # pytest won't collect Generic subclasses otherwise

    @staticmethod
    def config() -> TestCaseControllerConfig:
        """Some configuration to pass to the controllers.
        For example, Oragono only enables its MySQL support if
        config()["chathistory"]=True.
        """
        return TestCaseControllerConfig()

    def setUp(self) -> None:
        # controllerClass is annotated non-Optional but may still be unset
        # in some collection scenarios, hence the guard.
        if self.controllerClass is not None:
            self.controller = self.controllerClass(self.config())
        if self.show_io:
            print("---- new test ----")

    def tearDown(self) -> None:
        pass

    def setup_method(self, method: Callable) -> None:
        # pytest-style hook delegating to the unittest-style name
        self.setUp()

    def teardown_method(self, method: Callable) -> None:
        self.tearDown()

    def assertMessageMatch(self, msg: Message, **kwargs: Any) -> None:
        """Helper for partially comparing a message.

        Takes the message as first arguments, and comparisons to be made
        as keyword arguments.

        Uses patma.match_list on the params argument.
        """
        error = self.messageDiffers(msg, **kwargs)
        if error:
            raise AssertionError(error)

    def messageEqual(self, msg: Message, **kwargs: Any) -> bool:
        """Boolean negation of `messageDiffers` (returns a boolean,
        not an optional string)."""
        return not self.messageDiffers(msg, **kwargs)

    def messageDiffers(
        self,
        msg: Message,
        # NOTE(review): ``target`` is accepted but never used below —
        # kept for interface compatibility; confirm whether it should
        # participate in the comparison.
        params: Optional[List[Union[str, None, patma.Operator]]] = None,
        target: Optional[str] = None,
        tags: Optional[
            Dict[Union[str, patma.Operator], Union[str, patma.Operator, None]]
        ] = None,
        nick: Optional[str] = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
        **kwargs: Any,
    ) -> Optional[str]:
        """Returns an error message if the message doesn't match the given arguments,
        or None if it matches."""
        # direct attribute comparisons (command=..., prefix=..., etc.)
        for (key, value) in kwargs.items():
            if getattr(msg, key) != value:
                fail_msg = (
                    fail_msg or "expected {param} to be {expects}, got {got}: {msg}"
                )
                return fail_msg.format(
                    *extra_format,
                    got=getattr(msg, key),
                    expects=value,
                    param=key,
                    msg=msg,
                )

        if params and not patma.match_list(list(msg.params), params):
            fail_msg = (
                fail_msg or "expected params to match {expects}, got {got}: {msg}"
            )
            return fail_msg.format(
                *extra_format, got=msg.params, expects=params, msg=msg
            )

        if tags and not patma.match_dict(msg.tags, tags):
            fail_msg = fail_msg or "expected tags to match {expects}, got {got}: {msg}"
            return fail_msg.format(*extra_format, got=msg.tags, expects=tags, msg=msg)

        if nick:
            got_nick = msg.prefix.split("!")[0] if msg.prefix else None
            if nick != got_nick:
                fail_msg = (
                    fail_msg
                    or "expected nick to be {expects}, got {got} instead: {msg}"
                )
                # FIX: previously passed ``param=key``, which raised a
                # NameError when no **kwargs were given (``key`` was only
                # bound by the loop above).
                return fail_msg.format(
                    *extra_format, got=got_nick, expects=nick, param="nick", msg=msg
                )

        return None

    def assertIn(
        self,
        member: Any,
        container: Union[Iterable[Any], Container[Any]],
        msg: Optional[str] = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, item=member, list=container, msg=msg)
        assert member in container, msg  # type: ignore

    def assertNotIn(
        self,
        member: Any,
        container: Union[Iterable[Any], Container[Any]],
        msg: Optional[str] = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, item=member, list=container, msg=msg)
        assert member not in container, msg  # type: ignore

    def assertEqual(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        assert got == expects, msg

    def assertNotEqual(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        assert got != expects, msg

    def assertGreater(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        # FIX: previously used ``>=``, which made assertGreater behave
        # exactly like assertGreaterEqual.
        assert got > expects, msg  # type: ignore

    def assertGreaterEqual(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        assert got >= expects, msg  # type: ignore

    def assertLess(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        assert got < expects, msg  # type: ignore

    def assertLessEqual(
        self,
        got: T,
        expects: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, expects=expects, msg=msg)
        assert got <= expects, msg  # type: ignore

    def assertTrue(
        self,
        got: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, msg=msg)
        assert got, msg

    def assertFalse(
        self,
        got: T,
        msg: Any = None,
        fail_msg: Optional[str] = None,
        extra_format: Tuple = (),
    ) -> None:
        if fail_msg:
            msg = fail_msg.format(*extra_format, got=got, msg=msg)
        assert not got, msg

    @contextlib.contextmanager
    def assertRaises(self, exception: Type[Exception]) -> Iterator[None]:
        """Context manager asserting that the body raises ``exception``."""
        with pytest.raises(exception):
            yield
class BaseClientTestCase(_IrcTestCase[basecontrollers.BaseClientController]):
    """Basic class for client tests. Handles spawning a client and exchanging
    messages with it."""

    conn: Optional[socket.socket]
    nick: Optional[str] = None
    user: Optional[List[str]] = None
    server: socket.socket
    # FIX: these two used ``=`` instead of ``:``, which bound the
    # ``typing.Optional[...]`` objects themselves as class attributes.
    # ``protocol_version`` holds ints (301/302), as assigned by readCapLs().
    protocol_version: Optional[int] = None
    acked_capabilities: Optional[Set[str]] = None

    __new__ = object.__new__  # pytest won't collect Generic[] subclasses otherwise

    def setUp(self) -> None:
        super().setUp()
        self.conn = None
        self._setUpServer()

    def tearDown(self) -> None:
        # Politely ask the client to quit before killing the controller.
        if self.conn:
            try:
                self.conn.sendall(b"QUIT :end of test.")
            except BrokenPipeError:
                pass  # client already disconnected
            except OSError:
                pass  # the conn was already closed by the test, or something
        self.controller.kill()
        if self.conn:
            self.conn_file.close()
            self.conn.close()
        self.server.close()

    def _setUpServer(self) -> None:
        """Creates the server and make it listen."""
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(("", 0))  # Bind any free port
        self.server.listen(1)

        # Used to check if the client is alive from time to time
        self.server.settimeout(1)

    def acceptClient(
        self,
        tls_cert: Optional[str] = None,
        tls_key: Optional[str] = None,
        server: Optional[socket.socket] = None,
    ) -> None:
        """Make the server accept a client connection. Blocking."""
        server = server or self.server
        assert server

        # Wait for the client to connect; the 1s accept timeout lets us
        # periodically check the client process did not die.
        while True:
            try:
                (self.conn, addr) = server.accept()
            except socket.timeout:
                self.controller.check_is_alive()
            else:
                break

        if tls_cert is None and tls_key is None:
            pass
        else:
            assert (
                tls_cert and tls_key
            ), "tls_cert must be provided if and only if tls_key is."
            # load_cert_chain() needs real files, so spill the PEM data
            # into temporary files for the duration of the handshake setup.
            with tempfile.NamedTemporaryFile(
                "at"
            ) as certfile, tempfile.NamedTemporaryFile("at") as keyfile:
                certfile.write(tls_cert)
                certfile.seek(0)
                keyfile.write(tls_key)
                keyfile.seek(0)
                context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                context.load_cert_chain(certfile=certfile.name, keyfile=keyfile.name)
                self.conn = context.wrap_socket(self.conn, server_side=True)

        self.conn_file = self.conn.makefile(newline="\r\n", encoding="utf8")

    def getLine(self) -> str:
        """Read one raw line sent by the client."""
        line = self.conn_file.readline()
        if self.show_io:
            print("{:.3f} C: {}".format(time.time(), line.strip()))
        return line

    def getMessage(
        self, *args: Any, filter_pred: Optional[Callable[[Message], bool]] = None
    ) -> Message:
        """Gets a message and returns it. If a filter predicate is given,
        fetches messages until the predicate returns a False on a message,
        and returns this message."""
        while True:
            line = self.getLine(*args)
            if not line:
                raise ConnectionClosed()
            msg = message_parser.parse_message(line)
            if not filter_pred or filter_pred(msg):
                return msg

    def sendLine(self, line: str) -> None:
        """Send one line to the client, appending CRLF if missing."""
        assert self.conn
        self.conn.sendall(line.encode())
        if not line.endswith("\r\n"):
            self.conn.sendall(b"\r\n")
        if self.show_io:
            print("{:.3f} S: {}".format(time.time(), line.strip()))

    def readCapLs(
        self,
        auth: Optional[Authentication] = None,
        # FIX: annotation was ``tls.TlsConfig = None`` without Optional.
        tls_config: Optional[tls.TlsConfig] = None,
    ) -> None:
        """Start the client and consume its initial CAP message, recording
        the protocol version it advertises in self.protocol_version."""
        (hostname, port) = self.server.getsockname()
        self.controller.run(
            hostname=hostname, port=port, auth=auth, tls_config=tls_config
        )
        self.acceptClient()
        m = self.getMessage()
        self.assertEqual(m.command, "CAP", "First message is not CAP LS.")
        if m.params == ["LS"]:
            self.protocol_version = 301
        elif m.params == ["LS", "302"]:
            self.protocol_version = 302
        elif m.params == ["END"]:
            self.protocol_version = None
        else:
            raise AssertionError("Unknown CAP params: {}".format(m.params))

    def userNickPredicate(self, msg: Message) -> bool:
        """Predicate to be used with getMessage to handle NICK/USER
        transparently."""
        if msg.command == "NICK":
            self.assertEqual(len(msg.params), 1, msg=msg)
            self.nick = msg.params[0]
            return False
        elif msg.command == "USER":
            self.assertEqual(len(msg.params), 4, msg=msg)
            self.user = msg.params
            return False
        else:
            return True

    def negotiateCapabilities(
        self,
        caps: List[str],
        cap_ls: bool = True,
        auth: Optional[Authentication] = None,
    ) -> Optional[Message]:
        """Performs a complete capability negotiation process, without
        ending it, so the caller can continue the negotiation."""
        if cap_ls:
            self.readCapLs(auth)
            if not self.protocol_version:
                # No negotiation.
                return None
        self.sendLine("CAP * LS :{}".format(" ".join(caps)))
        capability_names = frozenset(capabilities.cap_list_to_dict(caps))
        self.acked_capabilities = set()
        while True:
            m = self.getMessage(filter_pred=self.userNickPredicate)
            if m.command != "CAP":
                return m
            self.assertGreater(len(m.params), 0, m)
            if m.params[0] == "REQ":
                self.assertEqual(len(m.params), 2, m)
                requested = frozenset(m.params[1].split())
                if not requested.issubset(capability_names):
                    # reject the whole REQ; truncate to keep the line short
                    self.sendLine(
                        "CAP {} NAK :{}".format(self.nick or "*", m.params[1][0:100])
                    )
                else:
                    self.sendLine(
                        "CAP {} ACK :{}".format(self.nick or "*", m.params[1])
                    )
                    self.acked_capabilities.update(requested)  # type: ignore
            else:
                return m
class BaseServerTestCase(
    _IrcTestCase[basecontrollers.BaseServerController], Generic[TClientName]
):
    """Basic class for server tests. Handles spawning a server and exchanging
    messages with it."""

    show_io: bool  # set by conftest.py
    password: Optional[str] = None
    ssl = False  # NOTE: shadows the stdlib ``ssl`` module name in this class body
    valid_metadata_keys: Set[str] = set()
    invalid_metadata_keys: Set[str] = set()
    server_support: Optional[Dict[str, Optional[str]]]
    run_services = False

    __new__ = object.__new__  # pytest won't collect Generic[] subclasses otherwise

    def setUp(self) -> None:
        """Start the server under test and prepare an empty client registry."""
        super().setUp()
        self.server_support = None
        (self.hostname, self.port) = self.controller.get_hostname_and_port()
        self.controller.run(
            self.hostname,
            self.port,
            password=self.password,
            valid_metadata_keys=self.valid_metadata_keys,
            invalid_metadata_keys=self.invalid_metadata_keys,
            ssl=self.ssl,
            run_services=self.run_services,
        )
        self.clients: Dict[TClientName, client_mock.ClientMock] = {}

    def tearDown(self) -> None:
        """Kill the server, then disconnect every remaining mock client."""
        self.controller.kill()
        # iterate over a copy: removeClient() mutates self.clients
        for client in list(self.clients):
            self.removeClient(client)

    def addClient(
        self, name: Optional[TClientName] = None, show_io: Optional[bool] = None
    ) -> TClientName:
        """Connects a client to the server and adds it to the dict.
        If 'name' is not given, uses the lowest unused non-negative integer."""
        self.controller.wait_for_port()
        if self.run_services:
            self.controller.wait_for_services()
        if not name:
            # pick max(numeric-looking names) + 1
            new_name: int = (
                max(
                    [int(name) for name in self.clients if isinstance(name, (int, str))]
                    + [0]
                )
                + 1
            )
            name = cast(TClientName, new_name)
        show_io = show_io if show_io is not None else self.show_io
        self.clients[name] = client_mock.ClientMock(name=name, show_io=show_io)
        self.clients[name].connect(self.hostname, self.port)
        return name

    def removeClient(self, name: TClientName) -> None:
        """Disconnects the client, without QUIT."""
        assert name in self.clients
        self.clients[name].disconnect()
        del self.clients[name]

    def getMessages(self, client: TClientName, **kwargs: Any) -> List[Message]:
        """Return all pending messages received by the named client."""
        return self.clients[client].getMessages(**kwargs)

    def getMessage(self, client: TClientName, **kwargs: Any) -> Message:
        """Return the next message received by the named client."""
        return self.clients[client].getMessage(**kwargs)

    def getRegistrationMessage(self, client: TClientName) -> Message:
        """Filter notices, do not send pings."""
        while True:
            msg = self.getMessage(
                client,
                synchronize=False,
                filter_pred=lambda m: m.command not in ("NOTICE", RPL_HELLO),
            )
            if msg.command == "PING":
                # Hi Unreal
                self.sendLine(client, "PONG :" + msg.params[0])
            else:
                return msg

    def sendLine(self, client: TClientName, line: Union[str, bytes]) -> None:
        """Send a raw line from the named client to the server."""
        return self.clients[client].sendLine(line)

    def getCapLs(
        self, client: TClientName, as_list: bool = False
    ) -> Union[List[str], Dict[str, Optional[str]]]:
        """Waits for a CAP LS block, parses all CAP LS messages, and return
        the dict capabilities, with their values.

        If as_list is given, returns the raw list (ie. key/value not split)
        in case the order matters (but it shouldn't)."""
        caps = []
        while True:
            m = self.getRegistrationMessage(client)
            self.assertMessageMatch(m, command="CAP")
            self.assertEqual(m.params[1], "LS", fail_msg="Expected CAP * LS, got {got}")
            # a "*" third param means this is a multiline LS and more follow
            if m.params[2] == "*":
                caps.extend(m.params[3].split())
            else:
                caps.extend(m.params[2].split())
                if not as_list:
                    return capabilities.cap_list_to_dict(caps)
                return caps

    def assertDisconnected(self, client: TClientName) -> None:
        """Assert the server dropped the connection of the named client."""
        try:
            # read twice: the first read may still succeed on buffered data
            self.getMessages(client)
            self.getMessages(client)
        except (socket.error, ConnectionClosed):
            del self.clients[client]
            return
        else:
            raise AssertionError("Client not disconnected.")

    def skipToWelcome(self, client: TClientName) -> List[Message]:
        """Skip to the point where we are registered
        <https://tools.ietf.org/html/rfc2812#section-3.1>
        """
        result = []
        while True:
            m = self.getMessage(client, synchronize=False)
            result.append(m)
            if m.command == "001":
                return result
            elif m.command == "PING":
                # Hi, Unreal
                self.sendLine(client, "PONG :" + m.params[0])

    def requestCapabilities(
        self,
        client: TClientName,
        capabilities: List[str],
        skip_if_cap_nak: bool = False,
    ) -> None:
        """CAP REQ the given capabilities; optionally convert a NAK into a
        CapabilityNotSupported skip instead of a test failure."""
        self.sendLine(client, "CAP REQ :{}".format(" ".join(capabilities)))
        m = self.getRegistrationMessage(client)
        try:
            self.assertMessageMatch(
                m, command="CAP", fail_msg="Expected CAP ACK, got: {msg}"
            )
            self.assertEqual(
                m.params[1], "ACK", m, fail_msg="Expected CAP ACK, got: {msg}"
            )
        except AssertionError:
            if skip_if_cap_nak:
                raise runner.CapabilityNotSupported(" or ".join(capabilities))
            else:
                raise

    def connectClient(
        self,
        nick: str,
        name: Optional[TClientName] = None,
        capabilities: Optional[List[str]] = None,
        skip_if_cap_nak: bool = False,
        show_io: Optional[bool] = None,
        account: Optional[str] = None,
        password: Optional[str] = None,
        ident: str = "username",
    ) -> List[Message]:
        """Connections a new client, does the cap negotiation
        and connection registration, and skips to the end of the MOTD.
        Returns the list of all messages received after registration,
        just like `skipToWelcome`."""
        client = self.addClient(name, show_io=show_io)
        if capabilities:
            self.sendLine(client, "CAP LS 302")
            # consume the server's LS reply before REQ-ing
            m = self.getRegistrationMessage(client)
            self.requestCapabilities(client, capabilities, skip_if_cap_nak)
        if password is not None:
            if "sasl" not in (capabilities or ()):
                raise ValueError("Used 'password' option without sasl capbilitiy")
            self.sendLine(client, "AUTHENTICATE PLAIN")
            m = self.getRegistrationMessage(client)
            self.assertMessageMatch(m, command="AUTHENTICATE", params=["+"])
            self.sendLine(client, sasl_plain_blob(account or nick, password))
            m = self.getRegistrationMessage(client)
            # 900 = RPL_LOGGEDIN, 903 = RPL_SASLSUCCESS
            self.assertIn(m.command, ["900", "903"], str(m))
        self.sendLine(client, "NICK {}".format(nick))
        self.sendLine(client, "USER %s * * :Realname" % (ident,))
        if capabilities:
            self.sendLine(client, "CAP END")

        welcome = self.skipToWelcome(client)
        self.sendLine(client, "PING foo")

        # Skip all that happy welcoming stuff
        self.server_support = {}
        while True:
            m = self.getMessage(client)
            if m.command == "PONG":
                break
            elif m.command == "005":
                # collect ISUPPORT tokens (last param is the trailing text)
                for param in m.params[1:-1]:
                    if "=" in param:
                        (key, value) = param.split("=")
                        self.server_support[key] = value
                    else:
                        self.server_support[param] = None
            welcome.append(m)

        return welcome

    def joinClient(self, client: TClientName, channel: str) -> None:
        """JOIN and assert the server finished sending NAMES (366)."""
        self.sendLine(client, "JOIN {}".format(channel))
        received = {m.command for m in self.getMessages(client)}
        self.assertIn(
            "366",
            received,
            fail_msg="Join to {} failed, {item} is not in the set of "
            "received responses: {list}",
            extra_format=(channel,),
        )

    def joinChannel(self, client: TClientName, channel: str) -> None:
        """JOIN and wait until the join is confirmed (or raise
        ChannelJoinException on a failure numeric)."""
        self.sendLine(client, "JOIN {}".format(channel))

        # wait until we see them join the channel
        joined = False
        while not joined:
            for msg in self.getMessages(client):
                if (
                    msg.command == "JOIN"
                    and 0 < len(msg.params)
                    and msg.params[0].lower() == channel.lower()
                ):
                    joined = True
                    break
                elif msg.command in CHANNEL_JOIN_FAIL_NUMERICS:
                    raise ChannelJoinException(msg.command, msg.params)
# typevars for the OptionalityHelper decorators below
_TSelf = TypeVar("_TSelf", bound="OptionalityHelper")
_TReturn = TypeVar("_TReturn")
class OptionalityHelper(Generic[TController]):
    """Mixin providing SASL support checks and skip decorators for tests."""

    controller: TController

    def checkSaslSupport(self) -> None:
        """Raise NotImplementedByController unless the controller
        advertises at least one SASL mechanism."""
        if not self.controller.supported_sasl_mechanisms:
            raise runner.NotImplementedByController("SASL")

    def checkMechanismSupport(self, mechanism: str) -> None:
        """Raise OptionalSaslMechanismNotSupported unless the controller
        supports the given mechanism."""
        if mechanism not in self.controller.supported_sasl_mechanisms:
            raise runner.OptionalSaslMechanismNotSupported(mechanism)

    @staticmethod
    def skipUnlessHasMechanism(
        mech: str,
    ) -> Callable[[Callable[[_TSelf], _TReturn]], Callable[[_TSelf], _TReturn]]:
        # Decorator factory: the returned decorator wraps a test method so
        # that mechanism support is checked right before the test runs.
        def decorator(
            test_func: Callable[[_TSelf], _TReturn]
        ) -> Callable[[_TSelf], _TReturn]:
            @functools.wraps(test_func)
            def wrapper(self: _TSelf) -> _TReturn:
                self.checkMechanismSupport(mech)
                return test_func(self)

            return wrapper

        return decorator

    @staticmethod
    def skipUnlessHasSasl(
        f: Callable[[_TSelf], _TReturn]
    ) -> Callable[[_TSelf], _TReturn]:
        """Decorator checking generic SASL support before the test runs."""

        @functools.wraps(f)
        def wrapper(self: _TSelf) -> _TReturn:
            self.checkSaslSupport()
            return f(self)

        return wrapper
def mark_services(cls: TClass) -> TClass:
    """Class decorator: enable services for the test class and tag it with
    the ``services`` pytest mark."""
    cls.run_services = True
    return pytest.mark.services(cls)  # type: ignore
def mark_specifications(
    *specifications_str: str, deprecated: bool = False, strict: bool = False
) -> Callable[[TCallable], TCallable]:
    """Decorator factory tagging a test with one pytest mark per named
    specification, plus optional ``strict`` / ``deprecated`` marks."""
    specs = frozenset(
        s if not isinstance(s, str) else Specifications.from_name(s)
        for s in specifications_str
    )
    # from_name yields None for unknown names
    if None in specs:
        raise ValueError("Invalid set of specifications: {}".format(specs))

    def decorator(f: TCallable) -> TCallable:
        for spec in specs:
            mark = getattr(pytest.mark, spec.value)
            f = mark(f)
        if strict:
            f = pytest.mark.strict(f)
        if deprecated:
            f = pytest.mark.deprecated(f)
        return f

    return decorator
def mark_capabilities(
    *capabilities_str: str, deprecated: bool = False, strict: bool = False
) -> Callable[[TCallable], TCallable]:
    """Decorator factory tagging a test with one pytest mark per named
    capability, plus the implied ``IRCv3`` mark."""
    caps = frozenset(
        c if not isinstance(c, str) else Capabilities.from_name(c)
        for c in capabilities_str
    )
    # from_name yields None for unknown names
    if None in caps:
        raise ValueError("Invalid set of capabilities: {}".format(caps))

    def decorator(f: TCallable) -> TCallable:
        for cap in caps:
            mark = getattr(pytest.mark, cap.value)
            f = mark(f)
        # Support for any capability implies IRCv3
        f = pytest.mark.IRCv3(f)
        return f

    return decorator
def mark_isupport(
    *tokens_str: str, deprecated: bool = False, strict: bool = False
) -> Callable[[TCallable], TCallable]:
    """Decorator factory tagging a test with one pytest mark per ISUPPORT
    token.

    NOTE(review): unlike mark_specifications, the ``deprecated`` and
    ``strict`` arguments are accepted but never applied here — confirm
    whether that is intentional.
    """
    tokens = frozenset(
        IsupportTokens.from_name(c) if isinstance(c, str) else c for c in tokens_str
    )
    # from_name yields None for unknown names
    if None in tokens:
        raise ValueError("Invalid set of isupport tokens: {}".format(tokens))

    def decorator(f: TCallable) -> TCallable:
        for token in tokens:
            f = getattr(pytest.mark, token.value)(f)
        return f

    return decorator
| |
#!/usr/bin/env python
"""Defines a more user friendly way of entering data."""
import re
import warnings

import pandas as pd

from shatter import solver
from shatter.constants import *
from shatter.output import Output
from shatter.util import helpers
from shatter.util import helpers as h
from shatter.util.code_dict import CodeDict
from shatter.util.ordered_set import OrderedSet
__author__ = 'juan pablo isaza'
class Rules(list):
    """
    It is a list that contains rules, each being a dictionary with the inputs.
    """

    @staticmethod
    def gets_start_positional_idx(dictionary):
        """
        Gets the biggest positional-argument index for a dictionary and adds 1.
        :param dictionary: any dict
        :return: int
        """
        max_idx = 0
        has_key = False
        for key in dictionary:
            # Positional keys look like POSITIONAL_ARGS_RULE followed by digits.
            # Raw strings used for the regexes: "\d" is an invalid escape in
            # a plain string literal (DeprecationWarning since Python 3.6).
            if isinstance(key, str) and re.match("^" + POSITIONAL_ARGS_RULE + r"\d+$", key) is not None:
                has_key = True
                r = re.search(r"\d+$", key)
                candidate = int(r.group())
                if candidate > max_idx:
                    max_idx = candidate

        if has_key:
            return max_idx + 1
        else:
            return 0

    def get_max_positional_arg(self):
        """
        Gets the index for the next positional argument to start.
        :return: int.
        """
        max_arg = 0
        for d in self:
            candidate = self.gets_start_positional_idx(d)
            if candidate > max_arg:
                max_arg = candidate
        return max_arg

    def search_repeating_variable(self, value):
        """Tries to find if variable was already declared. If so outputs the original key else outputs None. Will
        exclude reserved words, as they are not variable declarations.
        :param value: a variable value. For example a Code object.
        :return: key of possible repeating variable or None
        """
        for d in self:
            # Reserved keyword keys are not variable declarations; skip them.
            for key in set(d.keys()) - set(KEYWORDS.values()):
                if d[key] == value:
                    return key
        return None

    def get_dicts(self, args, kwargs):
        """
        Big issue solved here. Adds args, to have positional args always in the same order as the user inputs.
        Therefore the user can have short circuiting for logical operators, by having inputs in the right order.
        :param args: positional args. Used when specific order required.
        :param kwargs: a common dict
        :return: a vector containing dicts (which as of python 3.6 preserves insertion order)
        """
        a_dict = dict()

        # Adds args
        start_idx = self.get_max_positional_arg()
        for idx, e in enumerate(args):

            if isinstance(e, pd.DataFrame):
                # TODO: implement fully. NOTE(review): a DataFrame argument
                # short-circuits here, discarding any remaining args and all
                # kwargs (pre-existing behavior, preserved).
                list_of_dicts = []
                variables = list(e.columns.values)
                for index, row in e.iterrows():
                    new_dict = dict()
                    for var in variables:
                        new_dict[var] = row[var]
                    list_of_dicts.append(new_dict)

                return list_of_dicts
            else:
                repeating_var = self.search_repeating_variable(e)
                if repeating_var is None:  # first time: declares new value.
                    a_dict[POSITIONAL_ARGS_RULE + str(start_idx + idx)] = e
                else:  # has been previously declared.
                    a_dict[repeating_var] = e

        # Adds kwargs
        for k in kwargs.keys():
            a_dict[k] = kwargs[k]

        return [a_dict]

    def __init__(self, *args, **kwargs):
        """
        init and add new parameters if provided.
        :param kwargs:
        :return:
        """
        # FIX: the original called list.__init__(list()), which initialized a
        # throwaway list object instead of self. Initialize self properly.
        super().__init__()
        if len(args) + len(kwargs) > 0:
            self += self.get_dicts(args, kwargs)

    def add(self, *args, **kwargs):
        """
        Adds a new row condition.
        :param kwargs: dictionary like parameters.
        :return: void
        """
        if len(args) + len(kwargs) > 0:
            self += (self.get_dicts(args, kwargs))
        else:
            warnings.warn('To add condition at least 1 argument should be provided', UserWarning)

    @staticmethod
    def validate(function, rules):
        """
        Validates the entries, for solver()
        :param function: callable
        :param rules: rules object or table.
        :raises NotImplementedError: when the function's source path is missing.
        """
        # If invalid, the validators raise exceptions. Call each one
        # explicitly: the original chained them with `and`, which would have
        # silently skipped valid_rules() had valid_function() ever returned a
        # falsy value instead of raising.
        h.valid_function(function)
        valid_rules(rules)

        f_path = h.get_function_path(function)
        if not h.os.path.exists(f_path):
            raise NotImplementedError("Function path {} not found.".format(f_path))

    def solve(self, function, unittest=None):
        """
        Solves puzzle given the restrains added. This is a method wrapper of solver.execute().
        :param function: the function to be coded.
        :param unittest: optional, the current test being run eg: 'self'.
        :return: Solution object.
        """
        self.validate(function, self)
        return solver.return_solution(f=function,
                                      rules=self,
                                      unittest=unittest)

    def get_input_values(self, f_inputs, output):
        """
        Scans the whole rules object looking for input values, adds them with the function inputs.
        :param f_inputs: function inputs.
        :param output: thing returned
        :return: All possible inputs that are not keywords.
        """
        remove_elements = KEYWORDS.values()
        f_inputs = list(f_inputs)

        input_values = []
        for row in self:
            if KEYWORDS[OUTPUT] in row and row[KEYWORDS[OUTPUT]] == output:
                keys = helpers.remove_list_from_list(row.keys(), f_inputs)
                keys = helpers.remove_list_from_list(keys, remove_elements)
                input_values += [row[k] for k in keys]

        return f_inputs + input_values

    def get_input_keys(self, f_inputs, output):
        """
        Scans the whole rules object looking for input keys. Will add inputs (such as code pieces), that are not
        explicitly declared as function inputs.
        Uses OrderedSet because order is very important. The order is:
        first the f_inputs (ordered from right to left), then args added on the condition object from right to left and
        top to bottom.
        Example:
        >>> out = -1
        >>> def f(a, b):
        >>>     return a + b
        >>> r = Rules(c=1, d=2, output=out)
        >>> r.add(x=3, y=4, output='other_stuff')
        >>> r.add(e=3, f=4, output=out)
        >>> r.get_input_keys(helpers.get_function_inputs(f), out)
        >>> OrderedSet(['a', 'b', 'c', 'd', 'e', 'f'])
        :param f_inputs: function inputs.
        :param output: the output of the row.
        :return: All possible inputs that are not keywords.
        """
        # TODO: missing optional args(kwargs) of the input function.
        f_inputs = OrderedSet(f_inputs)
        new_inputs = OrderedSet()
        for row in self:
            if KEYWORDS[OUTPUT] in row and row[KEYWORDS[OUTPUT]] == output:
                new_inputs |= OrderedSet(row.keys()) - f_inputs  # adds inputs who are not already args.

        all_elements = f_inputs | new_inputs
        return all_elements - KEYWORDS.values()

    @staticmethod
    def add_element_to_tuples(tuples_set, new_element):
        """
        Adds additional element to a tuple list.
        :param tuples_set: a set containing tuples.
        :param new_element: any element to add in last position.
        :return: tuple set
        """
        return [tuple_element + (new_element,) for tuple_element in tuples_set]

    def get_tuples_from_args(self, row, function_args, output):
        """
        Get a list containing tuples (with implicit or explicit rows).
        :param row: dict with index as key and value as input value.
        :param function_args: function
        :param output: the output of the row.
        :return: set containing tuples.
        """
        # starts with 1 tuple
        tuples = [()]
        for variable in self.get_input_keys(function_args, output):

            if variable in row:
                tuples = self.add_element_to_tuples(tuples, row[variable])
            else:
                # All possible outcomes for undetermined boolean variable: duplicates number of tuples.
                true_tuples = self.add_element_to_tuples(tuples, True)
                false_tuples = self.add_element_to_tuples(tuples, False)
                tuples = true_tuples + false_tuples

        # add explicit output to tuples, if necessary.
        return tuples

    @staticmethod
    def is_explicit(row):
        """
        Does the output is explicitly named on this table row. Has 2 elements the first is tuple.
        :param row: table row, or a condition.
        :return: boolean
        """
        return len(row) == 2 and isinstance(row[0], tuple)

    @staticmethod
    def get_output(row):
        """
        Gets the output from a row.
        :param row: dict.
        :return: output function or output or True if not specified.
        """
        out_key = KEYWORDS[OUTPUT]
        args_key = KEYWORDS[OUTPUT_ARGS]

        if out_key in row and args_key in row:  # This case is encountered only when the output is a function.
            return Output(function=row[out_key], arguments=row[args_key])
        elif out_key in row:
            return row[out_key]

        return True

    @staticmethod
    def row_has_no_keyword_keys(row):
        """
        Boolean output indicating whether a row (dict) has no keyword keys.
        :param row: dict.
        :return: True if there is at least 1 input different from a keyword.
        """
        row_keys = set(row.keys())
        keyword_keys = set(KEYWORDS.values())
        return len(row_keys.difference(keyword_keys)) > 0

    @staticmethod
    def change_key_from_bool_to_int(d, new_key):
        """
        Changes the keys from booleans (True or False) to int(0 or 1)
        if a int(0 or 1) is present.
        :param d: dict
        :param new_key: a new key to be added to dict.
        :return: new dict
        """
        if helpers.var_is_1(new_key) and helpers.has_true_key(d):
            d[1] = d.pop(True, None)

        if helpers.var_is_0(new_key) and helpers.has_false_key(d):
            d[0] = d.pop(False, None)

        return d

    def add_truth_table(self, truth_tables, row, function_args):
        """
        Adds a new truth table to the dict of truth_tables.
        :param truth_tables: orderDict, where the key is the output and the inputs are a orderSet of values.
        :param row: 1 row of self.
        :param function_args: tuple
        :return: modified truth_tables.
        """
        output = self.get_output(row)

        if output in truth_tables:  # uses existing table.
            truth_table = truth_tables[output]
        else:  # adds new truth table
            truth_table = list()

        condition_rows = self.get_tuples_from_args(row, function_args, output)
        truth_table = truth_table + condition_rows  # concat lists.
        truth_tables[output] = truth_table  # add to tables dict.

        return self.change_key_from_bool_to_int(truth_tables, output)

    def get_truth_tables(self, function_args):
        """
        Factor Truth tables by output.
        This is the 'private' version.
        :param function_args: variables.
        :return: CodeDict(), where key=output and value=implicit truth table.
        """
        # dict where outputs are the keys, values are the rows.
        truth_tables = CodeDict()
        for row in self:
            if self.row_has_no_keyword_keys(row):
                truth_tables = self.add_truth_table(truth_tables, row, function_args)

        return truth_tables
def add_to_dict_table(table, key, value):
    """
    Converts the table from tuples (explicit or implicit) to a dict().
    Where the key is the output.
    :param table: dict
    :param key: to be added to dict
    :param value: to be added to dict
    :return: modified table
    """
    # Start from the existing bucket when present, otherwise a fresh one;
    # a new list is built either way (matches the original's `+ [value]`).
    bucket = table[key] if key in table else []
    table[key] = bucket + [value]
    return table
def solve(function, rules, unittest=None):
    """
    This is the static version of rules.solve()
    :param function: the function to be coded.
    :param rules: Rules object or truth table. The table can be represented in 2 ways:

       Representation 1: a set of tuples where each table row is a tuple of the
       form (tuple_inputs(a, b, ...), output).

       Representation 2: a set of input tuples {tuple_inputs(a, b, ...), ...}
       with an implicit `True` output. This form is limited to boolean
       outputs; use Representation 1 otherwise.
    :param unittest: optional, the current test being run eg: 'self'.
    :return: Solution object.
    """
    # Raises on invalid input, then delegates to the solver.
    Rules.validate(function, rules)
    solution = solver.return_solution(f=function, rules=rules, unittest=unittest)
    return solution
def from_raw_list_to_dict_table(rules):
    """
    Convert raw case to general format.
    :param rules: obj
    :return: dict where key is output and value is implicit truth table.
    """
    table = dict()
    for row in rules:
        if Rules.is_explicit(row):
            # Explicit row: (inputs_tuple, output).
            inputs, output = row
            table = add_to_dict_table(table, output, inputs)
        else:
            # Implicit row: output defaults to True.
            table = add_to_dict_table(table, True, row)
    return table
# Error-type discriminators for RulesTypeError.
TYPE_ERROR = 'type_error'
ROW_ERROR = 'row_error'
EXPLICIT_ROW_ERROR = 'explicit_row_error'


class RulesTypeError(TypeError):
    """TypeError raised when a rules object (or one of its rows) has the wrong shape.

    :param error_object: the offending object (the rules object or a single row).
    :param error_type: one of TYPE_ERROR, ROW_ERROR or EXPLICIT_ROW_ERROR.
    """

    def __init__(self, error_object, error_type):
        # BUG FIX: the original used `elif ROW_ERROR:` and
        # `elif EXPLICIT_ROW_ERROR:` — both non-empty strings and therefore
        # always truthy — so every non-TYPE_ERROR case fell into the
        # ROW_ERROR branch. Compare against error_type instead.
        if error_type == TYPE_ERROR:
            message = 'Rules variable is not a list nor a Rules object, but rather type {}'\
                .format(type(error_object))
        elif error_type == ROW_ERROR:
            message = '{} row in truth table is not a tuple'.format(error_object)
        elif error_type == EXPLICIT_ROW_ERROR:
            message = '{} row with explicit output in truth table has wrong format.'.format(error_object)
        else:
            message = 'unknown TypeError'
        super(RulesTypeError, self).__init__(message)
def get_truth_tables(rules, function_args):
    """
    This is the 'public' version of the class method with the same name.
    :param rules: either a truth table or a rules object.
    :param function_args: the arguments of the function.
    :return: truth table (ie set with tuples).
    :raises RulesTypeError: when rules is neither a list nor a Rules object.
    """
    # Rules must be checked first: it subclasses list.
    if isinstance(rules, Rules):
        return rules.get_truth_tables(function_args)
    if isinstance(rules, list):  # raw list case.
        return from_raw_list_to_dict_table(rules)
    raise RulesTypeError(rules, TYPE_ERROR)
def valid_rules(rules):
    """
    Valid rules objects must be lists, inherit from list or be a Rules object.

    - list case: When the input is a raw table. If rules is a list then all rows have to be tuples or inherit
      from tuple.
    - Rules case: When the input is a Rules object.

    :param rules: truth table or a rules object.
    :return: boolean.
    :raises RulesTypeError: on any shape violation.
    """
    if not isinstance(rules, (list, Rules)):
        raise RulesTypeError(rules, TYPE_ERROR)

    # Row checks apply only to raw lists. Rules is excluded explicitly since
    # it inherits from list and would otherwise be mistaken for one.
    if isinstance(rules, list) and not isinstance(rules, Rules):
        for row in rules:
            if not isinstance(row, tuple):
                raise RulesTypeError(row, ROW_ERROR)

            # When the output is explicit the outer tuple must have exactly
            # 2 elements: (inputs_tuple, output).
            if isinstance(row[0], tuple) and len(row) != 2:
                raise RulesTypeError(row, EXPLICIT_ROW_ERROR)

    return True
| |
import asyncio

# Seconds to wait on outstanding remote futures per asyncio.wait round
# before re-checking (see CommunionClientManager below).
REMOTE_TIMEOUT = 5.0

import logging
logger = logging.getLogger("seamless")


def _log(log_fn, args):
    """Join *args* into one space-separated string and pass it to *log_fn*."""
    log_fn(" ".join(str(arg) for arg in args))


def print_info(*args):
    """Log all positional arguments at INFO level, space-separated."""
    _log(logger.info, args)


def print_warning(*args):
    """Log all positional arguments at WARNING level, space-separated."""
    _log(logger.warning, args)


def print_debug(*args):
    """Log all positional arguments at DEBUG level, space-separated."""
    _log(logger.debug, args)


def print_error(*args):
    """Log all positional arguments at ERROR level, space-separated."""
    _log(logger.error, args)
class CommunionClient:
    """Base class for clients that talk to a single remote communion servant."""

    def get_peer_id(self):
        """Return the peer id registered for this client's servant."""
        return communion_client_manager.servant_to_peer_id[id(self.servant)]
class CommunionBufferClient(CommunionClient):
    """Client that requests buffers (and buffer status) from a remote servant."""
    config_type = "buffer"

    def __init__(self, servant, config):
        self.servant = servant
        # Sub-config flags; may be False, True or "small"
        # (see CommunionClientManager.add_servant).
        self.config_buffer = config["buffer"]
        self.config_status = config["buffer_status"]

    async def status(self, checksum):
        """Ask the servant for the status of the buffer behind *checksum*.

        Returns the raw servant response, or None when status queries are
        disabled. A status of 0 or 1 marks the checksum as remotely available.
        """
        assert checksum is not None
        if not self.config_status:
            return
        message = {
            "type": "buffer_status",
            "content": checksum.hex()
        }
        result = await communion_server.client_submit(message, self.servant)
        if result is not None:
            try:
                status = result
                if status in (0, 1):
                    communion_client_manager.remote_checksum_available.add(
                        checksum
                    )
            # FIX: was a bare `except:` which also swallows KeyboardInterrupt
            # and SystemExit; narrowed to Exception (best-effort bookkeeping
            # stays best-effort).
            except Exception:
                pass
        return result

    async def submit(self, checksum):
        """Request the buffer for *checksum*; returns None when disabled."""
        assert checksum is not None
        if not self.config_buffer:
            return
        message = {
            "type": "buffer",
            "content": checksum.hex()
        }
        result = await communion_server.client_submit(message, self.servant)
        return result
class CommunionBufferLengthClient(CommunionClient):
    """Client that queries the length of a remote buffer."""
    config_type = "buffer_length"

    def __init__(self, servant, config):
        self.servant = servant

    async def submit(self, checksum):
        """Ask the servant for the length of the buffer behind *checksum*."""
        assert checksum is not None
        request = {
            "type": "buffer_length",
            "content": checksum.hex()
        }
        return await communion_server.client_submit(request, self.servant)
class CommunionSemanticToSyntacticChecksumClient(CommunionClient):
    """Client that resolves a semantic checksum to its syntactic checksums."""
    config_type = "semantic_to_syntactic"

    def __init__(self, servant, config):
        self.servant = servant

    async def submit(self, checksum, celltype, subcelltype):
        """Request syntactic checksums for (*checksum*, *celltype*, *subcelltype*).

        Returns a list of bytes checksums, or None when the servant has none.
        """
        assert checksum is not None
        request = {
            "type": "semantic_to_syntactic",
            "content": (checksum.hex(), celltype, subcelltype)
        }
        response = await communion_server.client_submit(request, self.servant)
        if response is None:
            return None
        return [bytes.fromhex(item) for item in response]
class CommunionTransformationClient(CommunionClient):
    """Client that delegates transformation jobs to a remote servant."""
    config_type = "transformation"

    def __init__(self, servant, config):
        self.servant = servant
        self.config_job = config["transformation_job"]
        self.config_status = config["transformation_status"]
        self.config_hard_cancel = config["hard_cancel"]
        self.config_clear_exception = config["clear_exception"]
        # Future set while a clear-exception request is in flight; every
        # other operation waits for it before proceeding.
        self.future_clear_exception = None

    async def _pending_clear(self):
        """Wait for an in-flight clear-exception request, if any."""
        if self.future_clear_exception is not None:
            await self.future_clear_exception

    async def _call(self, message_type, checksum):
        """Send a *message_type* request about *checksum* to the servant."""
        message = {
            "type": message_type,
            "content": checksum.hex()
        }
        return await communion_server.client_submit(message, self.servant)

    async def status(self, checksum):
        """Query the remote transformation status; (None, None) when disabled."""
        await self._pending_clear()
        assert checksum is not None
        if not self.config_status:
            return None, None
        result = await self._call("transformation_status", checksum)
        # For non-zero statuses the last element is a hex-encoded checksum;
        # decode it to bytes.
        if result is not None and result[0] != 0 and isinstance(result[-1], str):
            result = (*result[:-1], bytes.fromhex(result[-1]))
        return result

    async def wait(self, checksum):
        """Wait remotely until the transformation has finished."""
        await self._pending_clear()
        if not self.config_job:
            return
        await self._call("transformation_wait", checksum)

    async def submit(self, checksum):
        """Submit the transformation job to the servant."""
        await self._pending_clear()
        if not self.config_job:
            return
        return await self._call("transformation_job", checksum)

    async def cancel(self, checksum):
        """Cancel the remote transformation."""
        await self._pending_clear()
        if not self.config_job:
            return
        await self._call("transformation_cancel", checksum)

    async def hard_cancel(self, checksum):
        """Hard-cancel the remote transformation."""
        await self._pending_clear()
        if not self.config_hard_cancel:
            return
        await self._call("transformation_hard_cancel", checksum)

    async def clear_exception(self, checksum):
        """Clear a remote transformation exception, then reset the pending future."""
        await self._call("transformation_clear_exception", checksum)
        self.future_clear_exception = None
class CommunionClientManager:
    """Registry of communion clients, grouped by communion type.

    For every added servant, one client per enabled communion type is
    created from the intersection of the master and servant configs.
    """
    # All concrete CommunionClient subclasses defined above, keyed by their
    # config_type. Evaluated once at class-creation time via globals().
    _clientclasses = [klass for klass in globals().values() if isinstance(klass, type) \
        and issubclass(klass, CommunionClient) and klass is not CommunionClient]
    _clientclasses = {klass.config_type:klass for klass in _clientclasses}

    def __init__(self):
        self.clients = {k:[] for k in self._clientclasses}
        # Checksums known to be available on at least one remote peer.
        self.remote_checksum_available = set()
        self.servant_to_clients = {}
        self.servant_to_peer_id = {}

    def add_servant(self, servant, peer_id, *,
        config_servant, config_master
    ):
        """Create and register the clients enabled for *servant*.

        :param servant: the remote servant (websocket-like handle).
        :param peer_id: identifier of the remote peer.
        :param config_servant: the servant's advertised communion config.
        :param config_master: our own (master-side) communion config.
        """
        servid = id(servant)
        self.servant_to_clients[servid] = []
        self.servant_to_peer_id[servid] = peer_id
        communion_types = {
            "buffer": ["buffer", "buffer_status"],
            "buffer_length": ["buffer_length"],
            "semantic_to_syntactic": ["semantic_to_syntactic"],
            "transformation": [
                "transformation_job", "transformation_status",
                "hard_cancel", "clear_exception",
            ],
        }
        for communion_type in communion_types:
            sub_communion_types = {}
            for sub_communion_type in communion_types[communion_type]:
                if sub_communion_type in ("hard_cancel", "clear_exception"):
                    # Always enabled on the master side.
                    c_master = True
                else:
                    c_master = config_master[sub_communion_type]
                c_servant = config_servant[sub_communion_type]
                if not c_master or not c_servant:
                    c = False
                elif sub_communion_type == "buffer":
                    # "small" on either side restricts exchange to small buffers.
                    if c_servant == "small" or c_master == "small":
                        c = "small"
                    else:
                        c = True
                else:
                    c = True
                sub_communion_types[sub_communion_type] = c
            if not any(sub_communion_types.values()):
                continue
            print_warning("ADD SERVANT", communion_type)
            clientclass = self._clientclasses[communion_type]
            client = clientclass(servant, sub_communion_types)
            self.clients[communion_type].append(client)
            self.servant_to_clients[servid].append((communion_type, client))

    def remove_servant(self, servant):
        """Unregister all clients of *servant* and drop cached availability."""
        self.remote_checksum_available.clear()
        clients = self.servant_to_clients.pop(id(servant))
        self.servant_to_peer_id.pop(id(servant))
        for communion_type, client in clients:
            print_warning("REMOVE SERVANT", communion_type)
            self.clients[communion_type].remove(client)

    async def remote_semantic_to_syntactic(self, checksum, celltype, subcelltype, peer_id):
        """Query all peers other than *peer_id*; return concatenated results or None."""
        clients = [
            client for client in self.clients["semantic_to_syntactic"]
            if self.servant_to_peer_id[id(client.servant)] != peer_id
        ]
        if not len(clients):
            return None
        coros = [client.submit(checksum, celltype, subcelltype) for client in clients]
        results = await asyncio.gather(*coros)
        result = []
        for r in results:
            if r is not None:
                result += r
        if not len(result):
            return None
        return result

    async def remote_buffer_status(self, checksum, peer_id):
        """Return True when a peer other than *peer_id* holds the buffer."""
        if checksum in self.remote_checksum_available:
            return True
        clients = [
            client for client in self.clients["buffer"]
            if self.servant_to_peer_id[id(client.servant)] != peer_id
        ]
        if not len(clients):
            return False
        futures = [asyncio.ensure_future(client.status(checksum))
                   for client in clients]
        while 1:
            done, pending = await asyncio.wait(futures,
                timeout=REMOTE_TIMEOUT,
                return_when=asyncio.FIRST_COMPLETED
            )
            for future in done:
                try:
                    status = future.result()
                    if status > 0:
                        return True
                except Exception:
                    import traceback
                    print_error(traceback.format_exc())
                    continue
            if not len(pending):
                return False
            # BUG FIX: only re-wait on the still-pending futures; the
            # original re-passed the full list, re-processing completed
            # futures on every iteration.
            futures = list(pending)

    async def remote_transformation_status(self, checksum, peer_id):
        """Return the best (status, result) among peers other than *peer_id*,
        or None when no peer reports a usable status."""
        clients = [
            client for client in self.clients["transformation"]
            if self.servant_to_peer_id[id(client.servant)] != peer_id
        ]
        if not len(clients):
            return
        futures = [asyncio.ensure_future(client.status(checksum))
                   for client in clients]
        best_status, best_result = None, None
        while 1:
            done, pending = await asyncio.wait(futures,
                timeout=REMOTE_TIMEOUT,
                return_when=asyncio.FIRST_COMPLETED
            )
            for future in done:
                try:
                    status, result = future.result()
                    if status < 0:
                        continue
                    if best_status is None or status > best_status:
                        # BUG FIX: the original updated best_status but never
                        # best_result, so the returned result was always None.
                        best_status = status
                        best_result = result
                except Exception:
                    import traceback
                    print_error(traceback.format_exc())
                    continue
            if not len(pending):
                break
            # BUG FIX: only re-wait on the still-pending futures (see above).
            futures = list(pending)
        if best_status is None:
            return None
        return best_status, best_result
# Module-level singleton used by the rest of the package (and by
# CommunionClient.get_peer_id above).
communion_client_manager = CommunionClientManager()

# Imported at the bottom of the module; NOTE(review): presumably to avoid a
# circular import with communion_server, which imports this module — confirm.
from .communion_server import communion_server
| |
#-*-test-case-name: openid.test.test_association-*-
#-*-coding: utf-8-*-
"""
This module contains code for dealing with associations between
consumers and servers. Associations contain a shared secret that is
used to sign C{openid.mode=id_res} messages.
Users of the library should not usually need to interact directly with
associations. The L{store<openid.store>}, L{server<openid.server.server>}
and L{consumer<openid.consumer>} objects will create and manage
the associations. The consumer and server code will make use of a
C{L{SessionNegotiator}} when managing associations, which enables
users to express a preference for what kind of associations should be
allowed, and what kind of exchange should be done to establish the
association.
@var default_negotiator: A C{L{SessionNegotiator}} that allows all
association types that are specified by the OpenID
specification. It prefers to use HMAC-SHA1/DH-SHA1, if it's
available. If HMAC-SHA256 is not supported by your Python runtime,
HMAC-SHA256 and DH-SHA256 will not be available.
@var encrypted_negotiator: A C{L{SessionNegotiator}} that
does not support C{'no-encryption'} associations. It prefers
HMAC-SHA1/DH-SHA1 association types if available.
"""
# Public API of this module.
__all__ = [
    'default_negotiator',
    'encrypted_negotiator',
    'SessionNegotiator',
    'Association',
]
import time
from openid import cryptutil
from openid import kvform
from openid import oidutil
from openid.message import OPENID_NS
# Every association type defined by the OpenID specification.
all_association_types = [
    'HMAC-SHA1',
    'HMAC-SHA256',
]

# SHA-256 support depends on the runtime: only advertise the SHA-256
# association/session pairs when cryptutil provides hmacSha256.
if hasattr(cryptutil, 'hmacSha256'):
    supported_association_types = list(all_association_types)

    # Preference order of (association type, session type) pairs; earlier
    # entries are preferred by SessionNegotiator.
    default_association_order = [
        ('HMAC-SHA1', 'DH-SHA1'),
        ('HMAC-SHA1', 'no-encryption'),
        ('HMAC-SHA256', 'DH-SHA256'),
        ('HMAC-SHA256', 'no-encryption'),
    ]

    # Same, but excluding plain-text ('no-encryption') sessions.
    only_encrypted_association_order = [
        ('HMAC-SHA1', 'DH-SHA1'),
        ('HMAC-SHA256', 'DH-SHA256'),
    ]
else:
    supported_association_types = ['HMAC-SHA1']

    default_association_order = [
        ('HMAC-SHA1', 'DH-SHA1'),
        ('HMAC-SHA1', 'no-encryption'),
    ]

    only_encrypted_association_order = [
        ('HMAC-SHA1', 'DH-SHA1'),
    ]
def getSessionTypes(assoc_type):
    """Return the allowed session types for a given association type.

    An unknown association type yields an empty list.
    """
    if assoc_type == 'HMAC-SHA1':
        return ['DH-SHA1', 'no-encryption']
    if assoc_type == 'HMAC-SHA256':
        return ['DH-SHA256', 'no-encryption']
    return []
def checkSessionType(assoc_type, session_type):
    """Check to make sure that this pair of assoc type and session
    type are allowed.

    @raises ValueError: if *session_type* is not valid for *assoc_type*.
    """
    if session_type not in getSessionTypes(assoc_type):
        # FIX: corrected the 'assocation' typo in the error message.
        raise ValueError(
            'Session type %r not valid for association type %r'
            % (session_type, assoc_type))
class SessionNegotiator(object):
    """A session negotiator controls the allowed and preferred
    association types and association session types. Both the
    C{L{Consumer<openid.consumer.consumer.Consumer>}} and
    C{L{Server<openid.server.server.Server>}} use negotiators when
    creating associations.

    Create and use your own negotiator if you:

      - Do not want to do Diffie-Hellman key exchange because you use
        transport-layer encryption (e.g. SSL)

      - Want to use only SHA-256 associations

      - Do not want to support plain-text associations over a non-secure
        channel

    By default, the library will make any kind of association that is
    allowed in the OpenID 2.0 specification.

    The consumer calls C{L{getAllowedType}} to pick the preferred
    association/session pair for a request. The server calls
    C{L{isAllowed}} to decide whether to honor a requested pair, and
    C{L{getAllowedType}} to suggest an alternative when it is not.
    A consumer that receives such a suggestion calls C{L{isAllowed}}
    before retrying with it.

    @ivar allowed_types: A list of association/session type pairs;
        earlier entries are preferred.

    @type allowed_types: [(str, str)]
    """

    def __init__(self, allowed_types):
        self.setAllowedTypes(allowed_types)

    def copy(self):
        """Return a new negotiator with a copy of my allowed-types list."""
        return self.__class__(list(self.allowed_types))

    def setAllowedTypes(self, allowed_types):
        """Set the allowed association types, checking to make sure
        each combination is valid."""
        for assoc_type, session_type in allowed_types:
            checkSessionType(assoc_type, session_type)
        self.allowed_types = allowed_types

    def addAllowedType(self, assoc_type, session_type=None):
        """Add an association type and session type to the allowed
        types list. The association/session pairs are tried in the
        order that they are added. When *session_type* is omitted, every
        session type valid for *assoc_type* is added."""
        if self.allowed_types is None:
            self.allowed_types = []

        if session_type is not None:
            checkSessionType(assoc_type, session_type)
            self.allowed_types.append((assoc_type, session_type))
        else:
            available = getSessionTypes(assoc_type)
            if not available:
                raise ValueError('No session available for association type %r'
                                 % (assoc_type,))
            for st in available:
                self.addAllowedType(assoc_type, st)

    def isAllowed(self, assoc_type, session_type):
        """Is this combination of association type and session type allowed?"""
        pair_ok = (assoc_type, session_type) in self.allowed_types
        session_ok = session_type in getSessionTypes(assoc_type)
        return pair_ok and session_ok

    def getAllowedType(self):
        """Return the preferred (association type, session type) pair,
        or (None, None) when nothing is allowed."""
        try:
            return self.allowed_types[0]
        except IndexError:
            return (None, None)
# Allows every pair from the OpenID 2.0 spec, preferring HMAC-SHA1/DH-SHA1.
default_negotiator = SessionNegotiator(default_association_order)
# Stricter variant that refuses 'no-encryption' sessions.
encrypted_negotiator = SessionNegotiator(only_encrypted_association_order)
def getSecretSize(assoc_type):
    """Return the shared-secret length in bytes for *assoc_type*.

    @raises ValueError: for unsupported association types.
    """
    sizes = {'HMAC-SHA1': 20, 'HMAC-SHA256': 32}
    if assoc_type not in sizes:
        raise ValueError('Unsupported association type: %r' % (assoc_type,))
    return sizes[assoc_type]
class Association(object):
"""
This class represents an association between a server and a
consumer. In general, users of this library will never see
instances of this object. The only exception is if you implement
a custom C{L{OpenIDStore<openid.store.interface.OpenIDStore>}}.
If you do implement such a store, it will need to store the values
of the C{L{handle}}, C{L{secret}}, C{L{issued}}, C{L{lifetime}}, and
C{L{assoc_type}} instance variables.
@ivar handle: This is the handle the server gave this association.
@type handle: C{str}
@ivar secret: This is the shared secret the server generated for
this association.
@type secret: C{str}
@ivar issued: This is the time this association was issued, in
seconds since 00:00 GMT, January 1, 1970. (ie, a unix
timestamp)
@type issued: C{int}
@ivar lifetime: This is the amount of time this association is
good for, measured in seconds since the association was
issued.
@type lifetime: C{int}
@ivar assoc_type: This is the type of association this instance
represents. The only valid value of this field at this time
is C{'HMAC-SHA1'}, but new types may be defined in the future.
@type assoc_type: C{str}
@sort: __init__, fromExpiresIn, expiresIn, __eq__, __ne__,
handle, secret, issued, lifetime, assoc_type
"""
# The ordering and name of keys as stored by serialize
assoc_keys = [
'version',
'handle',
'secret',
'issued',
'lifetime',
'assoc_type',
]
_macs = {
'HMAC-SHA1': cryptutil.hmacSha1,
'HMAC-SHA256': cryptutil.hmacSha256,
}
@classmethod
def fromExpiresIn(cls, expires_in, handle, secret, assoc_type):
"""
This is an alternate constructor used by the OpenID consumer
library to create associations. C{L{OpenIDStore
<openid.store.interface.OpenIDStore>}} implementations
shouldn't use this constructor.
@param expires_in: This is the amount of time this association
is good for, measured in seconds since the association was
issued.
@type expires_in: C{int}
@param handle: This is the handle the server gave this
association.
@type handle: C{str}
@param secret: This is the shared secret the server generated
for this association.
@type secret: C{str}
@param assoc_type: This is the type of association this
instance represents. The only valid value of this field
at this time is C{'HMAC-SHA1'}, but new types may be
defined in the future.
@type assoc_type: C{str}
"""
issued = int(time.time())
lifetime = expires_in
return cls(handle, secret, issued, lifetime, assoc_type)
def __init__(self, handle, secret, issued, lifetime, assoc_type):
"""
This is the standard constructor for creating an association.
@param handle: This is the handle the server gave this
association.
@type handle: C{str}
@param secret: This is the shared secret the server generated
for this association.
@type secret: C{str}
@param issued: This is the time this association was issued,
in seconds since 00:00 GMT, January 1, 1970. (ie, a unix
timestamp)
@type issued: C{int}
@param lifetime: This is the amount of time this association
is good for, measured in seconds since the association was
issued.
@type lifetime: C{int}
@param assoc_type: This is the type of association this
instance represents. The only valid value of this field
at this time is C{'HMAC-SHA1'}, but new types may be
defined in the future.
@type assoc_type: C{str}
"""
if assoc_type not in all_association_types:
fmt = '%r is not a supported association type'
raise ValueError(fmt % (assoc_type,))
# secret_size = getSecretSize(assoc_type)
# if len(secret) != secret_size:
# fmt = 'Wrong size secret (%s bytes) for association type %s'
# raise ValueError(fmt % (len(secret), assoc_type))
self.handle = handle
if isinstance(secret, str):
secret = secret.encode("utf-8") # should be bytes
self.secret = secret
self.issued = issued
self.lifetime = lifetime
self.assoc_type = assoc_type
    @property
    def expiresIn(self, now=None):
        """
        This returns the number of seconds this association is still
        valid for, or C{0} if the association is no longer valid.

        @return: The number of seconds this association is still valid
            for, or C{0} if the association is no longer valid.

        @rtype: C{int}
        """
        # NOTE(review): since this is a @property, attribute access always
        # calls it with only `self`, so `now` can never be supplied that way
        # and is effectively always None here.
        if now is None:
            now = int(time.time())

        return max(0, self.issued + self.lifetime - now)
def __eq__(self, other):
"""
This checks to see if two C{L{Association}} instances
represent the same association.
@return: C{True} if the two instances represent the same
association, C{False} otherwise.
@rtype: C{bool}
"""
return type(self) is type(other) and self.__dict__ == other.__dict__
def __ne__(self, other):
"""
This checks to see if two C{L{Association}} instances
represent different associations.
@return: C{True} if the two instances represent different
associations, C{False} otherwise.
@rtype: C{bool}
"""
return not (self == other)
def serialize(self):
"""
Convert an association to KV form.
@return: String in KV form suitable for deserialization by
deserialize.
@rtype: str
"""
data = {
'version': '2',
'handle': self.handle,
'secret': oidutil.toBase64(self.secret),
'issued': str(int(self.issued)),
'lifetime': str(int(self.lifetime)),
'assoc_type': self.assoc_type
}
assert len(data) == len(self.assoc_keys)
pairs = []
for field_name in self.assoc_keys:
pairs.append((field_name, data[field_name]))
return kvform.seqToKV(pairs, strict=True)
@classmethod
def deserialize(cls, assoc_s):
"""
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: bytes
@return: instance of this class
"""
pairs = kvform.kvToSeq(assoc_s, strict=True)
keys = []
values = []
for k, v in pairs:
keys.append(k)
values.append(v)
if keys != cls.assoc_keys:
raise ValueError('Unexpected key values: %r', keys)
version, handle, secret, issued, lifetime, assoc_type = values
if version != '2':
raise ValueError('Unknown version: %r' % version)
issued = int(issued)
lifetime = int(lifetime)
secret = oidutil.fromBase64(secret)
return cls(handle, secret, issued, lifetime, assoc_type)
def sign(self, pairs):
"""
Generate a signature for a sequence of (key, value) pairs
@param pairs: The pairs to sign, in order
@type pairs: sequence of (str, str)
@return: The binary signature of this sequence of pairs
@rtype: bytes
"""
kv = kvform.seqToKV(pairs)
try:
mac = self._macs[self.assoc_type]
except KeyError:
raise ValueError(
'Unknown association type: %r' % (self.assoc_type,))
return mac(self.secret, kv)
def getMessageSignature(self, message):
"""Return the signature of a message.
If I am not a sign-all association, the message must have a
signed list.
@return: the signature, base64 encoded
@rtype: bytes
@raises ValueError: If there is no signed list and I am not a sign-all
type of association.
"""
pairs = self._makePairs(message)
return oidutil.toBase64(self.sign(pairs))
def signMessage(self, message):
"""Add a signature (and a signed list) to a message.
@return: a new Message object with a signature
@rtype: L{openid.message.Message}
"""
if (message.hasKey(OPENID_NS, 'sig') or
message.hasKey(OPENID_NS, 'signed')):
raise ValueError('Message already has signed list or signature')
extant_handle = message.getArg(OPENID_NS, 'assoc_handle')
if extant_handle and extant_handle != self.handle:
raise ValueError("Message has a different association handle")
signed_message = message.copy()
signed_message.setArg(OPENID_NS, 'assoc_handle', self.handle)
message_keys = list(signed_message.toPostArgs().keys())
signed_list = [k[7:] for k in message_keys
if k.startswith('openid.')]
signed_list.append('signed')
signed_list.sort()
signed_message.setArg(OPENID_NS, 'signed', ','.join(signed_list))
sig = self.getMessageSignature(signed_message)
signed_message.setArg(OPENID_NS, 'sig', sig)
return signed_message
def checkMessageSignature(self, message):
"""Given a message with a signature, calculate a new signature
and return whether it matches the signature in the message.
@raises ValueError: if the message has no signature or no signature
can be calculated for it.
"""
message_sig = message.getArg(OPENID_NS, 'sig')
if not message_sig:
raise ValueError("%s has no sig." % (message,))
calculated_sig = self.getMessageSignature(message)
# remember, getMessageSignature returns bytes
calculated_sig = calculated_sig.decode('utf-8')
return cryptutil.const_eq(calculated_sig, message_sig)
def _makePairs(self, message):
signed = message.getArg(OPENID_NS, 'signed')
if not signed:
raise ValueError('Message has no signed list: %s' % (message,))
signed_list = signed.split(',')
pairs = []
data = message.toPostArgs()
for field in signed_list:
pairs.append((field, data.get('openid.' + field, '')))
return pairs
def __repr__(self):
return "<%s.%s %s %s>" % (
self.__class__.__module__,
self.__class__.__name__,
self.assoc_type,
self.handle)
| |
import pytest
from unittest import mock
from django.contrib.auth.models import User
from django.forms.models import model_to_dict
from rest_framework.exceptions import ParseError
from awx.main.access import (
BaseAccess,
check_superuser,
JobTemplateAccess,
WorkflowJobTemplateAccess,
SystemJobTemplateAccess,
vars_are_encrypted
)
from awx.main.models import (
Credential,
CredentialType,
Inventory,
Project,
Role,
Organization,
)
@pytest.fixture
def user_unit():
    """An unsaved Django User suitable for pure unit tests."""
    return User(
        username='rando',
        password='raginrando',
        email='rando@redhat.com',
    )
class TestRelatedFieldAccess:
    """Unit tests for BaseAccess.check_related.

    The mocked resources expose ``admin_role`` both on themselves and on
    their ``related`` attribute; whether the test user is "in" that role
    (``__contains__``) is what grants or denies access.
    """
    @pytest.fixture
    def resource_good(self, mocker):
        # role that reports every user as a member (access granted)
        good_role = mocker.MagicMock(__contains__=lambda self, user: True)
        return mocker.MagicMock(related=mocker.MagicMock(admin_role=good_role),
                                admin_role=good_role)
    @pytest.fixture
    def resource_bad(self, mocker):
        # role that reports no user as a member (access denied)
        bad_role = mocker.MagicMock(__contains__=lambda self, user: False)
        return mocker.MagicMock(related=mocker.MagicMock(admin_role=bad_role),
                                admin_role=bad_role)
    @pytest.fixture
    def access(self, user_unit):
        return BaseAccess(user_unit)
    def test_new_optional_fail(self, access, resource_bad, mocker):
        """
        User tries to create a new resource, but lacks permission
        to the related resource they provided
        """
        data = {'related': resource_bad}
        assert not access.check_related('related', mocker.MagicMock, data)
    def test_new_with_bad_data(self, access, mocker):
        # non-model garbage for the related field must raise a 400
        data = {'related': 3.1415}
        with pytest.raises(ParseError):
            access.check_related('related', mocker.MagicMock(), data)
    def test_new_mandatory_fail(self, access, mocker):
        # mandatory=True means a missing/null related field is a denial
        access.user.is_superuser = False
        assert not access.check_related(
            'related', mocker.MagicMock, {}, mandatory=True)
        assert not access.check_related(
            'related', mocker.MagicMock, {'resource': None}, mandatory=True)
    def test_existing_no_op(self, access, resource_bad, mocker):
        """
        User edits a resource, but does not change related field
        lack of access to related field does not block action
        """
        data = {'related': resource_bad.related}
        assert access.check_related(
            'related', mocker.MagicMock, data, obj=resource_bad)
        assert access.check_related(
            'related', mocker.MagicMock, {}, obj=resource_bad)
    def test_existing_required_access(self, access, resource_bad, mocker):
        # no-op actions, but mandatory kwarg requires check to pass
        assert not access.check_related(
            'related', mocker.MagicMock, {}, obj=resource_bad, mandatory=True)
        assert not access.check_related(
            'related', mocker.MagicMock, {'related': resource_bad.related},
            obj=resource_bad, mandatory=True)
    def test_existing_no_access_to_current(
            self, access, resource_good, resource_bad, mocker):
        """
        User gives a valid related resource (like organization), but does
        not have access to _existing_ related resource, so deny action
        """
        data = {'related': resource_good}
        assert not access.check_related(
            'related', mocker.MagicMock, data, obj=resource_bad)
    def test_existing_no_access_to_new(
            self, access, resource_good, resource_bad, mocker):
        # user can touch the current value but not the new one: deny
        data = {'related': resource_bad}
        assert not access.check_related(
            'related', mocker.MagicMock, data, obj=resource_good)
    def test_existing_not_allowed_to_remove(self, access, resource_bad, mocker):
        # clearing the field counts as a change and needs access
        data = {'related': None}
        assert not access.check_related(
            'related', mocker.MagicMock, data, obj=resource_bad)
    def test_existing_not_null_null(self, access, mocker):
        resource = mocker.MagicMock(related=None)
        data = {'related': None}
        # Not changing anything by giving null when it is already-null
        # important for PUT requests
        assert access.check_related(
            'related', mocker.MagicMock, data, obj=resource, mandatory=True)
def test_encrypted_vars_detection():
    """vars_are_encrypted is True iff any value carries the $encrypted$ prefix."""
    encrypted_payload = {
        'aaa': {'b': 'c'},
        'alist': [],
        'test_var_eight': '$encrypted$UTF8$AESCBC$Z0FBQUF...==',
        'test_var_five': 'four',
    }
    plain_payload = {
        'aaa': {'b': 'c'},
        'alist': [],
        'test_var_five': 'four',
    }
    assert vars_are_encrypted(encrypted_payload)
    assert not vars_are_encrypted(plain_payload)
@pytest.fixture
def job_template_with_ids(job_template_factory):
    """A fully-wired, non-persisted JobTemplate whose related objects all
    carry explicit pk values, so access checks can run without a DB."""
    # Create non-persisted objects with IDs to send to job_template_factory
    ssh_type = CredentialType(kind='ssh')
    credential = Credential(id=1, pk=1, name='testcred', credential_type=ssh_type)
    net_type = CredentialType(kind='net')
    net_cred = Credential(id=2, pk=2, name='testnetcred', credential_type=net_type)
    cloud_type = CredentialType(kind='aws')
    cloud_cred = Credential(id=3, pk=3, name='testcloudcred', credential_type=cloud_type)
    inv = Inventory(id=11, pk=11, name='testinv')
    proj = Project(id=14, pk=14, name='testproj')
    jt_objects = job_template_factory(
        'testJT', project=proj, inventory=inv, credential=credential,
        cloud_credential=cloud_cred, network_credential=net_cred,
        persisted=False)
    jt = jt_objects.job_template
    jt.organization = Organization(id=1, pk=1, name='fooOrg')
    return jt
def test_superuser(mocker):
    """check_superuser short-circuits to True for a superuser."""
    superuser = mocker.MagicMock(spec=User, id=1, is_superuser=True)
    access = BaseAccess(superuser)
    wrapped_can_add = check_superuser(BaseAccess.can_add)
    assert wrapped_can_add(access, None) is True
def test_not_superuser(mocker):
    """check_superuser falls through to the wrapped method for normal users."""
    regular_user = mocker.MagicMock(spec=User, id=1, is_superuser=False)
    access = BaseAccess(regular_user)
    wrapped_can_add = check_superuser(BaseAccess.can_add)
    assert wrapped_can_add(access, None) is False
def test_jt_existing_values_are_nonsensitive(job_template_with_ids, user_unit):
    """Assure that permission checks are not required if submitted data is
    identical to what the job template already has."""
    submitted = model_to_dict(job_template_with_ids,
                              exclude=['unifiedjobtemplate_ptr'])
    jt_access = JobTemplateAccess(user_unit)
    assert jt_access.changes_are_non_sensitive(job_template_with_ids, submitted)
def test_change_jt_sensitive_data(job_template_with_ids, mocker, user_unit):
    """Assure that can_add is called with all ForeignKeys."""
    class RoleReturnsTrue(Role):
        # proxy Role whose membership test always succeeds
        class Meta:
            proxy = True
        def __contains__(self, accessor):
            return True
    job_template_with_ids.admin_role = RoleReturnsTrue()
    job_template_with_ids.organization.job_template_admin_role = RoleReturnsTrue()
    inv2 = Inventory()
    inv2.use_role = RoleReturnsTrue()
    data = {'inventory': inv2}
    access = JobTemplateAccess(user_unit)
    # switching the inventory is a sensitive change...
    assert not access.changes_are_non_sensitive(job_template_with_ids, data)
    # ...but allowed once the user has use_role on inventory and project
    job_template_with_ids.inventory.use_role = RoleReturnsTrue()
    job_template_with_ids.project.use_role = RoleReturnsTrue()
    assert access.can_change(job_template_with_ids, data)
def mock_raise_none(self, add_host=False, feature=None, check_expiration=True):
    """Stub for license-check methods: always succeed by returning None."""
    return None
def test_jt_can_add_bad_data(user_unit):
    """No server error when JT can_add receives nonsensical data."""
    jt_access = JobTemplateAccess(user_unit)
    bogus_data = {'asdf': 'asdf'}
    assert not jt_access.can_add(bogus_data)
class TestWorkflowAccessMethods:
    """Unit tests for WorkflowJobTemplateAccess.can_add."""
    @pytest.fixture
    def workflow(self, workflow_job_template_factory):
        objects = workflow_job_template_factory('test_workflow', persisted=False)
        return objects.workflow_job_template
    def test_workflow_can_add(self, workflow, user_unit):
        organization = Organization(name='test-org')
        workflow.organization = organization
        organization.workflow_admin_role = Role()
        def mock_get_object(Class, **kwargs):
            # stand-in for get_object_or_400: only the Organization
            # lookup is expected during this test
            if Class == Organization:
                return organization
            else:
                raise Exception('Item requested has not been mocked')
        access = WorkflowJobTemplateAccess(user_unit)
        # force role membership to succeed and route org lookup to our mock
        with mock.patch('awx.main.models.rbac.Role.__contains__', return_value=True):
            with mock.patch('awx.main.access.get_object_or_400', mock_get_object):
                assert access.can_add({'organization': 1})
def test_user_capabilities_method():
    """Unit test to verify that the user_capabilities method will defer
    to the appropriate sub-class methods of the access classes.

    Note that normal output is True/False, but a string is returned
    in these tests to establish uniqueness.
    """
    class FooAccess(BaseAccess):
        def can_change(self, obj, data):
            return 'bar'

        def can_copy(self, obj):
            return 'foo'

    rando = User(username='auser')
    access = FooAccess(rando)
    target = object()
    capabilities = access.get_user_capabilities(target, ['edit', 'copy'])
    assert capabilities == {'edit': 'bar', 'copy': 'foo'}
def test_system_job_template_can_start(mocker):
    """Only superusers (not mere system auditors) may start system jobs."""
    user = mocker.MagicMock(spec=User, id=1, is_system_auditor=True,
                            is_superuser=False)
    assert user.is_system_auditor
    auditor_access = SystemJobTemplateAccess(user)
    assert not auditor_access.can_start(None)

    user.is_superuser = True
    superuser_access = SystemJobTemplateAccess(user)
    assert superuser_access.can_start(None)
| |
"""Manage (save/check) task dependency-on-files data."""
import os
import hashlib
import subprocess
import inspect
from collections import defaultdict
from dbm import dumb
import dbm as ddbm
# uncomment imports below to run tests on all dbm backends...
#import dumbdbm as ddbm
#import dbm as ddbm
#import gdbm as ddbm
# note: to check which DBM backend is being used (in py2):
# >>> anydbm._defaultmod
import json
class DatabaseException(Exception):
    """Raised for any backend DB failure, regardless of the backend type."""
def get_md5(input_data):
    """Return the hex md5 digest of a (unicode) string."""
    return hashlib.md5(input_data.encode("utf-8")).hexdigest()
def get_file_md5(path):
    """Calculate the md5 sum from file content.

    The file is read in blocks so arbitrarily large files can be
    hashed without loading them into memory.

    @param path: (string) file path
    @return: (string) md5
    """
    digest = hashlib.md5()
    chunk_size = 128 * digest.block_size
    with open(path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
class JsonDB(object):
    """Backend using a single text file with JSON content"""

    def __init__(self, name):
        """Open/create a DB file"""
        self.name = name
        # an absent file simply means an empty DB
        self._db = self._load() if os.path.exists(self.name) else {}

    def _load(self):
        """load db content from file"""
        with open(self.name, 'r') as db_file:
            try:
                return json.load(db_file)
            except ValueError as error:
                # file contains corrupted json data
                msg = (error.args[0] +
                       "\nInvalid JSON data in %s\n" %
                       os.path.abspath(self.name) +
                       "To fix this problem, you can just remove the " +
                       "corrupted file, a new one will be generated.\n")
                error.args = (msg,)
                raise DatabaseException(msg)

    def dump(self):
        """save DB content in file"""
        with open(self.name, 'w') as db_file:
            json.dump(self._db, db_file)

    def set(self, task_id, dependency, value):
        """Store value in the DB."""
        task_entry = self._db.setdefault(task_id, {})
        task_entry[dependency] = value

    def get(self, task_id, dependency):
        """Get value stored in the DB.

        @return: (string) or (None) if entry not found
        """
        task_entry = self._db.get(task_id)
        if task_entry is not None:
            return task_entry.get(dependency, None)

    def in_(self, task_id):
        """@return bool if task_id is in DB"""
        return task_id in self._db

    def remove(self, task_id):
        """remove saved dependecies from DB for taskId"""
        self._db.pop(task_id, None)

    def remove_all(self):
        """remove saved dependecies from DB for all tasks"""
        self._db = {}
class DbmDB(object):
    """Backend using a DBM file with individual values encoded in JSON
    On initialization all items are read from DBM file and loaded on _dbm.
    During execution whenever an item is read ('get' method) the json value
    is cached on _db. If a item is modified _db is update and the id is added
    to the 'dirty' set. Only on 'dump' all dirty items values are encoded
    in json into _dbm and the DBM file is saved.
    @ivar name: (str) file name/path
    @ivar _dbm: (dbm) items with json encoded values
    @ivar _db: (dict) items with python-dict as value
    @ivar dirty: (set) id of modified tasks
    """
    # message produced by the stdlib dbm module when the file format is
    # not recognized; matched below to give the user a friendlier hint
    DBM_CONTENT_ERROR_MSG = 'db type could not be determined'
    def __init__(self, name):
        """Open/create a DB file"""
        self.name = name
        try:
            self._dbm = ddbm.open(self.name, 'c')
        except ddbm.error as exception:
            message = str(exception)
            if message == self.DBM_CONTENT_ERROR_MSG:
                # When a corrupted/old format database is found
                # suggest the user to just remove the file
                new_message = (
                    'Dependencies file in %(filename)s seems to use '
                    'an old format or is corrupted.\n'
                    'To fix the issue you can just remove the database file(s) '
                    'and a new one will be generated.'
                    % {'filename': repr(self.name)})
                raise DatabaseException(new_message)
            else:
                # Re-raise any other exceptions
                raise DatabaseException(message)
        self._db = {}
        self.dirty = set()
    def dump(self):
        """save/close DBM file"""
        # only tasks touched by set() need to be re-serialized to JSON
        for task_id in self.dirty:
            self._dbm[task_id] = json.dumps(self._db[task_id])
        self._dbm.close()
    def set(self, task_id, dependency, value):
        """Store value in the DB."""
        if task_id not in self._db:
            self._db[task_id] = {}
        self._db[task_id][dependency] = value
        self.dirty.add(task_id)
    def _in_dbm(self, key):
        """
        should be just::
          return key in self._dbm
        for get()/set() key is convert to bytes but not for 'in'
        """
        return key.encode('utf-8') in self._dbm
    def get(self, task_id, dependency):
        """Get value stored in the DB.
        @return: (string) or (None) if entry not found
        """
        # optimization, just try to get it without checking it exists
        if task_id in self._db:
            return self._db[task_id].get(dependency, None)
        else:
            try:
                task_data = self._dbm[task_id]
            except KeyError:
                return
            # lazily decode and cache the entry read from the DBM file
            self._db[task_id] = json.loads(task_data.decode('utf-8'))
            return self._db[task_id].get(dependency, None)
    def in_(self, task_id):
        """@return bool if task_id is in DB"""
        # dirty covers tasks set() but not yet flushed into _dbm
        return self._in_dbm(task_id) or task_id in self.dirty
    def remove(self, task_id):
        """remove saved dependecies from DB for taskId"""
        if task_id in self._db:
            del self._db[task_id]
        if self._in_dbm(task_id):
            del self._dbm[task_id]
        if task_id in self.dirty:
            self.dirty.remove(task_id)
    def remove_all(self):
        """remove saved dependecies from DB for all tasks"""
        self._db = {}
        # dumb dbm always opens file in update mode
        if isinstance(self._dbm, dumb._Database): # pragma: no cover
            self._dbm._index = {}
        self._dbm.close()
        # gdbm can not be running on 2 instances on same thread
        # see https://bitbucket.org/schettino72/doit/issue/16/
        del self._dbm
        self._dbm = ddbm.open(self.name, 'n')
        self.dirty = set()
class SqliteDB(object):
    """sqlite3 json backend

    Task entries are stored in a single ``doit`` table: the task id is the
    primary key and the task data is serialized as JSON.  Reads are cached
    in ``_cache``; writes stay in memory (ids tracked in ``_dirty``) and
    are only flushed to the database by ``dump()``.
    """
    def __init__(self, name):
        self.name = name  # file path of the DB
        self._conn = self._sqlite3(self.name)
        self._cache = {}  # task_id -> task-data dict (read/write cache)
        self._dirty = set()  # ids of tasks modified since last dump()
    @staticmethod
    def _sqlite3(name):
        """Open/create a sqlite3 DB file and return the connection."""
        # Import sqlite here so it's only imported when required
        import sqlite3
        def dict_factory(cursor, row):
            """convert row to dict"""
            data = {}
            for idx, col in enumerate(cursor.description):
                data[col[0]] = row[idx]
            return data
        def converter(data):
            return json.loads(data.decode('utf-8'))
        sqlite3.register_adapter(list, json.dumps)
        sqlite3.register_adapter(dict, json.dumps)
        sqlite3.register_converter("json", converter)
        conn = sqlite3.connect(
            name,
            detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES,
            isolation_level='DEFERRED')
        conn.row_factory = dict_factory
        sqlscript = """
            create table if not exists doit (
                task_id text not null primary key,
                task_data json
            );"""
        try:
            conn.execute(sqlscript)
        except sqlite3.DatabaseError as exception:
            # BUG FIX: the message previously read "an bad format" and ran
            # the last two sentences together ("generated.Original error")
            # because the adjacent string literals had no separator.
            new_message = (
                'Dependencies file in %(filename)s seems to use '
                'a bad format or is corrupted.\n'
                'To fix the issue you can just remove the database file(s) '
                'and a new one will be generated.\n'
                'Original error: %(msg)s'
                % {'filename': repr(name), 'msg': str(exception)})
            raise DatabaseException(new_message)
        return conn
    def get(self, task_id, dependency):
        """Get value stored in the DB.
        @return: (string) or (None) if entry not found
        """
        if task_id in self._cache:
            return self._cache[task_id].get(dependency, None)
        else:
            # cache the whole task entry on first access
            data = self._cache[task_id] = self._get_task_data(task_id)
            return data.get(dependency, None)
    def _get_task_data(self, task_id):
        # returns {} for unknown tasks so callers can .get() safely
        data = self._conn.execute('select task_data from doit where task_id=?',
                                  (task_id,)).fetchone()
        return data['task_data'] if data else {}
    def set(self, task_id, dependency, value):
        """Store value in the DB."""
        if task_id not in self._cache:
            self._cache[task_id] = {}
        self._cache[task_id][dependency] = value
        self._dirty.add(task_id)
    def in_(self, task_id):
        """@return bool: True if task_id is in the cache or the DB"""
        if task_id in self._cache:
            return True
        if self._conn.execute('select task_id from doit where task_id=?',
                              (task_id,)).fetchone():
            return True
        return False
    def dump(self):
        """save/close sqlite3 DB file"""
        for task_id in self._dirty:
            self._conn.execute('insert or replace into doit values (?,?)',
                               (task_id, json.dumps(self._cache[task_id])))
        self._conn.commit()
        self._conn.close()
        self._dirty = set()
    def remove(self, task_id):
        """remove saved dependecies from DB for taskId"""
        if task_id in self._cache:
            del self._cache[task_id]
        if task_id in self._dirty:
            self._dirty.remove(task_id)
        self._conn.execute('delete from doit where task_id=?', (task_id,))
    def remove_all(self):
        """remove saved dependecies from DB for all task"""
        self._conn.execute('delete from doit')
        self._cache = {}
        self._dirty = set()
class FileChangedChecker(object):
    """Base checker for dependencies, must be inherited.

    Subclasses decide what "state" is recorded for a file dependency and
    how to detect that the file changed relative to a saved state.
    """

    def check_modified(self, file_path, file_stat, state):
        """Check if file in file_path is modified from previous "state".

        @param file_path (string): file path
        @param file_stat: result of os.stat() of file_path
        @param state: state that was previously saved with ``get_state()``
        @returns (bool): True if dep is modified
        """
        raise NotImplementedError()

    def get_state(self, dep, current_state):
        """Compute the state of a task after it has been successfuly executed.

        @param dep (str): path of the dependency file.
        @param current_state (tuple): the current state, saved from a previous
            execution of the task (None if the task was never run).
        @returns: the new state. Return None if the state is unchanged.

        The parameter `current_state` is passed to allow speed optimization,
        see MD5Checker.get_state().
        """
        raise NotImplementedError()
class MD5Checker(FileChangedChecker):
    """MD5 checker, uses the md5sum.

    This is the default checker used by doit.

    As an optimization the check uses (timestamp, file-size, md5).
    If the timestamp is the same it considers that the file has the same
    content. If file size is different its content certainly is modified.
    Finally the md5 is used for a different timestamp with the same size.
    """

    def check_modified(self, file_path, file_stat, state):
        """Check if file in file_path is modified from previous "state"."""
        saved_mtime, saved_size, saved_md5 = state
        # 1 - same timestamp => assume same content
        if file_stat.st_mtime == saved_mtime:
            return False
        # 2 - different size => content certainly modified
        if file_stat.st_size != saved_size:
            return True
        # 3 - same size, different timestamp: compare content hashes
        return get_file_md5(file_path) != saved_md5

    def get_state(self, dep, current_state):
        """Return (timestamp, size, md5) for `dep`, or None if unchanged."""
        mtime = os.path.getmtime(dep)
        # time optimization: if the saved state carries the same
        # timestamp, skip hashing and signal "unchanged" with None
        if current_state and current_state[0] == mtime:
            return
        return mtime, os.path.getsize(dep), get_file_md5(dep)
class TimestampChecker(FileChangedChecker):
    """Checker that uses only the file modification timestamp."""

    def check_modified(self, file_path, file_stat, state):
        """True when the current mtime differs from the saved one."""
        return state != file_stat.st_mtime

    def get_state(self, dep, current_state):
        """@returns float: mtime for file `dep`"""
        return os.path.getmtime(dep)
# Registry of available file-change checker classes, keyed by the name
# used to select them.
CHECKERS = {
    'md5': MD5Checker,
    'timestamp': TimestampChecker,
}
class DependencyStatus(object):
    """Result object for Dependency.get_status.

    @ivar status: (str) one of "run", "up-to-date" or "error"
    @ivar get_log: (bool) when False, callers may stop at the first
        reason found; when True, every reason is collected for reporting
    """

    def __init__(self, get_log):
        self.get_log = get_log
        self.status = 'up-to-date'
        # reason-name -> args explaining why the task is not up-to-date
        self.reasons = defaultdict(list)
        self.error_reason = None

    def add_reason(self, reason, arg, status='run'):
        """sets state and append reason for not being up-to-date
        :return boolean: processing should be interrupted
        """
        self.status = status
        if not self.get_log:
            return True
        self.reasons[reason].append(arg)
        return False

    def set_reason(self, reason, arg):
        """sets state and reason for not being up-to-date
        :return boolean: processing should be interrupted
        """
        self.status = 'run'
        if not self.get_log:
            return True
        self.reasons[reason] = arg
        return False

    def get_error_message(self):
        """return str with error message"""
        return self.error_reason
class Dependency(object):
    """Manage tasks dependencies
    Each dependency is saved in "db". the "db" can have json or dbm
    format where there is a dictionary for every task. each task has a
    dictionary where key is a dependency (abs file path), and the value is the
    dependency signature.
    Apart from dependencies other values are also saved on the task dictionary
     * 'result:', 'task:<task-name>', 'ignore:'
     * user(task) defined values are defined in '_values_:' sub-dict
    @ivar name: (string) filepath of the DB file
    @ivar _closed: (bool) DB was flushed to file
    """
    def __init__(self, db_class, backend_name, checker_cls=MD5Checker):
        self._closed = False
        self.checker = checker_cls()
        self.db_class = db_class
        self.backend = db_class(backend_name)
        # expose the backend primitives directly as bound attributes so
        # the rest of the class (and UptodateCalculator) can use them
        self._set = self.backend.set
        self._get = self.backend.get
        self.remove = self.backend.remove
        self.remove_all = self.backend.remove_all
        self._in = self.backend.in_
        self.name = self.backend.name
    def close(self):
        """Write DB in file"""
        if not self._closed:
            self.backend.dump()
            self._closed = True
    ####### task specific
    def save_success(self, task, result_hash=None):
        """save info after a task is successfuly executed
        :param result_hash: (str) explicitly set result_hash
        """
        # save task values
        self._set(task.name, "_values_:", task.values)
        # save task result md5
        if result_hash is not None:
            self._set(task.name, "result:", result_hash)
        elif task.result:
            if isinstance(task.result, dict):
                self._set(task.name, "result:", task.result)
            else:
                self._set(task.name, "result:", get_md5(task.result))
        # file-dep: record which checker produced the saved states plus
        # one state entry per dependency file
        self._set(task.name, 'checker:', self.checker.__class__.__name__)
        for dep in task.file_dep:
            state = self.checker.get_state(dep, self._get(task.name, dep))
            if state is not None:
                self._set(task.name, dep, state)
        # save list of file_deps
        self._set(task.name, 'deps:', tuple(task.file_dep))
    def get_values(self, task_name):
        """get all saved values from a task
        @return dict
        """
        values = self._get(task_name, '_values_:')
        return values or {}
    def get_value(self, task_id, key_name):
        """get saved value from task
        @param task_id (str)
        @param key_name (str): key result dict of the value
        """
        if not self._in(task_id):
            # FIXME do not use generic exception
            raise Exception("taskid '%s' has no computed value!" % task_id)
        values = self.get_values(task_id)
        if key_name not in values:
            msg = "Invalid arg name. Task '%s' has no value for '%s'."
            raise Exception(msg % (task_id, key_name))
        return values[key_name]
    def get_result(self, task_name):
        """get the result saved from a task
        @return dict or md5sum
        """
        return self._get(task_name, 'result:')
    def remove_success(self, task):
        """remove saved info from task"""
        self.remove(task.name)
    def ignore(self, task):
        """mark task to be ignored"""
        self._set(task.name, 'ignore:', '1')
    def status_is_ignore(self, task):
        """check if task is marked to be ignored"""
        return self._get(task.name, "ignore:")
    def get_status(self, task, tasks_dict, get_log=False):
        """Check if task is up to date. set task.dep_changed
        If the checker class changed since the previous run, the task is
        deleted, to be sure that its state is not re-used.
        @param task: (Task)
        @param tasks_dict: (dict: Task) passed to objects used on uptodate
        @param get_log: (bool) if True, adds all reasons to the return
            object why this file will be rebuild.
        @return: (DependencyStatus) a status object with possible status
            values up-to-date, run or error
        task.dep_changed (list-strings): file-dependencies that are not
        up-to-date if task not up-to-date because of a target, returned value
        will contain all file-dependencies reagrdless they are up-to-date
        or not.
        """
        result = DependencyStatus(get_log)
        task.dep_changed = []
        # check uptodate bool/callables
        uptodate_result_list = []
        for utd, utd_args, utd_kwargs in task.uptodate:
            # if parameter is a callable
            if hasattr(utd, '__call__'):
                # FIXME control verbosity, check error messages
                # 1) setup object with global info all tasks
                if isinstance(utd, UptodateCalculator):
                    utd.setup(self, tasks_dict)
                # 2) add magic positional args for `task` and `values`
                # if present.
                spec_args = list(inspect.signature(utd).parameters.keys())
                magic_args = []
                for i, name in enumerate(spec_args):
                    if i == 0 and name == 'task':
                        magic_args.append(task)
                    elif i == 1 and name == 'values':
                        magic_args.append(self.get_values(task.name))
                args = magic_args + utd_args
                # 3) call it and get result
                uptodate_result = utd(*args, **utd_kwargs)
            elif isinstance(utd, str):
                # a string is run as a shell command; up-to-date iff the
                # command exits with status 0
                # TODO py3.3 has subprocess.DEVNULL
                with open(os.devnull, 'wb') as null:
                    uptodate_result = subprocess.call(
                        utd, shell=True, stderr=null, stdout=null) == 0
            # parameter is a value
            else:
                uptodate_result = utd
            # None means uptodate was not really calculated and should be
            # just ignored
            if uptodate_result is None:
                continue
            uptodate_result_list.append(uptodate_result)
            if not uptodate_result:
                result.add_reason('uptodate_false', (utd, utd_args, utd_kwargs))
        # any uptodate check is false
        if not get_log and result.status == 'run':
            return result
        # no dependencies means it is never up to date.
        if not (task.file_dep or uptodate_result_list):
            if result.set_reason('has_no_dependencies', True):
                return result
        # if target file is not there, task is not up to date
        for targ in task.targets:
            if not os.path.exists(targ):
                task.dep_changed = list(task.file_dep)
                if result.add_reason('missing_target', targ):
                    return result
        # check for modified file_dep checker
        previous = self._get(task.name, 'checker:')
        checker_name = self.checker.__class__.__name__
        if previous and previous != checker_name:
            task.dep_changed = list(task.file_dep)
            # remove all saved values otherwise they might be re-used by
            # some optmization on MD5Checker.get_state()
            self.remove(task.name)
            if result.set_reason('checker_changed', (previous, checker_name)):
                return result
        # check for modified file_dep
        previous = self._get(task.name, 'deps:')
        previous_set = set(previous) if previous else None
        if previous_set and previous_set != task.file_dep:
            if get_log:
                added_files = sorted(list(task.file_dep - previous_set))
                removed_files = sorted(list(previous_set - task.file_dep))
                result.set_reason('added_file_dep', added_files)
                result.set_reason('removed_file_dep', removed_files)
            result.status = 'run'
        # list of file_dep that changed
        check_modified = self.checker.check_modified
        changed = []
        for dep in task.file_dep:
            state = self._get(task.name, dep)
            try:
                file_stat = os.stat(dep)
            except OSError:
                error_msg = "Dependent file '{}' does not exist.".format(dep)
                # NOTE(review): error_msg is already fully formatted; the
                # extra .format(dep) below is a no-op on a string with no
                # remaining placeholders.
                result.error_reason = error_msg.format(dep)
                if result.add_reason('missing_file_dep', dep, 'error'):
                    return result
            else:
                if state is None or check_modified(dep, file_stat, state):
                    changed.append(dep)
        task.dep_changed = changed
        if len(changed) > 0:
            result.set_reason('changed_file_dep', changed)
        return result
#############
class UptodateCalculator(object):
    """Base class for 'uptodate' callables that need access to all tasks."""

    def __init__(self):
        self.get_val = None  # set by setup(): reference to Dependency._get
        self.tasks_dict = None  # set by setup(): dict with all tasks

    def setup(self, dep_manager, tasks_dict):
        """Wire this calculator to the dependency manager and task dict.

        @param dep_manager: (Dependency) used to read saved values
        @param tasks_dict: (dict) task-name -> Task, all tasks
        """
        self.get_val = dep_manager._get
        self.tasks_dict = tasks_dict
| |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Python provides the base64 module as a core module but this is mostly
limited to encoding and decoding base64 and it's variants. It is often
useful to be able to perform other operations on base64 text. This
module is meant to be used in conjunction with the core base64 module.
Standardized base64 is defined in
RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
This module provides the following base64 utility functionality:
* tests if text is valid base64
* filter formatting from base64
* convert base64 between different alphabets
* Handle padding issues
- test if base64 is padded
- removes padding
- restores padding
* wraps base64 text into formatted blocks
- via iterator
- return formatted string
"""
import re
import string
import six
from six.moves import urllib
from keystone.i18n import _
class InvalidBase64Error(ValueError):
    """Raised when text cannot be interpreted as (padded) base64."""
# Regular expressions for validating base64/base64url text (RFC 4648).
#
# The *alphabet* patterns match a string made up entirely of valid
# alphabet characters; the *non_alphabet* patterns find any run of
# characters outside the alphabet (used to reject or filter input).
#
# BUG FIX: the two alphabet patterns previously negated the character
# class ([^...]), so they matched only strings of *invalid* characters —
# the opposite of their names and of the non_alphabet patterns below.
base64_alphabet_re = re.compile(r'^[A-Za-z0-9+/=]+$')
base64url_alphabet_re = re.compile(r'^[A-Za-z0-9---_=]+$')
base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')

# Matches whitespace used to format base64 into blocks/lines.
_strip_formatting_re = re.compile(r'\s+')
# Translation tables between the standard base64 alphabet and the
# URL-safe base64url alphabet (RFC 4648 section 5): '+'<->'-', '/'<->'_'.
# NOTE(review): string.maketrans() exists only on Python 2; on Python 3
# this raises AttributeError (str.maketrans / bytes.maketrans replaced
# it). Confirm the supported interpreter versions before relying on this.
_base64_to_base64url_trans = string.maketrans('+/', '-_')
_base64url_to_base64_trans = string.maketrans('-_', '+/')
def is_valid_base64(text):
    """Test if input text can be base64 decoded.

    :param text: input base64 text
    :type text: string
    :returns: bool -- True if text can be decoded as base64, False otherwise
    """
    stripped = filter_formatting(text)
    if base64_non_alphabet_re.search(stripped):
        return False
    try:
        return base64_is_padded(stripped)
    except InvalidBase64Error:
        return False
def is_valid_base64url(text):
    """Test if input text can be base64url decoded.

    :param text: input base64 text
    :type text: string
    :returns: bool -- True if text can be decoded as base64url,
              False otherwise
    """
    stripped = filter_formatting(text)
    if base64url_non_alphabet_re.search(stripped):
        return False
    try:
        return base64_is_padded(stripped)
    except InvalidBase64Error:
        return False
def filter_formatting(text):
    """Return *text* with whitespace formatting stripped out.

    Base64 text is often wrapped with whitespace, line endings, etc.
    Only that formatting is removed; other non-base64-alphabet
    characters are deliberately left in place (the is_valid_*
    predicates detect those separately).

    :param text: input text to filter
    :type text: string
    :returns: string -- filtered text without formatting
    """
    return _strip_formatting_re.sub('', text)
def base64_to_base64url(text):
    """Translate base64 altchars to the base64url alphabet.

    base64url (RFC 4648 section 5) is the filename- and URL-safe
    variant of base64.  It differs only in the altchars at alphabet
    indexes 62 and 63: '+' becomes '-' (hyphen) and '/' becomes '_'
    (underscore).  Only the altchars are translated here; non-alphabet
    characters pass through untouched.

    WARNING: base64url keeps the '=' pad character, which is NOT URL
    safe.  RFC 4648 suggests either percent-encoding the pad (see
    base64url_percent_encode(); most decoders will then require a
    matching percent-decode) or omitting it entirely (decodable from
    the text length, but many decoders demand padding -- it can be
    restored with base64_assure_padding()).  This function makes no
    choice between those strategies; combine it with
    base64_strip_padding()/base64_assure_padding() or
    base64url_percent_encode() as needed.

    :param text: input base64 text
    :type text: string
    :returns: string -- base64url text
    """
    return text.translate(_base64_to_base64url_trans)
def base64url_to_base64(text):
    """Translate base64url altchars back to standard base64.

    '-' (index 62) becomes '+' and '_' (index 63) becomes '/'; all
    other characters pass through untouched.  See base64_to_base64url()
    for a description of base64url text and its issues.

    This function does NOT handle percent-encoded pad characters; they
    are left intact.  If the input base64url text is percent-encoded,
    call base64url_percent_decode() first.

    :param text: text in base64url alphabet
    :type text: string
    :returns: string -- text in base64 alphabet
    """
    return text.translate(_base64url_to_base64_trans)
def base64_is_padded(text, pad='='):
    """Tell whether base64 *text* is correctly padded.

    The input must be in a base64 alphabet and the pad a single
    character.  Percent-encoded pads (e.g. '%3D') must be decoded back
    to the alphabet first (see base64url_percent_decode()).

    :param text: text containing ONLY characters in a base64 alphabet
    :type text: string
    :param pad: pad character (must be single character) (default: '=')
    :type pad: string
    :returns: bool -- True if padded, False otherwise
    :raises: ValueError, InvalidBase64Error
    """
    if len(pad) != 1:
        raise ValueError(_('pad must be single character'))

    length = len(text)
    first_pad = text.find(pad)
    if length > 0 and length % 4 == 0:
        # A correctly padded string may only carry pads in the last
        # one or two positions.
        if 0 <= first_pad < length - 2:
            raise InvalidBase64Error(_('text is multiple of 4, '
                                       'but pad "%s" occurs before '
                                       '2nd to last char') % pad)
        if first_pad == length - 2 and text[-1] != pad:
            raise InvalidBase64Error(_('text is multiple of 4, '
                                       'but pad "%s" occurs before '
                                       'non-pad last char') % pad)
        return True

    # Not a multiple of 4: any pad at all is malformed.
    if first_pad >= 0:
        raise InvalidBase64Error(_('text is not a multiple of 4, '
                                   'but contains pad "%s"') % pad)
    return False
def base64url_percent_encode(text):
    """Percent-encode the pad characters of padded base64url text.

    The input should contain only base64url alphabet characters; any
    other character present is also subject to percent-encoding.

    :param text: text containing ONLY characters in the base64url alphabet
    :type text: string
    :returns: string -- percent-encoded base64url text
    :raises: InvalidBase64Error
    """
    # Correctly padded base64url is always a multiple of 4 characters.
    remainder = len(text) % 4
    if remainder:
        raise InvalidBase64Error(_('padded base64url text must be '
                                   'multiple of 4 characters'))
    return urllib.parse.quote(text)
def base64url_percent_decode(text):
    """Percent-decode base64url padding.

    The input should contain only base64url alphabet characters plus
    percent-encoded pads; any other percent-encoded character is also
    decoded.

    :param text: base64url alphabet text
    :type text: string
    :returns: string -- percent-decoded base64url text
    :raises: InvalidBase64Error
    """
    decoded = urllib.parse.unquote(text)
    # After decoding, padded base64url must be a multiple of 4 chars.
    if len(decoded) % 4:
        raise InvalidBase64Error(_('padded base64url text must be '
                                   'multiple of 4 characters'))
    return decoded
def base64_strip_padding(text, pad='='):
    """Remove trailing padding from base64 text.

    :param text: text containing ONLY characters in a base64 alphabet
    :type text: string
    :param pad: pad character (must be single character) (default: '=')
    :type pad: string
    :returns: string -- base64 text without padding
    :raises: ValueError
    """
    if len(pad) != 1:
        raise ValueError(_('pad must be single character'))

    # Text shorter than 4 characters cannot carry padding.
    if len(text) < 4:
        return text

    # At most the last two characters may be pads.
    if text.endswith(pad + pad):
        return text[0:-2]
    if text.endswith(pad):
        return text[0:-1]
    return text
def base64_assure_padding(text, pad='='):
    """Return *text* with correct base64 padding appended.

    Base64 text is normally a multiple of 4 characters; each 4-character
    group encodes 3 octets.  When the binary data is not a multiple of 3
    octets, the text is padded with the pad character up to a multiple
    of 4.  Padding never alters the decoded data or its length, so it is
    sometimes omitted to avoid transport-encoding conflicts -- but many
    decoders demand it; this function restores RFC-compliant padding.

    Note: the original docstring contained a paragraph copy-pasted from
    base64_wrap_iter() about iteration/lines; this function neither
    iterates nor wraps -- it only appends pads.

    The input is assumed to consist only of members of a base64 alphabet
    (use filter_formatting() first if needed).  If the text already ends
    with the pad it is assumed fully padded; otherwise the number of
    pads is computed from the text length.

    :param text: text containing ONLY characters in a base64 alphabet
    :type text: string
    :param pad: pad character (must be single character) (default: '=')
    :type pad: string
    :returns: string -- input base64 text with padding
    :raises: ValueError
    """
    if len(pad) != 1:
        raise ValueError(_('pad must be single character'))

    if text.endswith(pad):
        # Trailing pad present: assume the text is already padded.
        return text

    remainder = len(text) % 4
    if remainder == 0:
        return text
    return text + pad * (4 - remainder)
def base64_wrap_iter(text, width=64):
    """Yield *text* in successive chunks of at most *width* characters.

    The input is assumed to consist only of members of a base64
    alphabet (use filter_formatting() first if needed).  Each yielded
    chunk is a line WITHOUT a trailing line ending.

    :param text: text containing ONLY characters in a base64 alphabet
    :type text: string
    :param width: number of characters in each wrapped line (default: 64)
    :type width: int
    :returns: generator -- sequence of lines of base64 text.
    """
    text = six.text_type(text)
    for start in six.moves.range(0, len(text), width):
        yield text[start:start + width]
def base64_wrap(text, width=64):
    """Return *text* folded into newline-terminated lines of *width* chars.

    The input is assumed to consist only of members of a base64
    alphabet (use filter_formatting() first if needed).  Every line of
    the result, including the last, is terminated with a newline.

    :param text: text containing ONLY characters in a base64 alphabet
    :type text: string
    :param width: number of characters in each wrapped line (default: 64)
    :type width: int
    :returns: string -- wrapped text.
    """
    pieces = []
    for line in base64_wrap_iter(text, width):
        pieces.append(line)
        pieces.append(u'\n')
    return u''.join(pieces)
| |
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
# Dotted path of the test plugin defined below; handed to the base test
# case so the API machinery loads SecurityGroupTestPlugin as the core plugin.
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_security_group.'
                   'SecurityGroupTestPlugin')
class SecurityGroupTestExtensionManager(object):
    """Minimal extension manager exposing only the security-group extension."""

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attr.RESOURCE_ATTRIBUTE_MAP.update(
            ext_sg.RESOURCE_ATTRIBUTE_MAP)
        return ext_sg.Securitygroup.get_resources()

    def get_actions(self):
        # This manager contributes no extension actions.
        return []

    def get_request_extensions(self):
        # This manager contributes no request extensions.
        return []
class SecurityGroupsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case with helpers for building security groups and rules."""

    def _create_security_group(self, fmt, name, description, **kwargs):
        """POST a security group; return the raw webob response."""
        data = {'security_group': {'name': name,
                                   'tenant_id': kwargs.get('tenant_id',
                                                           'test-tenant'),
                                   'description': description}}
        security_group_req = self.new_create_request('security-groups', data,
                                                     fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_req.get_response(self.ext_api)

    def _build_security_group_rule(self, security_group_id, direction, proto,
                                   port_range_min=None, port_range_max=None,
                                   remote_ip_prefix=None, remote_group_id=None,
                                   tenant_id='test-tenant',
                                   ethertype=const.IPv4):
        """Return a request-body dict for a security group rule.

        Optional attributes are only included when truthy.
        """
        # Bug fix: the original dict literal listed 'ethertype' twice;
        # the duplicate key is removed (the last occurrence won anyway).
        data = {'security_group_rule': {'security_group_id': security_group_id,
                                        'direction': direction,
                                        'protocol': proto,
                                        'ethertype': ethertype,
                                        'tenant_id': tenant_id}}
        if port_range_min:
            data['security_group_rule']['port_range_min'] = port_range_min
        if port_range_max:
            data['security_group_rule']['port_range_max'] = port_range_max
        if remote_ip_prefix:
            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
        if remote_group_id:
            data['security_group_rule']['remote_group_id'] = remote_group_id
        return data

    def _create_security_group_rule(self, fmt, rules, **kwargs):
        """POST a security group rule; return the raw webob response."""
        security_group_rule_req = self.new_create_request(
            'security-group-rules', rules, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_rule_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_rule_req.get_response(self.ext_api)

    def _make_security_group(self, fmt, name, description, **kwargs):
        """Create a security group and deserialize it; raise on HTTP error."""
        res = self._create_security_group(fmt, name, description, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_security_group_rule(self, fmt, rules, **kwargs):
        """Create a security group rule and deserialize it; raise on error."""
        # Bug fix: honor the fmt argument and forward kwargs; the
        # original always used self.fmt and silently dropped kwargs.
        res = self._create_security_group_rule(fmt, rules, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def security_group(self, name='webservers', description='webservers',
                       fmt=None, do_delete=True):
        """Yield a freshly created security group; delete it on exit."""
        if not fmt:
            fmt = self.fmt
        security_group = self._make_security_group(fmt, name, description)
        yield security_group
        if do_delete:
            self._delete('security-groups',
                         security_group['security_group']['id'])

    @contextlib.contextmanager
    def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
                                                    'd1db38eb087',
                            direction='ingress', protocol=const.PROTO_NAME_TCP,
                            port_range_min='22', port_range_max='22',
                            remote_ip_prefix=None, remote_group_id=None,
                            fmt=None, do_delete=True, ethertype=const.IPv4):
        """Yield a freshly created security group rule; delete it on exit."""
        if not fmt:
            fmt = self.fmt
        rule = self._build_security_group_rule(security_group_id,
                                               direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype=ethertype)
        # Bug fix: use the resolved fmt rather than always self.fmt.
        security_group_rule = self._make_security_group_rule(fmt, rule)
        yield security_group_rule
        if do_delete:
            self._delete('security-group-rules',
                         security_group_rule['security_group_rule']['id'])

    def _delete_default_security_group_egress_rules(self, security_group_id):
        """Deletes default egress rules given a security group ID."""
        res = self._list(
            'security-group-rules',
            query_params='security_group_id=%s' % security_group_id)
        for r in res['security_group_rules']:
            if (r['direction'] == 'egress' and not r['port_range_max'] and
                    not r['port_range_min'] and not r['protocol']
                    and not r['remote_ip_prefix']):
                self._delete('security-group-rules', r['id'])

    def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
        """Asserts that the sg rule has expected key/value pairs passed
        in as expected_kvs dictionary
        """
        # items() works on both Python 2 and 3; iteritems() is Py2-only.
        for k, v in expected_kvs.items():
            self.assertEqual(security_group_rule[k], v)
class SecurityGroupsTestCaseXML(SecurityGroupsTestCase):
    # Re-run the inherited suite with XML (rather than JSON) serialization.
    fmt = 'xml'
class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                              securitygroups_db.SecurityGroupDbMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with security groups.
    """

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["security-group"]

    def create_port(self, context, port):
        """Create a port, attaching the tenant's default SG when none given."""
        tenant_id = self._get_tenant_id_for_create(context, port['port'])
        default_sg = self._ensure_default_security_group(context, tenant_id)
        if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
            port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
        session = context.session
        with session.begin(subtransactions=True):
            sgids = self._get_security_groups_on_port(context, port)
            port = super(SecurityGroupTestPlugin, self).create_port(context,
                                                                    port)
            self._process_port_create_security_group(context, port,
                                                     sgids)
        return port

    def update_port(self, context, id, port):
        """Update a port, rebuilding its SG bindings when they changed."""
        session = context.session
        with session.begin(subtransactions=True):
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                # delete the port binding and read it with the new rules
                self._delete_port_security_group_bindings(context, id)
                port['port']['id'] = id
                self._process_port_create_security_group(
                    context, port['port'],
                    port['port'].get(ext_sg.SECURITYGROUPS))
            port = super(SecurityGroupTestPlugin, self).update_port(
                context, id, port)
        return port

    def create_network(self, context, network):
        """Create a network, ensuring the tenant's default SG exists first."""
        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        return super(SecurityGroupTestPlugin, self).create_network(context,
                                                                   network)

    def get_ports(self, context, filters=None, fields=None,
                  sorts=None, limit=None, marker=None,
                  page_reverse=False):
        # Bug fix: the default for sorts was a mutable list ([]), shared
        # across every call; default to None and normalize here instead.
        sorts = sorts or []
        # NOTE(review): 'fields' is not forwarded, so full port dicts are
        # returned -- confirm against callers before changing.
        neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
            context, filters, sorts=sorts, limit=limit, marker=marker,
            page_reverse=page_reverse)
        return neutron_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
    """Wires the SG test plugin and extension manager into the base setUp."""

    def setUp(self, plugin=None, ext_mgr=None):
        # Fall back to the security-group test plugin / extension manager
        # unless a subclass supplies its own.
        plugin = plugin or DB_PLUGIN_KLASS
        ext_mgr = ext_mgr or SecurityGroupTestExtensionManager()
        super(SecurityGroupDBTestCase,
              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEqual(len(sg_rules), 2)
v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
self.assertEqual(len(v4_rules), 1)
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
self.assertEqual(len(v6_rules), 1)
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_update_security_group(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['name'],
data['security_group']['name'])
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_update_security_group_name_to_default_fail(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'default',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_default_security_group_name_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_default_security_group_with_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_list_security_groups(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as security_groups:
self._test_list_resources('security-group',
security_groups,
query_params='description=sg')
def test_list_security_groups_with_sort(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_sort('security-group',
(sg3, sg2, sg1),
[('name', 'desc')],
query_params='description=sg')
def test_list_security_groups_with_pagination(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination('security-group',
(sg1, sg2, sg3),
('name', 'asc'), 2, 2,
query_params='description=sg')
def test_list_security_groups_with_pagination_reverse(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination_reverse(
'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
'22', None, None, ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
for bad_prefix in ['bad_ip', 256, "2001:db8:a::123/129", '172.30./24']:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
remote_ip_prefix = bad_prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_invalid_ethertype_for_prefix(self):
name = 'webservers'
description = 'my webservers'
test_addr = {'192.168.1.1/24': 'ipv4', '192.168.1.1/24': 'IPv6',
'2001:db8:1234::/48': 'ipv6',
'2001:db8:1234::/48': 'IPv4'}
for prefix, ether in test_addr.iteritems():
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = ether
remote_ip_prefix = prefix
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_with_unmasked_prefix(self):
name = 'webservers'
description = 'my webservers'
addr = {'10.1.2.3': {'mask': '32', 'ethertype': 'IPv4'},
'fe80::2677:3ff:fe7d:4c': {'mask': '128', 'ethertype': 'IPv6'}}
for ip in addr:
with self.security_group(name, description) as sg:
sg_id = sg['security_group']['id']
ethertype = addr[ip]['ethertype']
remote_ip_prefix = ip
rule = self._build_security_group_rule(
sg_id,
'ingress',
const.PROTO_NAME_TCP,
'22', '22',
remote_ip_prefix,
None,
None,
ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.assertEqual(res.status_int, 201)
res_sg = self.deserialize(self.fmt, res)
prefix = res_sg['security_group_rule']['remote_ip_prefix']
self.assertEqual(prefix, '%s/%s' % (ip, addr[ip]['mask']))
def test_create_security_group_rule_tcp_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = const.PROTO_NUM_TCP # TCP
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
ethertype=ethertype) as rule:
# the lower case value will be return
self.assertEqual(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEqual(rule['security_group_rule']['ethertype'],
const.IPv4)
    def test_get_security_group(self):
        """Showing an SG includes default and newly created rules."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            remote_group_id = sg['security_group']['id']
            # NOTE: the show request is deliberately built *before* the
            # ingress rule is created but only executed afterwards, so the
            # response reflects the rule added in the nested context below.
            res = self.new_show_request('security-groups', remote_group_id)
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_TCP
            port_range_min = 22
            port_range_max = 22
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix):
                group = self.deserialize(
                    self.fmt, res.get_response(self.ext_api))
                sg_rule = group['security_group']['security_group_rules']
                self.assertEqual(group['security_group']['id'],
                                 remote_group_id)
                # 2 default egress rules + the ingress rule just created.
                self.assertEqual(len(sg_rule), 3)
                sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
                for k, v, in keys:
                    self.assertEqual(sg_rule[0][k], v)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description, do_delete=False) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id,
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_admin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context('', 'test-tenant')
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context('', 'test-tenant')
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
self.assertEqual(len(sg), 1)
def test_security_group_port_create_creates_default_security_group(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
sg = self._list('security-groups').get('security_groups')
self.assertEqual(len(sg), 1)
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
security_group_id = groups['security_groups'][0]['id']
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 4)
# Verify default rule for v4 egress
sg_rules = rules['security_group_rules']
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_egress, expected)
# Verify default rule for v6 egress
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_egress, expected)
# Verify default rule for v4 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv4,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_ingress, expected)
# Verify default rule for v6 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv6,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_ingress, expected)
def test_create_security_group_rule_remote_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_group_id = sg2['security_group']['id']
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_group_id', remote_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
    def test_create_security_group_rule_icmp_with_type_and_code(self):
        """ICMP rules accept type > code since min<=max is not enforced."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_ICMP
            # port_range_min (ICMP type) is greater than port_range_max
            # (ICMP code) in order to confirm min <= max port check is
            # not called for ICMP.
            port_range_min = 8
            port_range_max = 5
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(rule['security_group_rule'][k], v)
    def test_create_security_group_rule_icmp_with_type_only(self):
        """An ICMP rule may set only the type (min) and leave code None."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            direction = "ingress"
            remote_ip_prefix = "10.0.0.0/24"
            protocol = const.PROTO_NAME_ICMP
            # ICMP type
            port_range_min = 8
            # ICMP code
            port_range_max = None
            keys = [('remote_ip_prefix', remote_ip_prefix),
                    ('security_group_id', security_group_id),
                    ('direction', direction),
                    ('protocol', protocol),
                    ('port_range_min', port_range_min),
                    ('port_range_max', port_range_max)]
            with self.security_group_rule(security_group_id, direction,
                                          protocol, port_range_min,
                                          port_range_max,
                                          remote_ip_prefix) as rule:
                for k, v, in keys:
                    self.assertEqual(rule['security_group_rule'][k], v)
    def test_create_security_group_source_group_ip_and_ip_prefix(self):
        """Specifying both remote_ip_prefix and remote_group_id is a 400."""
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_bad_security_group_id(self):
        """A rule against a nonexistent security group returns 404."""
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
    def test_create_security_group_rule_bad_tenant(self):
        """A tenant cannot add a rule to another tenant's group (404)."""
        with self.security_group() as sg:
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': "bad_tenant"}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
    def test_create_security_group_rule_bad_tenant_remote_group_id(self):
        """remote_group_id belonging to another tenant is rejected (404)."""
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            sg2 = self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg2['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant',
                     'remote_group_id': sg['security_group']['id']}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
    def test_create_security_group_rule_bad_tenant_security_group_rule(self):
        """Creating a rule on a group the tenant does not own is a 404."""
        with self.security_group() as sg:
            res = self._create_security_group(self.fmt, 'webservers',
                                              'webservers',
                                              tenant_id='bad_tenant')
            self.deserialize(self.fmt, res)
            rule = {'security_group_rule':
                    {'security_group_id': sg['security_group']['id'],
                     'direction': 'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'port_range_min': '22',
                     'port_range_max': '22',
                     'tenant_id': 'bad_tenant'}}

            res = self._create_security_group_rule(self.fmt, rule,
                                                   tenant_id='bad_tenant',
                                                   set_context=True)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
    def test_create_security_group_rule_bad_remote_group_id(self):
        """A nonexistent remote_group_id is rejected with 404."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
            direction = "ingress"
            protocol = const.PROTO_NAME_TCP
            port_range_min = 22
            port_range_max = 22
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_group_id=remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
    def test_create_security_group_rule_duplicate_rules(self):
        """Creating the same rule twice yields 409 Conflict."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22')
                self._create_security_group_rule(self.fmt, rule)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_security_group_rule_min_port_greater_max(self):
        """min > max is a 400 for TCP/UDP by both name and number."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                for protocol in [const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
                                 const.PROTO_NUM_TCP, const.PROTO_NUM_UDP]:
                    rule = self._build_security_group_rule(
                        sg['security_group']['id'],
                        'ingress', protocol, '50', '22')
                    res = self._create_security_group_rule(self.fmt, rule)
                    self.deserialize(self.fmt, res)
                    self.assertEqual(res.status_int,
                                     webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_ports_but_no_protocol(self):
        """Port ranges without a protocol are rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress', None, '22', '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_port_range_min_only(self):
        """For TCP, supplying only port_range_min is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', None)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_port_range_max_only(self):
        """For TCP, supplying only port_range_max is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, None, '22')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_icmp_type_too_big(self):
        """ICMP type above 255 is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_ICMP, '256', None)
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_icmp_code_too_big(self):
        """ICMP code above 255 is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_ICMP, '8', '256')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_icmp_with_code_only(self):
        """An ICMP code without a type is rejected (400)."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            with self.security_group_rule(security_group_id):
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_ICMP, None, '2')
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_list_ports_security_group(self):
        """A freshly created port lists exactly one (default) group."""
        with self.network() as n:
            with self.subnet(n):
                self._create_port(self.fmt, n['network']['id'])
                req = self.new_list_request('ports')
                res = req.get_response(self.api)
                ports = self.deserialize(self.fmt, res)
                port = ports['ports'][0]
                self.assertEqual(len(port[ext_sg.SECURITYGROUPS]), 1)
                self._delete('ports', port['id'])
    def test_list_security_group_rules(self):
        """Listing filtered egress rules returns exactly the three created."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):

                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)

                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_resources('security-group-rule',
                                          [sgr1, sgr2, sgr3],
                                          query_params=q)
    def test_list_security_group_rules_with_sort(self):
        """Rules list honours descending sort on port_range_max."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):

                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)

                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_with_sort('security-group-rule',
                                          (sgr3, sgr2, sgr1),
                                          [('port_range_max', 'desc')],
                                          query_params=q)
    def test_list_security_group_rules_with_pagination(self):
        """Rules list supports pagination (page size 2 over 3 rules)."""
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):

                # Delete default rules as they would fail the following
                # assertion at the end.
                self._delete_default_security_group_egress_rules(
                    security_group_id)

                q = 'direction=egress&security_group_id=' + security_group_id
                self._test_list_with_pagination(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params=q)
    def test_list_security_group_rules_with_pagination_reverse(self):
        """Rules list supports reverse pagination.

        NOTE(review): unlike the sibling pagination tests this one neither
        deletes the default egress rules nor filters on security_group_id;
        presumably the reverse-pagination helper tolerates the extras --
        confirm against the helper's implementation.
        """
        with self.security_group(name='sg') as sg:
            security_group_id = sg['security_group']['id']
            with contextlib.nested(self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=22,
                                                            port_range_max=22),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=23,
                                                            port_range_max=23),
                                   self.security_group_rule(security_group_id,
                                                            direction='egress',
                                                            port_range_min=24,
                                                            port_range_max=24)
                                   ) as (sgr1, sgr2, sgr3):
                self._test_list_with_pagination_reverse(
                    'security-group-rule', (sgr3, sgr2, sgr1),
                    ('port_range_max', 'desc'), 2, 2,
                    query_params='direction=egress')
    def test_update_port_with_security_group(self):
        """Updating a port sets the group; omitting the field keeps it."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [sg['security_group']['id']]}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])

                    # Test update port without security group
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name']}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])

                    self._delete('ports', port['port']['id'])
    def test_update_port_with_multiple_security_groups(self):
        """A port can be created with two security groups attached."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg1:
                    with self.security_group() as sg2:
                        res = self._create_port(
                            self.fmt, n['network']['id'],
                            security_groups=[sg1['security_group']['id'],
                                             sg2['security_group']['id']])
                        port = self.deserialize(self.fmt, res)
                        self.assertEqual(len(
                            port['port'][ext_sg.SECURITYGROUPS]), 2)
                        self._delete('ports', port['port']['id'])
    def test_update_port_remove_security_group_empty_list(self):
        """Updating with security_groups=[] detaches all groups."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': []}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
                                     [])
                    self._delete('ports', port['port']['id'])
    def test_update_port_remove_security_group_none(self):
        """Updating with security_groups=None also detaches all groups."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     'security_groups': None}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'].get(ext_sg.SECURITYGROUPS),
                                     [])
                    self._delete('ports', port['port']['id'])
    def test_create_port_with_bad_security_group(self):
        """Creating a port with a malformed group id is a 400."""
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=['bad_id'])

                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_delete_security_group_port_in_use(self):
        """A group bound to a port cannot be deleted (409) until unbound."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    res = self._create_port(self.fmt, n['network']['id'],
                                            security_groups=(
                                                [sg['security_group']['id']]))
                    port = self.deserialize(self.fmt, res)
                    self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
                                     sg['security_group']['id'])
                    # try to delete security group that's in use
                    res = self._delete('security-groups',
                                       sg['security_group']['id'],
                                       webob.exc.HTTPConflict.code)
                    # delete the blocking port
                    self._delete('ports', port['port']['id'])
    def test_create_security_group_rule_bulk_native(self):
        """Native bulk rule creation of two distinct rules returns 201."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule1 = self._build_security_group_rule(sg['security_group']['id'],
                                                    'ingress',
                                                    const.PROTO_NAME_TCP, '22',
                                                    '22', '10.0.0.1/24')
            rule2 = self._build_security_group_rule(sg['security_group']['id'],
                                                    'ingress',
                                                    const.PROTO_NAME_TCP, '23',
                                                    '23', '10.0.0.1/24')
            rules = {'security_group_rules': [rule1['security_group_rule'],
                                              rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
    def test_create_security_group_rule_bulk_emulated(self):
        """Emulated bulk rule creation (native support faked off) works."""
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule1 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
                rules = {'security_group_rules': [rule1['security_group_rule'],
                                                  rule2['security_group_rule']]
                         }
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
    def test_create_security_group_rule_allow_all_ipv4(self):
        """A protocol-less, port-less IPv4 rule (allow all) is accepted."""
        with self.security_group() as sg:
            rule = {'security_group_id': sg['security_group']['id'],
                    'direction': 'ingress',
                    'ethertype': 'IPv4',
                    'tenant_id': 'test-tenant'}

            res = self._create_security_group_rule(
                self.fmt, {'security_group_rule': rule})
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
    def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
        """Bulk-creating allow-all rules for IPv4 and IPv6 returns 201."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule_v4 = {'security_group_id': sg['security_group']['id'],
                       'direction': 'ingress',
                       'ethertype': 'IPv4',
                       'tenant_id': 'test-tenant'}
            rule_v6 = {'security_group_id': sg['security_group']['id'],
                       'direction': 'ingress',
                       'ethertype': 'IPv6',
                       'tenant_id': 'test-tenant'}

            rules = {'security_group_rules': [rule_v4, rule_v6]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
    def test_create_security_group_rule_duplicate_rule_in_post(self):
        """The same rule twice in one bulk POST is a 409 Conflict."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(sg['security_group']['id'],
                                                   'ingress',
                                                   const.PROTO_NAME_TCP, '22',
                                                   '22', '10.0.0.1/24')
            rules = {'security_group_rules': [rule['security_group_rule'],
                                              rule['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
        """Duplicate rule in one bulk POST is a 409 on the emulated path."""
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):

            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rules = {'security_group_rules': [rule['security_group_rule'],
                                                  rule['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                rule = self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_security_group_rule_duplicate_rule_db(self):
        """Re-POSTing a rule that already exists in the DB is a 409."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg:
            rule = self._build_security_group_rule(sg['security_group']['id'],
                                                   'ingress',
                                                   const.PROTO_NAME_TCP, '22',
                                                   '22', '10.0.0.1/24')
            # NOTE(review): unlike the _in_post test above, the wrapped rule
            # dict (still carrying its 'security_group_rule' key) is placed
            # in the bulk list here -- confirm the API accepts this shape.
            rules = {'security_group_rules': [rule]}
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_security_group_rule(self.fmt, rules)
            rule = self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_security_group_rule_duplicate_rule_db_emulated(self):
        """Duplicate-in-DB conflict (409) on the emulated bulk path."""
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            with self.security_group() as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rules = {'security_group_rules': [rule]}
                self._create_security_group_rule(self.fmt, rules)
                # NOTE(review): the second POST sends the single wrapped
                # `rule`, not `rules` as in the native variant above --
                # presumably intentional (single create vs bulk); confirm.
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
    def test_create_security_group_rule_different_security_group_ids(self):
        """Mixed security_group_ids in one bulk POST are rejected (400)."""
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk "
                          "security_group_rule create")
        with self.security_group() as sg1:
            with self.security_group() as sg2:
                rule1 = self._build_security_group_rule(
                    sg1['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
                rule2 = self._build_security_group_rule(
                    sg2['security_group']['id'], 'ingress',
                    const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')

                rules = {'security_group_rules': [rule1['security_group_rule'],
                                                  rule2['security_group_rule']]
                         }
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_with_invalid_ethertype(self):
        """An unknown ethertype ('IPv5') is rejected with 400."""
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = const.PROTO_NAME_TCP
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype='IPv5')
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_security_group_rule_with_invalid_protocol(self):
        """An unknown protocol string ('tcp/ip') is rejected with 400."""
        security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
        direction = "ingress"
        remote_ip_prefix = "10.0.0.0/24"
        protocol = 'tcp/ip'
        port_range_min = 22
        port_range_max = 22
        remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
        rule = self._build_security_group_rule(security_group_id, direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id)
        res = self._create_security_group_rule(self.fmt, rule)
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
    def test_create_port_with_non_uuid(self):
        """A non-UUID security group reference on a port is a 400."""
        with self.network() as n:
            with self.subnet(n):
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=['not_valid'])

                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
class TestConvertIPPrefixToCIDR(base.BaseTestCase):
    """Unit tests for ext_sg.convert_ip_prefix_to_cidr."""

    def test_convert_bad_ip_prefix_to_cidr(self):
        """Invalid prefixes raise InvalidCIDR; None passes through as None."""
        for val in ['bad_ip', 256, "2001:db8:a::123/129"]:
            self.assertRaises(n_exc.InvalidCIDR,
                              ext_sg.convert_ip_prefix_to_cidr, val)
        self.assertIsNone(ext_sg.convert_ip_prefix_to_cidr(None))

    def test_convert_ip_prefix_no_netmask_to_cidr(self):
        """Bare addresses get a host netmask (/32 for v4, /128 for v6)."""
        # NOTE: dict.iteritems() is Python 2 only, consistent with this file.
        addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'}
        for k, v in addr.iteritems():
            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k),
                             '%s/%s' % (k, v))

    def test_convert_ip_prefix_with_netmask_to_cidr(self):
        """Prefixes already in CIDR form are returned unchanged."""
        addresses = ['10.1.0.0/16', '10.1.2.3/32', '2001:db8:1234::/48']
        for addr in addresses:
            self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(addr), addr)
class TestConvertProtocol(base.BaseTestCase):
    """Unit tests for ext_sg.convert_protocol."""

    def test_convert_numeric_protocol(self):
        """Numeric protocol strings stay strings after conversion."""
        self.assertIsInstance(ext_sg.convert_protocol('2'), str)

    def test_convert_bad_protocol(self):
        """Unknown names and out-of-range numbers raise the extension error."""
        for val in ['bad', '256', '-1']:
            self.assertRaises(ext_sg.SecurityGroupRuleInvalidProtocol,
                              ext_sg.convert_protocol, val)
class TestSecurityGroupsXML(TestSecurityGroups):
    """Re-runs the whole TestSecurityGroups suite over the XML wire format."""
    fmt = 'xml'
| |
"""
Compute the LUQ decomposition of a sparse square matrix.
Based on Pawel Kowal's MatLab code.
Written by: Zachary Ferguson
"""
import numpy
import scipy.sparse
import scipy.sparse.linalg
def luq(A, do_pivot, tol = 1e-8):
"""
PURPOSE: calculates the following decomposition
A = L |Ubar 0 | Q
|0 0 |
where Ubar is a square invertible matrix
and matrices L, Q are invertible.
USAGE: [L,U,Q] = luq(A,do_pivot,tol)
INPUT:
A - a sparse matrix
do_pivot = 1 with column pivoting
= 0 without column pivoting
tol - uses the tolerance tol in separating zero and nonzero values
OUTPUT:
L,U,Q matrices
COMMENTS:
This method is based on lu decomposition,
https://en.wikipedia.org/wiki/LU_decomposition.
Based on LREM_SOLVE:
Copyright (c) Pawel Kowal (2006)
All rights reserved
LREM_SOLVE toolbox is available free for noncommercial academic use only.
pkowal3@sgh.waw.pl
"""
n, m = A.shape
# Test if A is a sparse matrix
# if ~issparse(A)
# A = sparse(A)
# end
###########################################################################
# SPECIAL CASES
###########################################################################
if(n == 0 or m == 0):
# Return (L, U, Q) = (I(nxn), A, I(mxm))
return (scipy.sparse.identity(n), A, scipy.sparse.identity(m))
###########################################################################
# LU DECOMPOSITION
###########################################################################
# Perform a LU decomposition on A.
# Returns a scipy.sparse.linalg.SuperLU
LUDecomp = scipy.sparse.linalg.splu(A)
L = LUDecomp.L
U = LUDecomp.U
P = scipy.sparse.csc_matrix((n, n))
P[numpy.arange(m), LUDecomp.perm_r] = 1
if do_pivot:
Q = scipy.sparse.csc_matrix((m, m))
Q[numpy.arange(m), LUDecomp.perm_c] = 1
Q = Q.T if do_pivot else scipy.sparse.identity(m)
else:
Q = scipy.sparse.identity(m)
# import pdb; pdb.set_trace()
p = n - L.shape[1]
LL = scipy.sparse.csc_matrix((n - p, p))
if(p != 0):
LL = scipy.sparse.vstack([LL, scipy.sparse.identity(p).tocsc()])
L = scipy.sparse.hstack([P.T.dot(L), P[(n - p):n, :].T])
if(p != 0):
U = scipy.sparse.vstack([U, scipy.sparse.csc_matrix((p, m))])
###########################################################################
# FINDS ROWS WITH ZERO AND NONZERO ELEMENTS ON THE DIAGONAL
###########################################################################
if(U.shape[0] == 1 or U.shape[1] == 1):
S = scipy.sparse.csc_matrix(U[0, 0])
else:
S = scipy.sparse.dia_matrix((U.diagonal(), [0]), shape=U.shape)
# I = find(abs(S)>tol)
I = (abs(S) > tol).nonzero()
# Jl = (1:n)'
Jl = numpy.arange(0, n).reshape((1, n)).T
# Jl(I) = []
Jl = numpy.delete(Jl, I[0])
# Jq = (1:m)'
Jq = numpy.arange(0, m).reshape((1, m)).T
# Jq(I) = []
Jq = numpy.delete(Jq, I)
# Ubar1 = U(I,I)
Ubar1 = U[I]
# Ubar2 = U(Jl,Jq)
Ubar2 = U[Jl.flatten(), Jq.flatten()]
# Qbar1 = Q(I,:)
Qbar1 = Q[I[0], :]
# Lbar1 = L(:,I)
Lbar1 = L[:, I[1]]
###########################################################################
# ELIMINATES NONZEZO ELEMENTS BELOW AND ON THE RIGHT OF THE
# INVERTIBLE BLOCK OF THE MATRIX U
#
# UPDATES MATRICES L, Q
###########################################################################
# if ~isempty(I)
import pdb
pdb.set_trace()
if(I[0].shape[0] != 0):
# Utmp = U(I,Jq)
Utmp = U[I[0], Jq]
# X = Ubar1'\U(Jl,I)'
X = scipy.sparse.linalg.spsolve(Ubar1.T, U[Jl, I].T)
# Ubar2 = Ubar2-X'*Utmp
Ubar2 = Ubar2 - X.T.dot(Utmp)
# Lbar1 = Lbar1+L(:,Jl)*X'
Lbar1 = Lbar1 + L[:, Jl].dot(X.T)
# X = Ubar1\Utmp
X = scipy.sparse.linalg.spsolve(Ubar1, Utmp)
# Qbar1 = Qbar1+X*Q(Jq,:)
Qbar1 = Qbar1 + X.dot(Q[Jq, :])
# Utmp = []
Utmp = numpy.empty(1)
# X = []
X = numpy.empty(1)
# end
###########################################################################
# FINDS ROWS AND COLUMNS WITH ONLY ZERO ELEMENTS
###########################################################################
# I2 = find(max(abs(Ubar2),[],2)>tol)
I2 = ((abs(Ubar2)).max(1) > tol).nonzero()
# I5 = find(max(abs(Ubar2),[],1)>tol)
I5 = ((abs(Ubar2)).max(0) > tol).nonzero()
# I3 = Jl(I2)
I3 = Jl[I2]
# I4 = Jq(I5)
I4 = Jq[I5]
# Jq(I5) = []
Jq[I5] = numpy.empty(1)
# Jl(I2) = []
J1[I2] = numpy.empty(1)
# U = []
U = numpy.empty(1)
###########################################################################
# FINDS A PART OF THE MATRIX U WHICH IS NOT IN THE REQIRED FORM
###########################################################################
# A = Ubar2(I2,I5)
A = Ubar[I2, I5]
###########################################################################
# PERFORMS LUQ DECOMPOSITION OF THE MATRIX A
###########################################################################
# [L1,U1,Q1] = luq(A,do_pivot,tol)
L1, U1, Q1 = luq(A, do_pivot, tol)
###########################################################################
# UPDATES MATRICES L, U, Q
###########################################################################
# Lbar2 = L(:,I3)*L1
Lbar2 = L[:, I3].dot(L1)
# Qbar2 = Q1*Q(I4,:)
Qbar2 = Q1.dot(Q[I4, :])
# L = [Lbar1 Lbar2 L(:,Jl)]
L = scipy.sparse.hstack([Lbar1, Lbar2, L[:, Jl]])
# Q = [Qbar1; Qbar2; Q(Jq,:)]
Q = scipy.sparse.vstack([Qbar1, Qbar2, Q[Jq, :]])
# n1 = length(I)
n1 = I.shape[0]
# n2 = length(I3)
n2 = I3.shape[0]
# m2 = length(I4)
m2 = I4.shape[0]
# U = [Ubar1 sparse(n1,m-n1);sparse(n2,n1) U1 sparse(n2,m-n1-m2);
# sparse(n-n1-n2,m)]
U = scipy.sparse.vstack([
scipy.sparse.hstack([Ubar1, scipy.sparse.csc_matrix(
shape = (n1, m - n1))]),
scipy.sparse.hstack([scipy.sparse.csc_matrix(
shape = (n2, n1)), U1, scipy.sparse.csc_matrix(
shape = (n2, m - n1 - m2))]),
scipy.sparse.csc_matrix(n - n1 - n2, m)
])
return L, U, Q
if __name__ == "__main__":
    # Smoke-test the decomposition on a trivial invertible matrix and
    # print the factors plus the reconstruction L*U*Q (should equal A).
    # A = scipy.sparse.csc_matrix(numpy.ones((4, 4)))
    A = scipy.sparse.identity(4).tocsc()
    L, U, Q = luq(A, True)
    print("L:\n%s" % L)
    print("U:\n%s" % U)
    print("Q:\n%s" % Q)
    print("A = L*U*Q:\n%s" % L.dot(U).dot(Q))
| |
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Command processor for GRIT. This is the script you invoke to run the various
GRIT tools.
'''
import os
import sys
# When run as a script, make the parent directory importable so the
# 'grit' package resolves regardless of the current working directory.
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import getopt
from grit import util
import grit.exception
import grit.tool.android2grd
import grit.tool.build
import grit.tool.count
import grit.tool.diff_structures
import grit.tool.menu_from_parts
import grit.tool.newgrd
import grit.tool.resize
import grit.tool.rc2grd
import grit.tool.test
import grit.tool.transl2tc
import grit.tool.unit
# Copyright notice
# Banner printed by PrintUsage(); the year is computed once at import time.
_COPYRIGHT = '''\
GRIT - the Google Resource and Internationalization Tool
Copyright (c) Google Inc. %d
''' % util.GetCurrentYear()

# Keys for the following map
_CLASS = 1           # the class implementing the tool
_REQUIRES_INPUT = 2  # True if the tool needs a .grd input file to exist
_HIDDEN = 3  # optional key - presence indicates tool is hidden

# Maps tool names to the tool's module. Done as a list of (key, value) tuples
# instead of a map to preserve ordering.
_TOOLS = [
  ['build', { _CLASS : grit.tool.build.RcBuilder, _REQUIRES_INPUT : True }],
  ['newgrd', { _CLASS : grit.tool.newgrd.NewGrd, _REQUIRES_INPUT : False }],
  ['rc2grd', { _CLASS : grit.tool.rc2grd.Rc2Grd, _REQUIRES_INPUT : False }],
  ['transl2tc', { _CLASS : grit.tool.transl2tc.TranslationToTc,
                  _REQUIRES_INPUT : False }],
  ['sdiff', { _CLASS : grit.tool.diff_structures.DiffStructures,
              _REQUIRES_INPUT : False }],
  ['resize', {
    _CLASS : grit.tool.resize.ResizeDialog, _REQUIRES_INPUT : True }],
  ['unit', { _CLASS : grit.tool.unit.UnitTestTool, _REQUIRES_INPUT : False }],
  ['count', { _CLASS : grit.tool.count.CountMessage, _REQUIRES_INPUT : True }],
  ['test', {
    _CLASS: grit.tool.test.TestTool, _REQUIRES_INPUT : True,
    _HIDDEN : True }],
  ['menufromparts', {
    _CLASS: grit.tool.menu_from_parts.MenuTranslationsFromParts,
    _REQUIRES_INPUT : True, _HIDDEN : True }],
  ['android2grd', {
    _CLASS : grit.tool.android2grd.Android2Grd,
    _REQUIRES_INPUT : False }],
]
def PrintUsage():
  '''Prints the copyright banner, the global options, and a one-line summary
  of every non-hidden tool.'''
  print _COPYRIGHT

  tool_list = ''
  for (tool, info) in _TOOLS:
    # Tools flagged with _HIDDEN are internal and left out of the listing.
    if not _HIDDEN in info.keys():
      tool_list += '  %-12s %s\n' % (tool, info[_CLASS]().ShortDescription())

  # TODO(joi) Put these back into the usage when appropriate:
  #
  # -d  Work disconnected.  This causes GRIT not to attempt connections with
  #     e.g. Perforce.
  #
  # -c  Use the specified Perforce CLIENT when talking to Perforce.
  print '''Usage: grit [GLOBALOPTIONS] TOOL [args to tool]

Global options:

  -i INPUT  Specifies the INPUT file to use (a .grd file).  If this is not
            specified, GRIT will look for the environment variable GRIT_INPUT.
            If it is not present either, GRIT will try to find an input file
            named 'resource.grd' in the current working directory.

  -v        Print more verbose runtime information.

  -x        Print extremely verbose runtime information.  Implies -v

  -p FNAME  Specifies that GRIT should profile its execution and output the
            results to the file FNAME.

Tools:

  TOOL can be one of the following:
%s
  For more information on how to use a particular tool, and the specific
  arguments you can send to that tool, execute 'grit help TOOL'
''' % (tool_list)
class Options(object):
  '''Option storage and parsing.'''

  def __init__(self):
    self.disconnected = False
    self.client = ''
    self.input = None
    self.verbose = False
    self.extra_verbose = False
    self.output_stream = sys.stdout
    self.profile_dest = None

  def ReadOptions(self, args):
    '''Reads options from the start of args and returns the remainder.'''
    (opts, remaining) = getopt.getopt(args, 'g:dvxc:i:p:')
    for (flag, value) in opts:
      if flag == '-d':
        self.disconnected = True
      elif flag == '-c':
        self.client = value
      elif flag == '-i':
        self.input = value
      elif flag == '-v':
        self.verbose = True
        util.verbose = True
      elif flag == '-x':
        # -x implies -v, and additionally turns on extra verbosity.
        self.verbose = True
        util.verbose = True
        self.extra_verbose = True
        util.extra_verbose = True
      elif flag == '-p':
        self.profile_dest = value

    if not self.input:
      # Fall back to the environment, then to the conventional file name.
      self.input = os.environ.get('GRIT_INPUT', 'resource.grd')

    return remaining

  def __repr__(self):
    return '(disconnected: %d, verbose: %d, client: %s, input: %s)' % (
      self.disconnected, self.verbose, self.client, self.input)
def _GetToolInfo(tool):
  '''Returns the info map for the tool named 'tool' or None if there is no
  such tool.'''
  # A plain loop instead of filter(): under Python 3 filter() returns a lazy
  # iterator, so the old len(matches)/matches[0] code would raise TypeError.
  # This form works identically on Python 2 and 3 and stops at the first hit.
  for (name, info) in _TOOLS:
    if name == tool:
      return info
  return None
def Main(args):
  '''Parses arguments and does the appropriate thing.'''
  util.ChangeStdoutEncoding()

  # No arguments, or a lone 'help': show the general usage screen.
  if not len(args) or len(args) == 1 and args[0] == 'help':
    PrintUsage()
    return 0
  elif len(args) == 2 and args[0] == 'help':
    # 'grit help TOOL': print the tool class's docstring.
    tool = args[1].lower()
    if not _GetToolInfo(tool):
      print "No such tool. Try running 'grit help' for a list of tools."
      return 2

    print ("Help for 'grit %s' (for general help, run 'grit help'):\n"
           % (tool))
    print _GetToolInfo(tool)[_CLASS].__doc__
    return 0
  else:
    options = Options()
    args = options.ReadOptions(args)  # args may be shorter after this
    if not args:
      print "No tool provided. Try running 'grit help' for a list of tools."
      return 2
    tool = args[0]
    if not _GetToolInfo(tool):
      print "No such tool. Try running 'grit help' for a list of tools."
      return 2

    try:
      # Tools that need an input file get an early, friendly failure if the
      # resolved input path does not exist.
      if _GetToolInfo(tool)[_REQUIRES_INPUT]:
        os.stat(options.input)
    except OSError:
      print ('Input file %s not found.\n'
             'To specify a different input file:\n'
             ' 1. Use the GRIT_INPUT environment variable.\n'
             ' 2. Use the -i command-line option. This overrides '
             'GRIT_INPUT.\n'
             ' 3. Specify neither GRIT_INPUT or -i and GRIT will try to load '
             "'resource.grd'\n"
             ' from the current directory.' % options.input)
      return 2

    toolobject = _GetToolInfo(tool)[_CLASS]()
    if options.profile_dest:
      # Optional profiling of the tool run via the (Python 2) hotshot module.
      import hotshot
      prof = hotshot.Profile(options.profile_dest)
      prof.runcall(toolobject.Run, options, args[1:])
    else:
      # NOTE(review): the tool's return value is discarded here, so the
      # process exit code is None/0 even if the tool fails — confirm intended.
      toolobject.Run(options, args[1:])
# Script entry point: exit with whatever Main returns.
if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| |
from __future__ import unicode_literals, print_function
import os
import sys
import glob
import pkg_resources
from itertools import chain
from os.path import abspath, dirname, exists, join
import paver.virtual as virtual
from paver.easy import * # paver docs pretty consistently want you to do this
from paver.path import path # primarily here to support the rmtree method of a path object
__here__ = path(abspath(dirname(__file__)))
PLUGINS_DIR = __here__ / path('plugins')
SIDEBOARD_DIR = __here__ / path('sideboard')
def bootstrap_venv(intended_venv, bootstrap_name=None):
    """
    (Re)create a virtualenv using paver's bootstrap machinery.

    :param intended_venv: path where the virtualenv should live; any existing
        directory at that path is deleted first
    :param bootstrap_name: optional prefix for the generated bootstrap script
    """
    # bootstrap wants options available in options.virtualenv which is a Bunch
    if exists(intended_venv):
        intended_venv.rmtree()
    venv = getattr(options, 'virtualenv', Bunch())
    # requirements.txt sits next to the venv's parent directory
    with open(path(dirname(intended_venv)) / path('requirements.txt')) as reqs:
        # we expect this to be reversed in setup.py
        venv.packages_to_install = [line.strip() for line in reqs.readlines()[::-1] if line.strip()]
    venv.dest_dir = intended_venv
    if bootstrap_name:
        venv.script_name = '{}-bootstrap.py'.format(bootstrap_name)
    # NOTE(review): if bootstrap_name is falsy, script_name relies on paver's
    # default bootstrap script name — confirm.
    options.virtualenv = venv
    virtual.bootstrap()
    if sys.executable:
        # if we can figure out the python associated with this paver, execute the bootstrap script
        # and we can expect the virtual env will then exist
        sh('{python_path} "{script_name}"'.format(python_path=sys.executable,
                                                  script_name=venv.script_name))
def guess_plugin_module_name(containing_folder):
    """
    given a containing folder, guess what the plugin name should be

    :param containing_folder: the folder that possibly contains a plugin
    :type containing_folder: unicode
    :return: the guessed module name (dashes replaced with underscores)
    """
    # TODO: this only works as long as insist that the plugin dir be the module name
    # Strip trailing separators first: basename('a/b/') is '' which would
    # silently yield an empty module name for paths ending in a slash.
    normalized = containing_folder.rstrip(os.sep)
    return os.path.basename(normalized).replace('-', '_')
def collect_plugin_dirs(module=False):
    """
    :param module: if True, return the module within a plugin directory, else (default) just return
        the plugin directory
    :return: the plugin folders in a form that can be iterated over
    :rtype: collections.Iterator
    """
    required_files = ('setup.py', 'requirements.txt')
    for candidate in glob.glob(PLUGINS_DIR / path('*')):
        # only directories carrying both marker files count as plugins
        if not all(exists(join(candidate, name)) for name in required_files):
            continue
        if module:
            yield join(candidate, guess_plugin_module_name(candidate))
        else:
            yield candidate
@task
def make_venv():
    """
    make a virtualenv for the sideboard project
    """
    # create ./env from the top-level requirements.txt, then `setup.py develop`
    # sideboard itself into it
    bootstrap_venv(__here__ / path('env'), 'sideboard')
    develop_sideboard()
def install_pip_requirements_in_dir(dir_of_requirements_txt):
    """Editable-install the project in the given directory using the venv's pip."""
    # always use the pip that belongs to the project's virtualenv
    pip_binary = __here__ / path('env/bin/pip')
    print("---- installing dependencies in {} ----"
          .format(dir_of_requirements_txt))
    command = '{pip} install -e {dir_of_requirements_txt}'.format(
        pip=pip_binary,
        dir_of_requirements_txt=dir_of_requirements_txt)
    sh(command)
def run_setup_py(path):
    """Run `setup.py develop` in the given directory."""
    # prefer the virtualenv's python when it exists, else whatever runs paver
    venv_python = str(__here__ / 'env' / 'bin' / 'python')
    interpreter = venv_python if exists(venv_python) else sys.executable
    command = 'cd {path} && {python_path} {setup_path} develop'.format(
        path=path,
        python_path=interpreter,
        setup_path=join(path, 'setup.py'))
    sh(command)
def develop_sideboard():
    # editable-install sideboard itself via its top-level setup.py
    run_setup_py(__here__)
@task
def pull_plugins():
    """
    invoke git pull from each plug-in directory, your global git either needs to allow this to \
    happen auth-free, or you need to enter your credentials each time
    """
    for plugin_dir in collect_plugin_dirs():
        # '&&' instead of ';' so that git pull is NOT executed in the current
        # working directory when the cd fails (e.g. the dir was removed)
        sh('cd "{}" && git pull'.format(plugin_dir))
@task
def assert_all_files_import_unicode_literals():
    """
    error if a python file is found in sideboard or plugins that does not import unicode_literals; \
    this is skipped for Python 3
    """
    if sys.version_info[0] == 2:
        all_files_found = []
        # The find fragment is a raw string: "\;" is not a valid Python escape
        # sequence (a SyntaxWarning on modern interpreters), and find needs
        # the literal backslash-semicolon to terminate -exec.
        cmd = ("find '%s' -name '*.py' ! -size 0 "
               r"-exec grep -RL 'from __future__ import.*unicode_literals.*$' {} \;")
        for test_dir in chain(['sideboard'], collect_plugin_dirs(module=True)):
            # grep -L prints the files that are MISSING the import
            output = sh(cmd % test_dir, capture=True)
            if output:
                all_files_found.append(output)
        if all_files_found:
            print('the following files did not include "from __future__ import unicode_literals":')
            print(''.join(all_files_found))
            raise BuildFailure("there were files that didn't include "
                               '"from __future__ import unicode_literals"')
@task
def assert_all_projects_correctly_define_a_version():
    """
    error if there are plugins where we can't find a version defined
    """
    all_files_with_bad_versions = []
    # FIXME: should we try to execfile? that's what setup.py is going to do anyway
    # NOTE(review): the trailing character class [\'\+] accepts a "+" where a
    # closing double-quote seems intended — looks like a typo for [\'\"]; confirm.
    cmd = (r'grep -xP "__version__\s*=\s*[\'\"][0-9]+\.[0-9]+(\.[0-9]+)?[\'\+]" {0}/_version.py')
    for test_dir in chain(['sideboard'], collect_plugin_dirs(module=True)):
        try:
            # sh raises BuildFailure when grep exits non-zero (no match found)
            sh(cmd.format(test_dir))
        except BuildFailure:
            all_files_with_bad_versions.append(test_dir)
    if all_files_with_bad_versions:
        print('the following directories do not include a _version.py file with __version__ '
              'specified:')
        print('\n'.join(all_files_with_bad_versions))
        print('Your plugin should be in agreement with this stack overflow post:')
        print('http://stackoverflow.com/questions/458550/'
              'standard-way-to-embed-version-into-python-package/7071358#7071358')
        raise BuildFailure("there were projects that didn't include correctly specify __version__")
@task
@needs(['assert_all_files_import_unicode_literals',
        'assert_all_projects_correctly_define_a_version'])
def run_all_assertions():
    """
    run all the assertion tasks that sideboard supports
    """
    # intentionally empty: the @needs decorator runs each assertion task
@task
@cmdopts([
    ('name=', 'n', 'name of the plugin to create'),
    ('drop', 'd', 'delete existing plugin if present'),
    ('no_webapp', 'w', 'do not expose webpages in the plugin'),
    ('no_sqlalchemy', 'a', 'do not use SQLAlchemy in the plugin'),
    ('no_service', 'r', 'do not expose a service in the plugin'),
    ('no_sphinx', 's', 'do not generate Sphinx docs'),
    ('django=', 'j', 'create a Django project alongside the plugin with this name'),
    ('cli', 'c', 'make this a cli application; implies -w/-r')
])
def create_plugin(options):
    """create a plugin skeleton to start a new project"""
    # this is actually needed thanks to the skeleton using jinja2 (and six, although that's changeable)
    try:
        pkg_resources.get_distribution("sideboard")
    except pkg_resources.DistributionNotFound:
        raise BuildFailure("This command must be run from within a configured virtual environment.")

    plugin_name = options.create_plugin.name
    # with --drop, wipe any existing plugin directory of the same name first
    if getattr(options.create_plugin, 'drop', False) and (PLUGINS_DIR / path(plugin_name.replace('_', '-'))).exists():
        # rmtree fails if the dir doesn't exist apparently
        (PLUGINS_DIR / path(plugin_name.replace('_', '-'))).rmtree()

    # translate the "no_*" command-line switches into positive feature flags
    kwargs = {}
    for opt in ['webapp', 'sqlalchemy', 'service', 'sphinx']:
        kwargs[opt] = not getattr(options.create_plugin, 'no_' + opt, False)
    kwargs['cli'] = getattr(options.create_plugin, 'cli', False)
    kwargs['django'] = getattr(options.create_plugin, 'django', None)
    if kwargs['cli']:
        # a CLI-only plugin has neither web pages nor an exposed service
        kwargs['webapp'] = False
        kwargs['service'] = False
    from data.paver import skeleton
    skeleton.create_plugin(PLUGINS_DIR, plugin_name, **kwargs)
    print('{} successfully created'.format(options.create_plugin.name))
@task
def install_deps():
    """editable-install sideboard itself and then every plugin"""
    for requirements_dir in chain([__here__], collect_plugin_dirs()):
        install_pip_requirements_in_dir(requirements_dir)
@task
def clean():
    """
    clean all pyc and __pycache__ files
    """
    # remove stray bytecode files first, then the bytecode cache directories
    remove_pyc = "find . -name '*.pyc' | xargs rm -f"
    remove_pycache = "find . -name __pycache__ | xargs rm -fr"
    for command in (remove_pyc, remove_pycache):
        sh(command)
| |
#!/usr/bin/env python
from __future__ import print_function
import logging
import sys
import imp
from plumbum import cli, cmd, local
from plumbum.commands import ProcessExecutionError
import plumbum.path.utils as plumbum_utils
from loslassa import __version__
from loslassa.devserver import serve_with_reloader
from loslassa.utils import (
simple_dbg, find_file, adjust_log,
friendly_exception_handler, LoslassaError)
# Program name used for the CLI and the "loslassa" subcommand.
LOSLASSA = "loslassa"
# Root logger; configured in main() / adjust_log().
log = logging.getLogger()
class LoslassaProject(object):
    """Paths and build/commit operations for one Loslassa (sphinx) project."""

    # name of the sphinx configuration file that marks a project root
    SPHINX_CONFIG = "conf.py"
    HERE = local.path(__file__).dirname
    PROJECTS = HERE / "projects"
    EXAMPLE_PROJECT = PROJECTS / "example"
    SKELETON_PROJECT = PROJECTS / "skeleton"

    def __init__(self, projectPath):
        assert projectPath, "No project path set"
        self.projectPath = projectPath
        self.inputContainer = local.path(local.cwd / projectPath)
        self.projectName = self.inputContainer.basename
        self.sphinxConfig = self.inputContainer / self.SPHINX_CONFIG
        # generated artefacts live under __build inside the project
        self.buildPath = local.path(self.inputContainer / "__build")
        self.doctreesPath = self.buildPath / "doctrees"
        self.outputPath = self.buildPath / "html"
        log.info("[PROJECT INFO]: input from %s - html generated in %s" %
                 (self.inputContainer, self.outputPath))

    def __str__(self):
        return simple_dbg(self)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.inputContainer)

    def create_project(self):
        # seed a fresh project from the bundled skeleton
        plumbum_utils.copy(self.SKELETON_PROJECT, self.inputContainer)
        log.info("created project at %s" % (self.inputContainer))

    @property
    def isProject(self):
        # a directory is a project iff it carries a conf.py
        return self.sphinxConfig.exists()

    def generate_html(self):
        # run sphinx-build in "dirhtml" mode into the __build tree
        args = ["-v", "-b", "dirhtml", "-d", self.doctreesPath._path,
                self.inputContainer._path, self.outputPath._path]
        log.debug("sphinx-build %s" % (" ".join(args)))
        output = cmd.sphinx_build(*args)
        log.info("sphinx output...\n%s" % (output))

    def add_new_files(self):
        # fixme use GitPorcelainPorcelain
        with local.cwd(self.projectPath):
            log.info(cmd.git("add", "--all", "."))

    def commit_files(self):
        # fixme use GitPorcelainPorcelain
        with local.cwd(self.projectPath):
            log.info(cmd.git("commit", "-m", "new build"))

    def commit_all(self):
        """Stage everything and commit; tolerate an empty commit."""
        self.add_new_files()
        try:
            self.commit_files()
        except ProcessExecutionError as e:
            # "nothing to commit" is expected when no files changed
            if "nothing to commit" not in e.stdout:
                raise

    def build_generate_only(self):
        self.generate_html()

    def build_and_autocommit(self):
        self.generate_html()
        self.commit_all()
class LoslassaConfig(object):
    """Access to Loslassa settings in configuration file"""

    def __init__(self, projectPath, configName="conf"):
        # the project's conf.py is imported as a real module to read settings
        configPath = local.path(projectPath, configName + ".py")
        assert configPath.exists(), configPath
        # NOTE(review): `imp` is deprecated (removed in Python 3.12) —
        # importlib would be the modern replacement; confirm target versions.
        fp, path, suffixes = imp.find_module(configName, [projectPath._path])
        try:
            self.conf = imp.load_module(configName, fp, path, suffixes)
        finally:
            fp.close()
        self.settings = self.conf.LoslassaSettings

    def __str__(self):
        return simple_dbg(self)

    def __getattr__(self, item):
        # unknown attributes are delegated to the LoslassaSettings object
        return getattr(self.settings, item)
class GitPorcelainPorcelain(object):
    """Thin convenience wrapper around git for a Loslassa project repo."""

    def __init__(self, projectPath):
        self.projectPath = projectPath
        self.projectName = projectPath.basename
        self.settings = LoslassaConfig(projectPath)
        self.gitPath = self.projectPath / ".git"

    def create_repo(self):
        """Initialize a repo in the project and make the initial commit."""
        with local.cwd(self.projectPath):
            log.info(cmd.git("init"))
            log.info(cmd.git("add", "."))
            log.info(cmd.git("commit", "-m", "initial commit"))

    def connect_project(self):
        """
        http://mikeeverhart.net/git/using-git-to-deploy-code/

        local
        -----
        prepare local project:
            cd into projectPath
            git remote add www ssh://<sshUser>@<remoteFqdn>/<bareclonepath>

        prepare remote bare repo:
            git clone --bare <proj> <proj>.git
            cd into bare clone repo
            $ cat > hooks/post-receive
            #!/bin/sh
            GIT_WORK_TREE=<remote bare clones path> git checkout -f
            chmod +x hooks/post-receive

        to remote:
            rsync -avx
                <proj>.git bestuebe@best-uebersetzungen.de: ./projects/<proj>/

        remote
        ------
        git clone ~/projects/<proj>.git ~/www_content/<proj>
        ln -s
            ~/www_content/bilderwerkstatt_ravensburg .de/__build/html
            <dir containing web content>

        initial push
        ------------
        git push www +master:refs/heads/master

        all other pushes
        ----------------
        git push www master
        """
        # todo
        # todo look into plumbum remote path
        # ... or use posixpath
        # import posixpath as pp
        # fixme just a sketch check these paths - very likely wrong
        # self.bareCLoneName = self.projectName + ".git"
        # self.bareCLonePath = self.projectPath + ".git"
        # remHomePath = "/home/%s" % (self.sshUser)
        # remoteBareClonesContainer = pp.join(remHomePath, "projects")
        # remoteBareClonePath = pp.join(
        #     remoteBareClonesContainer, self.bareCLoneName)
        # remoteContentsContainer = pp.join(remHomePath, "www_content")
        # remoteContentsPath = pp.join(remHomePath, "www_content")

    @property
    def isRepo(self):
        # the project is a repo iff a .git directory exists
        return self.gitPath.exists()

    @property
    def sshOptions(self):
        # command-line arguments for ssh: identity file plus user@host
        return (
            ["-i", self.settings.privateKeyPath,
             "%s@%s" % (self.settings.sshUser, self.settings.remoteFqdn)])

    @property
    def sshUser(self):
        return self.settings.sshUser

    @property
    def remoteFqdn(self):
        return self.settings.remoteFqdn

    @property
    def privateKeyPath(self):
        return self.settings.privateKeyPath

    @property
    def sshIsOk(self):
        """True if a non-interactive ssh connection to the remote works."""
        try:
            cmd.ssh(self.sshOptions + ["id"])
            return True
        except Exception:
            log.warning("ssh connection failed", exc_info=True)
            return False
class LoslassaCliApplication(cli.Application):
    """Common switches and initialization shared by all loslassa commands."""

    PROGNAME = LOSLASSA
    VERSION = __version__
    USAGE = LOSLASSA + " [start|play|loslassa] [OPTIONS]"
    # state shared by subcommands, filled in by the switches below
    projectPath = None
    logLevel = logging.DEBUG
    logFilePath = None

    def __str__(self):
        return simple_dbg(
            self, excludeNames=["parent", "nested_command"])

    @cli.autoswitch(str)
    def project_name(self, projectName):
        """Set name (can be a relative or absolute path as well"""
        self.projectPath = local.path(projectName)

    @cli.autoswitch(str)
    def verbosity(self, level):
        """Adjust the talkativeness of loslassing activities.

        :param str level: log level (one of the accepted logging values)

        Levels from very chatty to almost silent: debug, info, warning, error
        """
        self.logLevel = level

    @cli.autoswitch(str)
    def log_to_file(self, filePath):
        """Log to a file instead of the console"""
        self.logFilePath = filePath

    def _init(self, create=False):
        """Resolve the project path, optionally create it, and set up logging."""
        if not self.projectPath:
            # no explicit path: walk upwards looking for a conf.py
            log.warning("no conf.py here ... searching (press CTRL-C to stop)")
            confPath = find_file(local.cwd, LoslassaProject.SPHINX_CONFIG)
            self.projectPath = local.path(confPath.dirname)
        self.project = LoslassaProject(self.projectPath)
        if create:
            self.project.create_project()
        adjust_log(level=self.logLevel, filePath=self.logFilePath)
        log.info("working with project '%s'" % (self.project.projectName))
        if log.getEffectiveLevel() > logging.DEBUG:
            # hide tracebacks from users unless we are debugging
            sys.excepthook = friendly_exception_handler
class Loslassa(LoslassaCliApplication):
    """Root command: only dispatches to the start/play/loslassa subcommands."""

    def main(self, *args):
        log.debug("executing command %s" % str(self.nested_command))
        if args:
            # positional arguments are not accepted at the top level
            log.error("unknown command %r" % (args[0]))
            return 1
        if not self.nested_command:
            log.error("Which kind of loslassing? "
                      "Try %s --help" % (Loslassa.PROGNAME))
            return 1
@Loslassa.subcommand("start")
class LoslassaStart(LoslassaCliApplication):
    """Starts a new project by creating the initial project structure"""

    def main(self):
        log.info("start loslassing ...")
        if not self.projectPath:
            raise LoslassaError("please provide a name for the project")
        if self.projectPath.exists():
            # refuse to clobber an existing directory
            raise LoslassaError(
                "'%s' already exists (try a different name?)." %
                (self.projectPath.basename))
        self._init(create=True)
        log.info("Created project '%s' at %s" %
                 (self.project.projectName, self.project.inputContainer))
@Loslassa.subcommand("play")
class LoslassaPlay(LoslassaCliApplication):
    """Start playing with the source and create your page"""

    # defaults, overridable via the autoswitches below
    serverPort = 8080
    autocommit = True

    @cli.autoswitch()
    def no_autocommit(self):
        """switch off automatic commits to the repository"""
        self.autocommit = False

    @cli.autoswitch(int)
    def serve_on_port(self, serverPort):
        """Set port manually"""
        self.serverPort = serverPort

    def _init(self, create=False):
        super(LoslassaPlay, self)._init(create)
        # fixme reloader reloads main method instead just the server!?
        if not self.project.sphinxConfig.exists():
            raise LoslassaError(
                "no config found at %s" % (self.project.sphinxConfig))

    def main(self):
        """Create the project representation and start serving"""
        log.info("play loslassing...")
        self._init()
        # rebuild (and optionally commit) whenever a watched file changes
        buildCommand = (self.project.build_and_autocommit if self.autocommit
                        else self.project.build_generate_only)
        serve_with_reloader(
            serveFromPath=self.project.outputPath,
            port=self.serverPort,
            changedCallback=buildCommand,
            pathToWatch=self.project.inputContainer,
            pathToIgnore=self.project.buildPath,
            # cleanFileNames=["conf", "index"],
            cleanFileNames="ALL",
            cleanPaths=[self.project.outputPath, self.project.doctreesPath])
@Loslassa.subcommand(LOSLASSA)
class LoslassaLoslassa(LoslassaCliApplication):
    """Practice loslassing by pushing your page into the interwebs"""

    def main(self):
        self._init(create=True)
        # todo make a progress bar consisting of loslassa :)
        # deployment is not implemented yet; this only logs a placeholder
        log.info("loslassa loslassa loslassa ...")
        # raise LoslassaError("coming soon...")
def main():
    # configure a bare-bones console logger before plumbum takes over
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    Loslassa.run()


if __name__ == "__main__":
    sys.exit(main())
| |
#!/usr/bin/env python
from __future__ import division, print_function
from functools import partial
import os
import sys
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from .features import Features, DEFAULT_VARFRAC
from .utils import positive_int, portion, nonnegative_float, confirm_outfile
_do_nothing = lambda *a, **k: None
################################################################################
### Handling blank descriptors
DEFAULT_BLANK_HANDLER = 'fill'
DEFAULT_BLANK_THRESH = 1000
class FillBlanks(BaseEstimator, TransformerMixin):
    '''
    Fills in any almost-blank SIFT descriptors with a random one.

    copy: if False, change the input in-place. By default, operates on a copy.

    blank_thresh: the threshold for considering a descriptor to be blank.
        Any descriptors whose sum is less than this threshold are replaced.
    '''
    def __init__(self, copy=True, blank_thresh=DEFAULT_BLANK_THRESH):
        self.copy = copy
        self.blank_thresh = blank_thresh

    def fit(self, X, y=None):
        "Does nothing, since this transform doesn't require fitting."
        return self

    def transform(self, X, y=None, copy=None):
        if copy is None:
            copy = self.copy
        n_f, dim = X.shape
        if copy:
            X = X.copy()
        # rows whose total mass falls below the threshold are "blank"
        blank_rows = X.sum(axis=1) < self.blank_thresh
        # scale so a random row's expected sum is ~3x the blank threshold
        fill_scale = 3 * (2 * self.blank_thresh / dim)
        X[blank_rows, :] = fill_scale * np.random.rand(blank_rows.sum(), dim)
        return X
class ZeroBlanks(BaseEstimator, TransformerMixin):
    '''
    Zeroes out any almost-blank SIFT descriptors.

    copy: if False, change the input in-place. By default, operates on a copy.

    blank_thresh: the threshold for considering a descriptor to be blank.
        Any descriptors whose sum is less than this threshold are replaced.
    '''
    def __init__(self, copy=True, blank_thresh=DEFAULT_BLANK_THRESH):
        self.copy = copy
        self.blank_thresh = blank_thresh

    def fit(self, X, y=None):
        "Does nothing, since this transform doesn't require fitting."
        return self

    def transform(self, X, y=None, copy=None):
        if copy is None:
            copy = self.copy
        out = X.copy() if copy else X
        # zero every row whose total mass falls under the blank threshold
        out[out.sum(axis=1) < self.blank_thresh, :] = 0
        return out
# Maps CLI handler names to transformer classes; 'drop' has no class because
# it is special-cased inside handle_blanks() (it changes bag sizes).
BLANK_HANDLERS = {
    'fill': FillBlanks,
    'zero': ZeroBlanks,
    'drop': None,
}
def handle_blanks(features, blank_thresh=DEFAULT_BLANK_THRESH,
                  blank_handler=DEFAULT_BLANK_HANDLER,
                  inplace=False):
    '''Handles any SIFT descriptors that are blank, or nearly blank.'''
    if blank_handler not in BLANK_HANDLERS:
        raise ValueError(
            "unknown blank handler {!r}, expected one of {}".format(
                blank_handler, ", ".join(map(repr, BLANK_HANDLERS))))

    if blank_handler == 'drop':
        # dropping rows changes bag sizes, so it can't go through a transformer
        # TODO handle this more efficiently
        kept_bags = [
            bag[np.sum(bag, axis=1) >= blank_thresh, :]
            for bag in features.features
        ]
        return features._replace_bags(kept_bags, inplace=inplace)

    transformer = BLANK_HANDLERS[blank_handler](blank_thresh=blank_thresh)
    result = features._apply_transform(transformer, fit_first=True,
                                       inplace=inplace)
    if not inplace:
        return result
################################################################################
### Add spatial information
def add_spatial_info(features, add_x=True, add_y=True, inplace=False,
                     dtype=None):
    '''
    Adds spatial information to image features (which should contain a frames
    attribute in the format created by extract_image_features).

    Adds a feature for x (if add_x) and y (if add_y), which are relative (x, y)
    locations within the image of the feature between 0 and 1 (inclusive).

    Returns a new Features object with these additional features, or modifies
    features and returns None if inplace is True.

    If dtype is not None, the resulting array will have that dtype. Otherwise,
    it will maintain features.dtype if it's a float type, or float32 if not.
    '''
    if not add_x and not add_y:
        # nothing to add: preserve the inplace/return contract and bail out
        return None if inplace else features

    # columns of the frames to take: 0 is x, 1 is y
    indices = []
    if add_x:
        indices.append(0)
    if add_y:
        indices.append(1)

    if dtype is None:
        dtype = features.dtype
        if dtype.kind != 'f':
            dtype = np.float32

    spatial = np.asarray(np.vstack(features.frames)[:, indices], dtype=dtype)
    # normalize to [0, 1] by the per-column maximum
    # NOTE(review): divides by zero if a column's max is 0 (e.g. all features
    # on the image's left/top edge) — confirm inputs make this impossible.
    spatial /= spatial.max(axis=0)

    new_feats = np.hstack((features._features, spatial))
    if inplace:
        features._features = new_feats
        features._refresh_features()
    else:
        return Features(
            new_feats, n_pts=features._n_pts,
            categories=features.categories, names=features.names,
            **dict((k, features.data[k]) for k in features._extra_names))
################################################################################
### Wrapper for general processing
def process_image_features(features, verbose=False, inplace=False,
        blank_thresh=DEFAULT_BLANK_THRESH, blank_handler=DEFAULT_BLANK_HANDLER,
        do_pca=True, pca=None,
        pca_k=None, pca_varfrac=DEFAULT_VARFRAC, pca_random=False,
        pca_whiten=False,
        add_x=True, add_y=True,
        standardize_feats=True, scaler=True if False else None,
        ret_pca=False, ret_scaler=False):
    '''
    Does the full image processing stack:
      - blank handling with handle_blanks()
      - dimensionality reduction with features.pca()
      - adds spatial information with add_spatial_info()
      - standardizes the features with features.standardize()

    Returns the processed Features, optionally followed by the fitted pca
    and/or scaler (as a list) when ret_pca / ret_scaler are set.
    '''
    # TODO: use sklearn.Pipeline instead?
    pr = partial(print, file=sys.stderr) if verbose else _do_nothing

    if blank_handler not in (None, "none"):
        pr("Handling blanks...")
        ret = handle_blanks(features, blank_thresh=blank_thresh,
                            blank_handler=blank_handler, inplace=inplace)
        if not inplace:
            features = ret

    if do_pca:
        pr("Running PCA...")
        old_dim = features.dim
        ret = features.pca(
            pca=pca, ret_pca=True, k=pca_k, varfrac=pca_varfrac,
            randomize=pca_random, whiten=pca_whiten, inplace=inplace)
        if inplace:
            pca = ret
        else:
            features, pca = ret
        new_dim = features.dim
        pr("Reduced dimensionality from {} to {}.".format(old_dim, new_dim))

    if add_x or add_y:
        pr("Adding spatial info...")
        ret = add_spatial_info(features, add_x, add_y, inplace=inplace)
        if not inplace:
            features = ret

    if standardize_feats:
        pr("Standardizing features...")
        ret = features.standardize(scaler=scaler, ret_scaler=True,
                                   inplace=inplace)
        if inplace:
            scaler = ret
        else:
            features, scaler = ret

    # plain Features when no extras requested; otherwise a list in the fixed
    # order [features, pca?, scaler?]
    if not ret_pca and not ret_scaler:
        return features
    ret = [features]
    if ret_pca:
        # NOTE(review): if do_pca is False this returns the pca argument
        # unchanged (possibly None); callers must handle None.
        ret.append(pca)
    if ret_scaler:
        ret.append(scaler)
    return ret
def parse_args(args=None):
    '''Builds the CLI parser and returns (parsed args, load path, save path).'''
    import argparse

    # helper for boolean flags
    # based on http://stackoverflow.com/a/9236426/344821
    class ActionNoYes(argparse.Action):
        '''Registers paired --opt / --no-opt flags writing one boolean dest.'''
        def __init__(self, opt_name, off_name=None, dest=None,
                     default=True, required=False, help=None):
            if off_name is None:
                off_name = 'no-' + opt_name
            self.off_name = '--' + off_name
            if dest is None:
                dest = opt_name.replace('-', '_')
            super(ActionNoYes, self).__init__(
                ['--' + opt_name, '--' + off_name],
                dest, nargs=0, const=None,
                default=default, required=required, help=help)

        def __call__(self, parser, namespace, values, option_string=None):
            # which of the two spellings was used decides the boolean value
            setattr(namespace, self.dest, option_string != self.off_name)

    _def = "; default %(default)s."

    parser = argparse.ArgumentParser(
        description="Processes raw PHOW features before running the SDM.")

    blanks = parser.add_argument_group("Blank handling")
    blanks.add_argument('--blank-threshold', dest='blank_thresh',
        default=DEFAULT_BLANK_THRESH, type=nonnegative_float,
        help="Consider descriptors with sum < BLANK_THRESH to be blank" + _def)
    blanks.add_argument('--blank-handler',
        choices=list(BLANK_HANDLERS) + ['none'], default=DEFAULT_BLANK_HANDLER,
        help="What to do with blanks" + _def)

    pca = parser.add_argument_group("PCA options")
    # NOTE(review): _add_action is argparse's private API; it is used here
    # because ActionNoYes registers two option strings at once.
    pca._add_action(ActionNoYes('do-pca', 'no-pca', default=True,
        help="Whether to run PCA; does by default."))
    dim = pca.add_mutually_exclusive_group()
    dim.add_argument('--pca-k', type=positive_int, default=None,
        help="An explicit dimensionality to reduce the features to.")
    dim.add_argument('--pca-varfrac', default=DEFAULT_VARFRAC,
        type=portion, metavar='FRAC',
        help="The fraction of variance to maintain in the PCA" + _def)
    pca._add_action(ActionNoYes('pca-random', default=False,
        help="Whether to use a randomized PCA implementation; default don't."))
    pca._add_action(ActionNoYes('pca-whiten', default=False,
        help="Whether to do whitening in the PCA, removing linear correlations "
             "between dimensions; default don't."))

    spa = parser.add_argument_group('Spatial information')
    spa._add_action(ActionNoYes('add-x', default=True,
        help="Append normalized x coord of patches; does by default."))
    spa._add_action(ActionNoYes('add-y', default=True,
        help="Append normalized y coord of patches; does by default."))

    std = parser.add_argument_group('Standardization')
    std._add_action(ActionNoYes('standardize-feats', default=True,
        help="Normalize features to mean 0, variance 1 at the end (default)."))

    parser._add_action(ActionNoYes('verbose', 'quiet', default=True,
        help="Print out info along the way (the default)."))

    parser.add_argument('load_file',
        help="Load features from this file (output of extract_features).")
    parser.add_argument('save_file', help="Save into this file.")

    args = parser.parse_args(args)
    load = args.load_file
    save = args.save_file
    # strip the paths so vars(args) can be passed straight to
    # process_image_features(**...)
    del args.load_file, args.save_file
    return args, load, save
def main():
    '''CLI entry point: load features, run the processing stack, save.'''
    args, load_file, save_file = parse_args()
    confirm_outfile(save_file)

    pr = partial(print, file=sys.stderr) if args.verbose else _do_nothing

    pr("Loading features from '{}'...".format(load_file))
    kwargs = {'load_attrs': True, 'features_dtype': np.float32}
    # a directory means per-bag storage; a file means a single hdf5
    if os.path.isdir(load_file):
        orig, attrs = Features.load_from_perbag(load_file, **kwargs)
    else:
        orig, attrs = Features.load_from_hdf5(load_file, **kwargs)

    new, pca, scaler = process_image_features(
        orig, ret_pca=True, ret_scaler=True, **vars(args))

    # persist the fitted transforms so they can be applied to other data
    if pca is not None:
        attrs['pca_mean'] = pca.mean_
        attrs['pca_components'] = pca.components_
    if scaler is not None:
        attrs['scaler_mean'] = scaler.mean_
        # NOTE(review): `std_` is the pre-0.17 scikit-learn attribute name
        # (newer versions use `scale_`) — confirm the pinned sklearn version.
        attrs['scaler_std'] = scaler.std_

    pr("Saving features to '{}'...".format(save_file))
    new.save_as_hdf5(save_file, process_args=repr(vars(args)), **attrs)


if __name__ == '__main__':
    main()
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for oss_fuzz_build_status."""
import datetime
import json
import unittest
import flask
import mock
import six
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.cron import oss_fuzz_build_status
from libs.issue_management import monorail
from libs.issue_management.monorail.issue import Issue
class MockResponse(object):
  """Fake of the response object returned by requests.get()."""

  def __init__(self, text):
    # Canned response body handed back to the code under test.
    self.text = text

  def raise_for_status(self):
    """No-op: canned responses always count as successful."""
class IssueTrackerManager(object):
  """Mock issue tracker manager."""

  def __init__(self, project_name):
    # Issues are kept in-memory, keyed by their integer id.
    self.project_name = project_name
    self.issues = {}
    self.next_id = 1

  def get_issue(self, issue_id):
    """Return the stored issue, attaching this manager to it."""
    found = self.issues[issue_id]
    found.itm = self
    return found

  def save(self, issue, *args, **kwargs):  # pylint: disable=unused-argument
    """Store an issue, assigning the next sequential id to new ones."""
    if issue.new:
      issue.id = self.next_id
      self.next_id += 1
    self.issues[issue.id] = issue
@test_utils.with_cloud_emulators('datastore')
class OssFuzzBuildStatusTest(unittest.TestCase):
  """Tests for oss_fuzz_build_status."""

  def setUp(self):
    # Route the cron handler under test through a throwaway Flask app.
    flaskapp = flask.Flask('testflask')
    flaskapp.add_url_rule(
        '/build-status',
        view_func=oss_fuzz_build_status.Handler.as_view('/build-status'))
    self.app = webtest.TestApp(flaskapp)
    # Patch time, cron auth, the issue tracker, error logging and HTTP
    # fetches so the handler runs hermetically.
    test_helpers.patch(self, [
        'clusterfuzz._internal.base.utils.utcnow',
        'handlers.base_handler.Handler.is_cron',
        'libs.issue_management.issue_tracker_utils.get_issue_tracker',
        'clusterfuzz._internal.metrics.logs.log_error',
        'requests.get',
    ])
    # All tests run with "now" fixed at 2018-02-01.
    self.mock.utcnow.return_value = datetime.datetime(2018, 2, 1)
    self.mock.is_cron.return_value = True
    self.itm = IssueTrackerManager('oss-fuzz')
    self.mock.get_issue_tracker.return_value = monorail.IssueTracker(self.itm)
    self.maxDiff = None  # pylint: disable=invalid-name

  def test_no_build_failures(self):
    """Test run with no build failures."""
    # Return the same status for all build types.
    self.mock.get.return_value = MockResponse(
        json.dumps({
            'projects': [
                {
                    'history': [{
                        'finish_time': '2018-02-01T00:00:00.000000Z',
                        'build_id': 'proj0-id',
                        'success': True
                    }],
                    'name':
                        'proj0',
                },
                {
                    'history': [{
                        'finish_time': '2018-02-01T00:00:00.000000Z',
                        'build_id': 'proj1-id',
                        'success': True
                    }],
                    'name':
                        'proj1',
                },
            ]
        }))
    self.app.get('/build-status')
    # Nothing should be recorded or filed when every build succeeds.
    self.assertEqual(0, data_types.OssFuzzBuildFailure.query().count())
    self.assertEqual(0, len(self.itm.issues))

  def test_build_failures(self):
    """Test run with multiple build failures of different type."""

    def _mock_requests_get(url):
      """Mock requests.get."""
      if url == oss_fuzz_build_status.FUZZING_STATUS_URL:
        return MockResponse(
            json.dumps({
                'projects': [
                    # Both fuzzing and coverage build types are successful.
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            'build_id': 'proj0-id-f',
                            'success': True
                        }],
                        'name':
                            'proj0',
                    },
                    # Only coverage build type is broken for a while.
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            'build_id': 'proj5-id-f',
                            'success': True
                        }],
                        'name':
                            'proj5',
                    },
                    # Only coverage build type broken.
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            'build_id': 'proj6-id-f',
                            'success': True
                        }],
                        'name':
                            'proj6',
                    },
                    # New failure (first 1).
                    {
                        'history': [{
                            # NOTE(review): nine-digit fractional seconds
                            # (other entries use six) — confirm intentional.
                            'finish_time': '2018-02-01T00:00:00.000000000Z',
                            'build_id': 'proj1-id-f',
                            'success': False
                        }],
                        'name':
                            'proj1',
                    },
                    # Seen failure (second consecutive).
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            'build_id': 'proj2-id-f',
                            'success': False
                        }],
                        'name':
                            'proj2',
                    },
                    # Seen failure (not updated).
                    {
                        'history': [{
                            'finish_time': '2018-01-31T00:00:00.000000Z',
                            'build_id': 'proj3-id-f',
                            'success': False
                        }],
                        'name':
                            'proj3',
                    },
                    # Seen failure (third consecutive, bug already filed).
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            'build_id': 'proj4-id-f',
                            'success': False
                        }],
                        'name':
                            'proj4',
                    },
                ]
            }))
      # Only two status URLs exist; anything else is a test error.
      assert url == oss_fuzz_build_status.COVERAGE_STATUS_URL
      return MockResponse(
          json.dumps({
              'projects': [
                  # Both fuzzing and coverage build types are successful.
                  {
                      'history': [{
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'build_id': 'proj0-id-c',
                          'success': True
                      }],
                      'name':
                          'proj0',
                  },
                  # New failure (first 1).
                  {
                      'history': [{
                          # NOTE(review): nine-digit fractional seconds here
                          # as well — confirm intentional.
                          'finish_time': '2018-02-01T00:00:00.000000000Z',
                          'build_id': 'proj1-id-c',
                          'success': False
                      }],
                      'name':
                          'proj1',
                  },
                  # Seen failure (second consecutive).
                  {
                      'history': [{
                          # NOTE(review): this entry has a stray 'name' key
                          # and no 'build_id', unlike every other entry —
                          # confirm this shape is intentional.
                          'name': 'proj2',
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'success': False
                      }],
                      'name':
                          'proj2',
                  },
                  # Seen failure (not updated).
                  {
                      'history': [{
                          'finish_time': '2018-01-31T00:00:00.000000Z',
                          'build_id': 'proj3-id-c',
                          'success': False
                      }],
                      'name':
                          'proj3',
                  },
                  # Seen failure (third consecutive, bug already filed).
                  {
                      'history': [{
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'build_id': 'proj4-id-c',
                          'success': False
                      }],
                      'name':
                          'proj4',
                  },
                  # Coverage build type is broken for a while.
                  {
                      'history': [{
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'build_id': 'proj5-id-c',
                          'success': False
                      }],
                      'name':
                          'proj5',
                  },
                  # Only coverage build type broken (second consecutive).
                  {
                      'history': [{
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'build_id': 'proj6-id-c',
                          'success': False
                      }],
                      'name':
                          'proj6',
                  },
              ]
          }))

    self.mock.get.side_effect = _mock_requests_get
    # Pre-existing failure state in the datastore before the cron run.
    data_types.OssFuzzBuildFailure(
        id='proj2',
        project_name='proj2',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        consecutive_failures=2,
        build_type='fuzzing').put()
    data_types.OssFuzzBuildFailure(
        id='proj3',
        project_name='proj3',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        consecutive_failures=1,
        build_type='fuzzing').put()
    data_types.OssFuzzBuildFailure(
        id='proj4',
        project_name='proj4',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id='1337',
        consecutive_failures=3,
        build_type='fuzzing').put()
    data_types.OssFuzzBuildFailure(
        id='proj5-coverage',
        project_name='proj5',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id='31337',
        consecutive_failures=5,
        build_type='coverage').put()
    data_types.OssFuzzBuildFailure(
        id='proj6-coverage',
        project_name='proj6',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id=None,
        consecutive_failures=2,
        build_type='coverage').put()
    data_types.OssFuzzProject(
        id='proj2', name='proj2', ccs=['a@user.com']).put()
    # NOTE(review): id 'proj6' but name 'proj7' — looks like a copy-paste
    # slip; confirm whether the mismatch is deliberate.
    data_types.OssFuzzProject(
        id='proj6', name='proj7', ccs=['b@user.com']).put()
    self.app.get('/build-status')
    six.assertCountEqual(self, [
        {
            'build_type': 'fuzzing',
            'consecutive_failures': 1,
            'issue_id': None,
            'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
            'project_name': u'proj1'
        },
        {
            'build_type': 'fuzzing',
            'consecutive_failures': 3,
            'issue_id': '1',
            'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
            'project_name': u'proj2'
        },
        {
            'build_type': 'fuzzing',
            'consecutive_failures': 1,
            'issue_id': None,
            'last_checked_timestamp': datetime.datetime(2018, 1, 31, 0, 0),
            'project_name': u'proj3'
        },
        {
            'build_type': 'fuzzing',
            'consecutive_failures': 4,
            'issue_id': '1337',
            'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
            'project_name': u'proj4'
        },
        {
            'build_type': 'coverage',
            'consecutive_failures': 6,
            'issue_id': '31337',
            'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
            'project_name': u'proj5'
        },
        {
            'build_type': 'coverage',
            'consecutive_failures': 3,
            'issue_id': '2',
            'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
            'project_name': u'proj6'
        },
    ], [
        failure.to_dict() for failure in data_types.OssFuzzBuildFailure.query()
    ])
    self.assertEqual(2, len(self.itm.issues))
    # Issue 1: filed for proj2 after its third consecutive fuzzing failure.
    issue = self.itm.issues[1]
    six.assertCountEqual(self, ['a@user.com'], issue.cc)
    self.assertEqual('New', issue.status)
    self.assertEqual('proj2: Fuzzing build failure', issue.summary)
    self.assertEqual(
        'The last 3 builds for proj2 have been failing.\n'
        '<b>Build log:</b> '
        'https://oss-fuzz-build-logs.storage.googleapis.com/'
        'log-proj2-id-f.txt\n'
        'Build type: fuzzing\n\n'
        'To reproduce locally, please see: '
        'https://google.github.io/oss-fuzz/advanced-topics/reproducing'
        '#reproducing-build-failures\n\n'
        '<b>This bug tracker is not being monitored by OSS-Fuzz team.</b> '
        'If you have any questions, please create an issue at '
        'https://github.com/google/oss-fuzz/issues/new.\n\n'
        '**This bug will be automatically closed within a '
        'day once it is fixed.**', issue.body)
    self.assertTrue(issue.has_label('Proj-proj2'))
    self.assertTrue(issue.has_label('Type-Build-Failure'))
    # Issue 2: filed for proj6 after its third consecutive coverage failure.
    issue = self.itm.issues[2]
    six.assertCountEqual(self, ['b@user.com'], issue.cc)
    self.assertEqual('New', issue.status)
    self.assertEqual('proj6: Coverage build failure', issue.summary)
    self.assertEqual(
        'The last 3 builds for proj6 have been failing.\n'
        '<b>Build log:</b> '
        'https://oss-fuzz-build-logs.storage.googleapis.com/'
        'log-proj6-id-c.txt\n'
        'Build type: coverage\n\n'
        'To reproduce locally, please see: '
        'https://google.github.io/oss-fuzz/advanced-topics/reproducing'
        '#reproducing-build-failures\n\n'
        '<b>This bug tracker is not being monitored by OSS-Fuzz team.</b> '
        'If you have any questions, please create an issue at '
        'https://github.com/google/oss-fuzz/issues/new.\n\n'
        '**This bug will be automatically closed within a '
        'day once it is fixed.**', issue.body)
    self.assertTrue(issue.has_label('Proj-proj6'))
    self.assertTrue(issue.has_label('Type-Build-Failure'))

  def test_recovered_build_failure(self):
    """Test fixed build failures."""
    # Use the same status for all build types.
    self.mock.get.return_value = MockResponse(
        json.dumps({
            'projects': [{
                'history': [{
                    'finish_time': '2018-02-01T00:00:00.000000Z',
                    'build_id': 'proj0-id',
                    'success': True
                }],
                'name':
                    'proj0',
            }]
        }))
    data_types.OssFuzzBuildFailure(
        id='proj0',
        project_name='proj0',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id='1',
        consecutive_failures=2,
        build_type='fuzzing').put()
    issue = Issue()
    issue.open = True
    issue.add_label('Type-Build-Failure')
    issue.add_label('Proj-proj2')
    issue.summary = 'Build failure in proj2'
    issue.body = 'Build failure'
    self.itm.issues[1] = issue
    self.app.get('/build-status')
    # The recorded failure is cleared and the filed issue auto-verified.
    self.assertEqual(0, data_types.OssFuzzBuildFailure.query().count())
    issue = self.itm.issues[1]
    self.assertEqual('Verified', issue.status)
    self.assertEqual('The latest build has succeeded, closing this issue.',
                     issue.comment)

  def test_missing_builds(self):
    """Test missing builds."""

    def _mock_requests_get(url):
      """Mock fetch."""
      if url == oss_fuzz_build_status.FUZZING_STATUS_URL:
        return MockResponse(
            json.dumps({
                'projects': [
                    {
                        'history': [{
                            'finish_time': '2018-01-30T00:00:00.000000Z',
                            'build_id': 'proj0-id-f',
                            'success': True
                        }],
                        'name':
                            'proj0',
                    },
                    {
                        'history': [{
                            'finish_time': '2018-02-01T00:00:00.000000Z',
                            # NOTE(review): reuses proj0's build id; harmless
                            # here, but looks like a copy-paste slip.
                            'build_id': 'proj0-id-f',
                            'success': True
                        }],
                        'name':
                            'proj1',
                    },
                ]
            }))
      assert url == oss_fuzz_build_status.COVERAGE_STATUS_URL
      return MockResponse(
          json.dumps({
              'projects': [
                  {
                      'history': [{
                          'finish_time': '2018-02-01T00:00:00.000000Z',
                          'build_id': 'proj0-id-c',
                          'success': True
                      }],
                      'name':
                          'proj0',
                  },
                  {
                      'history': [{
                          'finish_time': '2018-01-30T00:00:00.000000Z',
                          'build_id': 'proj1-id-c',
                          'success': True
                      }],
                      'name':
                          'proj1',
                  },
              ]
          }))

    self.mock.get.side_effect = _mock_requests_get
    self.app.get('/build-status')
    # Each project is stale (last build 2018-01-30, "now" is 2018-02-01) in
    # exactly one build config, and an error is logged for each.
    self.mock.log_error.assert_has_calls([
        mock.call('proj0 has not been built in fuzzing config for 2 days.'),
        mock.call('proj1 has not been built in coverage config for 2 days.')
    ])

  def test_disabled_project(self):
    """Test disabled project."""
    # Return the same status for all build types.
    self.mock.get.return_value = MockResponse(
        json.dumps({
            'projects': [{
                'history': [{
                    'finish_time': '2018-02-01T00:00:00.000000Z',
                    'build_id': 'proj2-id',
                    'success': False
                }],
                'name':
                    'disabled_proj',
            },]
        }))
    # Only fuzzing build type failure should be stored.
    data_types.OssFuzzBuildFailure(
        id='disabled_proj',
        project_name='disabled_proj',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        consecutive_failures=2,
        build_type='fuzzing').put()
    self.app.get('/build-status')
    # No OssFuzzProject entity is created for this project, so the stored
    # failure is left exactly as seeded and no issue is filed.
    six.assertCountEqual(self, [
        {
            'build_type': 'fuzzing',
            'consecutive_failures': 2,
            'issue_id': None,
            'last_checked_timestamp': datetime.datetime(2018, 1, 31, 0, 0),
            'project_name': u'disabled_proj',
        },
    ], [
        failure.to_dict() for failure in data_types.OssFuzzBuildFailure.query()
    ])
    self.assertEqual(0, len(self.itm.issues))

  def test_reminder(self):
    """Test reminders."""
    # Return the same status for all build types.
    self.mock.get.return_value = MockResponse(
        json.dumps({
            'projects': [
                {
                    'history': [{
                        'finish_time': '2018-02-01T00:00:00.000000Z',
                        'build_id': 'proj0-id',
                        'success': False
                    }],
                    'name':
                        'proj0',
                },
                {
                    'history': [{
                        'finish_time': '2018-02-01T00:00:00.000000Z',
                        'build_id': 'proj0-id',
                        'success': False
                    }],
                    'name':
                        'proj1',
                },
            ]
        }))
    data_types.OssFuzzProject(
        id='proj0', name='proj0', ccs=['a@user.com']).put()
    data_types.OssFuzzBuildFailure(
        id='proj0',
        project_name='proj0',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id='1',
        consecutive_failures=8,
        build_type='fuzzing').put()
    data_types.OssFuzzProject(
        id='proj1', name='proj1', ccs=['a@user.com']).put()
    data_types.OssFuzzBuildFailure(
        id='proj1',
        project_name='proj1',
        last_checked_timestamp=datetime.datetime(2018, 1, 31),
        issue_id='2',
        consecutive_failures=4,
        build_type='fuzzing').put()
    self.itm.issues[1] = Issue()
    self.itm.issues[2] = Issue()
    self.app.get('/build-status')
    # proj0 (8 consecutive failures) gets a reminder comment; proj1 (4)
    # does not.  NOTE(review): 'the the' mirrors the handler's message text
    # verbatim — if it is fixed, fix it in the handler first.
    self.assertEqual(
        'Friendly reminder that the the build is still failing.\n'
        'Please try to fix this failure to ensure that fuzzing remains '
        'productive.\n'
        'Latest build log: https://oss-fuzz-build-logs.storage.googleapis.com/'
        'log-proj0-id.txt\n', self.itm.issues[1].comment)
    self.assertEqual('', self.itm.issues[2].comment)
| |
# This is the gtk-dependent HTTPRequest module.
# For the pyjamas/javascript version, see platform/HTTPRequestPyJS.py
import pyjd
import pygwt
from __pyjamas__ import JS
if pyjd.is_desktop:
    # Desktop (pyjd) runtime exposes the embedding browser frame directly.
    from __pyjamas__ import get_main_frame
import sys # needed by __browser__/pyjamas/HTTPRequest.py
# Maps in-flight XMLHttpRequest objects to their user-supplied handler;
# entries are removed when a request completes (see onLoad and
# onReadyStateChange below).
handlers = {}
class XULrunnerHackCallback(object):
    """Defers an HTTPRequest.asyncImpl call until callback() is invoked.

    Stores the full argument list of an asyncImpl call so it can be replayed
    later (used as a workaround on XULrunner).
    """
    def __init__(self, htr, mode, user, pwd, url, postData=None, handler=None,
                 return_xml=False, content_type=None, headers=None):
        # Bug fix: the arguments were previously discarded (the body was
        # just `pass`), so callback() crashed with AttributeError on
        # self.htr, self.mode, etc.  Store everything callback() needs.
        self.htr = htr
        self.mode = mode
        self.user = user
        self.pwd = pwd
        self.url = url
        self.postData = postData
        self.handler = handler
        self.return_xml = return_xml
        self.content_type = content_type
        self.headers = headers

    def callback(self):
        # Replay the stored request.
        return self.htr.asyncImpl(self.mode, self.user, self.pwd, self.url,
                                  self.postData, self.handler, self.return_xml,
                                  self.content_type, self.headers)
class HTTPRequest(object):
    # Async HTTP helper for the desktop (pyjd) runtime.  The async* entry
    # points validate the handler and delegate to asyncImpl; `handler` must
    # provide onCompletion(text) and onError(text, status), and may provide
    # onProgress(event).  NOTE: Python 2 source (print statement,
    # basestring below).
    def asyncGet(self, url, handler, returnxml=False,
                 content_type=None, headers=None, user=None, pwd=None):
        # GET carries no request body.
        postData = None
        if not hasattr(handler, 'onCompletion'):
            raise RuntimeError("Invalid call to asyncGet: handler is not a valid request handler")
        return self.asyncImpl('GET', user, pwd, url, postData, handler,
                              returnxml, content_type, headers)
    def asyncPost(self, url, postData, handler, returnxml=False,
                  content_type=None, headers=None, user=None, pwd=None):
        if not hasattr(handler, 'onCompletion'):
            raise RuntimeError("Invalid call to asyncPost: handler is not a valid request handler")
        return self.asyncImpl('POST', user, pwd, url, postData, handler,
                              returnxml, content_type, headers)
    def asyncDelete(self, url, handler, returnxml=False,
                    content_type=None, headers=None, user=None, pwd=None):
        # DELETE carries no request body.
        postData = None
        if not hasattr(handler, 'onCompletion'):
            raise RuntimeError("Invalid call to asyncDelete: handler is not a valid request handler")
        return self.asyncImpl('DELETE', user, pwd, url, postData, handler,
                              returnxml, content_type, headers)
    def asyncPut(self, url, postData, handler, returnxml=False,
                 content_type=None, headers=None, user=None, pwd=None):
        if not hasattr(handler, 'onCompletion'):
            raise RuntimeError("Invalid call to asyncPut: handler is not a valid request handler")
        return self.asyncImpl('PUT', user, pwd, url, postData, handler,
                              returnxml, content_type, headers)
    def createXmlHTTPRequest(self):
        return self.doCreateXmlHTTPRequest()
    def doCreateXmlHTTPRequest(self):
        # Ask the embedding browser frame for a fresh XMLHttpRequest object.
        return get_main_frame().getXmlHttpRequest()
    def onProgress(self, sender, event, ignorearg):
        # Forward "progress" events to the handler, if it cares.
        xmlHttp = event.target
        localHandler = handlers.get(xmlHttp)
        if hasattr(localHandler, "onProgress"):
            localHandler.onProgress(event)
    def onLoad(self, sender, event, ignorearg):
        # "load" event path (platforms other than webkit/mshtml): look up
        # and deregister the handler for this request, then dispatch.
        xmlHttp = event.target
        localHandler = handlers.get(xmlHttp)
        del handlers[xmlHttp]
        responseText = xmlHttp.responseText
        status = xmlHttp.status
        # Drop local references before invoking the callback.
        handler = None
        xmlHttp = None
        # XXX HACK! webkit wrapper returns 0 not 200!
        if status == 0:
            print "HACK ALERT! webkit wrapper returns 0 not 200!"
        if status == 200 or status == 0:
            localHandler.onCompletion(responseText)
        else :
            localHandler.onError(responseText, status)
    def onReadyStateChange(self, xmlHttp, event, ignorearg):
        # "readystatechange" event path (webkit/mshtml): wait for state 4
        # (DONE), then dispatch like onLoad.
        try:
            xmlHttp = get_main_frame().gobject_wrap(xmlHttp) # HACK!
        except:
            pass # hula / XUL
        #print xmlHttp.readyState
        if xmlHttp.readyState != 4:
            return
        # TODO - delete xmlHttp.onreadystatechange
        localHandler = handlers.get(xmlHttp)
        del handlers[xmlHttp]
        responseText = xmlHttp.responseText
        #print "headers", xmlHttp.getAllResponseHeaders()
        status = xmlHttp.status
        handler = None
        xmlHttp = None
        #print "status", status
        #print "local handler", localHandler
        # XXX HACK! webkit wrapper returns 0 not 200!
        #if status == 0:
        #    print "HACK ALERT! webkit wrapper returns 0 not 200!"
        if status == 200 or status == 0:
            localHandler.onCompletion(responseText)
        else :
            localHandler.onError(responseText, status)
    def _convertUrlToAbsolute(self, url):
        # Resolve `url` against the module base URL: host-absolute paths
        # ("/x") keep the scheme+host; scheme-less relative paths are
        # appended to the base directory; full URLs pass through unchanged.
        uri = pygwt.getModuleBaseURL()
        if url[0] == '/':
            # url is /somewhere.
            sep = uri.find('://')
            if not uri.startswith('file://'):
                slash = uri.find('/', sep+3)
                if slash > 0:
                    uri = uri[:slash]
            return "%s%s" % (uri, url)
        else:
            if url[:7] != 'file://' and url[:7] != 'http://' and \
               url[:8] != 'https://':
                slash = uri.rfind('/')
                return uri[:slash+1] + url
        return url
    def asyncImpl(self, method, user, pwd, url, postData, handler,
                  returnxml=False, content_type=None, headers=None):
        # Core implementation: builds headers, creates the platform request
        # object, wires event listeners, and sends.  Returns the request
        # object on success; errors are reported via handler.onError.
        if headers is None:
            headers = {}
        if user and pwd and not "Authorization" in headers:
            import base64
            headers["Authorization"] = 'Basic %s' % (base64.b64encode('%s:%s' % (user, pwd)))
        mf = get_main_frame()
        if content_type is not None:
            headers["Content-Type"] = content_type
        if not "Content-Type" in headers:
            if returnxml:
                headers["Content-Type"] = "application/xml; charset=utf-8"
            else:
                headers["Content-Type"] = "text/plain; charset=utf-8"
        #for c in Cookies.get_crumbs():
        #    xmlHttp.setRequestHeader("Set-Cookie", c)
        #    print "setting cookie", c
        xmlHttp = self.doCreateXmlHTTPRequest()
        url = self._convertUrlToAbsolute(url)
        #print "xmlHttp", method, user, pwd, url, postData, handler, dir(xmlHttp)
        # Event wiring differs per platform: webkit/mshtml use
        # readystatechange, everything else uses the "load" event.
        if mf.platform == 'webkit':
            mf._addXMLHttpRequestEventListener(
                xmlHttp, "readystatechange", self.onReadyStateChange,
                )
        elif mf.platform == 'mshtml':
            mf._addXMLHttpRequestEventListener(
                xmlHttp, "onreadystatechange", self.onReadyStateChange,
                )
        else:
            mf._addXMLHttpRequestEventListener(
                xmlHttp, "load", self.onLoad,
                )
        if mf.platform != 'mshtml' and mf.platform != 'ie6':
            mf._addXMLHttpRequestEventListener(
                xmlHttp, "progress", self.onProgress,
                )
        #try :
        if mf.platform == 'webkit' or mf.platform == 'mshtml':
            xmlHttp.open(method, url, True, '', '')
        else:
            # EEK!  xmlhttprequest.open in xpcom is a miserable bastard.
            try:
                res = xmlHttp.open(method, url, True, '', '')
            except:
                res = xmlHttp.open(method, url)
        #print url, res
        # Multi-valued headers are joined with ';'.
        for h in headers:
            if isinstance(headers[h], basestring):
                xmlHttp.setRequestHeader(h, headers[h])
            else:
                hval = ';'.join([str(i) for i in headers[h]])
                xmlHttp.setRequestHeader(h, hval)
        #if not "Set-Cookie" in headers:
        #    headers["Set-Cookie"] = []
        #for c in Cookies.get_crumbs():
        #    headers["Set-Cookie"].append(c)
        #    print "setting cookie", c
        # Register the handler before sending so events can find it.
        handlers[xmlHttp] = handler
        try:
            xmlHttp.send(postData or '')
        except:
            handler.onError("xmlHttp.send error", -1)
        return xmlHttp
        #except:
            #del xmlHttp.onreadystatechange
        # NOTE(review): everything below is unreachable (it follows the
        # `return` above) and references undefined names (`localHandler`,
        # `e`) — leftover from the commented-out try/except above; candidate
        # for deletion.
        handler = None
        xmlHttp = None
        localHandler.onError(str(e))
        return None
| |
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import inspect
import sys
import zlib
class ObjectDict(dict):
    """Makes a dictionary behave like an object, with attribute-style access.
    """
    def __getattr__(self, name):
        # Missing keys must surface as AttributeError so that getattr()
        # and hasattr() behave normally.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Every attribute write becomes an item write.
        self[name] = value
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
        # Magic parameter makes zlib module understand gzip header
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        # This works on cpython and pypy, but not jython.
        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def decompress(self, value, max_length=None):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.

        If ``max_length`` is given, some input data may be left over
        in ``unconsumed_tail``; you must retrieve this value and pass
        it back to a future call to `decompress` if it is not empty.
        """
        # Bug fix: zlib's decompress() requires an int for max_length, so
        # the default of None cannot be passed straight through (it raised
        # TypeError on every no-limit call).
        if max_length is None:
            return self.decompressobj.decompress(value)
        return self.decompressobj.decompress(value, max_length)

    @property
    def unconsumed_tail(self):
        """Returns the unconsumed portion left over
        """
        return self.decompressobj.unconsumed_tail

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.

        Also checks for errors such as truncated input.
        No other methods may be called on this object after `flush`.
        """
        return self.decompressobj.flush()
def import_object(name):
    """Imports an object by name.

    import_object('x') is equivalent to 'import x'.
    import_object('x.y.z') is equivalent to 'from x.y import z'.

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    >>> import_object('tornado') is tornado
    True
    >>> import_object('tornado.missing_module')
    Traceback (most recent call last):
        ...
    ImportError: No module named missing_module
    """
    if '.' not in name:
        # Bare module name: a plain import, returning the module itself.
        return __import__(name, None, None)
    module_name, _, attr_name = name.rpartition('.')
    # The fromlist argument forces __import__ to return the leaf module
    # rather than the top-level package.
    module = __import__(module_name, None, None, [attr_name], 0)
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise ImportError("No module named %s" % attr_name)
# Fake unicode literal support:  Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414).  u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if str is bytes:
    # Python 2: decode escape sequences so the result is a unicode object.
    def u(s):
        return s.decode('unicode_escape')
    bytes_type = str
    unicode_type = unicode
    basestring_type = basestring
else:
    # Python 3: string literals are already unicode; u() is the identity.
    def u(s):
        return s
    bytes_type = bytes
    unicode_type = str
    basestring_type = str
# raise_exc_info and exec_in use syntax that is illegal on the *other* major
# Python version, so each variant is compiled from a string at import time.
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])

def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    # Python 2: three-argument raise and the exec statement.
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]

def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
def errno_from_exception(e):
    """Provides the errno from an Exception object.

    There are cases that the errno attribute was not set so we pull
    the errno out of the args but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
    """
    # A sentinel distinguishes "no errno attribute" from "errno is None":
    # an existing errno attribute is returned as-is, even if it is None.
    _missing = object()
    errno = getattr(e, 'errno', _missing)
    if errno is not _missing:
        return errno
    if e.args:
        return e.args[0]
    return None
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.

    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc.  This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # Name-mangled to _Configurable__impl_class/__impl_kwargs; both are
    # always stored on the class returned by configurable_base() so the
    # configuration is shared across the whole hierarchy.
    __impl_class = None
    __impl_kwargs = None
    def __new__(cls, **kwargs):
        base = cls.configurable_base()
        args = {}
        if cls is base:
            # Instantiating the base class: substitute the configured (or
            # default) implementation and its saved keyword arguments.
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            # Instantiating a concrete subclass directly bypasses the
            # configuration machinery.
            impl = cls
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic.  If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance
    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()
    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()
    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """
    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor.  This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        if isinstance(impl, (unicode_type, bytes_type)):
            # Allow the implementation to be named by dotted string.
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs
    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        # Lazily fall back to the default implementation on first use.
        if cls.__impl_class is None:
            base.__impl_class = cls.configurable_default()
        return base.__impl_class
    @classmethod
    def _save_configuration(cls):
        # For tests: snapshot the current (impl, kwargs) configuration.
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)
    @classmethod
    def _restore_configuration(cls, saved):
        # For tests: restore a snapshot taken by _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
class ArgReplacer(object):
    """Replaces one value in an ``args, kwargs`` pair.

    Inspects the function signature to find an argument by name
    whether it is passed by position or keyword.  For use in decorators
    and similar wrappers.
    """
    def __init__(self, func, name):
        self.name = name
        try:
            # Bug fix: inspect.getargspec was removed in Python 3.11 (and
            # rejects functions with keyword-only arguments before that).
            # Prefer getfullargspec (Python 3.3+) and fall back for old
            # interpreters that lack it.
            try:
                arg_names = inspect.getfullargspec(func).args
            except AttributeError:
                arg_names = inspect.getargspec(func).args
            self.arg_pos = arg_names.index(self.name)
        except ValueError:
            # Not a positional parameter
            self.arg_pos = None

    def get_old_value(self, args, kwargs, default=None):
        """Returns the old value of the named argument without replacing it.

        Returns ``default`` if the argument is not present.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            return args[self.arg_pos]
        else:
            return kwargs.get(self.name, default)

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.

        Returns ``(old_value, args, kwargs)``.  The returned ``args`` and
        ``kwargs`` objects may not be the same as the input objects, or
        the input objects may be mutated.

        If the named argument was not found, ``new_value`` will be added
        to ``kwargs`` and None will be returned as ``old_value``.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            # The arg to replace is passed positionally
            old_value = args[self.arg_pos]
            args = list(args)  # *args is normally a tuple
            args[self.arg_pos] = new_value
        else:
            # The arg to replace is either omitted or passed by keyword.
            old_value = kwargs.get(self.name)
            kwargs[self.name] = new_value
        return old_value, args, kwargs
def doctests():
    """Return a unittest suite that runs this module's doctests."""
    import doctest
    return doctest.DocTestSuite()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow estimators for Linear and DNN joined training models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import training_util
# The default learning rates are a historical artifact of the initial
# implementation, but seem a reasonable choice.
_DNN_LEARNING_RATE = 0.05
_LINEAR_LEARNING_RATE = 0.2
# Cutoff date and user-facing message used by the `deprecated_arg_values`
# decorators in this module for the fix_global_step_increment_bug argument.
_FIX_GLOBAL_STEP_INCREMENT_DATE = "2017-04-15"
_FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS = (
    "Please set fix_global_step_increment_bug=True and update training steps "
    "in your pipeline. See pydoc for details.")
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _linear_learning_rate(num_linear_feature_columns):
  """Returns the default learning rate of the linear model.

  The calculation is a historical artifact of this initial implementation, but
  has proven a reasonable choice.

  Args:
    num_linear_feature_columns: The number of feature columns of the linear
      model.

  Returns:
    A float.
  """
  # 1/sqrt(n), capped at the module-wide default linear learning rate.
  return min(_LINEAR_LEARNING_RATE,
             1. / math.sqrt(num_linear_feature_columns))
def _add_hidden_layer_summary(value, tag):
  # Summarize a hidden layer's sparsity (fraction of zero activations) and
  # its activation distribution under the given tag.
  logging_ops.scalar_summary("%s/fraction_of_zero_values" % tag,
                             nn.zero_fraction(value))
  logging_ops.histogram_summary("%s/activation" % tag, value)
def _get_embedding_variable(column, collection_key, input_layer_scope):
  # Looks up the variable(s) created for `column` in the given graph
  # collection, scoped by name under the input layer.
  return ops.get_collection(collection_key,
                            input_layer_scope + "/" + column.name)
def _extract_embedding_lr_multipliers(embedding_lr_multipliers, collection_key,
input_layer_scope):
"""Converts embedding lr multipliers to variable based gradient multiplier."""
if not embedding_lr_multipliers:
return None
gradient_multipliers = {}
for column, lr_mult in embedding_lr_multipliers.items():
if not isinstance(column, feature_column_lib._EmbeddingColumn): # pylint: disable=protected-access
raise ValueError(
"learning rate multipler can only be defined for embedding columns. "
"It is defined for {}".format(column))
embedding = _get_embedding_variable(
column, collection_key, input_layer_scope)
if not embedding:
raise ValueError("Couldn't find a variable for column {}".format(column))
for v in embedding:
gradient_multipliers[v] = lr_mult
return gradient_multipliers
def _dnn_linear_combined_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net and Linear combined model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * linear_feature_columns: An iterable containing all the feature columns
          used by the Linear model.
      * linear_optimizer: string, `Optimizer` object, or callable that defines
          the optimizer to use for training the Linear model. Defaults to the
          Ftrl optimizer.
      * joint_linear_weights: If True a single (possibly partitioned) variable
          will be used to store the linear model weights. It's faster, but
          requires all columns are sparse and have the 'sum' combiner.
      * dnn_feature_columns: An iterable containing all the feature columns used
          by the DNN model.
      * dnn_optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training the DNN model. Defaults to the Adagrad
          optimizer.
      * dnn_hidden_units: List of hidden units per DNN layer.
      * dnn_activation_fn: Activation function applied to each DNN layer. If
          `None`, will use `tf.nn.relu`.
      * dnn_dropout: When not `None`, the probability we will drop out a given
          DNN coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
      * input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    `ModelFnOps`

  Raises:
    ValueError: If both `linear_feature_columns` and `dnn_features_columns`
      are empty at the same time.
  """
  head = params["head"]
  linear_feature_columns = params.get("linear_feature_columns")
  linear_optimizer = params.get("linear_optimizer") or "Ftrl"
  joint_linear_weights = params.get("joint_linear_weights")
  dnn_feature_columns = params.get("dnn_feature_columns")
  dnn_optimizer = params.get("dnn_optimizer") or "Adagrad"
  dnn_hidden_units = params.get("dnn_hidden_units")
  dnn_activation_fn = params.get("dnn_activation_fn") or nn.relu
  dnn_dropout = params.get("dnn_dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  # 64 << 20 == 64MB, the documented default minimum slice size for
  # partitioning the input-layer variables.
  input_layer_min_slice_size = (
      params.get("input_layer_min_slice_size") or 64 << 20)
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
  # NOTE: defaults to True here, while the public estimator constructors in
  # this file default the flag to False for backwards compatibility.
  fix_global_step_increment_bug = params.get(
      "fix_global_step_increment_bug", True)

  if not linear_feature_columns and not dnn_feature_columns:
    raise ValueError(
        "Either linear_feature_columns or dnn_feature_columns must be defined.")

  features = _get_feature_dict(features)

  # Build DNN Logits.
  dnn_parent_scope = "dnn"

  if not dnn_feature_columns:
    dnn_logits = None
  else:
    if not dnn_hidden_units:
      raise ValueError(
          "dnn_hidden_units must be defined when dnn_feature_columns is "
          "specified.")
    dnn_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    with variable_scope.variable_scope(
        dnn_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner):
      # The input layer gets its own (typically coarser) partitioner because
      # embedding variables can be much larger than the hidden-layer weights.
      input_layer_partitioner = (
          partitioned_variables.min_max_variable_partitioner(
              max_partitions=num_ps_replicas,
              min_slice_size=input_layer_min_slice_size))
      with variable_scope.variable_scope(
          "input_from_feature_columns",
          values=tuple(six.itervalues(features)),
          partitioner=input_layer_partitioner) as dnn_input_scope:
        net = layers.input_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=dnn_feature_columns,
            weight_collections=[dnn_parent_scope],
            scope=dnn_input_scope)
      # Stack the fully connected hidden layers; `net` is threaded through.
      for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
        with variable_scope.variable_scope(
            "hiddenlayer_%d" % layer_id,
            values=(net,)) as dnn_hidden_layer_scope:
          net = layers.fully_connected(
              net,
              num_hidden_units,
              activation_fn=dnn_activation_fn,
              variables_collections=[dnn_parent_scope],
              scope=dnn_hidden_layer_scope)
          # Dropout is applied only during training.
          if dnn_dropout is not None and mode == model_fn.ModeKeys.TRAIN:
            net = layers.dropout(
                net,
                keep_prob=(1.0 - dnn_dropout))
        # TODO(b/31209633): Consider adding summary before dropout.
        _add_hidden_layer_summary(net, dnn_hidden_layer_scope.name)
      # Final linear projection to the head's logits dimension (no
      # activation; the head applies the loss/inference transform).
      with variable_scope.variable_scope(
          "logits",
          values=(net,)) as dnn_logits_scope:
        dnn_logits = layers.fully_connected(
            net,
            head.logits_dimension,
            activation_fn=None,
            variables_collections=[dnn_parent_scope],
            scope=dnn_logits_scope)
      _add_hidden_layer_summary(dnn_logits, dnn_logits_scope.name)

  # Build Linear logits.
  linear_parent_scope = "linear"

  if not linear_feature_columns:
    linear_logits = None
  else:
    linear_partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=num_ps_replicas,
        min_slice_size=64 << 20)
    with variable_scope.variable_scope(
        linear_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=linear_partitioner) as scope:
      # joint weights: one (possibly partitioned) variable for all columns;
      # requires sparse columns with the 'sum' combiner (see docstring).
      if joint_linear_weights:
        linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=linear_feature_columns,
            num_outputs=head.logits_dimension,
            weight_collections=[linear_parent_scope],
            scope=scope)
      else:
        linear_logits, _, _ = layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=linear_feature_columns,
            num_outputs=head.logits_dimension,
            weight_collections=[linear_parent_scope],
            scope=scope)

  # Combine logits and build full model.
  if dnn_logits is not None and linear_logits is not None:
    logits = dnn_logits + linear_logits
  elif dnn_logits is not None:
    logits = dnn_logits
  else:
    logits = linear_logits

  def _make_training_op(training_loss):
    """Training op for the DNN linear combined model."""
    train_ops = []
    global_step = training_util.get_global_step()
    # Each tower (deep and wide) gets its own optimizer over its own
    # variable collection but both minimize the same training loss.
    if dnn_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=global_step,
              learning_rate=_DNN_LEARNING_RATE,
              optimizer=_get_optimizer(dnn_optimizer),
              gradient_multipliers=_extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                  embedding_lr_multipliers, dnn_parent_scope,
                  dnn_input_scope.name),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(dnn_parent_scope),
              name=dnn_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[],
              increment_global_step=not fix_global_step_increment_bug))
    if linear_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=global_step,
              learning_rate=_linear_learning_rate(len(linear_feature_columns)),
              optimizer=_get_optimizer(linear_optimizer),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(linear_parent_scope),
              name=linear_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[],
              increment_global_step=not fix_global_step_increment_bug))

    train_op = control_flow_ops.group(*train_ops)
    if fix_global_step_increment_bug:
      # With the bug fixed, neither optimize_loss call increments the global
      # step (increment_global_step=False above), so increment it exactly
      # once here after both towers have applied their gradients.
      with ops.control_dependencies([train_op]):
        with ops.colocate_with(global_step):
          return state_ops.assign_add(global_step, 1).op
    return train_op

  return head.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,
      train_op_fn=_make_training_op,
      logits=logits)
class DNNLinearCombinedEstimator(estimator.Estimator):
  """An estimator for TensorFlow Linear and DNN joined training models.

  Note: New users must set `fix_global_step_increment_bug=True` when creating an
  estimator.

  Input of `fit`, `train`, and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
    if `weight_column_name` is not `None`, a feature with
      `key=weight_column_name` whose value is a `Tensor`.
    for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  @deprecated_arg_values(
      _FIX_GLOBAL_STEP_INCREMENT_DATE,
      _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS,
      fix_global_step_increment_bug=False)
  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               head,
               model_dir=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=None,
               dnn_dropout=None,
               gradient_clip_norm=None,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               fix_global_step_increment_bug=False):
    """Initializes a DNNLinearCombinedEstimator instance.

    Note: New users must set `fix_global_step_increment_bug=True` when creating
    an estimator.

    Args:
      head: A _Head object.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set should be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True will use a single (possibly partitioned)
        variable to store all weights for the linear model. More efficient if
        there are many columns, however requires all columns are sparse and
        have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set should be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with
        learning rate for the embedding variables.
      fix_global_step_increment_bug: If `False`, the estimator needs two fit
        steps to optimize both linear and dnn parts. If `True`, this bug is
        fixed. New users must set this to `True`, but the default value is
        `False` for backwards compatibility.

    Raises:
      ValueError: If both linear_feature_columns and dnn_features_columns are
        empty at the same time.
    """
    # Normalize any iterable of feature columns to tuples before validating.
    linear_feature_columns = tuple(linear_feature_columns or [])
    dnn_feature_columns = tuple(dnn_feature_columns or [])
    # At least one tower (wide or deep) must be configured.
    if not linear_feature_columns + dnn_feature_columns:
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "must be defined.")
    # All hyperparameters are forwarded to _dnn_linear_combined_model_fn via
    # the `params` dict.
    super(DNNLinearCombinedEstimator, self).__init__(
        model_fn=_dnn_linear_combined_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            "linear_feature_columns": linear_feature_columns,
            "linear_optimizer": linear_optimizer,
            "joint_linear_weights": _joint_linear_weights,
            "dnn_feature_columns": dnn_feature_columns,
            "dnn_optimizer": dnn_optimizer,
            "dnn_hidden_units": dnn_hidden_units,
            "dnn_activation_fn": dnn_activation_fn,
            "dnn_dropout": dnn_dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "fix_global_step_increment_bug": fix_global_step_increment_bug,
        },
        feature_engineering_fn=feature_engineering_fn)
class DNNLinearCombinedClassifier(estimator.Estimator):
  """A classifier for TensorFlow Linear and DNN joined training models.

  Note: New users must set `fix_global_step_increment_bug=True` when creating an
  estimator.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)

  estimator = DNNLinearCombinedClassifier(
      # common settings
      n_classes=n_classes,
      weight_column_name=weight_column_name,
      # wide settings
      linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],
      linear_optimizer=tf.train.FtrlOptimizer(...),
      # deep settings
      dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      dnn_hidden_units=[1000, 500, 100],
      dnn_optimizer=tf.train.AdagradOptimizer(...))

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    ...

  def input_fn_eval: # returns x, y (where y represents label's class index).
    ...

  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
    if `weight_column_name` is not `None`, a feature with
      `key=weight_column_name` whose value is a `Tensor`.
    for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  @deprecated_arg_values(
      _FIX_GLOBAL_STEP_INCREMENT_DATE,
      _FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS,
      fix_global_step_increment_bug=False)
  def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               linear_feature_columns=None,
               linear_optimizer=None,
               _joint_linear_weights=False,
               dnn_feature_columns=None,
               dnn_optimizer=None,
               dnn_hidden_units=None,
               dnn_activation_fn=nn.relu,
               dnn_dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None,
               fix_global_step_increment_bug=False):
    """Constructs a DNNLinearCombinedClassifier instance.

    Note: New users must set `fix_global_step_increment_bug=True` when creating
    an estimator.

    Args:
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Note that class labels are integers representing the class index (i.e.
        values from 0 to n_classes-1). For arbitrary label values (e.g. string
        labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      _joint_linear_weights: If True a single (possibly partitioned) variable
        will be used to store the linear model weights. It's faster, but
        requires all columns are sparse and have the 'sum' combiner.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: RunConfig object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with
        learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
        partitions. If not provided, will use the default of 64M.
      fix_global_step_increment_bug: If `False`, the estimator needs two fit
        steps to optimize both linear and dnn parts. If `True`, this bug is
        fixed. New users must set this to `True`, but the default value is
        `False` for backwards compatibility.

    Raises:
      ValueError: If `n_classes` < 2.
      ValueError: If both `linear_feature_columns` and `dnn_features_columns`
        are empty at the same time.
    """
    if n_classes < 2:
      raise ValueError("n_classes should be greater than 1. Given: {}".format(
          n_classes))
    # Normalize any iterable of feature columns to tuples before validating.
    linear_feature_columns = tuple(linear_feature_columns or [])
    dnn_feature_columns = tuple(dnn_feature_columns or [])
    # Saved for export(): the default serving input_fn parses these columns.
    self._feature_columns = linear_feature_columns + dnn_feature_columns
    if not self._feature_columns:
      raise ValueError("Either linear_feature_columns or dnn_feature_columns "
                       "must be defined.")
    # The head encapsulates loss, metrics and prediction outputs for
    # multi-class classification.
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias)
    super(DNNLinearCombinedClassifier, self).__init__(
        model_fn=_dnn_linear_combined_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            "linear_feature_columns": linear_feature_columns,
            "linear_optimizer": linear_optimizer,
            "joint_linear_weights": _joint_linear_weights,
            "dnn_feature_columns": dnn_feature_columns,
            "dnn_optimizer": dnn_optimizer,
            "dnn_hidden_units": dnn_hidden_units,
            "dnn_activation_fn": dnn_activation_fn,
            "dnn_dropout": dnn_dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
            "fix_global_step_increment_bug": fix_global_step_increment_bug,
        },
        feature_engineering_fn=feature_engineering_fn)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_classes, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.

    By default, returns predicted classes. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_classes` method.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns classes.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
      If `outputs` is set, returns a dict of predictions.
    """
    # Preserve the deprecated behavior: with no explicit outputs, delegate to
    # predict_classes.
    if not outputs:
      return self.predict_classes(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(DNNLinearCombinedClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_classes(self, x=None, input_fn=None, batch_size=None,
                      as_iterable=True):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
    """
    # Request only the CLASSES output from the underlying estimator.
    key = prediction_key.PredictionKey.CLASSES
    preds = super(DNNLinearCombinedClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    # Flatten to a 1-D array of class indices.
    return preds[key].reshape(-1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns prediction probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities with shape [batch_size, n_classes]
      (or an iterable of predicted probabilities if as_iterable is True).
    """
    # Request only the PROBABILITIES output from the underlying estimator.
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = super(DNNLinearCombinedClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]

  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Default serving input_fn: parse tf.Examples into the same feature
    # columns the model was trained with.
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)

    return super(DNNLinearCombinedClassifier, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class DNNLinearCombinedRegressor(estimator.Estimator):
"""A regressor for TensorFlow Linear and DNN joined training models.
Note: New users must set `fix_global_step_increment_bug=True` when creating an
estimator.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNLinearCombinedRegressor(
# common settings
weight_column_name=weight_column_name,
# wide settings
linear_feature_columns=[sparse_feature_a_x_sparse_feature_b],
linear_optimizer=tf.train.FtrlOptimizer(...),
# deep settings
dnn_feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.ProximalAdagradOptimizer(...))
# To apply L1 and L2 regularization, you can set optimizers as follows:
tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.001)
# It is same for FtrlOptimizer.
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.train(input_fn_train)
estimator.evaluate(input_fn_eval)
estimator.predict(x)
```
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
@deprecated_arg_values(
_FIX_GLOBAL_STEP_INCREMENT_DATE,
_FIX_GLOBAL_STEP_INCREMENT_INSTRUCTIONS,
fix_global_step_increment_bug=False)
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
model_dir=None,
weight_column_name=None,
linear_feature_columns=None,
linear_optimizer=None,
_joint_linear_weights=False,
dnn_feature_columns=None,
dnn_optimizer=None,
dnn_hidden_units=None,
dnn_activation_fn=nn.relu,
dnn_dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
config=None,
feature_engineering_fn=None,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None,
fix_global_step_increment_bug=False):
"""Initializes a DNNLinearCombinedRegressor instance.
Note: New users must set `fix_global_step_increment_bug=True` when creating
an estimator.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
linear_feature_columns: An iterable containing all the feature columns
used by linear part of the model. All items in the set must be
instances of classes derived from `FeatureColumn`.
linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the linear part of the model. If `None`, will use a FTRL optimizer.
_joint_linear_weights: If True a single (possibly partitioned) variable
will be used to store the linear model weights. It's faster, but
requires that all columns are sparse and have the 'sum' combiner.
dnn_feature_columns: An iterable containing all the feature columns used
by deep part of the model. All items in the set must be instances of
classes derived from `FeatureColumn`.
dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
the deep part of the model. If `None`, will use an Adagrad optimizer.
dnn_hidden_units: List of hidden units per layer. All layers are fully
connected.
dnn_activation_fn: Activation function applied to each layer. If None,
will use `tf.nn.relu`.
dnn_dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
config: RunConfig object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and returns features and
labels which will be fed into the model.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
fix_global_step_increment_bug: If `False`, the estimator needs two fit
steps to optimize both linear and dnn parts. If `True`, this bug is
fixed. New users must set this to `True`, but it the default value is
`False` for backwards compatibility.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
empty at the same time.
"""
linear_feature_columns = linear_feature_columns or []
dnn_feature_columns = dnn_feature_columns or []
self._feature_columns = linear_feature_columns + dnn_feature_columns
if not self._feature_columns:
raise ValueError("Either linear_feature_columns or dnn_feature_columns "
"must be defined.")
head = head_lib.regression_head(
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
super(DNNLinearCombinedRegressor, self).__init__(
model_fn=_dnn_linear_combined_model_fn,
model_dir=model_dir,
config=config,
params={
"head": head,
"linear_feature_columns": linear_feature_columns,
"linear_optimizer": linear_optimizer,
"joint_linear_weights": _joint_linear_weights,
"dnn_feature_columns": dnn_feature_columns,
"dnn_optimizer": dnn_optimizer,
"dnn_hidden_units": dnn_hidden_units,
"dnn_activation_fn": dnn_activation_fn,
"dnn_dropout": dnn_dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
"fix_global_step_increment_bug": fix_global_step_increment_bug,
},
feature_engineering_fn=feature_engineering_fn)
def evaluate(self,
             x=None,
             y=None,
             input_fn=None,
             feed_fn=None,
             batch_size=None,
             steps=None,
             metrics=None,
             name=None,
             checkpoint_path=None,
             hooks=None):
  """See evaluable.Evaluable.

  Plain-string keys for metrics that are not `MetricSpec` instances are
  rewritten to `(key, SCORES)` tuples before delegating to the base class.
  """
  # TODO(zakaria): remove once deprecation is finished (b/31229024)
  adjusted_metrics = {}
  for key, metric in six.iteritems(metrics or {}):
    # Only re-key legacy metrics: non-MetricSpec values with a bare key.
    needs_rekey = (not isinstance(metric, metric_spec.MetricSpec) and
                   not isinstance(key, tuple))
    new_key = ((key, prediction_key.PredictionKey.SCORES)
               if needs_rekey else key)
    adjusted_metrics[new_key] = metric
  return super(DNNLinearCombinedRegressor, self).evaluate(
      x=x,
      y=y,
      input_fn=input_fn,
      feed_fn=feed_fn,
      batch_size=batch_size,
      steps=steps,
      metrics=adjusted_metrics,
      name=name,
      checkpoint_path=checkpoint_path,
      hooks=hooks)
@deprecated_arg_values(
    estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
    as_iterable=False)
@deprecated_arg_values(
    "2017-03-01",
    "Please switch to predict_scores, or set `outputs` argument.",
    outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
            as_iterable=True):
  """Returns predictions for given features.

  Without an `outputs` argument this currently returns predicted scores,
  but that default is deprecated; callers should pass `outputs` explicitly
  or use `predict_scores` instead.

  Args:
    x: features.
    input_fn: Input function. If set, x must be None.
    batch_size: Override default batch size.
    outputs: list of `str`, name of the output to predict.
      If `None`, returns scores.
    as_iterable: If True, return an iterable which keeps yielding predictions
      for each example until inputs are exhausted. Note: The inputs must
      terminate if you want the iterable to terminate (e.g. be sure to pass
      num_epochs=1 if you are using something like read_batch_features).

  Returns:
    Numpy array of predicted scores (or an iterable of predicted scores if
    as_iterable is True). If `label_dimension == 1`, the shape of the output
    is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
    If `outputs` is set, returns a dict of predictions.
  """
  if outputs:
    return super(DNNLinearCombinedRegressor, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)
  # No explicit outputs requested: fall back to plain score prediction.
  return self.predict_scores(
      x=x,
      input_fn=input_fn,
      batch_size=batch_size,
      as_iterable=as_iterable)
@deprecated_arg_values(
    estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
    as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
                   as_iterable=True):
  """Returns predicted scores for given features.

  Args:
    x: features.
    input_fn: Input function. If set, x must be None.
    batch_size: Override default batch size.
    as_iterable: If True, return an iterable which keeps yielding predictions
      for each example until inputs are exhausted. Note: The inputs must
      terminate if you want the iterable to terminate (e.g. be sure to pass
      num_epochs=1 if you are using something like read_batch_features).

  Returns:
    Numpy array of predicted scores (or an iterable of predicted scores if
    as_iterable is True). If `label_dimension == 1`, the shape of the output
    is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
  """
  score_key = prediction_key.PredictionKey.SCORES
  predictions = super(DNNLinearCombinedRegressor, self).predict(
      x=x,
      input_fn=input_fn,
      batch_size=batch_size,
      outputs=[score_key],
      as_iterable=as_iterable)
  if not as_iterable:
    # Single dict result: extract the scores entry directly.
    return predictions[score_key]
  # Lazily unwrap the scores from each per-example prediction dict.
  return (prediction[score_key] for prediction in predictions)
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
           export_dir,
           input_fn=None,
           input_feature_key=None,
           use_deprecated_input_fn=True,
           signature_fn=None,
           default_batch_size=1,
           exports_to_keep=None):
  """See BaseEstimator.export."""

  def _parse_examples_input_fn(unused_estimator, examples):
    # Default input_fn: parse serialized tf.Examples with this model's
    # feature columns.
    return layers.parse_feature_columns_from_examples(
        examples, self._feature_columns)

  return super(DNNLinearCombinedRegressor, self).export(
      export_dir=export_dir,
      input_fn=input_fn or _parse_examples_input_fn,
      input_feature_key=input_feature_key,
      use_deprecated_input_fn=use_deprecated_input_fn,
      signature_fn=signature_fn or export.regression_signature_fn,
      prediction_key=prediction_key.PredictionKey.SCORES,
      default_batch_size=default_batch_size,
      exports_to_keep=exports_to_keep)
# Aliases
# TODO(zakaria): Remove these aliases, See b/34751732
# Backwards-compatibility alias for callers importing the old private name.
_DNNLinearCombinedEstimator = DNNLinearCombinedEstimator
| |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Option group under which every option in this module is registered;
# renders as the [cells] section of nova.conf.
cells_group = cfg.OptGroup('cells',
                           title='Cells Options',
                           help="""
Cells options allow you to use cells functionality in openstack
deployment.
""")
# Core options for the cells service (topic, enablement, identity,
# capabilities reporting, messaging timeout, capacity reserve and sync
# batching).
cells_opts = [
    cfg.StrOpt('topic',
        default='cells',
        deprecated_for_removal=True,
        deprecated_since='15.0.0',
        deprecated_reason="""
Configurable RPC topics provide little value and can result in a wide variety
of errors. They should not be used.
""",
        help="""
Topic.
This is the message queue topic that cells nodes listen on. It is
used when the cells service is started up to configure the queue,
and whenever an RPC call to the scheduler is made.
Possible values:
* cells: This is the recommended and the default value.
"""),
    cfg.BoolOpt('enable',
        default=False,
        help="""
Enable cell functionality.
When this functionality is enabled, it lets you to scale an OpenStack
Compute cloud in a more distributed fashion without having to use
complicated technologies like database and message queue clustering.
Cells are configured as a tree. The top-level cell should have a host
that runs a nova-api service, but no nova-compute services. Each
child cell should run all of the typical nova-* services in a regular
Compute cloud except for nova-api. You can think of cells as a normal
Compute deployment in that each cell has its own database server and
message queue broker.
Related options:
* name: A unique cell name must be given when this functionality
is enabled.
* cell_type: Cell type should be defined for all cells.
"""),
    cfg.StrOpt('name',
        default='nova',
        help="""
Name of the current cell.
This value must be unique for each cell. Name of a cell is used as
its id, leaving this option unset or setting the same name for
two or more cells may cause unexpected behaviour.
Related options:
* enabled: This option is meaningful only when cells service
is enabled
"""),
    cfg.ListOpt('capabilities',
        default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
        help="""
Cell capabilities.
List of arbitrary key=value pairs defining capabilities of the
current cell to be sent to the parent cells. These capabilities
are intended to be used in cells scheduler filters/weighers.
Possible values:
* key=value pairs list for example;
``hypervisor=xenserver;kvm,os=linux;windows``
"""),
    cfg.IntOpt('call_timeout',
        default=60,
        min=0,
        help="""
Call timeout.
Cell messaging module waits for response(s) to be put into the
eventlet queue. This option defines the seconds waited for
response from a call to a cell.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.FloatOpt('reserve_percent',
        default=10.0,
        help="""
Reserve percentage
Percentage of cell capacity to hold in reserve, so the minimum
amount of free resource is considered to be;
min_free = total * (reserve_percent / 100.0)
This option affects both memory and disk utilization.
The primary purpose of this reserve is to ensure some space is
available for users who want to resize their instance to be larger.
Note that currently once the capacity expands into this reserve
space this option is ignored.
Possible values:
* An integer or float, corresponding to the percentage of cell capacity to
be held in reserve.
"""),
    cfg.StrOpt('cell_type',
        default='compute',
        choices=('api', 'compute'),
        help="""
Type of cell.
When cells feature is enabled the hosts in the OpenStack Compute
cloud are partitioned into groups. Cells are configured as a tree.
The top-level cell's cell_type must be set to ``api``. All other
cells are defined as a ``compute cell`` by default.
Related option:
* quota_driver: Disable quota checking for the child cells.
(nova.quota.NoopQuotaDriver)
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('mute_child_interval',
        default=300,
        help="""
Mute child interval.
Number of seconds after which a lack of capability and capacity
update the child cell is to be treated as a mute cell. Then the
child cell will be weighed as recommend highly that it be skipped.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('bandwidth_update_interval',
        default=600,
        help="""
Bandwidth update interval.
Seconds between bandwidth usage cache updates for cells.
Possible values:
* An integer, corresponding to the interval time in seconds.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('instance_update_sync_database_limit',
        default=100,
        help="""
Instance update sync database limit.
Number of instances to pull from the database at one time for
a sync. If there are more instances to update the results will
be paged through.
Possible values:
* An integer, corresponding to a number of instances.
"""),
]
# Multiplier used to down-weight (i.e. avoid) mute child cells.
mute_weigher_opts = [
    # TODO(sfinucan): Add max parameter
    cfg.FloatOpt('mute_weight_multiplier',
        default=-10000.0,
        help="""
Mute weight multiplier.
Multiplier used to weigh mute children. Mute children cells are
recommended to be skipped so their weight is multiplied by this
negative value.
Possible values:
* Negative numeric number
"""),
]
# Multiplier for RAM-capacity-based cell weighting.
ram_weigher_opts = [
    # TODO(sfinucan): Add min parameter
    cfg.FloatOpt('ram_weight_multiplier',
        default=10.0,
        help="""
Ram weight multiplier.
Multiplier used for weighing ram. Negative numbers indicate that
Compute should stack VMs on one host instead of spreading out new
VMs to more hosts in the cell.
Possible values:
* Numeric multiplier
"""),
]
# Multiplier applied to the per-cell weight_offset stored in the DB.
weigher_opts = [
    # TODO(sfinucan): Add min parameter
    cfg.FloatOpt('offset_weight_multiplier',
        default=1.0,
        help="""
Offset weight multiplier
Multiplier used to weigh offset weigher. Cells with higher
weight_offsets in the DB will be preferred. The weight_offset
is a property of a cell stored in the database. It can be used
by a deployer to have scheduling decisions favor or disfavor
cells based on the setting.
Possible values:
* Numeric multiplier
"""),
]
# Options for the cells manager's periodic instance-sync task.
cell_manager_opts = [
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('instance_updated_at_threshold',
        default=3600,
        help="""
Instance updated at threshold
Number of seconds after an instance was updated or deleted to
continue to update cells. This option lets cells manager to only
attempt to sync instances that have been updated recently.
i.e., a threshold of 3600 means to only update instances that
have modified in the last hour.
Possible values:
* Threshold in seconds
Related options:
* This value is used with the ``instance_update_num_instances``
value in a periodic task run.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt("instance_update_num_instances",
        default=1,
        help="""
Instance update num instances
On every run of the periodic task, nova cells manager will attempt to
sync instance_updated_at_threshold number of instances. When the
manager gets the list of instances, it shuffles them so that multiple
nova-cells services do not attempt to sync the same instances in
lockstep.
Possible values:
* Positive integer number
Related options:
* This value is used with the ``instance_updated_at_threshold``
value in a periodic task run.
""")
]
# Options for inter-cell message routing and the scheduler driver class.
cell_messaging_opts = [
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('max_hop_count',
        default=10,
        help="""
Maximum hop count
When processing a targeted message, if the local cell is not the
target, a route is defined between neighbouring cells. And the
message is processed across the whole routing path. This option
defines the maximum hop counts until reaching the target.
Possible values:
* Positive integer value
"""),
    cfg.StrOpt('scheduler',
        default='nova.cells.scheduler.CellsScheduler',
        help="""
Cells scheduler.
The class of the driver used by the cells scheduler. This should be
the full Python path to the class to be used. If nothing is specified
in this option, the CellsScheduler is used.
""")
]
# Option for the base AMQP queue name used for inter-cell RPC.
cell_rpc_driver_opts = [
    cfg.StrOpt('rpc_driver_queue_base',
        default='cells.intercell',
        help="""
RPC driver queue base.
When sending a message to another cell by JSON-ifying the message
and making an RPC cast to 'process_message', a base queue is used.
This option defines the base queue name to be used when communicating
between cells. Various topics by message type will be appended to this.
Possible values:
* The base queue name to be used when communicating between cells.
""")
]
# Options controlling cell selection (filters, weighers) and retry policy.
cell_scheduler_opts = [
    cfg.ListOpt('scheduler_filter_classes',
        default=['nova.cells.filters.all_filters'],
        help="""
Scheduler filter classes.
Filter classes the cells scheduler should use. An entry of
"nova.cells.filters.all_filters" maps to all cells filters
included with nova. As of the Mitaka release the following
filter classes are available:
Different cell filter: A scheduler hint of 'different_cell'
with a value of a full cell name may be specified to route
a build away from a particular cell.
Image properties filter: Image metadata named
'hypervisor_version_requires' with a version specification
may be specified to ensure the build goes to a cell which
has hypervisors of the required version. If either the version
requirement on the image or the hypervisor capability of the
cell is not present, this filter returns without filtering out
the cells.
Target cell filter: A scheduler hint of 'target_cell' with a
value of a full cell name may be specified to route a build to
a particular cell. No error handling is done as there's no way
to know whether the full path is a valid.
As an admin user, you can also add a filter that directs builds
to a particular cell.
"""),
    cfg.ListOpt('scheduler_weight_classes',
        default=['nova.cells.weights.all_weighers'],
        help="""
Scheduler weight classes.
Weigher classes the cells scheduler should use. An entry of
"nova.cells.weights.all_weighers" maps to all cell weighers
included with nova. As of the Mitaka release the following
weight classes are available:
mute_child: Downgrades the likelihood of child cells being
chosen for scheduling requests, which haven't sent capacity
or capability updates in a while. Options include
mute_weight_multiplier (multiplier for mute children; value
should be negative).
ram_by_instance_type: Select cells with the most RAM capacity
for the instance type being requested. Because higher weights
win, Compute returns the number of available units for the
instance type requested. The ram_weight_multiplier option defaults
to 10.0 that adds to the weight by a factor of 10. Use a negative
number to stack VMs on one host instead of spreading out new VMs
to more hosts in the cell.
weight_offset: Allows modifying the database to weight a particular
cell. The highest weight will be the first cell to be scheduled for
launching an instance. When the weight_offset of a cell is set to 0,
it is unlikely to be picked but it could be picked if other cells
have a lower weight, like if they're full. And when the weight_offset
is set to a very high value (for example, '999999999999999'), it is
likely to be picked if another cell do not have a higher weight.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('scheduler_retries',
        default=10,
        help="""
Scheduler retries.
How many retries when no cells are available. Specifies how many
times the scheduler tries to launch a new instance when no cells
are available.
Possible values:
* Positive integer value
Related options:
* This value is used with the ``scheduler_retry_delay`` value
while retrying to find a suitable cell.
"""),
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('scheduler_retry_delay',
        default=2,
        help="""
Scheduler retry delay.
Specifies the delay (in seconds) between scheduling retries when no
cell can be found to place the new instance on. When the instance
could not be scheduled to a cell after ``scheduler_retries`` in
combination with ``scheduler_retry_delay``, then the scheduling
of the instance failed.
Possible values:
* Time in seconds.
Related options:
* This value is used with the ``scheduler_retries`` value
while retrying to find a suitable cell.
""")
]
# Options for the cell state manager: DB polling interval and optional
# JSON-file-based cells configuration that bypasses the database.
cell_state_manager_opts = [
    # TODO(sfinucan): Add min parameter
    cfg.IntOpt('db_check_interval',
        default=60,
        help="""
DB check interval.
Cell state manager updates cell status for all cells from the DB
only after this particular interval time is passed. Otherwise cached
status are used. If this value is 0 or negative all cell status are
updated from the DB whenever a state is needed.
Possible values:
* Interval time, in seconds.
"""),
    cfg.StrOpt('cells_config',
        help="""
Optional cells configuration.
Configuration file from which to read cells configuration. If given,
overrides reading cells from the database.
Cells store all inter-cell communication data, including user names
and passwords, in the database. Because the cells data is not updated
very frequently, use this option to specify a JSON file to store
cells data. With this configuration, the database is no longer
consulted when reloading the cells data. The file must have columns
present in the Cell model (excluding common database fields and the
id column). You must specify the queue connection information through
a transport_url field, instead of username, password, and so on.
The transport_url has the following form:
rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
Possible values:
The scheme can be either qpid or rabbit, the following sample shows
this optional configuration:
{
"parent": {
"name": "parent",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": true
},
"cell1": {
"name": "cell1",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit1.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
},
"cell2": {
"name": "cell2",
"api_url": "http://api.example.com:8774",
"transport_url": "rabbit://rabbit2.example.com",
"weight_offset": 0.0,
"weight_scale": 1.0,
"is_parent": false
}
}
""")
]
# Flattened list of every option defined above, used for one-shot
# registration and for the oslo config-generator listing.
ALL_CELLS_OPTS = (cells_opts +
                  mute_weigher_opts +
                  ram_weigher_opts +
                  weigher_opts +
                  cell_manager_opts +
                  cell_messaging_opts +
                  cell_rpc_driver_opts +
                  cell_scheduler_opts +
                  cell_state_manager_opts)
def register_opts(conf):
    """Register the [cells] group and all cells options on *conf*."""
    # The group must exist before options can be registered under it.
    conf.register_group(cells_group)
    conf.register_opts(ALL_CELLS_OPTS, group=cells_group)
def list_opts():
    """Return {group: options} for the oslo config-generator tooling."""
    return {cells_group: ALL_CELLS_OPTS}
| |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/html_linter.py."""
from __future__ import annotations
import multiprocessing
import os
from core.tests import test_utils
from . import html_linter
from . import pre_commit_linter
# Shared multiprocessing namespace/dict so the file cache can be shared
# with linter worker processes.
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
# Directory holding the HTML fixture files exercised by these tests.
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
# One fixture path per lint failure mode, plus a known-good file.
VALID_HTML_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.html')
INVALID_STYLE_INDENTATION_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_style_indentation.html')
INVALID_INDENTATION_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_indentation.html')
INVALID_QUOTES_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_quotes.html')
INVALID_ALIGNMENT_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_alignment_of_tags.html')
INVALID_MISSING_HTML_TAG_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_missing_html_tag.html')
INVALID_TAG_MISMATCH_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_tag_mismatch.html')
INVALID_MISMATCH_INDENTATION_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_mismatch_indentation.html')
INVALID_MISMATCHED_TAGS_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_mismatched_tags.html')
INVALID_SPACE_AROUND_ATTRIBUTE_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_space_around_attribute.html')
INVALID_SPACE_AROUND_INNERHTML_ATTRIBUTE_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_space_around_innerhtml_attribute.html')
INVALID_SPACE_AROUND_DUPLICATE_ATTRIBUTE_HTML_FILEPATH = os.path.join(
    LINTER_TESTS_DIR, 'invalid_space_around_duplicate_attribute.html')
class CustomHTMLParserTests(test_utils.LinterTestBase):
    """Tests for CustomHTMLParser class."""

    def test_custom_linter_with_invalid_style_indentation(self):
        """Checks that wrong indentation inside a style tag is reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_STYLE_INDENTATION_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'invalid_style_indentation.html --> Expected indentation of 6,'
            ' found indentation of 4 for content of style tag on line 7'
        ], lint_task_report.trimmed_messages)
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_custom_linter_with_invalid_indentation(self):
        """Checks that a wrongly-indented tag is reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_INDENTATION_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Expected indentation of 10, found indentation of 12 for '
            'classroom-page tag on line 14'], lint_task_report.trimmed_messages)
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_custom_linter_with_invalid_quotes(self):
        """Checks that single-quoted attribute values are reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_QUOTES_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'The value color:white; of attribute '
            'style for the tag content on line 12 should be enclosed '
            'within double quotes.'], lint_task_report.trimmed_messages)
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_custom_linter_with_invalid_alignment(self):
        """Checks that misaligned multi-line attributes are reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_ALIGNMENT_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Attribute for tag content on line 13 should align with the '
            'leftmost attribute on line 12'], lint_task_report.trimmed_messages)
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_custom_linter_with_invalid_tags(self):
        """Checks that mismatched tags raise TagMismatchException."""
        with self.assertRaisesRegexp(
            html_linter.TagMismatchException, 'Error in line 2 of file'):
            html_linter.HTMLLintChecksManager(
                [INVALID_MISMATCHED_TAGS_HTML_FILEPATH], FILE_CACHE
            ).perform_all_lint_checks()

    def test_custom_linter_with_tag_mismatch(self):
        """Checks that a tag mismatch raises TagMismatchException."""
        with self.assertRaisesRegexp(
            html_linter.TagMismatchException, 'Error in line 13 of file'):
            html_linter.HTMLLintChecksManager(
                [INVALID_TAG_MISMATCH_HTML_FILEPATH], FILE_CACHE
            ).perform_all_lint_checks()

    def test_custom_linter_with_mismatched_indentation(self):
        """Checks that start/end tags with different indentation are
        reported.
        """
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_MISMATCH_INDENTATION_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Indentation for end tag content on line 18 does not match the'
            ' indentation of the start tag content on line 12'
        ], lint_task_report.trimmed_messages)
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_custom_without_html_end_tag(self):
        """Checks that a missing closing html tag raises
        TagMismatchException.
        """
        with self.assertRaisesRegexp(
            html_linter.TagMismatchException, 'Error in file'):
            html_linter.HTMLLintChecksManager(
                [INVALID_MISSING_HTML_TAG_HTML_FILEPATH], FILE_CACHE
            ).perform_all_lint_checks()

    def test_space_around_attribute_name_reports_correctly(self):
        """Checks that whitespace around an attribute name is reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_SPACE_AROUND_ATTRIBUTE_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Attribute class for tag div on line 4 ',
            'has unwanted white spaces around it'
        ], lint_task_report.trimmed_messages)
        # Previously asserted the report object itself, which is always
        # truthy; assert the failure flag instead.
        self.assertTrue(lint_task_report.failed)

    def test_space_around_attr_having_camelcase_name_reports_correctly(self):
        """Checks that whitespace around a camelCase attribute is reported."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_SPACE_AROUND_INNERHTML_ATTRIBUTE_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Attribute [innerhtml] for tag h1 on line 5 ',
            'has unwanted white spaces around it'
        ], lint_task_report.trimmed_messages)
        # Previously asserted the report object itself, which is always
        # truthy; assert the failure flag instead.
        self.assertTrue(lint_task_report.failed)

    def test_space_around_duplicate_attr_reports_correctly(self):
        """Checks that whitespace around a duplicated attribute is
        reported.
        """
        lint_task_report = html_linter.HTMLLintChecksManager(
            [INVALID_SPACE_AROUND_DUPLICATE_ATTRIBUTE_HTML_FILEPATH], FILE_CACHE
        ).check_html_tags_and_attributes()
        self.assert_same_list_elements([
            'Attribute class for tag div on line 4 ',
            'has unwanted white spaces around it'
        ], lint_task_report.trimmed_messages)
        # Previously asserted the report object itself, which is always
        # truthy; assert the failure flag instead.
        self.assertTrue(lint_task_report.failed)

    def test_valid_html_file_with_custom_linter(self):
        """Checks that a clean fixture passes the custom checks."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [VALID_HTML_FILEPATH], FILE_CACHE).check_html_tags_and_attributes()
        self.assertEqual(
            ['SUCCESS HTML tag and attribute check passed'],
            lint_task_report.get_report())
        self.assertEqual('HTML tag and attribute', lint_task_report.name)
        self.assertFalse(lint_task_report.failed)

    def test_custom_linter_with_no_files(self):
        """Checks the custom linter's behaviour with an empty file list."""
        lint_task_report = html_linter.HTMLLintChecksManager(
            [], FILE_CACHE).perform_all_lint_checks()
        self.assertEqual(
            [
                'There are no HTML files to lint.',
                'SUCCESS HTML lint check passed'],
            lint_task_report[0].get_report())
        self.assertEqual('HTML lint', lint_task_report[0].name)
        self.assertFalse(lint_task_report[0].failed)

    def test_third_party_linter_with_no_files(self):
        """Checks the third-party linter's behaviour with an empty file
        list.
        """
        lint_task_report = html_linter.ThirdPartyHTMLLintChecksManager(
            []).perform_all_lint_checks()
        self.assertEqual(
            [
                'There are no HTML files to lint.',
                'SUCCESS HTML lint check passed'],
            lint_task_report[0].get_report())
        self.assertEqual('HTML lint', lint_task_report[0].name)
        self.assertFalse(lint_task_report[0].failed)

    def test_third_party_linter_with_lint_errors(self):
        """Checks that htmllint errors are surfaced in the report."""
        lint_task_report = html_linter.ThirdPartyHTMLLintChecksManager(
            [INVALID_QUOTES_HTML_FILEPATH]).lint_html_files()
        self.assert_same_list_elements(
            ['line 10, col 20, line contains trailing whitespace'],
            lint_task_report.trimmed_messages)
        self.assertEqual('HTMLLint', lint_task_report.name)
        self.assertTrue(lint_task_report.failed)

    def test_third_party_perform_all_lint_checks(self):
        """Checks that perform_all_lint_checks returns a list of reports."""
        lint_task_report = html_linter.ThirdPartyHTMLLintChecksManager(
            [INVALID_QUOTES_HTML_FILEPATH]).perform_all_lint_checks()
        self.assertTrue(isinstance(lint_task_report, list))

    def test_get_linters_with_success(self):
        """Checks that get_linters returns the two expected manager types."""
        custom_linter, third_party_linter = html_linter.get_linters(
            [VALID_HTML_FILEPATH], FILE_CACHE)
        self.assertTrue(
            isinstance(custom_linter, html_linter.HTMLLintChecksManager))
        self.assertTrue(
            isinstance(
                third_party_linter,
                html_linter.ThirdPartyHTMLLintChecksManager))
| |
import numpy as np
from ._extensions._pywt import Wavelet, Modes, _check_dtype
from ._extensions._dwt import (dwt_single, dwt_axis, idwt_single, idwt_axis,
upcoef as _upcoef, downcoef as _downcoef,
dwt_max_level as _dwt_max_level,
dwt_coeff_len as _dwt_coeff_len)
# Public API of this module.
__all__ = ["dwt", "idwt", "downcoef", "upcoef", "dwt_max_level", "dwt_coeff_len"]
def dwt_max_level(data_len, filter_len):
    """
    dwt_max_level(data_len, filter_len)

    Compute the maximum useful level of decomposition.

    Parameters
    ----------
    data_len : int
        Input data length.
    filter_len : int or Wavelet
        Wavelet filter length (or a Wavelet, whose ``dec_len`` is used).

    Returns
    -------
    max_level : int
        Maximum level.

    Examples
    --------
    >>> import pywt
    >>> w = pywt.Wavelet('sym5')
    >>> pywt.dwt_max_level(data_len=1000, filter_len=w.dec_len)
    6
    >>> pywt.dwt_max_level(1000, w)
    6
    """
    # A Wavelet instance may be passed in place of the raw filter length.
    dec_len = (filter_len.dec_len if isinstance(filter_len, Wavelet)
               else filter_len)
    return _dwt_max_level(data_len, dec_len)
def dwt_coeff_len(data_len, filter_len, mode):
    """
    dwt_coeff_len(data_len, filter_len, mode='symmetric')

    Returns length of dwt output for given data length, filter length and mode

    Parameters
    ----------
    data_len : int
        Data length.
    filter_len : int or Wavelet
        Filter length (or a Wavelet, whose ``dec_len`` is used).
    mode : str, optional (default: 'symmetric')
        Signal extension mode, see Modes

    Returns
    -------
    len : int
        Length of dwt output.

    Notes
    -----
    For all modes except periodization::

        len(cA) == len(cD) == floor((len(data) + wavelet.dec_len - 1) / 2)

    for periodization mode ("per")::

        len(cA) == len(cD) == ceil(len(data) / 2)
    """
    # A Wavelet instance may be passed in place of the raw filter length.
    if isinstance(filter_len, Wavelet):
        dec_len = filter_len.dec_len
    else:
        dec_len = filter_len
    return _dwt_coeff_len(data_len, dec_len, Modes.from_object(mode))
def dwt(data, wavelet, mode='symmetric', axis=-1):
    """
    dwt(data, wavelet, mode='symmetric', axis=-1)

    Single level Discrete Wavelet Transform.

    Parameters
    ----------
    data : array_like
        Input signal
    wavelet : Wavelet object or name
        Wavelet to use
    mode : str, optional
        Signal extension mode, see Modes
    axis: int, optional
        Axis over which to compute the DWT. If not given, the
        last axis is used.

    Returns
    -------
    (cA, cD) : tuple
        Approximation and detail coefficients.

    Notes
    -----
    Length of coefficients arrays depends on the selected mode.
    For all modes except periodization:

        ``len(cA) == len(cD) == floor((len(data) + wavelet.dec_len - 1) / 2)``

    For periodization mode ("per"):

        ``len(cA) == len(cD) == ceil(len(data) / 2)``

    Examples
    --------
    >>> import pywt
    >>> (cA, cD) = pywt.dwt([1, 2, 3, 4, 5, 6], 'db1')
    >>> cA
    array([ 2.12132034,  4.94974747,  7.77817459])
    >>> cD
    array([-0.70710678, -0.70710678, -0.70710678])
    """
    # Complex input: transform the real and imaginary parts independently
    # and recombine, since the C routines operate on real data only.
    if np.iscomplexobj(data):
        data = np.asarray(data)
        real_cA, real_cD = dwt(data.real, wavelet, mode, axis)
        imag_cA, imag_cD = dwt(data.imag, wavelet, mode, axis)
        return (real_cA + 1j * imag_cA, real_cD + 1j * imag_cD)

    # accept array_like input; make a copy to ensure a contiguous array
    dt = _check_dtype(data)
    data = np.array(data, dtype=dt)
    mode = Modes.from_object(mode)
    if not isinstance(wavelet, Wavelet):
        wavelet = Wavelet(wavelet)

    # Normalize a negative axis, then validate it against the data rank.
    if axis < 0:
        axis += data.ndim
    if not 0 <= axis < data.ndim:
        raise ValueError("Axis greater than data dimensions")

    if data.ndim == 1:
        cA, cD = dwt_single(data, wavelet, mode)
        # TODO: Check whether this makes a copy
        cA, cD = np.asarray(cA, dt), np.asarray(cD, dt)
    else:
        cA, cD = dwt_axis(data, wavelet, mode, axis=axis)
    return (cA, cD)
def idwt(cA, cD, wavelet, mode='symmetric', axis=-1):
    """
    idwt(cA, cD, wavelet, mode='symmetric', axis=-1)

    Single level Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    cA : array_like or None
        Approximation coefficients. If None, will be set to array of zeros
        with same shape as `cD`.
    cD : array_like or None
        Detail coefficients. If None, will be set to array of zeros
        with same shape as `cA`.
    wavelet : Wavelet object or name
        Wavelet to use
    mode : str, optional (default: 'symmetric')
        Signal extension mode, see Modes
    axis: int, optional
        Axis over which to compute the inverse DWT. If not given, the
        last axis is used.

    Returns
    -------
    rec: array_like
        Single level reconstruction of signal from given coefficients.

    Raises
    ------
    ValueError
        If both `cA` and `cD` are None, or if `axis` is out of range for
        the coefficient arrays.
    """
    # TODO: Lots of possible allocations to eliminate (zeros_like, asarray(rec))
    # accept array_like input; make a copy to ensure a contiguous array
    if cA is None and cD is None:
        raise ValueError("At least one coefficient parameter must be "
                         "specified.")
    # for complex inputs: compute real and imaginary separately then combine
    if np.iscomplexobj(cA) or np.iscomplexobj(cD):
        # Stand in zeros for a missing coefficient array so both recursive
        # calls below receive arrays of matching shape.
        if cA is None:
            cD = np.asarray(cD)
            cA = np.zeros_like(cD)
        elif cD is None:
            cA = np.asarray(cA)
            cD = np.zeros_like(cA)
        return (idwt(cA.real, cD.real, wavelet, mode, axis) +
                1j*idwt(cA.imag, cD.imag, wavelet, mode, axis))
    # Coerce each provided coefficient array to a contiguous copy of a
    # supported dtype.
    if cA is not None:
        dt = _check_dtype(cA)
        cA = np.array(cA, dtype=dt)
    if cD is not None:
        dt = _check_dtype(cD)
        cD = np.array(cD, dtype=dt)
    if cA is not None and cD is not None:
        if cA.dtype != cD.dtype:
            # need to upcast to common type
            cA = cA.astype(np.float64)
            cD = cD.astype(np.float64)
    elif cA is None:
        # Missing approximation coefficients: reconstruct from details only.
        cA = np.zeros_like(cD)
    elif cD is None:
        # Missing detail coefficients: reconstruct from approximation only.
        cD = np.zeros_like(cA)
    # cA and cD should be same dimension by here
    ndim = cA.ndim
    mode = Modes.from_object(mode)
    if not isinstance(wavelet, Wavelet):
        wavelet = Wavelet(wavelet)
    # Normalize a negative axis and validate it against the coefficient rank.
    if axis < 0:
        axis = axis + ndim
    if not 0 <= axis < ndim:
        raise ValueError("Axis greater than coefficient dimensions")
    if ndim == 1:
        rec = idwt_single(cA, cD, wavelet, mode)
    else:
        rec = idwt_axis(cA, cD, wavelet, mode, axis=axis)
    return rec
def downcoef(part, data, wavelet, mode='symmetric', level=1):
    """Partial Discrete Wavelet Transform data decomposition.

    Similar to `pywt.dwt`, but computes only one set of coefficients.
    Useful when you need only the approximation or only the detail
    coefficients at the given level.

    Parameters
    ----------
    part : str
        Coefficients type:

        * 'a' - approximation coefficients
        * 'd' - detail coefficients
    data : array_like
        Input signal.
    wavelet : Wavelet object or name
        Wavelet to use.
    mode : str, optional
        Signal extension mode, see `Modes`. Default is 'symmetric'.
    level : int, optional
        Decomposition level. Default is 1.

    Returns
    -------
    coeffs : ndarray
        1-D array of coefficients.

    See Also
    --------
    upcoef
    """
    # Complex input: decompose real and imaginary parts separately.
    if np.iscomplexobj(data):
        real_part = downcoef(part, data.real, wavelet, mode, level)
        imag_part = downcoef(part, data.imag, wavelet, mode, level)
        return real_part + 1j*imag_part
    # Accept array_like input; copy into a contiguous array of a supported
    # dtype.
    data = np.array(data, dtype=_check_dtype(data))
    if part not in 'ad':
        raise ValueError("Argument 1 must be 'a' or 'd', not '%s'." % part)
    mode = Modes.from_object(mode)
    if not isinstance(wavelet, Wavelet):
        wavelet = Wavelet(wavelet)
    want_approx = part == 'a'
    return np.asarray(_downcoef(want_approx, data, wavelet, mode, level))
def upcoef(part, coeffs, wavelet, level=1, take=0):
    """Direct reconstruction from coefficients.

    Parameters
    ----------
    part : str
        Coefficients type:

        * 'a' - approximations reconstruction is performed
        * 'd' - details reconstruction is performed
    coeffs : array_like
        Coefficients array to reconstruct.
    wavelet : Wavelet object or name
        Wavelet to use.
    level : int, optional
        Multilevel reconstruction level. Default is 1.
    take : int, optional
        Take central part of length equal to 'take' from the result.
        Default is 0.

    Returns
    -------
    rec : ndarray
        1-D array with reconstructed data from coefficients.

    See Also
    --------
    downcoef

    Examples
    --------
    >>> import pywt
    >>> data = [1,2,3,4,5,6]
    >>> (cA, cD) = pywt.dwt(data, 'db2', 'smooth')
    >>> pywt.upcoef('a', cA, 'db2') + pywt.upcoef('d', cD, 'db2')
    array([-0.25      , -0.4330127 ,  1.        ,  2.        ,  3.        ,
            4.        ,  5.        ,  6.        ,  1.78589838, -1.03108891])
    >>> n = len(data)
    >>> pywt.upcoef('a', cA, 'db2', take=n) + pywt.upcoef('d', cD, 'db2', take=n)
    array([ 1.,  2.,  3.,  4.,  5.,  6.])
    """
    # Complex coefficients: reconstruct each component, then recombine.
    if np.iscomplexobj(coeffs):
        real_rec = upcoef(part, coeffs.real, wavelet, level, take)
        imag_rec = upcoef(part, coeffs.imag, wavelet, level, take)
        return real_rec + 1j*imag_rec
    # Accept array_like input; copy into a contiguous array of a supported
    # dtype.
    coeffs = np.array(coeffs, dtype=_check_dtype(coeffs))
    if not isinstance(wavelet, Wavelet):
        wavelet = Wavelet(wavelet)
    if part not in 'ad':
        raise ValueError("Argument 1 must be 'a' or 'd', not '%s'." % part)
    want_approx = part == 'a'
    return np.asarray(_upcoef(want_approx, coeffs, wavelet, level, take))
| |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
  """Randomly flips a dimension of the given tensors.

  A single random draw decides the flip for every tensor in the list, so
  either all of the inputs are flipped or none of them are.
  tf.random_flip_left_right / tf.random_flip_up_down are deliberately not
  used here: drawing the coin ourselves lets us control the probability and
  share one decision across all tensors.

  Args:
    tensor_list: A list of `Tensors` with the same number of dimensions.
    prob: The probability of a left-right flip.
    dim: The dimension to flip, 0, 1, ..

  Returns:
    outputs: A list of the possibly flipped `Tensors` as well as an indicator
      `Tensor` at the end whose value is `True` if the inputs were flipped and
      `False` otherwise.

  Raises:
    ValueError: If dim is negative or greater than the dimension of a `Tensor`.
  """
  coin = tf.random_uniform([])

  def _flip_all():
    # Flip every tensor along `dim`; validate `dim` against each tensor's
    # static rank first.
    results = []
    for tensor in tensor_list:
      rank = len(tensor.get_shape().as_list())
      if not 0 <= dim < rank:
        raise ValueError('dim must represent a valid dimension.')
      results.append(tf.reverse_v2(tensor, [dim]))
    return results

  is_flipped = tf.less_equal(coin, prob)
  outputs = tf.cond(is_flipped, _flip_all, lambda: tensor_list)
  if not isinstance(outputs, (list, tuple)):
    outputs = [outputs]
  # The indicator tensor rides along at the end of the output list.
  outputs.append(is_flipped)
  return outputs
def _image_dimensions(image, rank):
  """Returns the dimensions of an image tensor.

  Args:
    image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
    rank: The expected rank of the image.

  Returns:
    A list corresponding to the dimensions of the input image. Dimensions
    that are statically known are python integers, otherwise they are integer
    scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  # Fall back to dynamic dimensions wherever a static one is unknown.
  static_shape = image.get_shape().with_rank(rank).as_list()
  dynamic_shape = tf.unstack(tf.shape(image), rank)
  return [static_dim if static_dim is not None else dynamic_dim
          for static_dim, dynamic_dim in zip(static_shape, dynamic_shape)]
def get_label_resize_method(label):
  """Returns the resize method to use for labels of the given dtype.

  Float labels (e.g. soft labels) may be interpolated, while integer labels
  (class ids) must not be mixed, so they use nearest neighbor.

  Args:
    label: Groundtruth label tensor.

  Returns:
    tf.image.ResizeMethod.BILINEAR, if label dtype is floating.
    tf.image.ResizeMethod.NEAREST_NEIGHBOR, if label dtype is integer.

  Raises:
    ValueError: If label is neither floating nor integer.
  """
  dtype = label.dtype
  if dtype.is_floating:
    return tf.image.ResizeMethod.BILINEAR
  if dtype.is_integer:
    return tf.image.ResizeMethod.NEAREST_NEIGHBOR
  raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, pad_value):
  """Pads the given image with the given pad_value.

  Works like tf.image.pad_to_bounding_box, except it can pad the image
  with any given arbitrary pad value and also handle images whose sizes are not
  known during graph construction.

  Args:
    image: 3-D tensor with shape [height, width, channels]
    offset_height: Number of rows of zeros to add on top.
    offset_width: Number of columns of zeros to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.
    pad_value: Value to pad the image tensor with.

  Returns:
    3-D tensor of shape [target_height, target_width, channels].

  Raises:
    ValueError: If the shape of image is incompatible with the offset_* or
      target_* arguments.
  """
  with tf.name_scope(None, 'pad_to_bounding_box', [image]):
    image = tf.convert_to_tensor(image, name='image')
    original_dtype = image.dtype
    if original_dtype != tf.float32 and original_dtype != tf.float64:
      # If image dtype is not float, we convert it to int32 to avoid overflow.
      image = tf.cast(image, tf.int32)
    image_rank_assert = tf.Assert(
        tf.logical_or(
            tf.equal(tf.rank(image), 3),
            tf.equal(tf.rank(image), 4)),
        ['Wrong image tensor rank.'])
    with tf.control_dependencies([image_rank_assert]):
      # Shift by pad_value so the zero-padding done by tf.pad below
      # effectively pads with pad_value; the shift is undone after padding.
      image -= pad_value
    image_shape = image.get_shape()
    is_batch = True
    if image_shape.ndims == 3:
      # Single image: temporarily add a batch dimension so one padding path
      # handles both cases.
      is_batch = False
      image = tf.expand_dims(image, 0)
    elif image_shape.ndims is None:
      # Rank unknown at graph construction time: treat as a single image.
      is_batch = False
      image = tf.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image.get_shape().ndims != 4:
      raise ValueError('Input image must have either 3 or 4 dimensions.')
    _, height, width, _ = _image_dimensions(image, rank=4)
    target_width_assert = tf.Assert(
        tf.greater_equal(
            target_width, width),
        ['target_width must be >= width'])
    target_height_assert = tf.Assert(
        tf.greater_equal(target_height, height),
        ['target_height must be >= height'])
    with tf.control_dependencies([target_width_assert]):
      after_padding_width = target_width - offset_width - width
    with tf.control_dependencies([target_height_assert]):
      after_padding_height = target_height - offset_height - height
    offset_assert = tf.Assert(
        tf.logical_and(
            tf.greater_equal(after_padding_width, 0),
            tf.greater_equal(after_padding_height, 0)),
        ['target size not possible with the given target offsets'])
    # tf.pad paddings are [before, after] pairs per dimension; the batch and
    # channel dimensions are never padded.
    batch_params = tf.stack([0, 0])
    height_params = tf.stack([offset_height, after_padding_height])
    width_params = tf.stack([offset_width, after_padding_width])
    channel_params = tf.stack([0, 0])
    with tf.control_dependencies([offset_assert]):
      paddings = tf.stack([batch_params, height_params, width_params,
                           channel_params])
    padded = tf.pad(image, paddings)
    if not is_batch:
      padded = tf.squeeze(padded, axis=[0])
    # Undo the earlier shift so the padded regions hold pad_value.
    outputs = padded + pad_value
    if outputs.dtype != original_dtype:
      outputs = tf.cast(outputs, original_dtype)
    return outputs
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  The input image size need not be known at graph construction time, but its
  rank must be.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    The cropped (and resized) image.

  Raises:
    ValueError: if `image` doesn't have rank of 3.
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  dynamic_shape = tf.shape(image)
  static_shape = image.get_shape().as_list()
  if len(static_shape) != 3:
    raise ValueError('input must have rank of 3')
  original_channels = static_shape[2]
  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, dynamic_shape[2]])
  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(dynamic_shape[0], crop_height),
          tf.greater_equal(dynamic_shape[1], crop_width)),
      ['Crop size greater than the image size.'])
  offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
  # tf.slice (unlike crop_to_bounding_box) accepts tensors for the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  image = tf.reshape(image, cropped_shape)
  image.set_shape([crop_height, crop_width, original_channels])
  return image
def random_crop(image_list, crop_height, crop_width):
  """Crops the given list of images.

  The function applies the same crop to each image in the list. This can be
  effectively applied when there are multiple image inputs of the same
  dimension such as:

    image, depths, normals = random_crop([image, depths, normals], 120, 150)

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    the image_list with cropped images.

  Raises:
    ValueError: if there are multiple image inputs provided with different size
      or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')
  # Compute the rank assertions.
  rank_assertions = []
  for i in range(len(image_list)):
    image_rank = tf.rank(image_list[i])
    rank_assert = tf.Assert(
        tf.equal(image_rank, 3),
        ['Wrong rank for tensor %s [expected] [actual]',
         image_list[i].name, 3, image_rank])
    rank_assertions.append(rank_assert)
  # The first image's dynamic shape is the reference all others must match.
  with tf.control_dependencies([rank_assertions[0]]):
    image_shape = tf.shape(image_list[0])
  image_height = image_shape[0]
  image_width = image_shape[1]
  crop_size_assert = tf.Assert(
      tf.logical_and(
          tf.greater_equal(image_height, crop_height),
          tf.greater_equal(image_width, crop_width)),
      ['Crop size greater than the image size.'])
  asserts = [rank_assertions[0], crop_size_assert]
  # Assert that every remaining image matches the reference height and width.
  for i in range(1, len(image_list)):
    image = image_list[i]
    asserts.append(rank_assertions[i])
    with tf.control_dependencies([rank_assertions[i]]):
      shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    height_assert = tf.Assert(
        tf.equal(height, image_height),
        ['Wrong height for tensor %s [expected][actual]',
         image.name, height, image_height])
    width_assert = tf.Assert(
        tf.equal(width, image_width),
        ['Wrong width for tensor %s [expected][actual]',
         image.name, width, image_width])
    asserts.extend([height_assert, width_assert])
  # Create a random bounding box.
  #
  # Use tf.random_uniform and not numpy.random.rand as doing the former would
  # generate random numbers at graph eval time, unlike the latter which
  # generates random numbers at graph definition time.
  with tf.control_dependencies(asserts):
    max_offset_height = tf.reshape(image_height - crop_height + 1, [])
    max_offset_width = tf.reshape(image_width - crop_width + 1, [])
  offset_height = tf.random_uniform(
      [], maxval=max_offset_height, dtype=tf.int32)
  offset_width = tf.random_uniform(
      [], maxval=max_offset_width, dtype=tf.int32)
  # The same random offsets are shared by every image in the list.
  return [_crop(image, offset_height, offset_width,
                crop_height, crop_width) for image in image_list]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
  """Gets a random scale value.

  Args:
    min_scale_factor: Minimum scale value.
    max_scale_factor: Maximum scale value.
    step_size: The step size from minimum to maximum value.

  Returns:
    A random scale value selected between minimum and maximum value.

  Raises:
    ValueError: min_scale_factor has unexpected value.
  """
  if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
    raise ValueError('Unexpected value of min_scale_factor.')
  # Degenerate range: nothing random to sample.
  if min_scale_factor == max_scale_factor:
    return tf.cast(min_scale_factor, tf.float32)
  # When step_size = 0, we sample the value uniformly from [min, max).
  # NOTE(review): this branch returns a shape-[1] tensor while the other
  # branches return scalars; callers appear to rely on broadcasting — confirm.
  if step_size == 0:
    return tf.random_uniform([1],
                             minval=min_scale_factor,
                             maxval=max_scale_factor)
  # When step_size != 0, pick one value at random from the discrete grid
  # [min, min + step, ..., max].
  num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
  scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)
  return tf.random_shuffle(scale_factors)[0]
def randomly_scale_image_and_label(image, label=None, scale=1.0):
  """Randomly scales image and label.

  Args:
    image: Image with shape [height, width, 3].
    label: Label with shape [height, width, 1].
    scale: The value to scale image and label.

  Returns:
    Scaled image and label.
  """
  # A scale of exactly 1 is a no-op.
  if scale == 1.0:
    return image, label
  shape = tf.shape(image)
  new_dim = tf.cast(
      tf.cast([shape[0], shape[1]], tf.float32) * scale, tf.int32)
  # The interpolation ops take 4-D (batched) tensors, hence the
  # expand_dims / squeeze pair around the resize.
  image = tf.squeeze(
      tf.image.resize_bilinear(
          tf.expand_dims(image, 0), new_dim, align_corners=True),
      [0])
  if label is not None:
    label = tf.image.resize(
        label,
        new_dim,
        method=get_label_resize_method(label),
        align_corners=True)
  return image, label
def resolve_shape(tensor, rank=None, scope=None):
  """Fully resolves the shape of a Tensor.

  Uses the shape components already known during graph creation where
  possible and falls back to dynamic shape ops for the rest.

  Args:
    tensor: Input tensor whose shape we query.
    rank: The rank of the tensor, provided that we know it.
    scope: Optional name scope.

  Returns:
    shape: The full shape of the tensor.
  """
  with tf.name_scope(scope, 'resolve_shape', [tensor]):
    if rank is not None:
      static_shape = tensor.get_shape().with_rank(rank).as_list()
    else:
      static_shape = tensor.get_shape().as_list()
    if None not in static_shape:
      return static_shape
    # Substitute a dynamic dimension for each statically unknown one.
    dynamic_shape = tf.shape(tensor)
    return [dim if dim is not None else dynamic_shape[i]
            for i, dim in enumerate(static_shape)]
def resize_to_range(image,
                    label=None,
                    min_size=None,
                    max_size=None,
                    factor=None,
                    keep_aspect_ratio=True,
                    align_corners=True,
                    label_layout_is_chw=False,
                    scope=None,
                    method=tf.image.ResizeMethod.BILINEAR):
  """Resizes image or label so their sides are within the provided range.

  The output size can be described by two cases:
  1. If the image can be rescaled so its minimum size is equal to min_size
     without the other side exceeding max_size, then do so.
  2. Otherwise, resize so the largest side is equal to max_size.

  An integer in `range(factor)` is added to the computed sides so that the
  final dimensions are multiples of `factor` plus one.

  Args:
    image: A 3D tensor of shape [height, width, channels].
    label: (optional) A 3D tensor of shape [height, width, channels] (default)
      or [channels, height, width] when label_layout_is_chw = True.
    min_size: (scalar) desired size of the smaller image side.
    max_size: (scalar) maximum allowed size of the larger image side. Note
      that the output dimension is no larger than max_size and may be slightly
      smaller than max_size when factor is not None.
    factor: Make output size multiple of factor plus one.
    keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
      will be resized while keeping the original aspect ratio. If False, the
      input will be resized to [max_resize_value, max_resize_value] without
      keeping the original aspect ratio.
    align_corners: If True, exactly align all 4 corners of input and output.
    label_layout_is_chw: If true, the label has shape [channel, height, width].
      We support this case because for some instance segmentation dataset, the
      instance segmentation is saved as [num_instances, height, width].
    scope: Optional name scope.
    method: Image resize method. Defaults to tf.image.ResizeMethod.BILINEAR.

  Returns:
    A 3-D tensor of shape [new_height, new_width, channels], where the image
    has been resized (with the specified method) so that
    min(new_height, new_width) == ceil(min_size) or
    max(new_height, new_width) == ceil(max_size).

  Raises:
    ValueError: If the image is not a 3D tensor.
  """
  with tf.name_scope(scope, 'resize_to_range', [image]):
    new_tensor_list = []
    min_size = tf.cast(min_size, tf.float32)
    if max_size is not None:
      max_size = tf.cast(max_size, tf.float32)
      # Modify the max_size to be a multiple of factor plus 1 and make sure the
      # max dimension after resizing is no larger than max_size.
      if factor is not None:
        max_size = (max_size - (max_size - 1) % factor)
    [orig_height, orig_width, _] = resolve_shape(image, rank=3)
    orig_height = tf.cast(orig_height, tf.float32)
    orig_width = tf.cast(orig_width, tf.float32)
    orig_min_size = tf.minimum(orig_height, orig_width)
    # Calculate the larger of the possible sizes: scale so the smaller side
    # becomes min_size (case 1 above).
    large_scale_factor = min_size / orig_min_size
    large_height = tf.cast(tf.floor(orig_height * large_scale_factor), tf.int32)
    large_width = tf.cast(tf.floor(orig_width * large_scale_factor), tf.int32)
    large_size = tf.stack([large_height, large_width])
    new_size = large_size
    if max_size is not None:
      # Calculate the smaller of the possible sizes, use that if the larger
      # is too big (case 2 above).
      orig_max_size = tf.maximum(orig_height, orig_width)
      small_scale_factor = max_size / orig_max_size
      small_height = tf.cast(
          tf.floor(orig_height * small_scale_factor), tf.int32)
      small_width = tf.cast(tf.floor(orig_width * small_scale_factor), tf.int32)
      small_size = tf.stack([small_height, small_width])
      new_size = tf.cond(
          tf.cast(tf.reduce_max(large_size), tf.float32) > max_size,
          lambda: small_size,
          lambda: large_size)
    # Ensure that both output sides are multiples of factor plus one.
    if factor is not None:
      new_size += (factor - (new_size - 1) % factor) % factor
    if not keep_aspect_ratio:
      # If not keep the aspect ratio, we resize everything to max_size, allowing
      # us to do pre-processing without extra padding.
      new_size = [tf.reduce_max(new_size), tf.reduce_max(new_size)]
    new_tensor_list.append(tf.image.resize(
        image, new_size, method=method, align_corners=align_corners))
    if label is not None:
      if label_layout_is_chw:
        # Input label has shape [channel, height, width]; move channels into
        # a trailing singleton dimension for the resize op, then restore.
        resized_label = tf.expand_dims(label, 3)
        resized_label = tf.image.resize(
            resized_label,
            new_size,
            method=get_label_resize_method(label),
            align_corners=align_corners)
        resized_label = tf.squeeze(resized_label, 3)
      else:
        # Input label has shape [height, width, channel].
        resized_label = tf.image.resize(
            label,
            new_size,
            method=get_label_resize_method(label),
            align_corners=align_corners)
      new_tensor_list.append(resized_label)
    else:
      # Keep the output list shape stable: [resized_image, None].
      new_tensor_list.append(None)
    return new_tensor_list
| |
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
constraint_sets.py
~~~~~~~~~~~~~~~~~~
loc_techs, loc_carriers, and loc_tech_carriers subsets used per constraint, to
reduce constraint complexity
"""
from calliope.core.preprocess.util import constraint_exists
import numpy as np
def generate_constraint_sets(model_run):
"""
Generate loc-tech sets for a given pre-processed ``model_run``
Parameters
----------
model_run : AttrDict
"""
sets = model_run.sets
## From here on, everything is a `key=value` pair within a dictionary
constraint_sets = dict()
# energy_balance.py
constraint_sets['loc_carriers_system_balance_constraint'] = sets.loc_carriers
constraint_sets['loc_techs_balance_supply_constraint'] = sets.loc_techs_finite_resource_supply
constraint_sets['loc_techs_balance_demand_constraint'] = sets.loc_techs_finite_resource_demand
constraint_sets['loc_techs_resource_availability_supply_plus_constraint'] = sets.loc_techs_finite_resource_supply_plus
constraint_sets['loc_techs_balance_transmission_constraint'] = sets.loc_techs_transmission
constraint_sets['loc_techs_balance_supply_plus_constraint'] = sets.loc_techs_supply_plus
constraint_sets['loc_techs_balance_storage_constraint'] = sets.loc_techs_storage
if model_run.run.cyclic_storage is True:
constraint_sets['loc_techs_storage_initial_constraint'] = [
i for i in sets.loc_techs_store
if constraint_exists(model_run, i, 'constraints.storage_initial') is not None
]
constraint_sets['carriers_reserve_margin_constraint'] = [
i for i in sets.carriers
if i in model_run.model.get_key('reserve_margin', {}).keys()
]
# clustering-specific balance constraints
if (model_run.model.get_key('time.function', None) == 'apply_clustering' and
model_run.model.get_key('time.function_options.storage_inter_cluster', True)):
set_name = 'loc_techs_balance_storage_inter_cluster_constraint'
constraint_sets[set_name] = sets.loc_techs_store
# costs.py
constraint_sets['loc_techs_cost_constraint'] = sets.loc_techs_cost
constraint_sets['loc_techs_cost_investment_constraint'] = sets.loc_techs_investment_cost
constraint_sets['loc_techs_cost_var_constraint'] = [
i for i in sets.loc_techs_om_cost
if i not in sets.loc_techs_conversion_plus + sets.loc_techs_conversion
]
# export.py
constraint_sets['loc_carriers_update_system_balance_constraint'] = [
i for i in sets.loc_carriers if sets.loc_techs_export
and any(['{0}::{2}'.format(*j.split('::')) == i
for j in sets.loc_tech_carriers_export])
]
constraint_sets['loc_tech_carriers_export_balance_constraint'] = (
sets.loc_tech_carriers_export
)
constraint_sets['loc_techs_update_costs_var_constraint'] = [
i for i in sets.loc_techs_om_cost if i in sets.loc_techs_export
]
constraint_sets['loc_tech_carriers_export_max_constraint'] = [
i for i in sets.loc_tech_carriers_export
if constraint_exists(
model_run, i.rsplit('::', 1)[0], 'constraints.export_cap'
) is not None
]
# capacity.py
constraint_sets['loc_techs_storage_capacity_constraint'] = [
i for i in sets.loc_techs_store if i not in sets.loc_techs_milp
]
constraint_sets['loc_techs_energy_capacity_storage_constraint'] = [
i for i in sets.loc_techs_store
if constraint_exists(model_run, i, 'constraints.charge_rate')
]
constraint_sets['loc_techs_resource_capacity_constraint'] = [
i for i in sets.loc_techs_finite_resource_supply_plus
if any([
constraint_exists(model_run, i, 'constraints.resource_cap_equals'),
constraint_exists(model_run, i, 'constraints.resource_cap_max'),
constraint_exists(model_run, i, 'constraints.resource_cap_min')
])
]
constraint_sets['loc_techs_resource_capacity_equals_energy_capacity_constraint'] = [
i for i in sets.loc_techs_finite_resource_supply_plus
if constraint_exists(model_run, i, 'constraints.resource_cap_equals_energy_cap')
]
constraint_sets['loc_techs_resource_area_constraint'] = sets.loc_techs_area
constraint_sets['loc_techs_resource_area_per_energy_capacity_constraint'] = [
i for i in sets.loc_techs_area
if constraint_exists(model_run, i, 'constraints.resource_area_per_energy_cap')
is not None
]
constraint_sets['locs_resource_area_capacity_per_loc_constraint'] = [
i for i in sets.locs
if model_run.locations[i].get_key('available_area', None) is not None
and sets.loc_techs_area
]
constraint_sets['loc_techs_energy_capacity_constraint'] = [
i for i in sets.loc_techs
if i not in sets.loc_techs_milp + sets.loc_techs_purchase
]
constraint_sets['techs_energy_capacity_systemwide_constraint'] = [
i for i in sets.techs
if model_run.get_key('techs.{}.constraints.energy_cap_max_systemwide'.format(i), None)
or model_run.get_key('techs.{}.constraints.energy_cap_equals_systemwide'.format(i), None)
]
# dispatch.py
constraint_sets['loc_tech_carriers_carrier_production_max_constraint'] = [
i for i in sets.loc_tech_carriers_prod
if i not in sets.loc_tech_carriers_conversion_plus
and i.rsplit('::', 1)[0] not in sets.loc_techs_milp
]
constraint_sets['loc_tech_carriers_carrier_production_min_constraint'] = [
i for i in sets.loc_tech_carriers_prod
if i not in sets.loc_tech_carriers_conversion_plus
and constraint_exists(model_run, i.rsplit('::', 1)[0], 'constraints.energy_cap_min_use')
and i.rsplit('::', 1)[0] not in sets.loc_techs_milp
]
constraint_sets['loc_tech_carriers_carrier_consumption_max_constraint'] = [
i for i in sets.loc_tech_carriers_con
if i.rsplit('::', 1)[0] in sets.loc_techs_demand +
sets.loc_techs_storage + sets.loc_techs_transmission
and i.rsplit('::', 1)[0] not in sets.loc_techs_milp
]
constraint_sets['loc_techs_resource_max_constraint'] = sets.loc_techs_supply_plus
constraint_sets['loc_tech_carriers_ramping_constraint'] = [
i for i in sets.loc_tech_carriers_prod
if i.rsplit('::', 1)[0] in sets.loc_techs_ramping
]
# clustering-specific dispatch constraints
if (model_run.model.get_key('time.function', None) == 'apply_clustering' and
model_run.model.get_key('time.function_options.storage_inter_cluster', True)):
constraint_sets['loc_techs_storage_intra_max_constraint'] = sets.loc_techs_store
constraint_sets['loc_techs_storage_intra_min_constraint'] = sets.loc_techs_store
constraint_sets['loc_techs_storage_inter_max_constraint'] = sets.loc_techs_store
constraint_sets['loc_techs_storage_inter_min_constraint'] = sets.loc_techs_store
else:
constraint_sets['loc_techs_storage_max_constraint'] = sets.loc_techs_store
# milp.py
constraint_sets['loc_techs_unit_commitment_constraint'] = sets.loc_techs_milp
constraint_sets['loc_techs_unit_capacity_constraint'] = sets.loc_techs_milp
constraint_sets['loc_tech_carriers_carrier_production_max_milp_constraint'] = [
i for i in sets.loc_tech_carriers_prod
if i not in sets.loc_tech_carriers_conversion_plus
and i.rsplit('::', 1)[0] in sets.loc_techs_milp
]
constraint_sets['loc_techs_carrier_production_max_conversion_plus_milp_constraint'] = [
i for i in sets.loc_techs_conversion_plus
if i in sets.loc_techs_milp
]
constraint_sets['loc_tech_carriers_carrier_production_min_milp_constraint'] = [
i for i in sets.loc_tech_carriers_prod
if i not in sets.loc_tech_carriers_conversion_plus
and constraint_exists(model_run, i.rsplit('::', 1)[0], 'constraints.energy_cap_min_use')
and i.rsplit('::', 1)[0] in sets.loc_techs_milp
]
constraint_sets['loc_techs_carrier_production_min_conversion_plus_milp_constraint'] = [
i for i in sets.loc_techs_conversion_plus
if constraint_exists(model_run, i, 'constraints.energy_cap_min_use')
and i in sets.loc_techs_milp
]
constraint_sets['loc_tech_carriers_carrier_consumption_max_milp_constraint'] = [
i for i in sets.loc_tech_carriers_con
if i.rsplit('::', 1)[0] in sets.loc_techs_demand +
sets.loc_techs_storage + sets.loc_techs_transmission
and i.rsplit('::', 1)[0] in sets.loc_techs_milp
]
constraint_sets['loc_techs_energy_capacity_units_constraint'] = [
i for i in sets.loc_techs_milp
if constraint_exists(model_run, i, 'constraints.energy_cap_per_unit')
is not None
]
constraint_sets['loc_techs_storage_capacity_units_constraint'] = [
i for i in sets.loc_techs_milp if i in sets.loc_techs_store
]
constraint_sets['loc_techs_energy_capacity_max_purchase_constraint'] = [
i for i in sets.loc_techs_purchase
if (constraint_exists(model_run, i, 'constraints.energy_cap_equals') is not None
or (constraint_exists(model_run, i, 'constraints.energy_cap_max') is not None
and constraint_exists(model_run, i, 'constraints.energy_cap_max') != np.inf))
]
constraint_sets['loc_techs_energy_capacity_min_purchase_constraint'] = [
i for i in sets.loc_techs_purchase
if (not constraint_exists(model_run, i, 'constraints.energy_cap_equals')
and constraint_exists(model_run, i, 'constraints.energy_cap_min'))
]
constraint_sets['loc_techs_storage_capacity_max_purchase_constraint'] = [
i for i in set(sets.loc_techs_purchase).intersection(sets.loc_techs_store)
if (constraint_exists(model_run, i, 'constraints.storage_cap_equals') is not None
or (constraint_exists(model_run, i, 'constraints.storage_cap_max') is not None
and constraint_exists(model_run, i, 'constraints.storage_cap_max') != np.inf))
]
constraint_sets['loc_techs_storage_capacity_min_purchase_constraint'] = [
i for i in set(sets.loc_techs_purchase).intersection(sets.loc_techs_store)
if (not constraint_exists(model_run, i, 'constraints.storage_cap_equals')
and constraint_exists(model_run, i, 'constraints.storage_cap_min'))
]
constraint_sets['loc_techs_update_costs_investment_units_constraint'] = [
i for i in sets.loc_techs_milp
if i in sets.loc_techs_investment_cost and
any(constraint_exists(model_run, i, 'costs.{}.purchase'.format(j))
for j in model_run.sets.costs)
]
# loc_techs_purchase technologies only exist because they have defined a purchase cost
constraint_sets['loc_techs_update_costs_investment_purchase_constraint'] = sets.loc_techs_purchase
constraint_sets['techs_unit_capacity_systemwide_constraint'] = [
i for i in sets.techs
if model_run.get_key('techs.{}.constraints.units_max_systemwide'.format(i), None)
or model_run.get_key('techs.{}.constraints.units_equals_systemwide'.format(i), None)
]
# conversion.py
constraint_sets['loc_techs_balance_conversion_constraint'] = sets.loc_techs_conversion
constraint_sets['loc_techs_cost_var_conversion_constraint'] = sets.loc_techs_om_cost_conversion
# conversion_plus.py
constraint_sets['loc_techs_balance_conversion_plus_primary_constraint'] = sets.loc_techs_conversion_plus
constraint_sets['loc_techs_carrier_production_max_conversion_plus_constraint'] = [
i for i in sets.loc_techs_conversion_plus
if i not in sets.loc_techs_milp
]
constraint_sets['loc_techs_carrier_production_min_conversion_plus_constraint'] = [
i for i in sets.loc_techs_conversion_plus
if constraint_exists(model_run, i, 'constraints.energy_cap_min_use')
and i not in sets.loc_techs_milp
]
constraint_sets['loc_techs_cost_var_conversion_plus_constraint'] = sets.loc_techs_om_cost_conversion_plus
constraint_sets['loc_techs_balance_conversion_plus_in_2_constraint'] = sets.loc_techs_in_2
constraint_sets['loc_techs_balance_conversion_plus_in_3_constraint'] = sets.loc_techs_in_3
constraint_sets['loc_techs_balance_conversion_plus_out_2_constraint'] = sets.loc_techs_out_2
constraint_sets['loc_techs_balance_conversion_plus_out_3_constraint'] = sets.loc_techs_out_3
# network.py
constraint_sets['loc_techs_symmetric_transmission_constraint'] = sets.loc_techs_transmission
# policy.py
constraint_sets['techlists_group_share_energy_cap_min_constraint'] = [
i for i in sets.techlists
if 'energy_cap_min' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
]
constraint_sets['techlists_group_share_energy_cap_max_constraint'] = [
i for i in sets.techlists
if 'energy_cap_max' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
]
constraint_sets['techlists_group_share_energy_cap_equals_constraint'] = [
i for i in sets.techlists
if 'energy_cap_equals' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
]
constraint_sets['techlists_carrier_group_share_carrier_prod_min_constraint'] = [
i + '::' + carrier
for i in sets.techlists
if 'carrier_prod_min' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
for carrier in sets.carriers
if carrier in model_run.model.get_key('group_share.{}.carrier_prod_min'.format(i), {}).keys()
]
constraint_sets['techlists_carrier_group_share_carrier_prod_max_constraint'] = [
i + '::' + carrier
for i in sets.techlists
if 'carrier_prod_max' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
for carrier in sets.carriers
if carrier in model_run.model.get_key('group_share.{}.carrier_prod_max'.format(i), {}).keys()
]
constraint_sets['techlists_carrier_group_share_carrier_prod_equals_constraint'] = [
i + '::' + carrier
for i in sets.techlists
if 'carrier_prod_equals' in model_run.model.get_key('group_share.{}'.format(i), {}).keys()
for carrier in sets.carriers
if carrier in model_run.model.get_key('group_share.{}.carrier_prod_equals'.format(i), {}).keys()
]
return constraint_sets
| |
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Salvatore Orlando, VMware
import mock
import os
from quantum.openstack.common import jsonutils as json
import quantum.plugins.nicira as nvp_plugin
from quantum.plugins.nicira import nvp_cluster
from quantum.plugins.nicira import NvpApiClient
from quantum.plugins.nicira import nvplib
from quantum.tests import base
from quantum.tests.unit.nicira import fake_nvpapiclient
from quantum.tests.unit import test_api_v2
NICIRA_PKG_PATH = nvp_plugin.__name__
_uuid = test_api_v2._uuid
class NvplibTestCase(base.BaseTestCase):
    """Base test case wiring nvplib to an in-memory fake NVP API client.

    The real NVPApiHelper class is patched out; every request issued
    through the cluster's api_client is routed to
    fake_nvpapiclient.FakeClient, which is initialized with the data
    under the local 'etc' directory.
    """

    def setUp(self):
        # mock nvp api client
        etc_path = os.path.join(os.path.dirname(__file__), 'etc')
        self.fc = fake_nvpapiclient.FakeClient(etc_path)
        self.mock_nvpapi = mock.patch('%s.NvpApiClient.NVPApiHelper'
                                      % NICIRA_PKG_PATH, autospec=True)
        instance = self.mock_nvpapi.start()
        instance.return_value.login.return_value = "the_cookie"

        def _fake_request(*args, **kwargs):
            # Forward every API call to the fake client.
            return self.fc.fake_request(*args, **kwargs)

        instance.return_value.request.side_effect = _fake_request
        self.fake_cluster = nvp_cluster.NVPCluster(
            name='fake-cluster', nvp_controllers=['1.1.1.1:999'],
            default_tz_uuid=_uuid(), nvp_user='foo', nvp_password='bar')
        self.fake_cluster.api_client = NvpApiClient.NVPApiHelper(
            ('1.1.1.1', '999', True),
            self.fake_cluster.nvp_user, self.fake_cluster.nvp_password,
            self.fake_cluster.req_timeout, self.fake_cluster.http_timeout,
            self.fake_cluster.retries, self.fake_cluster.redirects)
        super(NvplibTestCase, self).setUp()
        # Reset fake-client state and undo the patch after each test.
        self.addCleanup(self.fc.reset_all)
        self.addCleanup(self.mock_nvpapi.stop)
class TestNvplibNatRules(NvplibTestCase):
    """Tests for logical-router DNAT rule creation via nvplib."""

    def _test_create_lrouter_dnat_rule(self, func):
        """Create a router and a DNAT rule with *func*.

        Returns the rule as read back from the (fake) backend via a GET.
        """
        tenant_id = 'pippo'
        lrouter = nvplib.create_lrouter(self.fake_cluster,
                                        tenant_id,
                                        'fake_router',
                                        '192.168.0.1')
        nat_rule = func(self.fake_cluster, lrouter['uuid'], '10.0.0.99',
                        match_criteria={'destination_ip_addresses':
                                        '192.168.0.5'})
        uri = nvplib._build_uri_path(nvplib.LROUTERNAT_RESOURCE,
                                     nat_rule['uuid'],
                                     lrouter['uuid'])
        return json.loads(nvplib.do_single_request("GET", uri,
                                                   cluster=self.fake_cluster))

    def test_create_lrouter_dnat_rule_v2(self):
        resp_obj = self._test_create_lrouter_dnat_rule(
            nvplib.create_lrouter_dnat_rule_v2)
        self.assertEqual('DestinationNatRule', resp_obj['type'])
        self.assertEqual('192.168.0.5',
                         resp_obj['match']['destination_ip_addresses'])

    def test_create_lrouter_dnat_rule_v3(self):
        # NOTE(review): this deliberately reuses the v2 helper — see the
        # TODO below; the fake client cannot yet emulate other API versions.
        resp_obj = self._test_create_lrouter_dnat_rule(
            nvplib.create_lrouter_dnat_rule_v2)
        # TODO(salvatore-orlando): Extend FakeNVPApiClient to deal with
        # different versions of NVP API
        self.assertEqual('DestinationNatRule', resp_obj['type'])
        self.assertEqual('192.168.0.5',
                         resp_obj['match']['destination_ip_addresses'])
class NvplibL2GatewayTestCase(NvplibTestCase):
    """CRUD tests for L2 gateway services in nvplib."""

    def _create_gw_service(self, node_uuid, display_name):
        """Create an L2 gateway service with a single gateway node."""
        return nvplib.create_l2_gw_service(self.fake_cluster,
                                           'fake-tenant',
                                           display_name,
                                           [{'id': node_uuid,
                                             'interface_name': 'xxx'}])

    def test_create_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        response = self._create_gw_service(node_uuid, display_name)
        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
        self.assertEqual(response.get('display_name'), display_name)
        gateways = response.get('gateways', [])
        self.assertEqual(len(gateways), 1)
        self.assertEqual(gateways[0]['type'], 'L2Gateway')
        self.assertEqual(gateways[0]['device_id'], 'xxx')
        self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)

    def test_update_l2_gw_service(self):
        # Only the display name is updated; the uuid stays the same.
        display_name = 'fake-gateway'
        new_display_name = 'still-fake-gateway'
        node_uuid = _uuid()
        res1 = self._create_gw_service(node_uuid, display_name)
        gw_id = res1['uuid']
        res2 = nvplib.update_l2_gw_service(self.fake_cluster, gw_id,
                                           new_display_name)
        self.assertEqual(res2['display_name'], new_display_name)

    def test_get_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
        response = nvplib.get_l2_gw_service(self.fake_cluster, gw_id)
        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
        self.assertEqual(response.get('display_name'), display_name)
        self.assertEqual(response.get('uuid'), gw_id)

    def test_list_l2_gw_service(self):
        gw_ids = []
        for name in ('fake-1', 'fake-2'):
            gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
        results = nvplib.get_l2_gw_services(self.fake_cluster)
        self.assertEqual(len(results), 2)
        self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))

    def test_delete_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
        nvplib.delete_l2_gw_service(self.fake_cluster, gw_id)
        results = nvplib.get_l2_gw_services(self.fake_cluster)
        self.assertEqual(len(results), 0)

    def test_plug_l2_gw_port_attachment(self):
        # Create a switch and a port, plug the port into a gateway service,
        # then verify the attachment type with a direct GET on the port.
        tenant_id = 'pippo'
        node_uuid = _uuid()
        lswitch = nvplib.create_lswitch(self.fake_cluster, tenant_id,
                                        'fake-switch')
        gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
        lport = nvplib.create_lport(self.fake_cluster,
                                    lswitch['uuid'],
                                    tenant_id,
                                    _uuid(),
                                    'fake-gw-port',
                                    gw_id,
                                    True)
        json.loads(nvplib.plug_l2_gw_service(self.fake_cluster,
                                             lswitch['uuid'],
                                             lport['uuid'],
                                             gw_id))
        uri = nvplib._build_uri_path(nvplib.LSWITCHPORT_RESOURCE,
                                     lport['uuid'],
                                     lswitch['uuid'],
                                     is_attachment=True)
        resp_obj = json.loads(
            nvplib.do_single_request("GET", uri,
                                     cluster=self.fake_cluster))
        self.assertIn('LogicalPortAttachment', resp_obj)
        self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
                         'L2GatewayAttachment')
class TestNvpLibLogicalPorts(NvplibTestCase):
    """Tests for logical-port lookup by Quantum tag in nvplib."""

    def test_get_port_by_tag(self):
        tenant = 'pippo'
        port_id = 'whatever'
        switch = nvplib.create_lswitch(self.fake_cluster, tenant,
                                       'fake-switch')
        created = nvplib.create_lport(self.fake_cluster, switch['uuid'],
                                      tenant, port_id,
                                      'name', 'device_id', True)
        # The port just created must be found again through its tag.
        found = nvplib.get_port_by_quantum_tag(self.fake_cluster,
                                               switch['uuid'],
                                               port_id)
        self.assertIsNotNone(found)
        self.assertEqual(created['uuid'], found['uuid'])

    def test_get_port_by_tag_not_found_returns_None(self):
        tenant = 'pippo'
        port_id = 'whatever'
        switch = nvplib.create_lswitch(self.fake_cluster, tenant,
                                       'fake-switch')
        # No port was created on the switch: lookup must return None.
        found = nvplib.get_port_by_quantum_tag(self.fake_cluster,
                                               switch['uuid'],
                                               port_id)
        self.assertIsNone(found)
| |
# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""The AstroidBuilder makes astroid from living object and / or from _ast
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
from __future__ import with_statement
import _ast
import os
import sys
import textwrap
from astroid import bases
from astroid import exceptions
from astroid import manager
from astroid import modutils
from astroid import raw_building
from astroid import rebuilder
from astroid import util
def _parse(string):
return compile(string, "<string>", 'exec', _ast.PyCF_ONLY_AST)
if sys.version_info >= (3, 0):
    # pylint: disable=no-name-in-module; We don't understand flows yet.
    from tokenize import detect_encoding

    def open_source_file(filename):
        """Open *filename* for parsing; return (stream, encoding, data).

        The encoding is sniffed with tokenize.detect_encoding() on the raw
        bytes, then the file is reopened in text mode with that encoding.
        """
        with open(filename, 'rb') as byte_stream:
            encoding = detect_encoding(byte_stream.readline)[0]
        stream = open(filename, 'r', newline=None, encoding=encoding)
        try:
            data = stream.read()
        except UnicodeError: # wrong encoding
            # detect_encoding returns utf-8 if no encoding specified
            msg = 'Wrong (%s) or no encoding specified' % encoding
            raise exceptions.AstroidBuildingException(msg)
        return stream, encoding, data
else:
    # Python 2 branch: guess the encoding by hand from a BOM or a
    # PEP 263 coding declaration in the first two lines.
    import re
    _ENCODING_RGX = re.compile(r"\s*#+.*coding[:=]\s*([-\w.]+)")

    def _guess_encoding(string):
        """get encoding from a python file as string or return None if not found"""
        # check for UTF-8 byte-order mark
        if string.startswith('\xef\xbb\xbf'):
            return 'UTF-8'
        for line in string.split('\n', 2)[:2]:
            # check for encoding declaration
            match = _ENCODING_RGX.match(line)
            if match is not None:
                return match.group(1)

    def open_source_file(filename):
        """get data for parsing a file"""
        # 'U' enables universal-newline mode (Python 2 only code path).
        stream = open(filename, 'U')
        data = stream.read()
        encoding = _guess_encoding(data)
        return stream, encoding, data
MANAGER = manager.AstroidManager()
class AstroidBuilder(raw_building.InspectBuilder):
    """Class for building an astroid tree from source code or from a live module.

    The param *manager* specifies the manager class which should be used.
    If no manager is given, then the default one will be used. The
    param *apply_transforms* determines if the transforms should be
    applied after the tree was built from source or from a live object,
    by default being True.
    """

    def __init__(self, manager=None, apply_transforms=True):
        super(AstroidBuilder, self).__init__()
        self._manager = manager or MANAGER
        self._apply_transforms = apply_transforms

    def module_build(self, module, modname=None):
        """Build an astroid from a living module instance."""
        node = None
        path = getattr(module, '__file__', None)
        if path is not None:
            # Prefer rebuilding from the .py source when the module was
            # loaded from a source/compiled file and the source exists.
            path_, ext = os.path.splitext(modutils._path_from_filename(path))
            if ext in ('.py', '.pyc', '.pyo') and os.path.exists(path_ + '.py'):
                node = self.file_build(path_ + '.py', modname)
        if node is None:
            # this is a built-in module
            # get a partial representation by introspection
            node = self.inspect_build(module, modname=modname, path=path)
            if self._apply_transforms:
                # We have to handle transformation by ourselves since the
                # rebuilder isn't called for builtin nodes
                node = self._manager.visit_transforms(node)
        return node

    def file_build(self, path, modname=None):
        """Build astroid from a source code file (i.e. from an ast)

        *path* is expected to be a python source file

        :raises exceptions.AstroidBuildingException: when the file cannot
            be read or its encoding is wrong/unknown
        """
        try:
            stream, encoding, data = open_source_file(path)
        except IOError as exc:
            msg = 'Unable to load file %r (%s)' % (path, exc)
            raise exceptions.AstroidBuildingException(msg)
        except SyntaxError as exc: # py3k encoding specification error
            raise exceptions.AstroidBuildingException(exc)
        except LookupError as exc: # unknown encoding
            raise exceptions.AstroidBuildingException(exc)
        with stream:
            # get module name if necessary
            if modname is None:
                try:
                    modname = '.'.join(modutils.modpath_from_file(path))
                except ImportError:
                    modname = os.path.splitext(os.path.basename(path))[0]
            # build astroid representation
            module = self._data_build(data, modname, path)
            return self._post_build(module, encoding)

    def string_build(self, data, modname='', path=None):
        """Build astroid from source code string."""
        module = self._data_build(data, modname, path)
        module.source_code = data.encode('utf-8')
        return self._post_build(module, 'utf-8')

    def _post_build(self, module, encoding):
        """Handles encoding and delayed nodes after a module has been built"""
        module.file_encoding = encoding
        self._manager.cache_module(module)
        # post tree building steps after we stored the module in the cache:
        for from_node in module._import_from_nodes:
            if from_node.modname == '__future__':
                # record __future__ imports on the module itself
                for symbol, _ in from_node.names:
                    module._future_imports.add(symbol)
            self.add_from_names_to_locals(from_node)
        # handle delayed assattr nodes
        for delayed in module._delayed_assattr:
            self.delayed_assattr(delayed)
        # Visit the transforms
        if self._apply_transforms:
            module = self._manager.visit_transforms(module)
        return module

    def _data_build(self, data, modname, path):
        """Build tree node from data and add some informations"""
        try:
            # trailing newline avoids parse errors on files without one
            node = _parse(data + '\n')
        except (TypeError, ValueError, SyntaxError) as exc:
            raise exceptions.AstroidBuildingException(exc)
        if path is not None:
            node_file = os.path.abspath(path)
        else:
            node_file = '<?>'
        if modname.endswith('.__init__'):
            # strip the trailing '.__init__' (9 characters)
            modname = modname[:-9]
            package = True
        else:
            # old-style and/or chain: True when the path mentions __init__.py
            package = path and path.find('__init__.py') > -1 or False
        builder = rebuilder.TreeRebuilder(self._manager)
        module = builder.visit_module(node, modname, node_file, package)
        # stash rebuilder bookkeeping for _post_build()
        module._import_from_nodes = builder._import_from_nodes
        module._delayed_assattr = builder._delayed_assattr
        return module

    def add_from_names_to_locals(self, node):
        """Store imported names to the locals

        Resort the locals if coming from a delayed node
        """
        _key_func = lambda node: node.fromlineno

        def sort_locals(my_list):
            my_list.sort(key=_key_func)

        for (name, asname) in node.names:
            if name == '*':
                # wildcard import: bind every public name of the module
                try:
                    imported = node.do_import_module()
                except exceptions.InferenceError:
                    continue
                for name in imported._public_names():
                    node.parent.set_local(name, node)
                    sort_locals(node.parent.scope()._locals[name])
            else:
                node.parent.set_local(asname or name, node)
                sort_locals(node.parent.scope()._locals[asname or name])

    def delayed_assattr(self, node):
        """Visit a AssAttr node

        This adds name to locals and handle members definition.
        """
        try:
            frame = node.frame()
            for inferred in node.expr.infer():
                if inferred is util.YES:
                    continue
                try:
                    if inferred.__class__ is bases.Instance:
                        inferred = inferred._proxied
                        iattrs = inferred._instance_attrs
                    elif isinstance(inferred, bases.Instance):
                        # Const, Tuple, ... we may be wrong, may be not, but
                        # anyway we don't want to pollute builtin's namespace
                        continue
                    elif inferred.is_function:
                        iattrs = inferred._instance_attrs
                    else:
                        iattrs = inferred._locals
                except AttributeError:
                    # XXX log error
                    continue
                values = iattrs.setdefault(node.attrname, [])
                if node in values:
                    continue
                # get assign in __init__ first XXX useful ?
                if (frame.name == '__init__' and values and
                        not values[0].frame().name == '__init__'):
                    values.insert(0, node)
                else:
                    values.append(node)
        except exceptions.InferenceError:
            pass
def parse(code, module_name='', path=None, apply_transforms=True):
    """Build an astroid AST from a source string.

    :param str code: The code for the module.
    :param str module_name: The name for the module, if any
    :param str path: The path for the module
    :param bool apply_transforms:
        Whether the registered transforms are applied to the built tree.
    """
    dedented = textwrap.dedent(code)
    return AstroidBuilder(
        manager=MANAGER, apply_transforms=apply_transforms,
    ).string_build(dedented, modname=module_name, path=path)
| |
import unittest
from collections import namedtuple
import contextlib
import itertools
import math
import random
import sys
import numpy as np
from numba.core.compiler import compile_isolated, Flags
from numba import jit
from numba.core import types
import unittest
from numba.tests.support import (TestCase, enable_pyobj_flags, MemoryLeakMixin,
tag, compile_function)
Point = namedtuple('Point', ('a', 'b'))
def _build_set_literal_usecase(code, args):
    """Fill *code*'s %(initializer)s slot with the repr of each arg and compile it."""
    initializer = ', '.join(map(repr, args))
    filled = code % {'initializer': initializer}
    return compile_function('build_set', filled, globals())

def set_literal_return_usecase(args):
    """Build a function returning a set literal made of *args*."""
    template = """if 1:
    def build_set():
        return {%(initializer)s}
    """
    return _build_set_literal_usecase(template, args)

def set_literal_convert_usecase(args):
    """Build a function converting a set literal of *args* to a list."""
    template = """if 1:
    def build_set():
        my_set = {%(initializer)s}
        return list(my_set)
    """
    return _build_set_literal_usecase(template, args)
def empty_constructor_usecase():
    """Exercise set() with no arguments followed by add()."""
    made = set()
    made.add(1)
    return len(made)

def constructor_usecase(arg):
    """Exercise set(iterable)."""
    made = set(arg)
    return len(made)

def iterator_usecase(arg):
    """Exercise iteration over a set built from *arg*."""
    made = set(arg)
    seen = []
    for item in made:
        seen.append(item)
    return seen

def update_usecase(a, b, c):
    """Exercise set.update() with three successive iterables."""
    acc = set()
    acc.update(a)
    acc.update(b)
    acc.update(c)
    return list(acc)
def bool_usecase(arg):
    """Exercise truth-testing of a set."""
    # Remove one element to allow for empty sets.
    trimmed = set(arg[1:])
    return bool(trimmed)

def remove_usecase(a, b):
    """Exercise set.remove() for every element of *b*."""
    base = set(a)
    for item in b:
        base.remove(item)
    return list(base)

def discard_usecase(a, b):
    """Exercise set.discard() for every element of *b*."""
    base = set(a)
    for item in b:
        base.discard(item)
    return list(base)

def add_discard_usecase(a, u, v):
    """Exercise many alternating add()/discard() calls on one set."""
    base = set(a)
    for _ in range(1000):
        base.add(u)
        base.discard(v)
    return list(base)

def pop_usecase(a):
    """Exercise set.pop() until the set is exhausted."""
    base = set(a)
    popped = []
    while len(base) > 0:
        popped.append(base.pop())
    return popped

def contains_usecase(a, b):
    """Exercise the `in` operator for every element of *b*."""
    base = set(a)
    flags = []
    for item in b:
        flags.append(item in base)
    return flags
def difference_update_usecase(a, b):
    """Exercise set.difference_update()."""
    base = set(a)
    base.difference_update(set(b))
    return list(base)

def intersection_update_usecase(a, b):
    """Exercise set.intersection_update()."""
    base = set(a)
    base.intersection_update(set(b))
    return list(base)

def symmetric_difference_update_usecase(a, b):
    """Exercise set.symmetric_difference_update()."""
    base = set(a)
    base.symmetric_difference_update(set(b))
    return list(base)

def isdisjoint_usecase(a, b):
    """Exercise set.isdisjoint()."""
    first, second = set(a), set(b)
    return first.isdisjoint(second)

def issubset_usecase(a, b):
    """Exercise set.issubset()."""
    first, second = set(a), set(b)
    return first.issubset(second)

def issuperset_usecase(a, b):
    """Exercise set.issuperset()."""
    first, second = set(a), set(b)
    return first.issuperset(second)
def clear_usecase(a):
    """Exercise set.clear(); return the (size, contents) afterwards."""
    base = set(a)
    base.clear()
    return len(base), list(base)

def copy_usecase(a):
    """Exercise set.copy(); mutating the source must not affect the copy."""
    base = set(a)
    dup = base.copy()
    base.pop()
    return len(dup), list(dup)

def copy_usecase_empty(a):
    """Exercise copying an emptied set."""
    base = set(a)
    base.clear()
    dup = base.copy()
    base.add(42)
    return len(dup), list(dup)

def copy_usecase_deleted(a, b):
    """Exercise copying a set that contains deleted entries."""
    base = set(a)
    base.remove(b)
    dup = base.copy()
    base.pop()
    return len(dup), list(dup)
def difference_usecase(a, b):
    """Exercise set.difference()."""
    left = set(a)
    result = left.difference(set(b))
    return list(result)

def intersection_usecase(a, b):
    """Exercise set.intersection()."""
    left = set(a)
    result = left.intersection(set(b))
    return list(result)

def symmetric_difference_usecase(a, b):
    """Exercise set.symmetric_difference()."""
    left = set(a)
    result = left.symmetric_difference(set(b))
    return list(result)

def union_usecase(a, b):
    """Exercise set.union()."""
    left = set(a)
    result = left.union(set(b))
    return list(result)

def set_return_usecase(a):
    """Return the set itself (exercises boxing of a set return value)."""
    made = set(a)
    return made
def make_operator_usecase(op):
    """Build a usecase applying binary set operator *op* and listing the result."""
    template = """if 1:
    def operator_usecase(a, b):
        s = set(a) %(op)s set(b)
        return list(s)
    """
    src = template % {'op': op}
    return compile_function('operator_usecase', src, globals())

def make_inplace_operator_usecase(op):
    """Build a usecase applying in-place set operator *op* through an alias."""
    template = """if 1:
    def inplace_operator_usecase(a, b):
        sa = set(a)
        sb = set(b)
        sc = sa
        sc %(op)s sb
        return list(sc), list(sa)
    """
    src = template % {'op': op}
    return compile_function('inplace_operator_usecase', src, globals())

def make_comparison_usecase(op):
    """Build a usecase comparing two sets with operator *op*."""
    template = """if 1:
    def comparison_usecase(a, b):
        return set(a) %(op)s set(b)
    """
    src = template % {'op': op}
    return compile_function('comparison_usecase', src, globals())
def noop(x):
    """Do nothing; only the unboxing of *x* matters."""
    pass

def unbox_usecase(x):
    """
    Expect a set of numbers
    """
    total = 0
    for item in x:
        total += item
    return total

def unbox_usecase2(x):
    """
    Expect a set of tuples
    """
    total = 0
    for item in x:
        total += len(item)
    return total

def unbox_usecase3(x):
    """
    Expect a (number, set of numbers) tuple.
    """
    start, members = x
    total = start
    for item in members:
        total += item
    return total

def unbox_usecase4(x):
    """
    Expect a (number, set of tuples) tuple.
    """
    start, members = x
    total = start
    for item in members:
        total += len(item)
    return total
def reflect_simple(sa, sb):
    """Mutate *sa* in place (add 42, merge *sb*) and report both sizes."""
    sa.add(42)
    sa.update(sb)
    return sa, len(sa), len(sb)

def reflect_conditional(sa, sb):
    """Possibly rebind *sa* to a fresh set before mutating it."""
    # `sa` may or may not actually reflect a Python set
    if len(sb) > 1:
        sa = set((11., 22., 33., 44.))
    sa.add(42.)
    sa.update(sb)
    # Combine with a non-reflected set (to check method typing)
    extra = set((55., 66.))
    sa.symmetric_difference_update(extra)
    return sa, len(sa), len(sb)

def reflect_exception(s):
    """Mutate *s*, then raise, so mutation-before-error can be observed."""
    s.add(42)
    raise ZeroDivisionError

def reflect_dual(sa, sb):
    """Pop from *sb* into *sa*; tell whether both names alias one set."""
    sa.add(sb.pop())
    return sa is sb

def unique_usecase(src):
    """Return the items of *src* in first-seen order, without duplicates."""
    observed = set()
    ordered = []
    for item in src:
        if item not in observed:
            observed.add(item)
            ordered.append(item)
    return ordered
class BaseTest(MemoryLeakMixin, TestCase):
    """Shared fixtures: a seeded RNG and helpers generating test datasets."""

    def setUp(self):
        super(BaseTest, self).setUp()
        # Fixed seed so the generated datasets are reproducible.
        self.rnd = random.Random(42)

    def _range(self, stop):
        # Subclasses override this to change the key type of generated data.
        return np.arange(int(stop))

    def _random_choice(self, seq, n):
        """
        Choose *n* possibly duplicate items from sequence.
        """
        l = [self.rnd.choice(list(seq)) for i in range(n)]
        if isinstance(seq, np.ndarray):
            return np.array(l, dtype=seq.dtype)
        else:
            return l

    def duplicates_array(self, n):
        """
        Get a 1d array with many duplicate values.
        """
        # Drawing n values out of ~sqrt(n) distinct ones forces duplicates.
        a = self._range(np.sqrt(n))
        return self._random_choice(a, n)

    def sparse_array(self, n):
        """
        Get a 1d array with values spread around.
        """
        # Note two calls to sparse_array() should generate reasonable overlap
        a = self._range(n ** 1.3)
        return self._random_choice(a, n)

    def _assert_equal_unordered(self, a, b):
        """Compare results while ignoring set iteration order (lists sorted)."""
        if isinstance(a, tuple):
            self.assertIsInstance(b, tuple)
            for u, v in zip(a, b):
                self._assert_equal_unordered(u, v)
        elif isinstance(a, list):
            self.assertIsInstance(b, list)
            self.assertPreciseEqual(sorted(a), sorted(b))
        else:
            self.assertPreciseEqual(a, b)

    def unordered_checker(self, pyfunc):
        """Return check(*args) asserting jitted and pure-Python results agree."""
        cfunc = jit(nopython=True)(pyfunc)
        def check(*args):
            expected = pyfunc(*args)
            got = cfunc(*args)
            self._assert_equal_unordered(expected, got)
        return check
class TestSetLiterals(BaseTest):
    """Tests for set literals, in object mode and nopython mode."""

    def test_build_set(self, flags=enable_pyobj_flags):
        pyfunc = set_literal_return_usecase((1, 2, 3, 2))
        self.run_nullary_func(pyfunc, flags=flags)

    def test_build_heterogeneous_set(self, flags=enable_pyobj_flags):
        pyfunc = set_literal_return_usecase((1, 2.0, 3j, 2))
        self.run_nullary_func(pyfunc, flags=flags)
        pyfunc = set_literal_return_usecase((2.0, 2))
        got, expected = self.run_nullary_func(pyfunc, flags=flags)
        # {2.0, 2} collapses to one element; its type must match CPython's.
        self.assertIs(type(got.pop()), type(expected.pop()))

    def test_build_set_nopython(self):
        arg = list(self.sparse_array(50))
        pyfunc = set_literal_convert_usecase(arg)
        cfunc = jit(nopython=True)(pyfunc)
        expected = pyfunc()
        got = cfunc()
        # sorted() because set iteration order may differ between impls
        self.assertPreciseEqual(sorted(expected), sorted(got))
class TestSets(BaseTest):
    """Exercise individual set operations under nopython compilation."""

    def test_constructor(self):
        pyfunc = empty_constructor_usecase
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), pyfunc())

        pyfunc = constructor_usecase
        cfunc = jit(nopython=True)(pyfunc)
        def check(arg):
            self.assertPreciseEqual(pyfunc(arg), cfunc(arg))

        check((1, 2, 3, 2, 7))
        check(self.duplicates_array(200))
        check(self.sparse_array(200))

    def test_set_return(self):
        # Set objects can be returned (boxed) from a nopython function.
        pyfunc = set_return_usecase
        cfunc = jit(nopython=True)(pyfunc)
        arg = (1, 2, 3, 2, 7)
        self.assertEqual(cfunc(arg), set(arg))

    def test_iterator(self):
        pyfunc = iterator_usecase
        check = self.unordered_checker(pyfunc)
        check((1, 2, 3, 2, 7))
        check(self.duplicates_array(200))
        check(self.sparse_array(200))

    def test_update(self):
        pyfunc = update_usecase
        check = self.unordered_checker(pyfunc)
        a, b, c = (1, 2, 4, 9), (2, 3, 5, 11, 42), (4, 5, 6, 42)
        check(a, b, c)
        a = self.sparse_array(50)
        b = self.duplicates_array(50)
        c = self.sparse_array(50)
        check(a, b, c)

    def test_bool(self):
        pyfunc = bool_usecase
        check = self.unordered_checker(pyfunc)
        check([1])
        check([1, 2])
        check([False, False])
        check([True, False])

    def test_remove(self):
        pyfunc = remove_usecase
        check = self.unordered_checker(pyfunc)
        a = (1, 2, 3, 5, 8, 42)
        b = (5, 2, 8)
        check(a, b)

    def test_remove_error(self):
        # References are leaked on exception
        self.disable_leak_check()
        pyfunc = remove_usecase
        cfunc = jit(nopython=True)(pyfunc)
        # remove() of a missing element must raise KeyError, as in Python.
        with self.assertRaises(KeyError) as raises:
            cfunc((1, 2, 3), (5, ))

    def test_discard(self):
        pyfunc = discard_usecase
        check = self.unordered_checker(pyfunc)
        a = (1, 2, 3, 5, 8, 42)
        b = (5, 2, 8)
        check(a, b)
        a = self.sparse_array(50)
        b = self.sparse_array(50)
        check(a, b)

    def test_add_discard(self):
        """
        Check that the insertion logic does not create an infinite lookup
        chain with deleted entries (insertion should happen at the first
        deleted entry, not at the free entry at the end of the chain).
        See issue #1913.
        """
        pyfunc = add_discard_usecase
        check = self.unordered_checker(pyfunc)
        check((1,), 5, 5)

    def test_pop(self):
        pyfunc = pop_usecase
        check = self.unordered_checker(pyfunc)
        check((2, 3, 55, 11, 8, 42))
        check(self.sparse_array(50))

    def test_contains(self):
        pyfunc = contains_usecase
        cfunc = jit(nopython=True)(pyfunc)
        def check(a, b):
            self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b))

        a = (1, 2, 3, 5, 42)
        b = (5, 2, 8, 3)
        check(a, b)

    def _test_xxx_update(self, pyfunc):
        """Common driver for the in-place binary update methods."""
        check = self.unordered_checker(pyfunc)
        a, b = (1, 2, 4, 11), (2, 3, 5, 11, 42)
        check(a, b)
        # Combinations of sizes, including empty operands.
        sizes = (0, 50, 500)
        for na, nb in itertools.product(sizes, sizes):
            a = self.sparse_array(na)
            b = self.sparse_array(nb)
            check(a, b)

    def test_difference_update(self):
        self._test_xxx_update(difference_update_usecase)

    def test_intersection_update(self):
        self._test_xxx_update(intersection_update_usecase)

    def test_symmetric_difference_update(self):
        self._test_xxx_update(symmetric_difference_update_usecase)

    def _test_comparator(self, pyfunc):
        """Common driver for the boolean set predicates (isdisjoint etc.)."""
        cfunc = jit(nopython=True)(pyfunc)
        def check(a, b):
            self.assertPreciseEqual(pyfunc(a, b), cfunc(a, b))

        a, b = map(set, [(1, 2, 4, 11), (2, 3, 5, 11, 42)])
        # All pairings of overlapping / disjoint derived operands.
        args = [a & b, a - b, a | b, a ^ b]
        args = [tuple(x) for x in args]
        for a, b in itertools.product(args, args):
            check(a, b)

    def test_isdisjoint(self):
        self._test_comparator(isdisjoint_usecase)

    def test_issubset(self):
        self._test_comparator(issubset_usecase)

    def test_issuperset(self):
        self._test_comparator(issuperset_usecase)

    def test_clear(self):
        pyfunc = clear_usecase
        check = self.unordered_checker(pyfunc)
        check((1, 2, 4, 11))
        check(self.sparse_array(50))

    def test_copy(self):
        # Source set doesn't have any deleted entries
        pyfunc = copy_usecase
        check = self.unordered_checker(pyfunc)
        check((1, 2, 4, 11))
        check(self.sparse_array(50))
        pyfunc = copy_usecase_empty
        check = self.unordered_checker(pyfunc)
        check((1,))
        # Source set has deleted entries
        pyfunc = copy_usecase_deleted
        check = self.unordered_checker(pyfunc)
        check((1, 2, 4, 11), 2)
        a = self.sparse_array(50)
        check(a, a[len(a) // 2])

    def _test_set_operator(self, pyfunc):
        """Common driver for binary operators producing a new set."""
        check = self.unordered_checker(pyfunc)
        a, b = (1, 2, 4, 11), (2, 3, 5, 11, 42)
        check(a, b)
        sizes = (0, 50, 500)
        for na, nb in itertools.product(sizes, sizes):
            a = self.sparse_array(na)
            b = self.sparse_array(nb)
            check(a, b)

    def test_difference(self):
        self._test_set_operator(difference_usecase)

    def test_intersection(self):
        self._test_set_operator(intersection_usecase)

    def test_symmetric_difference(self):
        self._test_set_operator(symmetric_difference_usecase)

    def test_union(self):
        self._test_set_operator(union_usecase)

    def test_and(self):
        self._test_set_operator(make_operator_usecase('&'))

    def test_or(self):
        self._test_set_operator(make_operator_usecase('|'))

    def test_sub(self):
        self._test_set_operator(make_operator_usecase('-'))

    def test_xor(self):
        self._test_set_operator(make_operator_usecase('^'))

    def test_eq(self):
        self._test_set_operator(make_comparison_usecase('=='))

    def test_ne(self):
        self._test_set_operator(make_comparison_usecase('!='))

    def test_le(self):
        self._test_set_operator(make_comparison_usecase('<='))

    def test_lt(self):
        self._test_set_operator(make_comparison_usecase('<'))

    def test_ge(self):
        self._test_set_operator(make_comparison_usecase('>='))

    def test_gt(self):
        self._test_set_operator(make_comparison_usecase('>'))

    def test_iand(self):
        self._test_set_operator(make_inplace_operator_usecase('&='))

    def test_ior(self):
        self._test_set_operator(make_inplace_operator_usecase('|='))

    def test_isub(self):
        self._test_set_operator(make_inplace_operator_usecase('-='))

    def test_ixor(self):
        self._test_set_operator(make_inplace_operator_usecase('^='))
class OtherTypesTest(object):
    """Mixin re-running the basic set tests; the key type is chosen by the
    companion BaseTest subclass through its _range() override."""

    def test_constructor(self):
        pyfunc = empty_constructor_usecase
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), pyfunc())

        pyfunc = constructor_usecase
        cfunc = jit(nopython=True)(pyfunc)
        def check(arg):
            self.assertPreciseEqual(pyfunc(arg), cfunc(arg))

        check(self.duplicates_array(200))
        check(self.sparse_array(200))

    def test_iterator(self):
        pyfunc = iterator_usecase
        check = self.unordered_checker(pyfunc)
        check(self.duplicates_array(200))
        check(self.sparse_array(200))

    def test_update(self):
        pyfunc = update_usecase
        check = self.unordered_checker(pyfunc)
        a = self.sparse_array(50)
        b = self.duplicates_array(50)
        c = self.sparse_array(50)
        check(a, b, c)
class TestFloatSets(OtherTypesTest, BaseTest):
    """
    Test sets with floating-point keys.
    """
    # Only a few basic tests here, as the sanity of most operations doesn't
    # depend on the key type.

    def _range(self, stop):
        # Scale by 0.1 to produce non-integral float32 keys.
        return np.arange(stop, dtype=np.float32) * np.float32(0.1)
class TestTupleSets(OtherTypesTest, BaseTest):
    """
    Test sets with tuple keys.
    """

    def _range(self, stop):
        """Generate heterogeneous (int64, int32, bool) tuple keys."""
        a = np.arange(stop, dtype=np.int64)
        b = a & 0x5555555555555555
        c = (a & 0xaaaaaaaa).astype(np.int32)
        d = ((a >> 32) & 1).astype(np.bool_)
        return list(zip(b, c, d))
class TestUnboxing(BaseTest):
    """
    Test unboxing of Python sets into native Numba sets.
    """

    @contextlib.contextmanager
    def assert_type_error(self, msg):
        # Context manager asserting that the body raises TypeError whose
        # message matches `msg` (skip the message check when msg is None).
        with self.assertRaises(TypeError) as raises:
            yield
        if msg is not None:
            self.assertRegexpMatches(str(raises.exception), msg)

    def check_unary(self, pyfunc):
        # Return a checker comparing pyfunc against its nopython-compiled form.
        cfunc = jit(nopython=True)(pyfunc)
        def check(arg):
            expected = pyfunc(arg)
            got = cfunc(arg)
            self.assertPreciseEqual(got, expected)
        return check

    def test_numbers(self):
        # Homogeneous numeric sets unbox correctly.
        check = self.check_unary(unbox_usecase)
        check(set([1, 2]))
        check(set([1j, 2.5j]))
        # Check allocation and sizing
        check(set(range(100)))

    def test_tuples(self):
        # Sets of homogeneous tuples.
        check = self.check_unary(unbox_usecase2)
        check(set([(1, 2), (3, 4)]))
        check(set([(1, 2j), (3, 4j)]))

    def test_set_inside_tuple(self):
        # A set nested inside a tuple.
        check = self.check_unary(unbox_usecase3)
        check((1, set([2, 3, 4])))

    def test_set_of_tuples_inside_tuple(self):
        # A set of tuples nested inside a tuple.
        check = self.check_unary(unbox_usecase4)
        check((1, set([(2,), (3,)])))

    def test_errors(self):
        # Error checking should ensure the set is homogeneous
        msg = "can't unbox heterogeneous set"
        pyfunc = noop
        cfunc = jit(nopython=True)(pyfunc)
        val = set([1, 2.5])
        with self.assert_type_error(msg):
            cfunc(val)
        # The set hasn't been changed (bogus reflecting)
        self.assertEqual(val, set([1, 2.5]))
        with self.assert_type_error(msg):
            cfunc(set([1, 2j]))
        # Same when the set is nested in a tuple or namedtuple
        with self.assert_type_error(msg):
            cfunc((1, set([1, 2j])))
        with self.assert_type_error(msg):
            cfunc(Point(1, set([1, 2j])))
        # Tuples of different size.
        # Note the check is really on the tuple side.
        lst = set([(1,), (2, 3)])
        # Depending on which tuple is examined first, we could get
        # a IndexError or a ValueError.
        with self.assertRaises((IndexError, ValueError)) as raises:
            cfunc(lst)
class TestSetReflection(BaseTest):
    """
    Test reflection of native Numba sets on Python set objects.
    """

    def check_reflection(self, pyfunc):
        # Run pyfunc both interpreted and compiled on identical inputs and
        # assert the destination set ends up mutated the same way, the
        # return values agree, and identity relationships are preserved.
        cfunc = jit(nopython=True)(pyfunc)
        samples = [(set([1., 2., 3., 4.]), set([0.])),
                   (set([1., 2., 3., 4.]), set([5., 6., 7., 8., 9.])),
                   ]
        for dest, src in samples:
            expected = set(dest)
            got = set(dest)
            pyres = pyfunc(expected, src)
            # Reflection must not leak references on the inputs.
            with self.assertRefCount(got, src):
                cres = cfunc(got, src)
                self.assertPreciseEqual(cres, pyres)
                self.assertPreciseEqual(expected, got)
                # Identity of the returned object mirrors the pure-Python run.
                self.assertEqual(pyres[0] is expected, cres[0] is got)
                del pyres, cres

    def test_reflect_simple(self):
        self.check_reflection(reflect_simple)

    def test_reflect_conditional(self):
        self.check_reflection(reflect_conditional)

    def test_reflect_exception(self):
        """
        When the function exits with an exception, sets should still be
        reflected.
        """
        pyfunc = reflect_exception
        cfunc = jit(nopython=True)(pyfunc)
        s = set([1, 2, 3])
        with self.assertRefCount(s):
            with self.assertRaises(ZeroDivisionError):
                cfunc(s)
            # Mutation performed before the exception is still visible.
            self.assertPreciseEqual(s, set([1, 2, 3, 42]))

    def test_reflect_same_set(self):
        """
        When the same set object is reflected twice, behaviour should
        be consistent.
        """
        pyfunc = reflect_dual
        cfunc = jit(nopython=True)(pyfunc)
        pyset = set([1, 2, 3])
        cset = pyset.copy()
        expected = pyfunc(pyset, pyset)
        got = cfunc(cset, cset)
        self.assertPreciseEqual(expected, got)
        self.assertPreciseEqual(pyset, cset)
        self.assertPreciseEqual(sys.getrefcount(pyset), sys.getrefcount(cset))

    def test_reflect_clean(self):
        """
        When the set wasn't mutated, no reflection should take place.
        """
        cfunc = jit(nopython=True)(noop)
        # Use a complex, as Python integers can be cached
        s = set([12.5j])
        ids = [id(x) for x in s]
        cfunc(s)
        # Same element objects => the set was not rebuilt by reflection.
        self.assertEqual([id(x) for x in s], ids)
class TestExamples(BaseTest):
    """
    Examples of using sets.
    """

    def test_unique(self):
        # Deduplication via a set, checked order-insensitively.
        verify = self.unordered_checker(unique_usecase)
        verify(self.duplicates_array(200))
        verify(self.sparse_array(200))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import array
import errno
import os
from socket import socket as _original_socket
import socket
import sys
import time
import warnings
from eventlet.support import get_errno, six
from eventlet.hubs import trampoline
__all__ = ['GreenSocket', 'GreenPipe', 'shutdown_safe']
# Default chunk size for socket reads.
BUFFER_SIZE = 4096
# connect_ex() results meaning "in progress, wait for writability".
CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
# connect_ex() results meaning the connection is established.
CONNECT_SUCCESS = set((0, errno.EISCONN))
if sys.platform[:3] == "win":
    CONNECT_ERR.add(errno.WSAEINVAL)  # Bug 67
# Pick the file-object wrapper matching the running Python major version.
if six.PY3:
    from io import IOBase as file
    _fileobject = socket.SocketIO
elif six.PY2:
    _fileobject = socket._fileobject
def socket_connect(descriptor, address):
    """
    Attempts to connect to the address, returns the descriptor if it succeeds,
    returns None if it needs to trampoline, and raises any exceptions.
    """
    code = descriptor.connect_ex(address)
    if code in CONNECT_ERR:
        # Connection attempt is in flight; caller must wait for writability.
        return None
    if code in CONNECT_SUCCESS:
        return descriptor
    raise socket.error(code, errno.errorcode[code])
def socket_checkerr(descriptor):
    # Raise any pending error recorded on the socket (after a trampoline).
    code = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if code in CONNECT_SUCCESS:
        return
    raise socket.error(code, errno.errorcode[code])
def socket_accept(descriptor):
    """
    Attempts to accept() on the descriptor, returns a client,address tuple
    if it succeeds; returns None if it needs to trampoline, and raises
    any exceptions.
    """
    try:
        return descriptor.accept()
    except socket.error as e:
        if get_errno(e) != errno.EWOULDBLOCK:
            raise
        # Nothing queued yet -- caller should trampoline and retry.
        return None
# Platform-specific classification of recv()/send() errno values.
if sys.platform[:3] == "win":
    # winsock sometimes throws ENOTCONN
    SOCKET_BLOCKING = set((errno.EWOULDBLOCK,))
    SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN))
else:
    # oddly, on linux/darwin, an unconnected socket is expected to block,
    # so we treat ENOTCONN the same as EWOULDBLOCK
    SOCKET_BLOCKING = set((errno.EWOULDBLOCK, errno.ENOTCONN))
    SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE))
def set_nonblocking(fd):
    """
    Sets the descriptor to be nonblocking. Works on many file-like
    objects as well as sockets. Only sockets can be nonblocking on
    Windows, however.
    """
    setblocking = getattr(fd, 'setblocking', None)
    if setblocking is not None:
        # The object knows how to do it itself (sockets, SocketIO, ...).
        setblocking(0)
        return
    # No setblocking() method: fall back to flipping O_NONBLOCK on the raw
    # file descriptor via fcntl, where that module exists.
    try:
        import fcntl
    except ImportError:
        # Whoops, Windows has no fcntl module. This might not be a socket
        # at all, but rather a file-like object with no setblocking()
        # method. In particular, on Windows, pipes don't support
        # non-blocking I/O and therefore don't have that method. Which
        # means fcntl wouldn't help even if we could load it.
        raise NotImplementedError("set_nonblocking() on a file object "
                                  "with no setblocking() method "
                                  "(Windows pipes don't support non-blocking I/O)")
    fileno = fd.fileno()
    current_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
    wanted_flags = current_flags | os.O_NONBLOCK
    if wanted_flags != current_flags:
        fcntl.fcntl(fileno, fcntl.F_SETFL, wanted_flags)
# Sentinel meaning "use the module-wide default timeout"; borrow the stdlib's
# object when available so comparisons against it interoperate.
try:
    from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
    _GLOBAL_DEFAULT_TIMEOUT = object()
class GreenSocket(object):
    """
    Green version of socket.socket class, that is intended to be 100%
    API-compatible.

    It also recognizes the keyword parameter, 'set_nonblocking=True'.
    Pass False to indicate that socket is already in non-blocking mode
    to save syscalls.

    Blocking calls are emulated by looping on the non-blocking underlying
    socket and yielding to the hub via trampoline() until it is ready.
    """

    def __init__(self, family_or_realsock=socket.AF_INET, *args, **kwargs):
        # Accepts either an address family (build a fresh socket) or an
        # existing socket object to wrap.
        should_set_nonblocking = kwargs.pop('set_nonblocking', True)
        if isinstance(family_or_realsock, six.integer_types):
            fd = _original_socket(family_or_realsock, *args, **kwargs)
        else:
            fd = family_or_realsock
        # import timeout from other socket, if it was there
        try:
            self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
        except AttributeError:
            self._timeout = socket.getdefaulttimeout()
        if should_set_nonblocking:
            set_nonblocking(fd)
        self.fd = fd
        # when client calls setblocking(0) or settimeout(0) the socket must
        # act non-blocking
        self.act_non_blocking = False
        # Copy some attributes from underlying real socket.
        # This is the easiest way that i found to fix
        # https://bitbucket.org/eventlet/eventlet/issue/136
        # Only `getsockopt` is required to fix that issue, others
        # are just premature optimization to save __getattr__ call.
        self.bind = fd.bind
        self.close = fd.close
        self.fileno = fd.fileno
        self.getsockname = fd.getsockname
        self.getsockopt = fd.getsockopt
        self.listen = fd.listen
        self.setsockopt = fd.setsockopt
        self.shutdown = fd.shutdown

    @property
    def _sock(self):
        # Compatibility shim for code that reaches for sock._sock.
        return self

    # Forward unknown attributes to fd, cache the value for future use.
    # I do not see any simple attribute which could be changed
    # so caching everything in self is fine.
    # If we find such attributes - only attributes having __get__ might be cached.
    # For now - I do not want to complicate it.
    def __getattr__(self, name):
        attr = getattr(self.fd, name)
        setattr(self, name, attr)
        return attr

    def accept(self):
        # Loop accept() until a client arrives, yielding to the hub between
        # attempts; honors the configured timeout.
        if self.act_non_blocking:
            return self.fd.accept()
        fd = self.fd
        while True:
            res = socket_accept(fd)
            if res is not None:
                client, addr = res
                set_nonblocking(client)
                return type(self)(client), addr
            trampoline(fd, read=True, timeout=self.gettimeout(),
                       timeout_exc=socket.timeout("timed out"))

    def connect(self, address):
        # Cooperative connect; raises socket.timeout when a timeout is set
        # and the deadline passes.
        if self.act_non_blocking:
            return self.fd.connect(address)
        fd = self.fd
        if self.gettimeout() is None:
            while not socket_connect(fd, address):
                trampoline(fd, write=True)
                socket_checkerr(fd)
        else:
            end = time.time() + self.gettimeout()
            while True:
                if socket_connect(fd, address):
                    return
                if time.time() >= end:
                    raise socket.timeout("timed out")
                trampoline(fd, write=True, timeout=end - time.time(),
                           timeout_exc=socket.timeout("timed out"))
                socket_checkerr(fd)

    def connect_ex(self, address):
        # Like connect(), but returns an errno instead of raising.
        if self.act_non_blocking:
            return self.fd.connect_ex(address)
        fd = self.fd
        if self.gettimeout() is None:
            while not socket_connect(fd, address):
                try:
                    trampoline(fd, write=True)
                    socket_checkerr(fd)
                except socket.error as ex:
                    return get_errno(ex)
        else:
            end = time.time() + self.gettimeout()
            while True:
                try:
                    if socket_connect(fd, address):
                        return 0
                    if time.time() >= end:
                        raise socket.timeout(errno.EAGAIN)
                    trampoline(fd, write=True, timeout=end - time.time(),
                               timeout_exc=socket.timeout(errno.EAGAIN))
                    socket_checkerr(fd)
                except socket.error as ex:
                    return get_errno(ex)

    def dup(self, *args, **kw):
        # Duplicate the underlying socket; the dup is already non-blocking.
        sock = self.fd.dup(*args, **kw)
        newsock = type(self)(sock, set_nonblocking=False)
        newsock.settimeout(self.gettimeout())
        return newsock

    def makefile(self, *args, **kw):
        # Wrap a dup of the socket in the version-appropriate file object;
        # _drop releases the pypy refcount bookkeeping on the dup.
        dupped = self.dup()
        res = _fileobject(dupped, *args, **kw)
        if hasattr(dupped, "_drop"):
            dupped._drop()
        return res

    def makeGreenFile(self, *args, **kw):
        # Deprecated alias of makefile().
        warnings.warn("makeGreenFile has been deprecated, please use "
                      "makefile instead", DeprecationWarning, stacklevel=2)
        return self.makefile(*args, **kw)

    def recv(self, buflen, flags=0):
        # Cooperative recv: retry on would-block, return '' on peer close.
        fd = self.fd
        if self.act_non_blocking:
            return fd.recv(buflen, flags)
        while True:
            try:
                return fd.recv(buflen, flags)
            except socket.error as e:
                if get_errno(e) in SOCKET_BLOCKING:
                    pass
                elif get_errno(e) in SOCKET_CLOSED:
                    return ''
                else:
                    raise
            trampoline(
                fd,
                read=True,
                timeout=self.gettimeout(),
                timeout_exc=socket.timeout("timed out"))

    def recvfrom(self, *args):
        # NOTE(review): waits for readability before every call, even when
        # data may already be available -- confirm this matches hub semantics.
        if not self.act_non_blocking:
            trampoline(self.fd, read=True, timeout=self.gettimeout(),
                       timeout_exc=socket.timeout("timed out"))
        return self.fd.recvfrom(*args)

    def recvfrom_into(self, *args):
        if not self.act_non_blocking:
            trampoline(self.fd, read=True, timeout=self.gettimeout(),
                       timeout_exc=socket.timeout("timed out"))
        return self.fd.recvfrom_into(*args)

    def recv_into(self, *args):
        if not self.act_non_blocking:
            trampoline(self.fd, read=True, timeout=self.gettimeout(),
                       timeout_exc=socket.timeout("timed out"))
        return self.fd.recv_into(*args)

    def send(self, data, flags=0):
        fd = self.fd
        if self.act_non_blocking:
            return fd.send(data, flags)
        # blocking socket behavior - sends all, blocks if the buffer is full
        total_sent = 0
        len_data = len(data)
        while 1:
            try:
                total_sent += fd.send(data[total_sent:], flags)
            except socket.error as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise
            if total_sent == len_data:
                break
            trampoline(self.fd, write=True, timeout=self.gettimeout(),
                       timeout_exc=socket.timeout("timed out"))
        return total_sent

    def sendall(self, data, flags=0):
        # Keep sending until every byte of `data` has gone out.
        tail = self.send(data, flags)
        len_data = len(data)
        while tail < len_data:
            tail += self.send(data[tail:], flags)

    def sendto(self, *args):
        # NOTE(review): unlike send(), this does not honor the timeout.
        trampoline(self.fd, write=True)
        return self.fd.sendto(*args)

    def setblocking(self, flag):
        # Blocking mode maps onto the emulated timeout machinery.
        if flag:
            self.act_non_blocking = False
            self._timeout = None
        else:
            self.act_non_blocking = True
            self._timeout = 0.0

    def settimeout(self, howlong):
        if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
            self.setblocking(True)
            return
        try:
            f = howlong.__float__
        except AttributeError:
            raise TypeError('a float is required')
        howlong = f()
        if howlong < 0.0:
            raise ValueError('Timeout value out of range')
        # A zero timeout means pure non-blocking behavior.
        if howlong == 0.0:
            self.act_non_blocking = True
            self._timeout = 0.0
        else:
            self.act_non_blocking = False
            self._timeout = howlong

    def gettimeout(self):
        return self._timeout

    # pypy tracks file-object sharing with explicit refcounts; forward to the
    # real socket (or its _sock) when running there.
    if "__pypy__" in sys.builtin_module_names:
        def _reuse(self):
            getattr(self.fd, '_sock', self.fd)._reuse()

        def _drop(self):
            getattr(self.fd, '_sock', self.fd)._drop()
class _SocketDuckForFd(object):
    """Class implementing all socket method used by _fileobject in cooperative manner using low level os I/O calls."""

    def __init__(self, fileno):
        # Raw OS file descriptor this duck wraps; assumed already non-blocking.
        self._fileno = fileno

    @property
    def _sock(self):
        return self

    def fileno(self):
        return self._fileno

    def recv(self, buflen):
        # Read up to buflen bytes, yielding to the hub on EAGAIN; any other
        # OSError is surfaced as IOError (the file-object API's exception).
        while True:
            try:
                data = os.read(self._fileno, buflen)
                return data
            except OSError as e:
                if get_errno(e) != errno.EAGAIN:
                    raise IOError(*e.args)
            trampoline(self, read=True)

    def sendall(self, data):
        # Write all of `data`, trampolining between partial writes.
        len_data = len(data)
        os_write = os.write
        fileno = self._fileno
        try:
            total_sent = os_write(fileno, data)
        except OSError as e:
            if get_errno(e) != errno.EAGAIN:
                raise IOError(*e.args)
            # First write would block: nothing sent yet.
            total_sent = 0
        while total_sent < len_data:
            trampoline(self, write=True)
            try:
                total_sent += os_write(fileno, data[total_sent:])
            except OSError as e:
                if get_errno(e) != errno.EAGAIN:
                    raise IOError(*e.args)

    def __del__(self):
        self._close()

    def _close(self):
        try:
            os.close(self._fileno)
        except:
            # os.close may fail if __init__ didn't complete (i.e. file descriptor passed to popen was invalid)
            pass

    def __repr__(self):
        return "%s:%d" % (self.__class__.__name__, self._fileno)

    # pypy file objects share their underlying fd with explicit refcounting;
    # close for real only when the last user drops its reference.
    if "__pypy__" in sys.builtin_module_names:
        _refcount = 0

        def _reuse(self):
            self._refcount += 1

        def _drop(self):
            self._refcount -= 1
            if self._refcount == 0:
                self._close()
def _operationOnClosedFile(*args, **kwargs):
raise ValueError("I/O operation on closed file")
class GreenPipe(_fileobject):
    """
    GreenPipe is a cooperative replacement for file class.
    It will cooperate on pipes. It will block on regular file.

    Differences from file class:
    - mode is r/w property. Should be r/o
    - encoding property not implemented
    - write/writelines will not raise TypeError exception when non-string data is written
      it will write str(data) instead
    - Universal new lines are not supported and newlines property not implemented
    - file argument can be descriptor, file name or file object.
    """

    def __init__(self, f, mode='r', bufsize=-1):
        # Accept a descriptor, a path, or an open file object; in every case
        # end up owning a raw fd wrapped in a cooperative socket-duck.
        if not isinstance(f, six.string_types + (int, file)):
            raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)
        if isinstance(f, six.string_types):
            f = open(f, mode, 0)
        if isinstance(f, int):
            fileno = f
            self._name = "<fd:%d>" % fileno
        else:
            # Dup the fd so closing the original file does not affect us.
            fileno = os.dup(f.fileno())
            self._name = f.name
            if f.mode != mode:
                raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode))
            self._name = f.name
            f.close()
        super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize)
        set_nonblocking(self)
        self.softspace = 0

    @property
    def name(self):
        return self._name

    def __repr__(self):
        # NOTE(review): sys.maxint is Python 2 only; this repr would fail on
        # Python 3 for negative id() values -- confirm intended interpreters.
        return "<%s %s %r, mode %r at 0x%x>" % (
            self.closed and 'closed' or 'open',
            self.__class__.__name__,
            self.name,
            self.mode,
            (id(self) < 0) and (sys.maxint + id(self)) or id(self))

    def close(self):
        # After closing, every I/O method raises ValueError via the stub.
        super(GreenPipe, self).close()
        for method in [
                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
                'readline', 'readlines', 'seek', 'tell', 'truncate',
                'write', 'xreadlines', '__iter__', 'writelines']:
            setattr(self, method, _operationOnClosedFile)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def readinto(self, buf):
        data = self.read(len(buf))  # FIXME could it be done without allocating intermediate?
        n = len(data)
        try:
            buf[:n] = data
        except TypeError as err:
            # Old-style array.array buffers need the data coerced first.
            if not isinstance(buf, array.array):
                raise err
            buf[:n] = array.array('c', data)
        return n

    def _get_readahead_len(self):
        # Bytes sitting in the file object's internal read buffer.
        return len(self._rbuf.getvalue())

    def _clear_readahead_buf(self):
        # Drain the readahead buffer (note: `len` shadows the builtin here).
        len = self._get_readahead_len()
        if len > 0:
            self.read(len)

    def tell(self):
        # OS position minus what we read ahead but haven't handed out.
        self.flush()
        try:
            return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
        except OSError as e:
            raise IOError(*e.args)

    def seek(self, offset, whence=0):
        self.flush()
        if whence == 1 and offset == 0:  # tell synonym
            return self.tell()
        if whence == 1:  # adjust offset by what is read ahead
            offset -= self._get_readahead_len()
        try:
            rv = os.lseek(self.fileno(), offset, whence)
        except OSError as e:
            raise IOError(*e.args)
        else:
            self._clear_readahead_buf()
            return rv

    if getattr(file, "truncate", None):  # not all OSes implement truncate
        def truncate(self, size=-1):
            self.flush()
            if size == -1:
                size = self.tell()
            try:
                rv = os.ftruncate(self.fileno(), size)
            except OSError as e:
                raise IOError(*e.args)
            else:
                self.seek(size)  # move position&clear buffer
                return rv

    def isatty(self):
        try:
            return os.isatty(self.fileno())
        except OSError as e:
            raise IOError(*e.args)
# import SSL module here so we can refer to greenio.SSL.exceptionclass
try:
    from OpenSSL import SSL
except ImportError:
    # pyOpenSSL not installed, define exceptions anyway for convenience
    # (placeholder classes are never raised; they only make except-clauses
    # referencing greenio.SSL.* importable).
    class SSL(object):
        class WantWriteError(object):
            pass

        class WantReadError(object):
            pass

        class ZeroReturnError(object):
            pass

        class SysCallError(object):
            pass
def shutdown_safe(sock):
    """ Shuts down the socket. This is a convenience method for
    code that wants to gracefully handle regular sockets, SSL.Connection
    sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.6
    interchangeably. Both types of ssl socket require a shutdown() before
    close, but they have different arity on their shutdown method.

    Regular sockets don't need a shutdown before close, but it doesn't hurt.
    """
    try:
        shutdown = sock.shutdown
        try:
            # socket and ssl.SSLSocket take a "how" argument
            return shutdown(socket.SHUT_RDWR)
        except TypeError:
            # SSL.Connection's shutdown() takes no arguments
            return shutdown()
    except socket.error as e:
        # we don't care if the socket is already closed;
        # this will often be the case in an http server context
        if get_errno(e) != errno.ENOTCONN:
            raise
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import hashlib
from urllib.parse import urlsplit
import requests
from dateutil.parser import parse
from dateutil.rrule import rrule, DAILY
from .multi_downloader import MultiFileDownloader
from .summary_data_injector import add_nhl_ids_to_content
from utils import adjust_html_response
class SummaryDownloader(MultiFileDownloader):
    """
    Downloads NHL game summary data (HTML reports, JSON game feeds and JSON
    shift charts) for a range of dates, using Last-Modified / content-hash
    bookkeeping to skip unchanged data on repeat runs.
    """

    # base url for official schedule json page
    SCHEDULE_URL_BASE = "http://statsapi.web.nhl.com/api/v1/schedule"
    # url template for official json gamefeed page
    JSON_GAME_FEED_URL_TEMPLATE = "http://statsapi.web.nhl.com/api/v1/game/%s/feed/live"
    # JSON_SHIFT_CHART_URL_TEMPLATE = "http://www.nhl.com/stats/rest/shiftcharts?cayenneExp=gameId=%s"
    JSON_SHIFT_CHART_URL_TEMPLATE = "https://api.nhle.com/stats/rest/en/shiftcharts?cayenneExp=gameId=%s"
    # url parameter for json scoreboard page
    LINESCORE_CONTENT_KEY = "schedule.linescore"
    # defining necessary url prefixes
    NHL_PREFIX = r"http://www.nhl.com"
    # url prefix for html game reports
    HTML_REPORT_PREFIX = "".join((NHL_PREFIX, r"/scores/htmlreports/"))
    # defining valid game and report types
    REPORT_TYPES = ['GS', 'ES', 'FC', 'PL', 'TV', 'TH', 'RO', 'SS', 'SO']
    GAME_TYPES = ['P', 'R']

    def __init__(self, tgt_dir, date, to_date='', zip_summaries=True, workers=0, cleanup=True, exclude=None):
        # tgt_dir:       base directory for downloaded summaries
        # date/to_date:  inclusive date range (parseable strings); to_date
        #                defaults to date
        # exclude:       dataset names to skip, e.g. 'html_reports',
        #                'game_feed', 'shift_chart'
        # constructing base class instance
        super().__init__(tgt_dir, zip_summaries, workers, cleanup)
        # parsing start date for summary retrieval
        self.date = parse(date)
        # retrieving end date for summary retrieval
        if to_date:
            self.to_date = parse(to_date)
        else:
            self.to_date = self.date
        # preparing list of dates to download summary data for
        self.game_dates = list(rrule(DAILY, dtstart=self.date, until=self.to_date))
        # storing datasets to be excluded from downloading
        self.exclude = list()
        if exclude is not None:
            self.exclude = exclude
        # preparing connection to dumped dictionary of modification timestamps
        self.mod_timestamp_src = os.path.join(tgt_dir, '_mod_timestamps.json')
        # loading dictionary of previously downloaded summaries (if available)
        if os.path.isfile(self.mod_timestamp_src):
            self.mod_timestamps = json.loads(open(self.mod_timestamp_src).read())
        else:
            self.mod_timestamps = dict()

    def get_tgt_dir(self):
        """
        Returns target directory according to current date.
        """
        return os.path.join(self.base_tgt_dir, self.current_date.strftime("%Y-%m"))

    def get_zip_name(self):
        """
        Returns file name of zipped downloads for current date.
        """
        return "%04d-%02d-%02d" % (self.current_date.year, self.current_date.month, self.current_date.day)

    def get_zip_path(self):
        """
        Returns path to file of zipped downloaded files for current date.
        """
        return os.path.join(self.get_tgt_dir(), ".".join((self.get_zip_name(), 'zip')))

    def find_files_to_download(self):
        """
        Identifies files to be downloaded.
        """
        # making sure that the list of files to download is empty
        self.files_to_download = list()
        # preparing formatted date string as necessary for scoreboard retrieval
        fmt_date = "%d-%02d-%02d" % (self.current_date.year, self.current_date.month, self.current_date.day)
        # retrieving schedule for current date in json format
        req = requests.get(self.SCHEDULE_URL_BASE, params={
            'startDate': fmt_date, 'endDate': fmt_date, 'expand': self.LINESCORE_CONTENT_KEY})
        json_scoreboard = json.loads(req.text)
        self.files_to_download = self.get_files_to_download_from_scoreboard(json_scoreboard)

    def get_files_to_download_from_scoreboard(self, json_scoreboard):
        """
        Gets downloadable files from JSON scoreboard page.

        Returns a list of (url, target_file_name) tuples; target_file_name
        is None when the url's basename should be used.
        """
        files_to_download = list()
        for date in json_scoreboard['dates']:
            # retrieving basic game data from json contents
            for game in date['games']:
                season = game['season']
                full_game_id = game['gamePk']
                game_type = game['gameType']
                # short id: full id minus the leading season component
                game_id = str(full_game_id)[4:]
                # skipping game unless it's a regular season or playoff game
                if game_type not in self.GAME_TYPES:
                    continue
                # constructing urls to individual game report pages
                if 'html_reports' not in self.exclude:
                    for rt in self.REPORT_TYPES:
                        # only adding shootout report to files to be downloaded
                        # if the current game ended in a shootout
                        if rt == 'SO' and not game['linescore']['hasShootout']:
                            continue
                        htmlreport_url = "".join((self.HTML_REPORT_PREFIX, season, "/", rt, str(game_id), ".HTM"))
                        files_to_download.append((htmlreport_url, None))
                # setting up json game feed url and adding it to list of
                # files to be downloaded
                if 'game_feed' not in self.exclude:
                    feed_json_url = self.JSON_GAME_FEED_URL_TEMPLATE % str(full_game_id)
                    files_to_download.append((feed_json_url, ".".join((game_id, "json"))))
                # setting up json shift chart url and adding it to list of
                # files to be downloaded
                if 'shift_chart' not in self.exclude:
                    chart_json_url = self.JSON_SHIFT_CHART_URL_TEMPLATE % str(full_game_id)
                    files_to_download.append((chart_json_url, "".join((game_id, "_sc.json"))))
        return files_to_download

    def get_last_modification_timestamp(self, url, tgt_path):
        """
        Retrieves timestamp of last modification for specified url if data
        had been downloaded before to the given target location.

        Returns "" when the data has never been downloaded (or no timestamp
        was recorded).  For shift charts the stored value is an MD5 hash
        rather than a timestamp.
        """
        # determining whether data has been downloaded before by checking if
        # target file exists in file system or in a corresponding zip file
        if (
            os.path.isfile(tgt_path) or self.check_for_file(
                self.zip_path, os.path.basename(tgt_path))
        ):
            # if data has been downloaded before, retrieve last
            # modification timestamp
            if url in self.mod_timestamps and self.mod_timestamps[url]:
                return self.mod_timestamps[url]
        return ""

    def download_task(self, url, tgt_dir, tgt_file):
        """
        Represents a single downloading task.
        """
        # setting up target path
        if tgt_file is None:
            tgt_file = os.path.basename(urlsplit(url).path)
        tgt_path = os.path.join(tgt_dir, tgt_file)
        # downloading data according to actual content type
        if url.lower().endswith('.htm'):
            content = self.download_html_content(url, tgt_path)
            write_type = 'wb'
        else:
            content = self.download_json_content(url, tgt_path)
            write_type = 'w'
        if content:
            # writing downloaded content to target path
            open(tgt_path, write_type).write(content)
            return tgt_path

    def download_html_content(self, url, tgt_path):
        """
        Downloads html content from specified url.

        Returns the (possibly adjusted) response body, or None when the
        server reports the resource as unmodified.
        """
        # retrieving timestamp of last modification in case data has been
        # downloaded before
        mod_time_stamp = self.get_last_modification_timestamp(url, tgt_path)
        # setting up http headers using modification time stamp
        headers = dict()
        # modifying headers in case we're looking for an update of already
        # downloaded data
        if mod_time_stamp:
            headers['If-Modified-Since'] = mod_time_stamp
        req = requests.get(url, headers=headers)
        # if server responds with code for no modification
        if req.status_code == 304:
            # TODO: proper logging
            sys.stdout.write(".")
            sys.stdout.flush()
            return
        elif req.status_code == 200:
            # TODO: proper logging
            sys.stdout.write("+")
            sys.stdout.flush()
            # updating modification timestamp in corresponding dictionary
            self.mod_timestamps[url] = req.headers.get('Last-Modified')
            # adjusting html data
            content = adjust_html_response(req)
            if "ES" in url:
                # event summaries get NHL player ids injected
                content = add_nhl_ids_to_content(url, content)
            return content

    def download_json_content(self, url, tgt_path):
        """
        Downloads JSON content from specified url.
        """
        if tgt_path.endswith('_sc.json'):
            return self.download_json_shift_chart(url, tgt_path)
        else:
            return self.download_json_game_feed(url, tgt_path)

    def download_json_game_feed(self, url, tgt_path):
        """
        Downloads JSON game feed data from specified url.

        Returns the pretty-printed JSON string, or None when the feed is
        unchanged or would overwrite good data with an empty play list.
        """
        # retrieving timestamp of last modification in case data has been
        # downloaded before
        mod_time_stamp = self.get_last_modification_timestamp(url, tgt_path)
        # converting modification time stamp into actual datetime
        if mod_time_stamp:
            mod_time_stamp = parse(mod_time_stamp)
        req = requests.get(url)
        if req.status_code == 200:
            json_data = req.json()
            # retrieving time stamp for downloaded data
            act_time_stamp = parse(json_data['metaData']['timeStamp'].replace("_", " "))
            # checking whether json data that is due to update an existing data
            # set contains any play information at all and bailing out if that
            # is not the case - by doing so we avoid overwriting existing
            # *good* with *bad* data
            play_data = json_data['liveData']['plays']['allPlays']
            # print(tgt_path)
            if mod_time_stamp and not play_data:
                # print("No playdata found %s" % url)
                # TODO: proper logging
                sys.stdout.write("x")
                sys.stdout.flush()
                return
            # comparing time stamp of last modification of json data with
            # previously saved timestamp
            if act_time_stamp == mod_time_stamp:
                # TODO: proper logging
                sys.stdout.write(".")
                sys.stdout.flush()
                return
            else:
                # TODO: proper logging
                sys.stdout.write("+")
                sys.stdout.flush()
                # updating modification timestamp in corresponding dictionary
                self.mod_timestamps[url] = str(act_time_stamp)
                # returning json data as prettily formatted string
                return json.dumps(json_data, indent=2)

    def download_json_shift_chart(self, url, tgt_path):
        """
        Downloads JSON shift data from specified url.

        Change detection uses an MD5 hash of the payload (stored in
        mod_timestamps) since the endpoint provides no Last-Modified header.
        """
        # retrieving hash of previously downloaded data (if any)
        existing_data_hash = self.get_last_modification_timestamp(url, tgt_path)
        req = requests.get(url)
        if req.status_code == 200:
            json_data = req.json()
            # calculating MD5 hash for downloaded data
            json_data_hash = hashlib.md5(json.dumps(json_data).encode('utf-8')).hexdigest()
            # comparing hashes of downloaded and already existing data
            if not existing_data_hash == json_data_hash:
                sys.stdout.write("+")
                sys.stdout.flush()
                self.mod_timestamps[url] = json_data_hash
                return json.dumps(json_data, indent=2)
            else:
                sys.stdout.write(".")
                sys.stdout.flush()

    def run(self):
        """
        Runs downloading process for all registered game dates.
        """
        for date in self.game_dates:
            self.current_date = date
            print("+ Downloading summaries for %s" % self.current_date.strftime("%A, %B %d, %Y"))
            self.find_files_to_download()
            self.zip_path = self.get_zip_path()
            self.download_files(self.get_tgt_dir())
            print()
            if self.zip_downloaded_files:
                self.zip_files(self.get_zip_name(), self.get_tgt_dir())
        # persist the modification bookkeeping for the next run
        json.dump(self.mod_timestamps, open(self.mod_timestamp_src, 'w'), indent=2, sort_keys=True)
| |
# Based on a cryptography program written for a Udacity project
# Produces private key and decrypts message.
# Based on counting binary 1's and 0's multiplication used in calculating key.
# In this example, there are 26 multiplications.
# The timing attack is a type of side channel attack.
# http://en.wikipedia.org/wiki/Timing_attack
# This is all of the public information
# of a Diffie-Hellman key exchange plus the number of multiplications
# necessary to calculate (g**b)**a mod p, given g**b where `a` is
# Alice's private key and `b` is Bob's private key
#
# This information should be enough to determine Alice's
# private key and then decrypt the message - which is given at the
# bottom of this file
#
import string
#############
# p and g are public information
#
# 2 ** 100 - 153 is prime
# (from http://primes.utm.edu/lists/2small/0bit.html)
# and verified using Wolfram Alpha
# Prime modulus of the Diffie-Hellman group.
p = 1267650600228229401496703205223
# primitive root (calculated using wolfram alpha)
g = 3
#############
# g_a, g_b are both transmitted public
# and easily intercepted by a passive eavesdropper
#
# g_a = g**a mod p
# g_b = g**b mod p
g_a = 142621255265782287951127214876
g_b = 609743693736442153553407144551
#############
# Unfortunately, for Alice, she is using a modular
# exponentiation function and so it is possible to count the number of
# multiplications used to calculate the key
n_multiplications = 26
# binary conversion
# "{0:b}".format(10)
############################
# This eliminates the recursion in the mod_exp
# and does bitwise operations to speed things up a bit
# but the number of multiplications stays the same
def mod_exp(a, b, q):
    """return a**b % q

    Iterative square-and-multiply: multiply in the current square whenever
    the low exponent bit is set, then square and shift.  The early break
    skips the final (useless) squaring, keeping the multiplication count
    observable by the timing attack.
    """
    acc = 1
    square = a
    exp = b
    while exp > 0:
        if exp & 1:
            acc = (acc * square) % q
            exp -= 1
            if exp == 0:
                break
        square = (square * square) % q
        exp >>= 1
    return acc
# `count_multiplications` might be useful
# to see if you've found an exponent that
# would require the same number multiplications
# as Alice's private key
def count_multiplications(exponent):
    """return the number of multiplications
    necessary to raise a number to `exponent`"""
    # one multiply per set bit, one squaring per remaining shift, minus the
    # two that square-and-multiply skips at the edges
    bits = convert_to_bits(exponent)
    return len(bits) + sum(bits) - 2
# this is the encode function used to
# create the cipher text found at the bottom of the file
def encode(plaintext, key):
    # XOR each message bit with the matching key bit; the key must be at
    # least as long as the message.
    assert len(plaintext) <= len(key)
    return [bit ^ pad for bit, pad in zip(plaintext, key)]
# use this function to decrypt the ciphertext
def decode(ciphertext, key):
    """XOR the ciphertext bits with the key bits to recover plaintext bits.

    The key (a bit list) must be at least as long as the ciphertext.
    """
    # BUG FIX: the original guard compared against len(str(key)) -- the
    # length of the key's textual repr -- which made the check meaningless.
    # Mirror the length check used in encode().
    assert len(ciphertext) <= len(key)
    return [c ^ k for c, k in zip(ciphertext, key)]
# is_valid returns True if the input consist of valid
# characters (numbers, upper case A-Z and lower case a-z and space)
# The message still might be garbage, but this is a decent
# and reasonably fast preliminary filter
# string.printable[:62] is digits + lowercase + uppercase ASCII letters;
# a space is allowed on top of those.
valid_chars = set(c for c in string.printable[:62])
valid_chars.add(' ')
def is_valid(decode_guess):
    # The target plaintext is known to be exactly 14 characters long.
    if len(decode_guess) != 14:
        return False
    return all(ch in valid_chars for ch in decode_guess)
# Below are the typical bit manipulation functions
# that you might find useful
# Note that ASCII_BITS is set to 7 for this problem
BITS = ('0', '1')  # bit-value -> display character
ASCII_BITS = 7  # message characters are 7-bit ASCII
def display_bits(b):
    """converts list of {0, 1}* to string"""
    return ''.join(BITS[bit] for bit in b)
def seq_to_bits(seq):
    """Map a '0'/'1' character sequence to a list of int bits."""
    return [int(ch != '0') for ch in seq]
def pad_bits(bits, pad):
    """pads seq with leading 0s up to length pad"""
    assert len(bits) <= pad
    padding = pad - len(bits)
    return [0] * padding + bits
def convert_to_bits(n):
    """Convert a non-negative integer `n` to a big-endian bit list.

    Returns [0] for n == 0 so every value yields at least one bit.
    """
    if n == 0:
        return [0]
    result = []
    while n > 0:
        result.append(n & 1)
        # BUG FIX: was `n = n / 2`, which is true (float) division under
        # Python 3 and silently corrupts the loop; a right-shift is the
        # intended halving and behaves identically on Python 2.
        n >>= 1
    result.reverse()
    return result
def string_to_bits(s):
    """Encode string `s` as a flat list of ASCII_BITS-wide bit groups."""
    def chr_to_bit(c):
        return pad_bits(convert_to_bits(ord(c)), ASCII_BITS)
    bits = []
    for ch in s:
        bits.extend(chr_to_bit(ch))
    return bits
def bits_to_char(b):
    """Decode one ASCII_BITS-wide bit group back into a character."""
    assert len(b) == ASCII_BITS
    value = 0
    for bit in b:
        value = (value << 1) | bit
    return chr(value)
def list_to_string(p):
    """Concatenate the elements of `p` into a single string."""
    return ''.join(p)
def bits_to_string(b):
    """Decode a flat bit list back into the string it encodes."""
    chars = [bits_to_char(b[i:i + ASCII_BITS])
             for i in range(0, len(b), ASCII_BITS)]
    return ''.join(chars)
############
# `ciphertext` is the observed message exchanged between Alice
# and Bob - which is what you need to decrypt
#
# key = convert_to_bits(mod_exp(g_b, a, p))
# ciphertext = encode(string_to_bits(plaintext), key)
# The intercepted bits, reconstructed from their ASCII rendering.
ciphertext = string_to_bits(' x\x0br\x1fu/W\x00gJ@h#')
###########
# `plaintext` is the variable to set
# with the decrypted message
plaintext = "" # answer here
# Might be a useful test function.
# If you've calculated Alice's key
# and the plaintext, you can
# calculate a cipher-text to see
# if it matches the given `ciphertext`
def test(alices_key, plaintext):
    """Re-encrypt `plaintext` under the recovered exponent and compare
    with the intercepted ciphertext.  Relies on module-level g_b and p.
    NOTE(review): shadowed below by `test = g**67`; rename one of them."""
    key = convert_to_bits(mod_exp(g_b, alices_key, p))
    test_cipher = encode(string_to_bits(plaintext), key)
    return test_cipher == ciphertext
### uncomment to run
# print test(alices_key, plaintext)
#print p
# Binary renderings of the public Diffie-Hellman values for inspection.
binp = "{0:b}".format(p)
#print binp
#print g_a
binga = "{0:b}".format(g_a)
#print binga
#print g_b
bingb = "{0:b}".format(g_b)
#print bingb
# NOTE(review): `test` here shadows the test() function defined above.
test = g**67
#print test
#print "{0:b}".format(test)
import math
# Upper bound for the exponent search range: log_g(g_a).
ceiling = math.log(g_a,g)
#print ceiling
#print "--------- \n\n"
candidates = []
for i in range(int(math.ceil(ceiling))):
    candidates.append(pow(g,g_b*i,p))
#print [i for i in candidates]
import math
# NOTE(review): aliasing itertools as `iter` shadows the built-in iter().
import itertools as iter
def generate_possibilities(floor, roof):
    """Yield candidate exponents in (floor, roof], walking down from roof.

    For each power-of-two prefix `a`, re-add every combination of lower
    set bits, producing candidates with few set bits first (i.e. cheap
    mod_exp multiplication counts).
    """
    import itertools  # local import: the module-level `iter` alias shadows the builtin
    a = roof
    max_length = int(math.log(roof, 2))
    while a > floor:
        log = int(math.log(a, 2))
        ones = max_length - log
        for some_ones in itertools.combinations(range(log), ones):
            yield a + sum(2 ** one for one in some_ones)
        # BUG FIX: was `a = a / 2`, true (float) division under Python 3,
        # which degrades the loop variable; floor-divide keeps it integral
        # and matches the Python-2 behavior exactly.
        a //= 2
# should generate between floor and roof where the binary multiplications match n_multiplications
def find_possibility():
    """Collect every exponent in (2, 2**26] whose public value equals g_a."""
    return [candidate
            for candidate in generate_possibilities(2, 2**26)
            if pow(g, candidate, p) == g_a]
# Take the first exponent that reproduces Alice's public value.
a = find_possibility()[0]
# key = g_b**a ## too slow, use pow()
key = pow(g_b, a, p) ## pow() handles the larger numbers better
# The shared secret's bits form the actual one-time-pad key.
key = convert_to_bits(mod_exp(g_b, a, p))
print bits_to_string(decode(ciphertext, key))
| |
# -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3crud import S3CRUD
from s3.s3filter import S3DateFilter, S3LocationFilter, S3OptionsFilter, S3TextFilter
from s3.s3resource import S3FieldSelector
from s3.s3utils import s3_avatar_represent
# Theme name: selects the template folder under private/templates/.
THEME = "NEREIDS"
# =============================================================================
class index():
    """ Custom Home Page """
    def __call__(self):
        """Build the home page: custom view, image carousel and, for
        logged-in users, the four latest Events and Updates datalists."""
        response = current.response
        output = {}
        #output["title"] = response.title = current.deployment_settings.get_system_name()
        view = path.join(current.request.folder, "private", "templates",
                         THEME, "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % view)
        s3 = response.s3
        # Image Carousel
        s3.jquery_ready.append('''$('#myCarousel').carousel()''')
        if current.auth.is_logged_in():
            s3db = current.s3db
            # Latest 4 Events
            resource = s3db.resource("cms_post")
            resource.add_filter(S3FieldSelector("series_id$name") == "Event")
            list_fields = ["location_id",
                           "date",
                           "body",
                           "created_by",
                           "created_by$organisation_id",
                           "document.file",
                           "event_post.event_id",
                           ]
            orderby = resource.get_config("list_orderby",
                                          ~resource.table.date)
            datalist, numrows, ids = resource.datalist(fields=list_fields,
                                                       start=None,
                                                       limit=4,
                                                       listid="event_datalist",
                                                       orderby=orderby,
                                                       layout=render_cms_events)
            if numrows == 0:
                # Empty table or just no match?
                table = resource.table
                if "deleted" in table:
                    available_records = current.db(table.deleted != True)
                else:
                    available_records = current.db(table._id > 0)
                # Distinguish "no records at all" from "filter matched nothing"
                if available_records.select(table._id,
                                            limitby=(0, 1)).first():
                    msg = DIV(S3CRUD.crud_string(resource.tablename,
                                                 "msg_no_match"),
                              _class="empty")
                else:
                    msg = DIV(S3CRUD.crud_string(resource.tablename,
                                                 "msg_list_empty"),
                              _class="empty")
                data = msg
            else:
                # Render the list
                dl = datalist.html()
                data = dl
            output["events"] = data
            # Latest 4 Updates
            resource = s3db.resource("cms_post")
            list_fields = ["series_id",
                           "location_id",
                           "date",
                           "body",
                           "created_by",
                           "created_by$organisation_id",
                           "document.file",
                           "event_post.event_id",
                           ]
            orderby = resource.get_config("list_orderby",
                                          ~resource.table.date)
            datalist, numrows, ids = resource.datalist(fields=list_fields,
                                                       start=None,
                                                       limit=4,
                                                       listid="news_datalist",
                                                       orderby=orderby,
                                                       layout=s3.render_posts)
            if numrows == 0:
                # Empty table or just no match?
                table = resource.table
                if "deleted" in table:
                    available_records = current.db(table.deleted != True)
                else:
                    available_records = current.db(table._id > 0)
                if available_records.select(table._id,
                                            limitby=(0, 1)).first():
                    msg = DIV(S3CRUD.crud_string(resource.tablename,
                                                 "msg_no_match"),
                              _class="empty")
                else:
                    msg = DIV(S3CRUD.crud_string(resource.tablename,
                                                 "msg_list_empty"),
                              _class="empty")
                data = msg
            else:
                # Render the list
                dl = datalist.html()
                data = dl
            output["news"] = data
        return output
# =============================================================================
class datalist():
    """ Alternate URL for Updates page """
    def __call__(self):
        # Every Updates entry point funnels through the shared controller.
        return _updates()
# =============================================================================
class datalist_dl_post():
    """ AJAX URL for CMS Posts (for Updates page) """
    def __call__(self):
        # _updates() inspects request.args to detect this AJAX variant.
        return _updates()
# =============================================================================
class datalist_dl_filter():
    """ AJAX URL for CMS Posts Filter Form (for Updates page) """
    def __call__(self):
        # Filter-form option refreshes are also served by _updates().
        return _updates()
# =============================================================================
class login():
    """ Custom Login page """
    def __call__(self):
        # NOTE(review): _login() is not defined anywhere in this file, so
        # calling this controller raises NameError at runtime - confirm
        # where _login is meant to come from or restore its definition.
        return _login()
# =============================================================================
class updates():
    """ Updates page """
    def __call__(self):
        # The canonical (non-AJAX) Updates page.
        return _updates()
# =============================================================================
class validate():
    """ Alternate URL for Updates page """
    def __call__(self):
        # validate.json requests are routed through the same controller.
        return _updates()
# =============================================================================
def _updates():
    """
        Custom Page
        - Filterable DataList of CMS Posts & a DataList of Events
    """
    #if not current.auth.is_logged_in():
    #    current.auth.permission.fail()
    T = current.T
    s3db = current.s3db
    request = current.request
    response = current.response
    s3 = response.s3
    current.deployment_settings.ui.customize_cms_post()
    list_layout = s3.render_posts
    filter_widgets = [S3TextFilter(["body"],
                                   label="",
                                   _class="filter-search",
                                   #_placeholder=T("Search").upper(),
                                   ),
                      S3OptionsFilter("series_id",
                                      label=T("Filter by Type"),
                                      represent="%(name)s",
                                      widget="multiselect",
                                      cols=3,
                                      hidden=True,
                                      ),
                      S3LocationFilter("location_id",
                                       label=T("Filter by Location"),
                                       levels=["L1", "L2", "L3"],
                                       widget="multiselect",
                                       cols=3,
                                       hidden=True,
                                       ),
                      S3OptionsFilter("created_by$organisation_id",
                                      label=T("Filter by Organization"),
                                      represent="%(name)s",
                                      widget="multiselect",
                                      cols=3,
                                      hidden=True,
                                      ),
                      S3DateFilter("created_on",
                                   label=T("Filter by Date"),
                                   hide_time=True,
                                   hidden=True,
                                   ),
                      ]
    s3db.configure("cms_post",
                   # We use a custom Advanced widget
                   filter_advanced = False,
                   filter_formstyle = filter_formstyle,
                   filter_submit = (T("SEARCH"), "btn btn-primary"),
                   filter_widgets = filter_widgets,
                   list_layout = list_layout,
                   # Create form comes via AJAX in a Modal
                   insertable = False,
                   )
    s3.dl_pagelength = 6 # 5 forces an AJAX call
    # Work out whether this is one of the AJAX sub-requests and rewrite
    # request.args so the REST controller sees the expected method.
    if "datalist_dl_post" in request.args:
        # DataList pagination or Ajax-deletion request
        request.args = ["datalist_f"]
        ajax = "list"
    elif "datalist_dl_filter" in request.args:
        # FilterForm options update request
        request.args = ["filter"]
        ajax = "filter"
    elif "validate.json" in request.args:
        ajax = True
    else:
        # Default
        request.args = ["datalist_f"]
        ajax = None
    def prep(r):
        # Force the representation expected by the AJAX client.
        if ajax == "list":
            r.representation = "dl"
        elif ajax == "filter":
            r.representation = "json"
        return True
    s3.prep = prep
    output = current.rest_controller("cms", "post",
                                    list_ajaxurl = URL(f="index",
                                                       args="datalist_dl_post"),
                                    filter_ajax_url = URL(f="index",
                                                          args="datalist_dl_filter",
                                                          vars={}))
    if ajax == "list":
        # Don't override view if this is an Ajax-deletion request
        if not "delete" in request.get_vars:
            response.view = "plain.html"
    elif not ajax:
        # Set Title & View after REST Controller, in order to override
        output["title"] = T("News Feed")
        view = path.join(request.folder, "private", "templates",
                         THEME, "views", "updates.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % view)
        scripts = []
        sappend = scripts.append
        # Style the Search TextFilter widget
        sappend('''$('#post-cms_post_body-text-filter__row').addClass('input-append').append('<span class="add-on"><i class="icon-search"></i></span>')''')
        # Button to toggle Advanced Form
        sappend('''$('#list-filter').append('<a class="accordion-toggle"><i class="icon-reorder"></i> %s</a>')''' % T("Advanced Search"))
        sappend('''$('.accordion-toggle').click(function(){$('.advanced').toggle()})''')
        s3.jquery_ready.append('''\n'''.join(scripts))
        # Latest 5 Disasters
        resource = s3db.resource("event_event")
        list_fields = ["name",
                       "event_type_id$name",
                       "zero_hour",
                       "closed",
                       ]
        orderby = resource.get_config("list_orderby",
                                      ~resource.table.created_on)
        datalist, numrows, ids = resource.datalist(fields=list_fields,
                                                   start=None,
                                                   limit=5,
                                                   listid="event_datalist",
                                                   orderby=orderby,
                                                   layout=render_events)
        # Render the list
        data = datalist.html()
        if numrows == 0:
            # Empty table or just no match?
            table = resource.table
            if "deleted" in table:
                available_records = current.db(table.deleted != True)
            else:
                available_records = current.db(table._id > 0)
            # Distinguish "no records at all" from "filter matched nothing"
            if available_records.select(table._id,
                                        limitby=(0, 1)).first():
                msg = DIV(S3CRUD.crud_string(resource.tablename,
                                             "msg_no_match"),
                          _class="empty")
            else:
                msg = DIV(S3CRUD.crud_string(resource.tablename,
                                             "msg_list_empty"),
                          _class="empty")
            data.insert(1, msg)
        output["disasters"] = data
    return output
# -----------------------------------------------------------------------------
def filter_formstyle(row_id, label, widget, comment, hidden=False):
    """
        Custom Formstyle for FilterForm
        @param row_id: HTML id for the row
        @param label: the label
        @param widget: the form widget
        @param comment: the comment (unused)
        @param hidden: whether the row should initially be hidden or not
    """
    css_class = "advanced hide" if hidden else ""
    if label:
        return DIV(label, widget, _id=row_id, _class=css_class)
    return DIV(widget, _id=row_id, _class=css_class)
# -----------------------------------------------------------------------------
def render_events(listid, resource, rfields, record, **attr):
    """
        Custom dataList item renderer for 'Disasters' on the Updates page
        @param listid: the HTML ID for this list
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        @param attr: additional HTML attributes for the item
    """
    pkey = "event_event.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
        # NOTE(review): record_id stays unbound on this path but is used
        # below (permissions and profile URL) - confirm this branch only
        # ever runs for list templates.
    item_class = "thumbnail"
    raw = record._row
    name = record["event_event.name"]
    date = record["event_event.zero_hour"]
    closed = raw["event_event.closed"]
    event_type = record["event_event_type.name"]
    if closed:
        # Closed events get no edit controls
        edit_bar = DIV()
    else:
        item_class = "%s disaster" % item_class
        permit = current.auth.s3_has_permission
        table = resource.table
        if permit("update", table, record_id=record_id):
            edit_btn = A(I(" ", _class="icon icon-edit"),
                         _href=URL(c="event", f="event",
                                   args=[record_id, "update.popup"],
                                   vars={"refresh": listid,
                                         "record": record_id}),
                         _class="s3_modal",
                         _title=current.response.s3.crud_strings.event_event.title_update,
                         )
        else:
            edit_btn = ""
        if permit("delete", table, record_id=record_id):
            delete_btn = A(I(" ", _class="icon icon-remove-sign"),
                           _class="dl-item-delete",
                           )
        else:
            delete_btn = ""
        edit_bar = DIV(edit_btn,
                       delete_btn,
                       _class="edit-bar fright",
                       )
    # Render the item
    item = DIV(DIV(A(IMG(_class="media-object",
                         _src=URL(c="static",
                                  f="themes",
                                  args=["DRMP", "img", "%s.png" % event_type]),
                         ),
                     _class="pull-left",
                     _href="#",
                     ),
                   edit_bar,
                   DIV(A(H5(name,
                            _class="media-heading"),
                         SPAN(date,
                              _class="date-title",
                              ),
                         _href=URL(c="event", f="event",
                                   args=[record_id, "profile"]),
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_cms_events(listid, resource, rfields, record, **attr):
    """
        Custom dataList item renderer for 'Events' on the Home page
        @param listid: the HTML ID for this list
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        @param attr: additional HTML attributes for the item
    """
    T = current.T
    pkey = "cms_post.id"
    # Construct the item ID
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        item_id = "%s-[id]" % listid
        # NOTE(review): record_id stays unbound on this path but is used
        # for the permission checks below - confirm templates never reach
        # that code.
    item_class = "thumbnail"
    raw = record._row
    series = "Event"
    date = record["cms_post.date"]
    body = record["cms_post.body"]
    location = record["cms_post.location_id"]
    location_id = raw["cms_post.location_id"]
    location_url = URL(c="gis", f="location", args=[location_id])
    author = record["cms_post.created_by"]
    author_id = raw["cms_post.created_by"]
    organisation = record["auth_user.organisation_id"]
    organisation_id = raw["auth_user.organisation_id"]
    org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    # @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
    avatar = s3_avatar_represent(author_id,
                                 _class="media-object",
                                 _style="width:50px;padding:5px;padding-top:0px;")
    db = current.db
    ltable = current.s3db.pr_person_user
    ptable = db.pr_person
    # Resolve the author's person record so we can link to their profile
    query = (ltable.user_id == author_id) & \
            (ltable.pe_id == ptable.pe_id)
    row = db(query).select(ptable.id,
                           limitby=(0, 1)
                           ).first()
    if row:
        person_url = URL(c="hrm", f="person", args=[row.id])
    else:
        person_url = "#"
    author = A(author,
               _href=person_url,
               )
    avatar = A(avatar,
               _href=person_url,
               _class="pull-left",
               )
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = db.cms_post
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="cms", f="post",
                               args=[record_id, "update.popup"],
                               vars={"refresh": listid,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=T("Edit Event"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-remove-sign"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Dropdown of available documents
    documents = raw["doc_document.file"]
    if documents:
        if not isinstance(documents, list):
            documents = [documents]
        doc_list = UL(_class="dropdown-menu",
                      _role="menu",
                      )
        retrieve = db.doc_document.file.retrieve
        for doc in documents:
            try:
                doc_name = retrieve(doc)[0]
            except IOError:
                doc_name = current.messages["NONE"]
            doc_url = URL(c="default", f="download",
                          args=[doc])
            doc_item = LI(A(I(_class="icon-file"),
                            " ",
                            doc_name,
                            _href=doc_url,
                            ),
                          _role="menuitem",
                          )
            doc_list.append(doc_item)
        docs = DIV(A(I(_class="icon-paper-clip"),
                     SPAN(_class="caret"),
                     _class="btn dropdown-toggle",
                     _href="#",
                     **{"_data-toggle": "dropdown"}
                     ),
                   doc_list,
                   _class="btn-group attachments dropdown pull-right",
                   )
    else:
        docs = ""
    # Render the item
    item = DIV(DIV(I(SPAN(" %s" % T("Event"),
                          _class="card-title",
                          ),
                     _class="icon icon-%s" % series.lower().replace(" ", "_"),
                     ),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(body,
                           DIV(author,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               docs,
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
class glossary():
    """
        Custom page
    """
    def __call__(self):
        """Serve the static Glossary page through the theme's custom view."""
        view = path.join(current.request.folder, "private", "templates",
                         THEME, "views", "glossary.html")
        try:
            # Pass view as file not str to work in compiled mode
            current.response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % view)
        return dict(title=current.T("Glossary"))
# END =========================================================================
| |
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Not that all markdown classes above are ignorant about
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at the run time.
Footnote functionality is attached by calling extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow its state to be reset by a call to reset()
method.
Example:
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..treeprocessors import Treeprocessor
from ..postprocessors import Postprocessor
from ..util import etree, text_type
from ..odict import OrderedDict
import re
FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz"
DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(Extension):
    """ Footnote Extension: registers all footnote processors and holds
    the per-document footnote state. """

    def __init__(self, configs):
        """ Setup configs.

        `configs` is an iterable of (key, value) pairs overriding the
        defaults declared below.
        """
        self.config = {'PLACE_MARKER':
                       ["///Footnotes Go Here///",
                        "The text string that marks where the footnotes go"],
                       'UNIQUE_IDS':
                       [False,
                        "Avoid name collisions across "
                        "multiple calls to reset()."],
                       "BACKLINK_TEXT":
                       ["↩",
                        "The text string that links from the footnote to the reader's place."]
                       }
        for key, value in configs:
            self.config[key][0] = value
        # In multiple invocations, emit links that don't get tangled.
        self.unique_prefix = 0
        self.reset()

    def extendMarkdown(self, md, md_globals):
        """ Add pieces to Markdown. """
        md.registerExtension(self)
        self.parser = md.parser
        self.md = md
        # Insert a preprocessor before ReferencePreprocessor
        md.preprocessors.add("footnote", FootnotePreprocessor(self),
                             "<reference")
        # Insert an inline pattern before ImageReferencePattern
        FOOTNOTE_RE = r'\[\^([^\]]*)\]'  # blah blah [^1] blah
        md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
                              "<reference")
        # Insert a tree-processor that would actually add the footnote div
        # This must be before all other treeprocessors (i.e., inline and
        # codehilite) so they can run on the contents of the div.
        md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
                              "_begin")
        # Insert a postprocessor after the amp_substitute processor
        md.postprocessors.add("footnote", FootnotePostprocessor(self),
                              ">amp_substitute")

    def reset(self):
        """ Clear the footnotes on reset, and prepare for a distinct document. """
        self.footnotes = OrderedDict()
        self.unique_prefix += 1

    def findFootnotesPlaceholder(self, root):
        """ Return ElementTree Element that contains Footnote placeholder.

        Returns a (child, parent, is_text) triple - is_text says whether
        the marker was found in .text (True) or .tail (False) - or None
        when the placeholder does not occur anywhere in the tree.
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, False
                # BUG FIX: the recursive result used to be discarded, so a
                # placeholder nested more than one level deep was never found.
                child_res = finder(child)
                if child_res is not None:
                    return child_res
            return None
        res = finder(root)
        return res

    def setFootnote(self, id, text):
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def get_separator(self):
        """ Return the id separator: '-' for (x)html5 output, ':' otherwise. """
        if self.md.output_format in ['html5', 'xhtml5']:
            return '-'
        return ':'

    def makeFootnoteId(self, id):
        """ Return footnote link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
        else:
            return 'fn%s%s' % (self.get_separator(), id)

    def makeFootnoteRefId(self, id):
        """ Return footnote back-link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
        else:
            return 'fnref%s%s' % (self.get_separator(), id)

    def makeFootnotesDiv(self, root):
        """ Return div of footnotes as et Element, or None if there are none. """
        if not list(self.footnotes.keys()):
            return None
        div = etree.Element("div")
        div.set('class', 'footnote')
        etree.SubElement(div, "hr")
        ol = etree.SubElement(div, "ol")
        for id in self.footnotes.keys():
            li = etree.SubElement(ol, "li")
            li.set("id", self.makeFootnoteId(id))
            self.parser.parseChunk(li, self.footnotes[id])
            backlink = etree.Element("a")
            backlink.set("href", "#" + self.makeFootnoteRefId(id))
            if self.md.output_format not in ['html5', 'xhtml5']:
                backlink.set("rev", "footnote")  # Invalid in HTML5
            backlink.set("class", "footnote-backref")
            backlink.set("title", "Jump back to footnote %d in the text" %
                         (self.footnotes.index(id) + 1))
            backlink.text = FN_BACKLINK_TEXT
            # len(li) instead of the deprecated Element.getchildren(),
            # which was removed from ElementTree in Python 3.9.
            if len(li):
                node = li[-1]
                if node.tag == "p":
                    node.text = node.text + NBSP_PLACEHOLDER
                    node.append(backlink)
                else:
                    p = etree.SubElement(li, "p")
                    p.append(backlink)
        return div
class FootnotePreprocessor(Preprocessor):
    """ Find all footnote references and store for later use. """
    def __init__ (self, footnotes):
        # Parent FootnoteExtension; receives the collected definitions.
        self.footnotes = footnotes
    def run(self, lines):
        """
        Loop through lines and find, set, and remove footnote definitions.
        Keywords:
        * lines: A list of lines of text
        Return: A list of lines of text with footnote definitions removed.
        """
        newlines = []
        i = 0
        while True:
            m = DEF_RE.match(lines[i])
            if m:
                # Definition found: gather its (possibly indented) body
                fn, _i = self.detectTabbed(lines[i+1:])
                fn.insert(0, m.group(2))
                i += _i-1 # skip past footnote
                self.footnotes.setFootnote(m.group(1), "\n".join(fn))
            else:
                newlines.append(lines[i])
            if len(lines) > i+1:
                i += 1
            else:
                break
        return newlines
    def detectTabbed(self, lines):
        """ Find indented text and remove indent before further processing.
        Keyword arguments:
        * lines: an array of strings
        Returns: a list of post processed items and the index of last line.
        """
        items = []
        blank_line = False # have we encountered a blank line yet?
        i = 0 # to keep track of where we are
        def detab(line):
            # Strip one level of indent (tab or 4 spaces); None if not indented
            match = TABBED_RE.match(line)
            if match:
                return match.group(4)
        for line in lines:
            if line.strip(): # Non-blank line
                detabbed_line = detab(line)
                if detabbed_line:
                    items.append(detabbed_line)
                    i += 1
                    continue
                elif not blank_line and not DEF_RE.match(line):
                    # not tabbed but still part of first par.
                    items.append(line)
                    i += 1
                    continue
                else:
                    return items, i+1
            else: # Blank line: _maybe_ we are done.
                blank_line = True
                i += 1 # advance
                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next_line = lines[j]; break
                else:
                    break # There is no more text; we are done.
                # Check if the next non-blank line is tabbed
                if detab(next_line): # Yes, more work to do.
                    items.append("")
                    continue
                else:
                    break # No, we are done.
        else:
            i += 1
        return items, i
class FootnotePattern(Pattern):
    """ InlinePattern for footnote markers in a document's body text. """
    def __init__(self, pattern, footnotes):
        super(FootnotePattern, self).__init__(pattern)
        self.footnotes = footnotes
    def handleMatch(self, m):
        """Turn a [^id] marker into <sup><a>..</a></sup>, if `id` is known."""
        footnote_id = m.group(2)
        if footnote_id not in self.footnotes.footnotes.keys():
            return None
        sup = etree.Element("sup")
        a = etree.SubElement(sup, "a")
        sup.set('id', self.footnotes.makeFootnoteRefId(footnote_id))
        a.set('href', '#' + self.footnotes.makeFootnoteId(footnote_id))
        if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
            a.set('rel', 'footnote')  # invalid in HTML5
        a.set('class', 'footnote-ref')
        a.text = text_type(self.footnotes.footnotes.index(footnote_id) + 1)
        return sup
class FootnoteTreeprocessor(Treeprocessor):
    """ Build and append footnote div to end of document. """
    def __init__(self, footnotes):
        # Parent FootnoteExtension, supplies the rendered footnote div.
        self.footnotes = footnotes
    def run(self, root):
        """Insert the footnotes <div> at the placeholder, or append it."""
        footnotesDiv = self.footnotes.makeFootnotesDiv(root)
        if footnotesDiv:
            result = self.footnotes.findFootnotesPlaceholder(root)
            if result:
                child, parent, isText = result
                # BUG FIX: Element.getchildren() was deprecated and removed
                # in Python 3.9 - list(parent) is the supported equivalent.
                ind = list(parent).index(child)
                if isText:
                    parent.remove(child)
                    parent.insert(ind, footnotesDiv)
                else:
                    parent.insert(ind + 1, footnotesDiv)
                    child.tail = None
            else:
                root.append(footnotesDiv)
class FootnotePostprocessor(Postprocessor):
    """ Replace placeholders with html entities. """
    def __init__(self, footnotes):
        self.footnotes = footnotes
    def run(self, text):
        """Swap the sentinel tokens for their configured final text."""
        backlink_text = self.footnotes.getConfig("BACKLINK_TEXT")
        text = text.replace(FN_BACKLINK_TEXT, backlink_text)
        return text.replace(NBSP_PLACEHOLDER, " ")
def makeExtension(configs=None):
    """ Return an instance of the FootnoteExtension.

    `configs` is an iterable of (key, value) config overrides; None means
    no overrides.  (The previous default of a mutable `[]` was a shared
    mutable-default hazard, and an explicit None used to crash.)
    """
    return FootnoteExtension(configs=configs if configs is not None else [])
| |
"""Helpers that help with state related things."""
import asyncio
import datetime as dt
import json
import logging
from collections import defaultdict
from types import TracebackType
from typing import ( # noqa: F401 pylint: disable=unused-import
Awaitable, Dict, Iterable, List, Optional, Tuple, Type, Union)
from homeassistant.loader import bind_hass
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.mysensors.switch import (
ATTR_IR_CODE, SERVICE_SEND_IR_CODE)
from homeassistant.components.cover import (
ATTR_POSITION, ATTR_TILT_POSITION)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_OPTION, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_LOCK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK,
SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED,
STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF,
STATE_ON, STATE_OPEN, STATE_UNKNOWN,
STATE_UNLOCKED, SERVICE_SELECT_OPTION)
from homeassistant.core import (
Context, State, DOMAIN as HASS_DOMAIN)
from homeassistant.util.async_ import run_coroutine_threadsafe
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
# Group entities are reproduced via the core homeassistant domain services.
GROUP_DOMAIN = 'group'
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
    SERVICE_NOTIFY: [ATTR_MESSAGE],
    SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
    SERVICE_SELECT_OPTION: [ATTR_OPTION],
    SERVICE_SET_COVER_POSITION: [ATTR_POSITION],
    SERVICE_SET_COVER_TILT_POSITION: [ATTR_TILT_POSITION]
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
    SERVICE_TURN_ON: STATE_ON,
    SERVICE_TURN_OFF: STATE_OFF,
    SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
    SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
    SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
    SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
    SERVICE_LOCK: STATE_LOCKED,
    SERVICE_UNLOCK: STATE_UNLOCKED,
    SERVICE_OPEN_COVER: STATE_OPEN,
    SERVICE_CLOSE_COVER: STATE_CLOSED
}
class AsyncTrackStates:
    """
    Context manager collecting state changes.

    Record the time when the with-block is entered; on exit, extend the
    list handed out by __enter__ with every state changed since then.
    Must be run within the event loop.
    """
    def __init__(self, hass: HomeAssistantType) -> None:
        """Initialize a TrackStates block."""
        self.hass = hass
        self.states = []  # type: List[State]

    # pylint: disable=attribute-defined-outside-init
    def __enter__(self) -> List[State]:
        """Record time from which to track changes."""
        self.now = dt_util.utcnow()
        return self.states

    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_value: Optional[BaseException],
                 traceback: Optional[TracebackType]) -> None:
        """Add the states changed since __enter__ to the tracked list."""
        all_states = self.hass.states.async_all()
        self.states.extend(get_changed_since(all_states, self.now))
def get_changed_since(states: Iterable[State],
                      utc_point_in_time: dt.datetime) -> List[State]:
    """Return list of states that have been changed since utc_point_in_time."""
    def _changed(state: State) -> bool:
        return state.last_updated >= utc_point_in_time
    return list(filter(_changed, states))
@bind_hass
def reproduce_state(hass: HomeAssistantType,
                    states: Union[State, Iterable[State]],
                    blocking: bool = False) -> None:
    """Reproduce given state.

    Thread-safe wrapper: schedules the async variant on the event loop and
    blocks the calling (non-loop) thread until it completes.
    """
    coro = async_reproduce_state(hass, states, blocking)
    return run_coroutine_threadsafe(coro, hass.loop).result()  # type: ignore
@bind_hass
async def async_reproduce_state(
        hass: HomeAssistantType,
        states: Union[State, Iterable[State]],
        blocking: bool = False,
        context: Optional[Context] = None) -> None:
    """Reproduce a list of states on multiple domains."""
    if isinstance(states, State):
        states = [states]
    # Bucket the wanted states per domain so each domain is handled once.
    by_domain = defaultdict(list)  # type: Dict[str, List[State]]
    for state in states:
        by_domain[state.domain].append(state)

    async def worker(domain: str, data: List[State]) -> None:
        # Prefer the component's own reproduce implementation when present;
        # otherwise fall back to the generic service-call based path.
        component = getattr(hass.components, domain)
        if hasattr(component, 'async_reproduce_states'):
            await component.async_reproduce_states(data, context=context)
        else:
            await async_reproduce_state_legacy(
                hass, domain, data, blocking=blocking, context=context)

    if by_domain:
        # run all domains in parallel
        await asyncio.gather(*(worker(domain, data)
                               for domain, data in by_domain.items()))
@bind_hass
async def async_reproduce_state_legacy(
        hass: HomeAssistantType,
        domain: str,
        states: Iterable[State],
        blocking: bool = False,
        context: Optional[Context] = None) -> None:
    """Reproduce given state.

    For each wanted state, pick a domain service either by required
    attributes (SERVICE_ATTRIBUTES) or by target state (SERVICE_TO_STATE),
    group entity_ids per (service, attributes) pair, and fire the calls.
    """
    to_call = defaultdict(list)  # type: Dict[Tuple[str, str], List[str]]
    # Group states are serviced via the core homeassistant domain.
    if domain == GROUP_DOMAIN:
        service_domain = HASS_DOMAIN
    else:
        service_domain = domain
    for state in states:
        if hass.states.get(state.entity_id) is None:
            _LOGGER.warning("reproduce_state: Unable to find entity %s",
                            state.entity_id)
            continue
        domain_services = hass.services.async_services().get(service_domain)
        if not domain_services:
            _LOGGER.warning(
                "reproduce_state: Unable to reproduce state %s (1)", state)
            continue
        service = None
        # An exact SERVICE_TO_STATE match wins and stops the search;
        # otherwise the last attribute-compatible service is kept.
        for _service in domain_services.keys():
            if (_service in SERVICE_ATTRIBUTES and
                    all(attr in state.attributes
                        for attr in SERVICE_ATTRIBUTES[_service]) or
                    _service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                service = _service
            if (_service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                break
        if not service:
            _LOGGER.warning(
                "reproduce_state: Unable to reproduce state %s (2)", state)
            continue
        # We group service calls for entities by service call
        # json used to create a hashable version of dict with maybe lists in it
        key = (service,
               json.dumps(dict(state.attributes), sort_keys=True))
        to_call[key].append(state.entity_id)
    domain_tasks = []  # type: List[Awaitable[Optional[bool]]]
    for (service, service_data), entity_ids in to_call.items():
        data = json.loads(service_data)
        data[ATTR_ENTITY_ID] = entity_ids
        domain_tasks.append(
            hass.services.async_call(service_domain, service, data, blocking,
                                     context)
        )
    if domain_tasks:
        await asyncio.wait(domain_tasks, loop=hass.loop)
def state_as_number(state: State) -> float:
    """
    Try to coerce our state to a number.

    Raises ValueError if this is not possible.
    """
    from homeassistant.components.climate.const import (
        STATE_HEAT, STATE_COOL, STATE_IDLE)

    on_states = (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON,
                 STATE_OPEN, STATE_HOME, STATE_HEAT, STATE_COOL)
    off_states = (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
                  STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME,
                  STATE_IDLE)

    if state.state in on_states:
        return 1
    if state.state in off_states:
        return 0
    # Anything else must parse as a number; float() raises ValueError if not.
    return float(state.state)
| |
from django.core.cache import cache
from django.conf import settings
from twython import Twython
import numpy as np
import datetime
import math
'''
Created on Mar 3, 2015
@author: SS
'''
class TwitterAppliance:
    """Facade over a Twython client for a single social account.

    Bundles the authenticated API connection with a TwitterRateThrottle so
    callers can consult rate limits before (and account for hits after)
    talking to Twitter.
    """

    # the twython api interface
    twitter = None
    # TwitterSocialAccount
    social_account = None
    # TwitterRateThrottle
    rate_throttle = None

    def __init__(self, social_account):
        self.social_account = social_account
        self.twitter = self.get_twitter()
        self.rate_throttle = TwitterRateThrottle(social_account)

    def get_twitter(self):
        """Lazily create and cache the authenticated Twython client."""
        if self.twitter is None:
            self.twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
                                   self.social_account.token, self.social_account.secret)
        return self.twitter

    def get_rates(self, resources=None, endpoint=None):
        """Fetch the live rate-limit document from Twitter.

        When ``resources`` is omitted, the throttle's currently-set resource
        family is used (requires rate_throttle.set_endpoint() first).
        NOTE(review): ``endpoint`` is accepted but never used directly here.
        """
        if resources is None:
            resources = self.rate_throttle.resource
        twitter = self.get_twitter()
        rates = twitter.get_application_rate_limit_status(resources=resources)
        return rates

    def check_rate(self, rates, resource, endpoint):
        """Return True while Twitter reports remaining hits for the endpoint.

        TODO move the rates part internal to this
        """
        vals = rates['resources'][resource]['/' + endpoint]
        print(str(vals['remaining']) + " hits left for " + resource + "/" + endpoint)
        return vals['remaining'] > 0

    def get_rate_remaining(self, rates, resource, endpoint):
        """Number of hits Twitter says are left in the current window.

        TODO move the rates part internal to this
        """
        print(rates)
        return rates['resources'][resource]['/' + endpoint]['remaining']

    def get_rate_used(self, rates, resource, endpoint):
        """Hits consumed so far: our configured limit minus Twitter's
        reported remaining count.

        TODO move the rates part internal to this
        """
        print(rates)
        return int(self.rate_throttle.rate_limit_for_endpoint(endpoint)) - rates['resources'][resource]['/' + endpoint]['remaining']

    def hit_system_hard_limit(self, endpoint, sync_with_twitter=False):
        """Whether we have hit the internal (cache-backed) system limit.

        Checks the cached hit count for this user+endpoint against the
        configured limit.  With ``sync_with_twitter`` the local counter is
        first resynchronized from Twitter's own rate-limit document.
        Assumption: system clock is working and in sync with twitter's clock.
        """
        if sync_with_twitter:
            # get the resource
            print('Syncing with twitter - ')
            self.rate_throttle.set_endpoint(endpoint)
            resource = self.rate_throttle.resource
            # get_rate_used() yields the *consumed* count, which is exactly
            # what the local cache counter tracks (was misleadingly named
            # rate_remaining before)
            rate_used = self.get_rate_used(self.get_rates(endpoint=endpoint), resource, endpoint)
            self.set_endpoint_count(endpoint, rate_used)
        return self.rate_throttle.hit_rate_limit(endpoint)

    def clock_resets_in(self, endpoint):
        """Minutes until the current 15-minute rate window rolls over."""
        return self.rate_throttle.clock_resets_in(self.twitter, endpoint)

    def increment_endpoint_count(self, endpoint):
        """Record one more local hit against this endpoint."""
        self.rate_throttle.increment_cache_count(endpoint)

    def set_endpoint_count(self, endpoint, new_count):
        """Force the local hit counter for ``endpoint`` to ``new_count``."""
        if not str(new_count).isdigit():
            raise TwitterApplianceException("New count is not a number ")
        self.rate_throttle.set_cache_count(endpoint, new_count)
class TwitterRateThrottle():
    """
    Used to limit the hits to twitter.

    Should be consulted before each call to twitter.  Tracks per-account,
    per-endpoint hit counts in the Django cache; can also be refreshed from
    the live Twitter API by the owning TwitterAppliance.
    """

    social_account = None
    endpoint = None
    matrix = None
    resource = None

    def __init__(self, social_account):
        self.social_account = social_account
        self.matrix = TwitterRateMatrix()

    def set_endpoint(self, endpoint):
        """Validate ``endpoint`` and remember it plus its resource family."""
        self.matrix.check_endpoint(endpoint)
        self.endpoint = endpoint
        self.resource = self.matrix.get_resource_for_endpoint(endpoint)

    def hit_rate_limit(self, endpoint):
        """The current hits are greater than or equal to the twitter limits."""
        limit = self.rate_limit_for_endpoint(endpoint)
        current_count = self.get_cache_count(endpoint)
        if current_count is None:
            # nothing cached for this window yet; comparing None would
            # raise TypeError on Python 3
            current_count = 0
        # going to cut it off one early because we kept hitting our limits
        return current_count >= (int(limit) - 2)

    def rate_limit_for_endpoint(self, endpoint):
        """Twitter's published per-user 15-minute limit for ``endpoint``."""
        return self.matrix.get_rate_for_endpoint(endpoint)

    def get_cache_count(self, endpoint):
        """Get the hit count so far for this time window (None if unset)."""
        return cache.get(self.get_cache_key(self.social_account, endpoint))

    def increment_cache_count(self, endpoint):
        """Add one more hit to the cache and return the new count."""
        current = self.get_cache_count(endpoint)
        new_count = 1 if current is None else current + 1
        cache.set(self.get_cache_key(self.social_account, endpoint), new_count, timeout=60*30)
        print("Setting cache = " + str(self.get_cache_key(self.social_account, endpoint)) + " new count " + str(new_count))
        return new_count

    def set_cache_count(self, endpoint, new_count):
        """Overwrite the cached hit count for this window."""
        cache.set(self.get_cache_key(self.social_account, endpoint), new_count, timeout=60*30)
        print("Setting hard cache = " + str(self.get_cache_key(self.social_account, endpoint)) + " new count " + str(new_count))
        return new_count

    def get_cache_key(self, social_account, endpoint):
        """
        KEY: social_account.id + hr_of_day + this_15min_window (integer) + endpoint
        returns string key

        NOTE(review): with true division, ceil(minute/15) yields uneven
        windows (minute 0 -> 0, 1-15 -> 1, ...); ``minute // 15`` may have
        been intended.  Changing it would invalidate live cache entries, so
        it is left as-is -- confirm before fixing.
        """
        now = datetime.datetime.now()
        return str(social_account.id) + "_" + str(now.hour) + "_" + str(int(math.ceil(now.minute/15))) + "_" + endpoint

    def clock_resets_in(self, twitter=None, endpoint=None):
        """In the future we should probably check twitter for a real live hard reset time"""
        now = datetime.datetime.now()
        return self.minutes_till_reset(now.minute)

    def minutes_till_reset(self, minutes):
        """How concise? - How many minutes till the next 15 minute block"""
        return (60 - minutes) % 15
class TwitterRateMatrix():
    """
    The matrix as listed on the API docs site.

    @see: https://dev.twitter.com/rest/public/rate-limits
    Rows: ( METHOD, endpoint, family, user limit 15-min, app limit 15-min ).
    Stored as a numpy string array, so the limit cells come back as
    strings and callers must int() them.
    """
    rates = np.array([
        ["GET","application/rate_limit_status","application",180,180],
        ["GET","favorites/list","favorites",15,15],
        ["GET","followers/ids","followers",15,15],
        ["GET","followers/list","followers",15,30],
        ["GET","friends/ids","friends",15,15],
        ["GET","friends/list","friends",15,30],
        ["GET","friendships/show","friendships",180,15],
        ["GET","help/configuration","help",15,15],
        ["GET","help/languages","help",15,15],
        ["GET","help/privacy","help",15,15],
        ["GET","help/tos","help",15,15],
        ["GET","lists/list","lists",15,15],
        ["GET","lists/members","lists",180,15],
        ["GET","lists/members/show","lists",15,15],
        ["GET","lists/memberships","lists",15,15],
        ["GET","lists/ownerships","lists",15,15],
        ["GET","lists/show","lists",15,15],
        ["GET","lists/statuses","lists",180,180],
        ["GET","lists/subscribers","lists",180,15],
        ["GET","lists/subscribers/show","lists",15,15],
        ["GET","lists/subscriptions","lists",15,15],
        ["GET","search/tweets","search",180,450],
        ["GET","statuses/lookup","statuses",180,60],
        ["GET","statuses/oembed","statuses",180,180],
        ["GET","statuses/retweeters/ids","statuses",15,60],
        ["GET","statuses/retweets/:id","statuses",15,60],
        ["GET","statuses/show/:id","statuses",180,180],
        ["GET","statuses/user_timeline","statuses",180,300],
        ["GET","trends/available","trends",15,15],
        ["GET","trends/closest","trends",15,15],
        ["GET","trends/place","trends",15,15],
        ["GET","users/lookup","users",180,60],
        ["GET","users/show","users",180,180],
        ["GET","users/suggestions","users",15,15],
        ["GET","users/suggestions/:slug","users",15,15],
        ["GET","users/suggestions/:slug/members","users",15,15]
        ])

    def get_rate_for_endpoint(self, endpoint=None):
        """Per-user 15-minute limit for ``endpoint`` (string cell)."""
        self.check_endpoint(endpoint)
        for row in self.rates:
            # match the endpoint column exactly; the previous
            # ``endpoint in row`` test scanned every column
            if row[1] == endpoint:
                return row[3]
        # unreachable after check_endpoint(); kept as a safe default
        return 15

    def is_valid_endpoint(self, endpoint):
        """Exact match to the 2nd column"""
        return endpoint in self.rates[:,1]

    def get_resource_for_endpoint(self, endpoint):
        """Resource family (3rd column) for a known endpoint."""
        self.check_endpoint(endpoint)
        for row in self.rates:
            if row[1] == endpoint:
                return row[2]

    def check_endpoint(self, endpoint):
        """Raise InvalidTwitterEndpointException unless endpoint is known."""
        if endpoint is None:
            raise InvalidTwitterEndpointException(endpoint, "Provide an endpoint")
        if not self.is_valid_endpoint(endpoint):
            raise InvalidTwitterEndpointException(endpoint, "The endpoint " + str(endpoint) + " is invalid ")
        return True
class TwitterApplianceException(Exception):
    """Root of the appliance exception hierarchy."""
    pass
class InvalidTwitterEndpointException(TwitterApplianceException):
    """Raised when a lookup names an unknown or missing Twitter endpoint.

    Attributes:
        endpoint -- the endpoint that was requested
        msg -- human-readable explanation of the error
    """
    def __init__(self, endpoint, msg):
        # keep both pieces of context for callers to inspect
        self.endpoint = endpoint
        self.msg = msg
| |
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in volume type properties."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.db import exception as db_exc
from cinder.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create(context, name, extra_specs=None):
    """Creates volume types.

    :param context: request context, passed straight to the DB layer
    :param name: display name for the new volume type
    :param extra_specs: optional dict of extra specs (defaults to {})
    :returns: the created volume_type DB reference
    :raises VolumeTypeCreateFailed: when the DB insert errors out
    """
    if extra_specs is None:
        # avoid the shared mutable-default-argument pitfall
        extra_specs = {}
    try:
        type_ref = db.volume_type_create(context,
                                         dict(name=name,
                                              extra_specs=extra_specs))
    except db_exc.DBError as e:
        LOG.exception(_('DB error: %s') % e)
        raise exception.VolumeTypeCreateFailed(name=name,
                                               extra_specs=extra_specs)
    return type_ref
def destroy(context, id):
    """Marks volume types as deleted."""
    if id is None:
        raise exception.InvalidVolumeType(reason=_("id cannot be None"))
    db.volume_type_destroy(context, id)
def get_all_types(context, inactive=0, search_opts=None):
    """Get all non-deleted volume_types.

    Pass true as argument if you want deleted volume types returned also.
    ``search_opts`` optionally filters the result; only the 'extra_specs'
    filter is currently understood.
    """
    if search_opts is None:
        # avoid the shared mutable-default-argument pitfall
        search_opts = {}
    vol_types = db.volume_type_get_all(context, inactive)
    if search_opts:
        LOG.debug(_("Searching by: %s") % search_opts)

        def _check_extra_specs_match(vol_type, searchdict):
            # every requested (key, value) pair must be present and equal
            for k, v in searchdict.iteritems():
                if (k not in vol_type['extra_specs'].keys()
                        or vol_type['extra_specs'][k] != v):
                    return False
            return True

        # search_option to filter_name mapping.
        filter_mapping = {'extra_specs': _check_extra_specs_match}

        result = {}
        for type_name, type_args in vol_types.iteritems():
            # go over all filters in the list
            for opt, values in search_opts.iteritems():
                try:
                    filter_func = filter_mapping[opt]
                except KeyError:
                    # no such filter - ignore it, go to next filter
                    continue
                else:
                    if filter_func(type_args, values):
                        result[type_name] = type_args
                        break
        vol_types = result
    return vol_types
def get_volume_type(ctxt, id):
    """Retrieves single volume type by id."""
    if id is None:
        raise exception.InvalidVolumeType(reason=_("id cannot be None"))
    # fall back to an admin context when none was supplied
    effective_ctxt = ctxt if ctxt is not None else context.get_admin_context()
    return db.volume_type_get(effective_ctxt, id)
def get_volume_type_by_name(context, name):
    """Retrieves single volume type by name."""
    if name is None:
        raise exception.InvalidVolumeType(reason=_("name cannot be None"))
    return db.volume_type_get_by_name(context, name)
def get_default_volume_type():
    """Get the default volume type."""
    name = CONF.default_volume_type
    if name is None:
        return {}

    ctxt = context.get_admin_context()
    try:
        return get_volume_type_by_name(ctxt, name)
    except exception.VolumeTypeNotFoundByName as e:
        # Couldn't find volume type with the name in default_volume_type
        # flag, record this issue and move on
        # TODO(zhiteng) consider add notification to warn admin
        LOG.exception(_('Default volume type is not found, '
                        'please check default_volume_type config: %s'), e)
    return {}
def get_volume_type_extra_specs(volume_type_id, key=False):
    """Return extra specs for a volume type, or a single spec's value.

    With ``key`` unset, returns the whole extra_specs dict.  With ``key``
    given, returns that spec's value, or False when it is missing.
    NOTE(review): a present-but-falsy value (e.g. '' or 0) also yields
    False because of the truthiness test below -- confirm callers expect
    that before changing it.
    """
    volume_type = get_volume_type(context.get_admin_context(),
                                  volume_type_id)
    extra_specs = volume_type['extra_specs']
    if key:
        if extra_specs.get(key):
            return extra_specs.get(key)
        else:
            return False
    else:
        return extra_specs
def is_encrypted(context, volume_type_id):
    """True when an encryption row exists for the given volume type."""
    if volume_type_id is None:
        return False
    return db.volume_type_encryption_get(context, volume_type_id) is not None
def get_volume_type_encryption(context, volume_type_id):
    """Fetch the encryption row for a volume type (None when id is None)."""
    if volume_type_id is None:
        return None
    return db.volume_type_encryption_get(context, volume_type_id)
def get_volume_type_qos_specs(volume_type_id):
    """Fetch the qos specs associated with a volume type (admin context)."""
    admin_ctxt = context.get_admin_context()
    return db.volume_type_qos_specs_get(admin_ctxt, volume_type_id)
def volume_types_diff(context, vol_type_id1, vol_type_id2):
    """Returns a 'diff' of two volume types and whether they are equal.

    Returns a tuple of (diff, equal), where 'equal' is a boolean indicating
    whether there is any difference, and 'diff' is a dictionary with the
    following format:
    {'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
                     'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
                     ...},
     'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
                   'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
                   ...},
     'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type),
                    'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type),
                    ...}}
    A None volume type id contributes None for all three spec groups.
    """
    # Strips DB bookkeeping keys and flattens 'specs' in place (mutates
    # the passed dict; returns nothing).
    def _fix_qos_specs(qos_specs):
        if qos_specs:
            qos_specs.pop('id', None)
            qos_specs.pop('name', None)
            qos_specs.update(qos_specs.pop('specs', {}))

    # Returns a copy of the encryption row without DB bookkeeping columns
    # (unlike _fix_qos_specs, this one works on a copy and returns it).
    def _fix_encryption_specs(encryption):
        if encryption:
            encryption = dict(encryption)
            for param in ['volume_type_id', 'created_at', 'updated_at',
                          'deleted_at']:
                encryption.pop(param, None)
        return encryption

    # Symmetric dict diff: every key from either side maps to a
    # (value_in_1, value_in_2) pair; 'equal' is False on any mismatch.
    def _dict_diff(dict1, dict2):
        res = {}
        equal = True
        if dict1 is None:
            dict1 = {}
        if dict2 is None:
            dict2 = {}
        for k, v in dict1.iteritems():
            res[k] = (v, dict2.get(k))
            if k not in dict2 or res[k][0] != res[k][1]:
                equal = False
        for k, v in dict2.iteritems():
            res[k] = (dict1.get(k), v)
            if k not in dict1 or res[k][0] != res[k][1]:
                equal = False
        return (res, equal)

    all_equal = True
    diff = {}
    vol_type_data = []
    # Collect the three spec groups for each of the two volume types.
    for vol_type_id in (vol_type_id1, vol_type_id2):
        if vol_type_id is None:
            specs = {'extra_specs': None,
                     'qos_specs': None,
                     'encryption': None}
        else:
            specs = {}
            vol_type = get_volume_type(context, vol_type_id)
            specs['extra_specs'] = vol_type.get('extra_specs')
            qos_specs = get_volume_type_qos_specs(vol_type_id)
            specs['qos_specs'] = qos_specs.get('qos_specs')
            _fix_qos_specs(specs['qos_specs'])
            specs['encryption'] = get_volume_type_encryption(context,
                                                            vol_type_id)
            specs['encryption'] = _fix_encryption_specs(specs['encryption'])
        vol_type_data.append(specs)

    # Diff each spec group independently; any inequality flips all_equal.
    diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'],
                                            vol_type_data[1]['extra_specs'])
    if not equal:
        all_equal = False
    diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'],
                                          vol_type_data[1]['qos_specs'])
    if not equal:
        all_equal = False
    diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'],
                                           vol_type_data[1]['encryption'])
    if not equal:
        all_equal = False

    return (diff, all_equal)
| |
# vi: ts=4 sw=4
'''
:mod:`ophyd.controls.cas.function` - CAS Functions
==================================================
.. module:: ophyd.controls.cas.function
:synopsis: RPC-like functionality via channel access for Python functions
with a simple decorator
'''
from __future__ import print_function
import functools
import inspect
import logging
import time
from collections import OrderedDict
import epics
from .pv import CasPV
from .errors import casAsyncCompletion
from . import caServer
logger = logging.getLogger(__name__)
class CasFunction(object):
    '''Channel Access Server function decorator

    RPC-like functionality via channel access for Python functions

    Parameters
    ----------
    prefix : str, optional
        The prefix to use (defaults to the function name)
    server : caServer, optional
        The channel access server to use (defaults to the currently running one,
        or the next instantiated one if not specified)
    async : bool, optional
        Function should be called asynchronously, in its own thread (do not set
        to False when doing large calculations or any blocking in the function)
    failed_cb : callable, optional
        When an exception is raised inside the function, `failed_cb` will be
        called.
    process_pv : str, optional
        PV name for the Process PV, used to start the calculation
    use_process : bool, optional
        If True, process_pv is created. Otherwise, the function will be called
        when each parameter is written to.
    retval_pv : str, optional
        Return value PV name
    status_pv : str, optional
        Status PV name
    return_value : , optional
        Default value for the return value
    return_kwargs : , optional
        Keyword arguments are passed to the return value CasPV initializer. You
        can then specify `count`, `type_`, etc. here

    Note
    ----
    The ``async`` parameter name is a reserved keyword from Python 3.7
    onward, so this module requires an earlier interpreter.
    '''

    # Decorator instances created before any caServer exists queue
    # themselves here; the next caServer instantiated attaches to them.
    _to_attach = []

    def __init__(self, prefix='', server=None,
                 async=True, failed_cb=None,
                 process_pv='Proc', use_process=True,
                 retval_pv='Val',
                 status_pv='Sts',
                 return_value=0.0,
                 **return_kwargs
                 ):
        if server is None and caServer.default_instance is not None:
            server = caServer.default_instance

        self._prefix = str(prefix)
        self._server = server
        self._async = bool(async)
        self._functions = {}
        self._failed_cb = failed_cb
        # function name -> currently running background thread
        self._async_threads = {}

        self._process_pv = str(process_pv)
        self._status_pv = str(status_pv)
        self._use_process = bool(use_process)
        self._retval_pv = str(retval_pv)
        self._default_retval = return_value
        self._return_kwargs = return_kwargs

        if not self._use_process:
            # without a process PV, calls are triggered by parameter writes
            # and must run synchronously
            self._async = False

    def attach_server(self, server):
        '''Attach a channel access server instance'''
        if self._server is not None:
            raise ValueError('Server already attached')

        self._server = server
        # NOTE(review): deletes from the dict while looping over .keys();
        # safe on Python 2 where keys() returns a list snapshot.
        for name in self._functions.keys():
            try:
                self._add_fcn(name)
            except Exception as ex:
                # print('Failed to add function: %s (%s)' % (name, ex))
                logger.error('Failed to add function: %s (%s)' % (name, ex), exc_info=ex)
                del self._functions[name]

        if self in CasFunction._to_attach:
            CasFunction._to_attach.remove(self)

    def _add_fcn(self, name):
        '''Add a function to the list being handled.

        If a channel access server isn't attached yet, queue this instance to be
        added at a later point.
        '''
        server = self._server
        if server is None:
            # The next caServer created will attach to these functions
            if self not in CasFunction._to_attach:
                CasFunction._to_attach.append(self)
            return

        fcn_prefix = self._prefix
        if not fcn_prefix:
            fcn_prefix = '%s:' % name

        info = self._functions[name]
        params = info['parameters']
        defaults = info['defaults']

        pv_kw = {}
        if self._use_process:
            # writes to the process PV trigger the wrapped function
            proc_pv = CasPV(''.join((fcn_prefix, self._process_pv)), 0,
                            written_cb=info['wrapped'])
        else:
            # no process PV: every parameter write triggers the function
            pv_kw['written_cb'] = info['wrapped']
            proc_pv = None

        retval_pv = CasPV(''.join((fcn_prefix, self._retval_pv)),
                          self._default_retval,
                          **self._return_kwargs)
        status_pv = CasPV(''.join((fcn_prefix, self._status_pv)),
                          'status')
        # one PV per function parameter, initialized to its default value
        param_pvs = [CasPV(''.join((fcn_prefix, param)),
                           default,
                           **pv_kw)
                     for param, default in zip(params, defaults)]

        added = []
        try:
            for pv in param_pvs + [proc_pv, retval_pv, status_pv]:
                if pv is not None:
                    server.add_pv(pv)
                    added.append(pv)
        except Exception as ex:
            logger.error('Failed to add function: %s (%s)' % (name, ex), exc_info=ex)
            # If failed in adding any of the PVs, remove all that were added
            for pv in added:
                server.remove_pv(pv)
            raise

        info['process_pv'] = proc_pv
        info['retval_pv'] = retval_pv
        info['status_pv'] = status_pv
        info['param_pvs'] = param_pvs

        pv_dict = OrderedDict(zip(params, param_pvs))
        pv_dict['retval'] = retval_pv
        pv_dict['process'] = proc_pv
        pv_dict['status'] = status_pv
        info['param_dict'] = pv_dict

    def _failed(self, name, msg, ex, kwargs):
        '''Failure condition - since functions are called asynchronously in
        background threads, a few options are given to the user for error
        reporting. First, the status_pv for the corresponding function is
        updated. If a failure callback was specified in the decorator, it will
        then be called. If no failure callback is specified, the module logger
        will be used.
        '''
        failed_cb = self._failed_cb
        info = self._functions[name]
        status_pv = info['status_pv']
        if status_pv.server is not None:
            try:
                status_pv.value = msg
            except:
                # best-effort status update; never mask the real failure
                pass

        if failed_cb is not None:
            try:
                failed_cb(name=name, ex=ex, kwargs=kwargs)
            except:
                # user callback errors are deliberately swallowed
                pass
        else:
            logger.error(msg, exc_info=ex)

    def _run_function(self, name, **kwargs):
        '''Run the function in this thread, with the kwargs passed'''
        info = self._functions[name]
        fcn = info['function']
        # PV values supply the arguments; explicit kwargs override them
        kwargs = self.get_kwargs(name, **kwargs)
        try:
            ret = fcn(**kwargs)
        except Exception as ex:
            self._failed(name, '%s: %s (%s)' % (ex.__class__.__name__, ex, name),
                         ex, kwargs)
            ret = None

        try:
            if ret is not None:
                info['retval_pv'].value = ret
        except Exception as ex:
            self._failed(name, 'Retval: %s %s (%s)' % (ex.__class__.__name__, ex, name),
                         ex, kwargs)

        if self._async and name in self._async_threads:
            # signal channel access that the async request completed
            del self._async_threads[name]
            info['process_pv'].async_done()
        return ret

    def _run_async(self, name, **kwargs):
        '''Run a function asynchronously, in a separate thread'''
        thread = epics.ca.CAThread(target=self._run_function,
                                   args=(name, ), kwargs=kwargs)
        self._async_threads[name] = thread
        thread.start()

    def get_kwargs(self, name, **override):
        '''Get the keyword arguments to be passed to the function.

        These come from the current values stored in the channel access server
        process variables.
        '''
        info = self._functions[name]
        pv_dict = info['param_dict']
        parameters = info['parameters']
        ret = dict((param, pv_dict[param].value)
                   for param in parameters)
        ret.update(override)
        return ret

    def get_pv_instance(self, name, pv):
        '''Grab a parameter's PV instance from a specific function, by name'''
        if not self._server:
            raise RuntimeError('Server not yet configured (i.e., no prefix yet)')

        info = self._functions[name]
        param_pvs = info['param_dict']
        return param_pvs[pv]

    def get_pvnames(self, name):
        '''Get all PV names for a specific function in a dictionary:
            {param: pvname}
        '''
        if not self._server:
            raise RuntimeError('Server not yet configured (i.e., no prefix yet)')

        info = self._functions[name]
        ret = dict((param, pv.full_pvname)
                   for param, pv in info['param_dict'].items())
        return ret

    def __call__(self, fcn):
        '''Wraps the function'''
        # ``name`` is assigned below; the closures see it via late binding.
        @functools.wraps(fcn)
        def wrapped_sync(**cas_kw):
            # Block until async request finishes
            while name in self._async_threads:
                time.sleep(0.05)

            # NOTE(review): cas_kw is accepted but not forwarded --
            # argument values always come from the PVs; confirm intended.
            return self._run_function(name)

        @functools.wraps(fcn)
        def wrapped_async(**cas_kw):
            self._run_async(name)
            raise casAsyncCompletion()

        if self._async:
            wrapped = wrapped_async
        else:
            wrapped = wrapped_sync

        spec = inspect.getargspec(fcn)
        args, var_args, var_kws, defaults = spec
        # every positional arg needs a default (it seeds the parameter PV).
        # NOTE(review): args present with defaults None would raise
        # TypeError on len(None) before reaching the ValueError below.
        if (args and len(args) != len(defaults)) or var_args:
            raise ValueError('All arguments must have defaults')

        name = fcn.__name__
        if name in self._functions:
            raise ValueError('Function already registered')

        info = self._functions[name] = {}

        if args:
            parameters = list(zip(args, defaults))
        else:
            parameters = []

        info['parameters'] = [param for param, default in parameters]
        info['defaults'] = [default for param, default in parameters]
        info['function'] = fcn
        info['wrapped'] = wrapped

        self._add_fcn(name)

        wrapped_sync.wrapper = self
        wrapped_async.wrapper = self

        def get_pvnames():
            return self.get_pvnames(name)

        def get_pv(pv):
            return self.get_pv_instance(name, pv)

        wrapped_sync.get_pvnames = get_pvnames
        wrapped_sync.get_pv = get_pv
        # the decorated name is always the synchronous wrapper; the async
        # wrapper (when enabled) is what the process PV invokes
        return wrapped_sync
| |
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from manila.api.contrib import services
from manila import context
from manila import db
from manila import exception
from manila.openstack.common import timeutils
from manila import policy
from manila import test
from manila.tests.api import fakes
# Canned db.service_get_all() rows used by the fakes and assertions below.
# Two hosts, each running a scheduler and a volume service.
fake_services_list = [{'binary': 'manila-scheduler',
                       'host': 'host1',
                       'availability_zone': 'manila',
                       'id': 1,
                       'disabled': True,
                       'updated_at': datetime(2012, 10, 29, 13, 42, 2),
                       'created_at': datetime(2012, 9, 18, 2, 46, 27)},
                      {'binary': 'manila-volume',
                       'host': 'host1',
                       'availability_zone': 'manila',
                       'id': 2,
                       'disabled': True,
                       'updated_at': datetime(2012, 10, 29, 13, 42, 5),
                       'created_at': datetime(2012, 9, 18, 2, 46, 27)},
                      {'binary': 'manila-scheduler',
                       'host': 'host2',
                       'availability_zone': 'manila',
                       'id': 3,
                       'disabled': False,
                       'updated_at': datetime(2012, 9, 19, 6, 55, 34),
                       'created_at': datetime(2012, 9, 18, 2, 46, 28)},
                      {'binary': 'manila-volume',
                       'host': 'host2',
                       'availability_zone': 'manila',
                       'id': 4,
                       'disabled': True,
                       'updated_at': datetime(2012, 9, 18, 8, 3, 38),
                       'created_at': datetime(2012, 9, 18, 2, 46, 28)},
                      ]
class FakeRequest(object):
    # Minimal request stand-in: admin context, no query-string filters.
    environ = {"manila.context": context.get_admin_context()}
    GET = {}
class FakeRequestWithSevice(object):
    # Request filtered by service name only.
    # NOTE: the "Sevice" typo is part of the public name used by the tests.
    environ = {"manila.context": context.get_admin_context()}
    GET = {"service": "manila-volume"}
class FakeRequestWithHost(object):
    # Request filtered by host only.
    environ = {"manila.context": context.get_admin_context()}
    GET = {"host": "host1"}
class FakeRequestWithHostService(object):
    # Request filtered by both host and service name.
    environ = {"manila.context": context.get_admin_context()}
    GET = {"host": "host1", "service": "manila-volume"}
def fake_servcie_get_all(context):
    """Stub for db.service_get_all returning the canned rows.

    NOTE: the misspelled name ("servcie") is referenced as-is in setUp().
    """
    return fake_services_list
def fake_service_get_by_host_binary(context, host, binary):
    """Stub for db.service_get_by_args: first matching fake row, or None."""
    matches = (svc for svc in fake_services_list
               if svc['host'] == host and svc['binary'] == binary)
    return next(matches, None)
def fake_service_get_by_id(value):
    """Return the fake service row whose id equals ``value``, or None."""
    return next((svc for svc in fake_services_list if svc['id'] == value),
                None)
def fake_service_update(context, service_id, values):
    """Stub for db.service_update: validates the id, otherwise a no-op."""
    service = fake_service_get_by_id(service_id)
    if service is None:
        raise exception.ServiceNotFound(service_id=service_id)
    else:
        # NOTE(review): this dict literal is evaluated and discarded; the
        # stub returns None either way -- confirm that is intentional.
        {'host': 'host1', 'service': 'manila-volume',
         'disabled': values['disabled']}
def fake_policy_enforce(context, action, target):
    """Stub for policy.enforce that authorizes every action."""
    pass
def fake_utcnow():
    """Return a fixed 'now' so service up/down checks are deterministic."""
    frozen_now = datetime(2012, 10, 29, 13, 42, 11)
    return frozen_now
class ServicesTest(test.TestCase):
    """Tests for the os-services extension controller.

    All DB, time, and policy collaborators are replaced in setUp() with the
    deterministic fakes defined above, so list output and up/down state are
    fully predictable.
    """

    def setUp(self):
        super(ServicesTest, self).setUp()
        self.stubs.Set(db, "service_get_all", fake_servcie_get_all)
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(db, "service_get_by_args",
                       fake_service_get_by_host_binary)
        self.stubs.Set(db, "service_update", fake_service_update)
        self.stubs.Set(policy, "enforce", fake_policy_enforce)
        self.context = context.get_admin_context()
        self.controller = services.ServiceController()

    def tearDown(self):
        super(ServicesTest, self).tearDown()

    def test_services_list(self):
        # no filters: all four fake services come back
        req = FakeRequest()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'manila-scheduler',
                    'host': 'host1', 'zone': 'manila',
                    'status': 'disabled', 'state': 'up',
                    'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
                    {'binary': 'manila-volume',
                     'host': 'host1', 'zone': 'manila',
                     'status': 'disabled', 'state': 'up',
                     'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
                    {'binary': 'manila-scheduler', 'host': 'host2',
                     'zone': 'manila',
                     'status': 'enabled', 'state': 'up',
                     'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
                    {'binary': 'manila-volume', 'host': 'host2',
                     'zone': 'manila',
                     'status': 'disabled', 'state': 'up',
                     'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host(self):
        # host filter: only host1's two services
        req = FakeRequestWithHost()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'manila-scheduler',
                                  'host': 'host1',
                                  'zone': 'manila',
                                  'status': 'disabled', 'state': 'up',
                                  'updated_at': datetime(2012, 10,
                                                         29, 13, 42, 2)},
                    {'binary': 'manila-volume', 'host': 'host1',
                     'zone': 'manila',
                     'status': 'disabled', 'state': 'up',
                     'updated_at': datetime(2012, 10, 29,
                                            13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_service(self):
        # service filter: the manila-volume service on each host
        req = FakeRequestWithSevice()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'manila-volume',
                                  'host': 'host1',
                                  'zone': 'manila',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)},
                                 {'binary': 'manila-volume',
                                  'host': 'host2',
                                  'zone': 'manila',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 9, 18,
                                                         8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host_service(self):
        # both filters: exactly one service matches
        req = FakeRequestWithHostService()
        res_dict = self.controller.index(req)

        response = {'services': [{'binary': 'manila-volume',
                                  'host': 'host1',
                                  'zone': 'manila',
                                  'status': 'disabled',
                                  'state': 'up',
                                  'updated_at': datetime(2012, 10, 29,
                                                         13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_enable(self):
        body = {'host': 'host1', 'service': 'manila-volume'}
        req = fakes.HTTPRequest.blank('/v1/fake/os-services/enable')
        res_dict = self.controller.update(req, "enable", body)

        self.assertEqual(res_dict['disabled'], False)

    def test_services_disable(self):
        req = fakes.HTTPRequest.blank('/v1/fake/os-services/disable')
        body = {'host': 'host1', 'service': 'manila-volume'}
        res_dict = self.controller.update(req, "disable", body)

        self.assertEqual(res_dict['disabled'], True)
| |
import os
import re
import sys
from django.template.loader import render_to_string
from django.conf import settings
from file_system import File
from subprocess import check_call, CalledProcessError
class TemplateProcessor:
    """Renders a resource through the Django template engine, in place."""
    @staticmethod
    def process(resource):
        try:
            rendered = render_to_string(resource.source_file.path, settings.CONTEXT)
            resource.source_file.write(rendered)
        except:
            # report which page blew up, then let the error propagate
            banner = "***********************"
            sys.stderr.write("%s\nError while rendering page %s\n%s\n" %
                             (banner, resource.url, banner))
            raise
## aym-cms code refactored into processors.
class CleverCSS:
    """Converts a CleverCSS source into a plain .css sibling file."""
    @staticmethod
    def process(resource):
        import clevercss
        source = resource.source_file
        converted = clevercss.convert(source.read_all())
        css_file = File(source.path_without_extension + ".css")
        css_file.write(converted)
        source.delete()
class HSS:
    """Runs the external HSS binary and replaces the source with its CSS."""
    @staticmethod
    def process(resource):
        css_file = File(resource.source_file.path_without_extension + ".css")
        hss = settings.HSS_PATH
        if not hss or not os.path.exists(hss):
            raise ValueError("HSS Processor cannot be found at [%s]" % hss)
        try:
            check_call([hss, resource.source_file.path, "-output",
                        css_file.parent.path + '/'])
        except CalledProcessError as e:
            print('Syntax Error when calling HSS Processor: %s' % e)
            return None
        # swap the generated CSS into the original source path
        resource.source_file.delete()
        css_file.copy_to(resource.source_file.path)
        css_file.delete()
class SASS:
    """Compiles a SASS source to CSS via the external ``sass`` binary."""
    @staticmethod
    def process(resource):
        out_file = File(resource.source_file.path_without_extension + ".css")
        # NOTE(review): sibling processors read resource.source_file here;
        # confirm resource.file is the intended attribute.
        load_path = os.path.dirname(resource.file.path)
        sass = settings.SASS_PATH
        if not sass or not os.path.exists(sass):
            raise ValueError("SASS Processor cannot be found at [%s]" % sass)
        try:
            # NOTE(review): out_file is a File object, not a path string --
            # LessCSS passes out_file.path; verify check_call accepts this.
            check_call([sass, "-I", load_path, resource.source_file.path, out_file])
        except CalledProcessError, e:
            print 'Syntax Error when calling SASS Processor:', e
            return None
        resource.source_file.delete()
        # later processors see the generated .css as the source file
        resource.source_file = out_file
class LessCSS:
    """Compiles a Less stylesheet to CSS using the external lessc binary."""

    @staticmethod
    def process(resource):
        """Compile the resource to CSS and rebind source_file to the output.

        Raises ValueError when settings.LESS_CSS_PATH is unset or missing.
        """
        out_file = File(resource.source_file.path_without_extension + ".css")
        if not out_file.parent.exists:
            out_file.parent.make()
        less = settings.LESS_CSS_PATH
        if not less or not os.path.exists(less):
            raise ValueError("Less CSS Processor cannot be found at [%s]" % less)
        try:
            check_call([less, resource.source_file.path, out_file.path])
        except CalledProcessError as e:  # was Py2-only `except X, e`
            print('Syntax Error when calling less: %s' % e)
        else:
            resource.source_file.delete()
        # Assign out_file as the source_file for this resource so that other
        # processors can correctly process it too; this processor changes the
        # extension of the source file.  See bug report at
        # http://support.ringce.com/ringce/topics/lesscss_yuicompressor_fail_and_sitemap_generation_broken
        resource.source_file = out_file
        if not out_file.exists:
            print('Error Occurred when processing with Less')
class CSSPrefixer:
    """Adds vendor prefixes to CSS rules via the cssprefixer package."""
    @staticmethod
    def process(resource):
        # imported lazily so the package is only required when used
        import cssprefixer
        data = resource.source_file.read_all()
        out = cssprefixer.process(data, debug=False, minify=False)
        # rewritten in place; the file keeps its name and extension
        resource.source_file.write(out)
class CSSmin:
    """Minifies a CSS resource in place using the cssmin package."""
    @staticmethod
    def process(resource):
        # imported lazily so the package is only required when used
        import cssmin
        data = resource.source_file.read_all()
        out = cssmin.cssmin(data)
        # rewritten in place; the file keeps its name and extension
        resource.source_file.write(out)
class CoffeeScript:
    """Compiles a CoffeeScript resource to JavaScript."""

    @staticmethod
    def process(resource):
        """Compile the resource in place and delete the CoffeeScript source.

        Returns None (leaving the source untouched) on a compiler error.
        Raises ValueError when settings.COFFEE_PATH is unset or missing.
        """
        coffee = settings.COFFEE_PATH
        if not coffee or not os.path.exists(coffee):
            raise ValueError("CoffeeScript Processor cannot be found at [%s]" % coffee)
        try:
            # -b: bare output (no top-level wrapper); -c: compile to .js
            check_call([coffee, "-b", "-c", resource.source_file.path])
        except CalledProcessError as e:  # was Py2-only `except X, e`
            print('Syntax Error when calling CoffeeScript: %s' % e)
            return None
        # the compiler wrote <name>.js next to the source; drop the source
        resource.source_file.delete()
class JSmin:
    """Minifies a JavaScript resource in place using the jsmin package."""
    @staticmethod
    def process(resource):
        # imported lazily so the package is only required when used
        import jsmin
        data = resource.source_file.read_all()
        out = jsmin.jsmin(data)
        # rewritten in place; the file keeps its name and extension
        resource.source_file.write(out)
class YUICompressor:
    """Minifies a CSS/JS resource with the YUI Compressor jar."""

    @staticmethod
    def process(resource):
        """Compress the resource in place via `java -jar yuicompressor`.

        A None setting disables this processor.  Raises ValueError when the
        configured jar cannot be located.
        """
        if settings.YUI_COMPRESSOR is None:  # was `== None`
            return
        compress = settings.YUI_COMPRESSOR
        if not os.path.exists(compress):
            # fall back to a path relative to this package
            compress = os.path.join(
                os.path.dirname(
                    os.path.abspath(__file__)), "..", compress)
        if not compress or not os.path.exists(compress):
            raise ValueError(
                "YUI Compressor cannot be found at [%s]" % compress)
        tmp_file = File(resource.source_file.path + ".z-tmp")
        try:
            check_call(["java", "-jar", compress,
                        resource.source_file.path, "-o",
                        tmp_file.path])
        except CalledProcessError as e:  # was Py2-only `except X, e`
            print('Syntax Error when calling YUI Compressor: %s' % e)
        else:
            # replace the original file with the compressed output
            resource.source_file.delete()
            tmp_file.move_to(resource.source_file.path)
class ClosureCompiler:
    """Minifies a JavaScript resource with Google's Closure Compiler jar."""

    @staticmethod
    def process(resource):
        """Compress the resource in place via `java -jar closure-compiler`.

        A None setting disables this processor.  Raises ValueError when the
        configured jar cannot be located.
        """
        compress = settings.CLOSURE_COMPILER
        # Skip gracefully when unconfigured, mirroring YUICompressor
        # (previously a None setting crashed inside os.path.exists).
        if compress is None:
            return
        if not os.path.exists(compress):
            # fall back to a path relative to this package
            compress = os.path.join(
                os.path.dirname(
                    os.path.abspath(__file__)), "..", compress)
        if not compress or not os.path.exists(compress):
            raise ValueError(
                "Closure Compiler cannot be found at [%s]" % compress)
        tmp_file = File(resource.source_file.path + ".z-tmp")
        try:
            check_call(["java", "-jar", compress, "--js",
                        resource.source_file.path, "--js_output_file",
                        tmp_file.path])
        except CalledProcessError as e:  # was Py2-only `except X, e`
            print('Syntax Error when calling Closure Compiler: %s' % e)
        else:
            # replace the original file with the compressed output
            resource.source_file.delete()
            tmp_file.move_to(resource.source_file.path)
class Thumbnail:
    """Creates a thumbnail copy of an image resource using PIL.

    The thumbnail keeps the original extension and receives a configurable
    filename postfix (default "-thumb").
    """

    @staticmethod
    def process(resource):
        from PIL import Image
        img = Image.open(resource.source_file.path)
        # BUG FIX: remember the on-disk format *before* convert() — the
        # converted copy has .format == None, which made the JPEG-quality
        # branch below unreachable for any image that needed conversion.
        source_format = img.format
        if img.mode != 'RGBA':
            img = img.convert('RGBA')
        # thumbnail() resizes in place, preserving aspect ratio
        img.thumbnail(
            (settings.THUMBNAIL_MAX_WIDTH, settings.THUMBNAIL_MAX_HEIGHT),
            Image.ANTIALIAS
        )
        orig_path, _, orig_extension = resource.source_file.path.rpartition('.')
        # getattr() with a default replaces the dated `in dir(settings)` test
        postfix = getattr(settings, "THUMBNAIL_FILENAME_POSTFIX", "-thumb")
        thumb_path = "%s%s.%s" % (orig_path, postfix, orig_extension)
        if source_format == "JPEG" and hasattr(settings, "THUMBNAIL_JPEG_QUALITY"):
            img.save(thumb_path, quality=settings.THUMBNAIL_JPEG_QUALITY, optimize=True)
        else:
            img.save(thumb_path)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2020 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import sys
import unittest
from unittest.mock import call, patch, Mock
from supvisors.tests.base import DummyHttpContext
class ViewHostAddressTest(unittest.TestCase):
    """ Test case for the viewhostaddress module. """
    # NOTE(review): these tests build the DOM "root" out of Mock objects and
    # assert the exact findmeld/content/attributes call sequences, so they
    # are tightly coupled to the statement order inside HostAddressView.
    def setUp(self):
        """ Create a logger that stores log traces. """
        # apply the forced inheritance done in supvisors.plugin
        from supvisors.viewhandler import ViewHandler
        from supervisor.web import StatusView
        StatusView.__bases__ = (ViewHandler,)
        # create the instance to be tested
        from supvisors.viewhostaddress import HostAddressView
        self.view = HostAddressView(DummyHttpContext('ui/hostaddress.html'))
    def test_init(self):
        """ Test the values set at construction. """
        from supvisors.webutils import HOST_ADDRESS_PAGE
        self.assertEqual(HOST_ADDRESS_PAGE, self.view.page_name)
    @patch('supvisors.viewhostaddress.HostAddressView._write_io_image')
    @patch('supvisors.viewhostaddress.HostAddressView._write_mem_image')
    @patch('supvisors.viewhostaddress.HostAddressView._write_cpu_image')
    @patch('supvisors.viewhostaddress.HostAddressView.write_network_statistics')
    @patch('supvisors.viewhostaddress.HostAddressView.write_memory_statistics')
    @patch('supvisors.viewhostaddress.HostAddressView.write_processor_statistics')
    def test_write_contents(self, mocked_processor, mocked_memory, mocked_network,
                            mocked_cpu, mocked_mem, mocked_io):
        """ Test the write_contents method. """
        from supvisors import viewhostaddress
        # set context (meant to be set through render)
        dummy_stats = Mock(cpu='cpu', mem='mem', io='io')
        self.view.view_ctx = Mock(**{'get_address_stats.return_value': dummy_stats}) # replace root structure
        mocked_root = Mock()
        # in first test, HAS_PLOT is False
        viewhostaddress.HAS_PLOT = False
        self.view.write_contents(mocked_root)
        self.assertEqual([call(mocked_root, 'cpu')], mocked_processor.call_args_list)
        self.assertEqual([call(mocked_root, 'mem')], mocked_memory.call_args_list)
        self.assertEqual([call(mocked_root, 'io')], mocked_network.call_args_list)
        # without matplotlib, no image must be written
        self.assertFalse(mocked_cpu.called)
        self.assertFalse(mocked_mem.called)
        self.assertFalse(mocked_io.called)
        # reset mocks
        mocked_processor.reset_mock()
        mocked_memory.reset_mock()
        mocked_network.reset_mock()
        # in second test, HAS_PLOT is True
        viewhostaddress.HAS_PLOT = True
        self.view.write_contents(mocked_root)
        self.assertEqual([call(mocked_root, 'cpu')], mocked_processor.call_args_list)
        self.assertEqual([call(mocked_root, 'mem')], mocked_memory.call_args_list)
        self.assertEqual([call(mocked_root, 'io')], mocked_network.call_args_list)
        self.assertEqual([call('cpu')], mocked_cpu.call_args_list)
        self.assertEqual([call('mem')], mocked_mem.call_args_list)
        self.assertEqual([call('io')], mocked_io.call_args_list)
    def test_write_processor_single_title(self):
        """ Test the _write_processor_single_title method. """
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(**{'format_url.return_value': 'http://addr:port/index.html',
                                     'cpu_id_to_string.return_value': '1'}) # replace root structure
        mocked_title_mid = Mock(attrib={})
        mocked_tr = Mock(**{'findmeld.return_value': mocked_title_mid})
        # in first call, elt is not the selected element
        self.view._write_processor_single_title(mocked_tr, 1, 0)
        self.assertEqual([call('cpunum_a_mid')], mocked_tr.findmeld.call_args_list)
        self.assertDictEqual({}, mocked_title_mid.attrib)
        self.assertEqual([call(href='http://addr:port/index.html')], mocked_title_mid.attributes.call_args_list)
        self.assertEqual([call('cpu#1')], mocked_title_mid.content.call_args_list)
        mocked_tr.findmeld.reset_mock()
        mocked_title_mid.attributes.reset_mock()
        # in first call, elt is the selected element
        self.view._write_processor_single_title(mocked_tr, 1, 1)
        self.assertEqual([call('cpunum_a_mid')], mocked_tr.findmeld.call_args_list)
        self.assertDictEqual({'class': 'button off active'}, mocked_title_mid.attrib)
        self.assertFalse(mocked_title_mid.attributes.called)
        self.assertEqual([call('cpu#1')], mocked_title_mid.content.call_args_list)
    @patch('supvisors.viewhostaddress.HostAddressView._write_common_statistics')
    def test_write_processor_single_statistics(self, mocked_common):
        """ Test the _write_processor_single_statistics method. """
        # replace root element
        mocked_root = Mock()
        # test method call
        self.view._write_processor_single_statistics(mocked_root, [1.523, 2.456])
        self.assertEqual([call(mocked_root, [1.523, 2.456], 'cpuval_td_mid', 'cpuavg_td_mid',
                               'cpuslope_td_mid', 'cpudev_td_mid')],
                         mocked_common.call_args_list)
    @patch('supvisors.viewhostaddress.HostAddressView._write_processor_single_statistics')
    @patch('supvisors.viewhostaddress.HostAddressView._write_processor_single_title')
    def test_write_processor_statistics(self, mocked_title, mocked_stats):
        """ Test the write_processor_statistics method. """
        from supvisors.viewcontext import CPU
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(parameters={CPU: 1})
        # build root structure
        mocked_trs = [Mock(attrib={}) for _ in range(2)]
        mocked_mid = Mock(**{'repeat.return_value': [(mocked_trs[0], 'cpu stats 0'),
                                                     (mocked_trs[1], 'cpu stats 1')]})
        mocked_root = Mock(**{'findmeld.return_value': mocked_mid})
        # test call
        self.view.write_processor_statistics(mocked_root, [])
        self.assertEqual([call(mocked_trs[0], 1, 0), call(mocked_trs[1], 1, 1)],
                         mocked_title.call_args_list)
        self.assertEqual([call(mocked_trs[0], 'cpu stats 0'), call(mocked_trs[1], 'cpu stats 1')],
                         mocked_stats.call_args_list)
        # rows must alternate brightened / shaded
        self.assertDictEqual({'class': 'brightened'}, mocked_trs[0].attrib)
        self.assertDictEqual({'class': 'shaded'}, mocked_trs[1].attrib)
    @patch('supvisors.viewhostaddress.HostAddressView._write_common_statistics')
    def test_write_memory_statistics(self, mocked_common):
        """ Test the write_memory_statistics method. """
        # replace root element
        mocked_root = Mock()
        # test method call
        self.view.write_memory_statistics(mocked_root, [1.523, 2.456])
        self.assertEqual([call(mocked_root, [1.523, 2.456], 'memval_td_mid', 'memavg_td_mid',
                               'memslope_td_mid', 'memdev_td_mid')],
                         mocked_common.call_args_list)
    def test_write_network_single_title(self):
        """ Test the _write_network_single_title method. """
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(**{'format_url.return_value': 'http://addr:port/index.html'})
        # replace root structure
        mocked_href_mid = Mock(attrib={})
        mocked_title_mid = Mock(attrib={}, **{'findmeld.return_value': mocked_href_mid})
        mocked_tr = Mock(**{'findmeld.return_value': mocked_title_mid})
        # in first call, elt is not the first line (rowspan False)
        self.view._write_network_single_title(mocked_tr, 'eth0', 'lo', False, True)
        self.assertEqual([call('intf_td_mid')], mocked_tr.findmeld.call_args_list)
        self.assertDictEqual({}, mocked_title_mid.attrib)
        self.assertFalse(mocked_title_mid.findmeld.called)
        self.assertDictEqual({}, mocked_href_mid.attrib)
        self.assertEqual([call('')], mocked_title_mid.replace.call_args_list)
        mocked_tr.findmeld.reset_mock()
        mocked_title_mid.replace.reset_mock()
        # in second call, elt is the first line (rowspan True), shaded and is not the selected interface
        self.view._write_network_single_title(mocked_tr, 'eth0', 'lo', True, True)
        self.assertEqual([call('intf_td_mid')], mocked_tr.findmeld.call_args_list)
        self.assertDictEqual({'class': 'shaded', 'rowspan': '2'}, mocked_title_mid.attrib)
        self.assertEqual([call('intf_a_mid')], mocked_title_mid.findmeld.call_args_list)
        self.assertDictEqual({}, mocked_href_mid.attrib)
        self.assertEqual([call(href='http://addr:port/index.html')], mocked_href_mid.attributes.call_args_list)
        self.assertFalse(mocked_title_mid.replace.called)
        mocked_tr.findmeld.reset_mock()
        mocked_title_mid.findmeld.reset_mock()
        mocked_href_mid.attributes.reset_mock()
        # in third call, elt is the first line (rowspan True), not shaded and is the selected interface
        self.view._write_network_single_title(mocked_tr, 'lo', 'lo', True, False)
        self.assertEqual([call('intf_td_mid')], mocked_tr.findmeld.call_args_list)
        self.assertDictEqual({'class': 'brightened', 'rowspan': '2'}, mocked_title_mid.attrib)
        self.assertEqual([call('intf_a_mid')], mocked_title_mid.findmeld.call_args_list)
        self.assertDictEqual({'class': 'button off active'}, mocked_href_mid.attrib)
        self.assertFalse(mocked_href_mid.attributes.called)
        self.assertFalse(mocked_title_mid.replace.called)
    @patch('supvisors.viewhostaddress.HostAddressView._write_common_statistics')
    def test_write_network_single_statistics(self, mocked_common):
        """ Test the _write_network_single_statistics method. """
        # replace root structure
        mocked_title_mid = Mock()
        mocked_tr = Mock(**{'findmeld.return_value': mocked_title_mid})
        # in first call, test no rate, slope and standard deviation
        self.view._write_network_single_statistics(mocked_tr, [1.523, 2.456], False)
        self.assertEqual([call('intfrxtx_td_mid')], mocked_tr.findmeld.call_args_list)
        self.assertEqual([call('Tx')], mocked_title_mid.content.call_args_list)
        self.assertEqual([call(mocked_tr, [1.523, 2.456], 'intfval_td_mid', 'intfavg_td_mid',
                               'intfslope_td_mid', 'intfdev_td_mid')],
                         mocked_common.call_args_list)
        mocked_tr.reset_mock()
        mocked_title_mid.content.reset_mock()
        mocked_common.reset_mock()
        # in second call, test no rate, slope and standard deviation
        self.view._write_network_single_statistics(mocked_tr, [1.523, 2.456], True)
        self.assertEqual([call('intfrxtx_td_mid')], mocked_tr.findmeld.call_args_list)
        self.assertEqual([call('Rx')], mocked_title_mid.content.call_args_list)
        self.assertEqual([call(mocked_tr, [1.523, 2.456], 'intfval_td_mid', 'intfavg_td_mid',
                               'intfslope_td_mid', 'intfdev_td_mid')],
                         mocked_common.call_args_list)
    @patch('supvisors.viewhostaddress.HostAddressView._write_network_single_statistics')
    @patch('supvisors.viewhostaddress.HostAddressView._write_network_single_title')
    def test_write_network_statistics(self, mocked_title, mocked_stats):
        """ Test the write_network_statistics method. """
        from supvisors.viewcontext import INTF
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(parameters={INTF: 'eth0'})
        # build root structure
        mocked_trs = [Mock(attrib={}) for _ in range(4)]
        mocked_mid = Mock(**{'repeat.return_value': [(mocked_trs[0], ('lo', 'lo recv')),
                                                     (mocked_trs[1], ('lo', 'lo sent')),
                                                     (mocked_trs[2], ('eth0', 'eth0 recv')),
                                                     (mocked_trs[3], ('eth0', 'eth0 sent'))]})
        mocked_root = Mock(**{'findmeld.return_value': mocked_mid})
        # test method with dummy stats
        dummy_stats = {'lo': ['lo recv', 'lo sent'], 'eth0': ['eth0 recv', 'eth0 sent']}
        self.view.write_network_statistics(mocked_root, dummy_stats)
        # check calls
        self.assertEqual([call('intf_tr_mid')], mocked_root.findmeld.call_args_list)
        self.assertEqual([call([('lo', 'lo recv'), ('lo', 'lo sent'),
                                ('eth0', 'eth0 recv'), ('eth0', 'eth0 sent')])],
                         mocked_mid.repeat.call_args_list)
        # rows alternate in pairs: one interface spans two rows (rx/tx)
        self.assertEqual('brightened', mocked_trs[0].attrib['class'])
        self.assertEqual('brightened', mocked_trs[1].attrib['class'])
        self.assertEqual('shaded', mocked_trs[2].attrib['class'])
        self.assertEqual('shaded', mocked_trs[3].attrib['class'])
        self.assertEqual([call(mocked_trs[0], 'eth0', 'lo', True, False),
                          call(mocked_trs[1], 'eth0', 'lo', False, False),
                          call(mocked_trs[2], 'eth0', 'eth0', True, True),
                          call(mocked_trs[3], 'eth0', 'eth0', False, True)],
                         mocked_title.call_args_list)
        self.assertEqual([call(mocked_trs[0], 'lo recv', True),
                          call(mocked_trs[1], 'lo sent', False),
                          call(mocked_trs[2], 'eth0 recv', True),
                          call(mocked_trs[3], 'eth0 sent', False)],
                         mocked_stats.call_args_list)
    @patch('supvisors.viewhostaddress.get_stats', side_effect=[(10.231, None, (None, 2), None),
                                                               (8.999, 2, (-1.1, 4), 5.72)])
    @patch('supvisors.viewhostaddress.HostAddressView.set_slope_class')
    def test_write_common_statistics(self, mocked_class, mocked_stats):
        """ Test the _write_common_statistics method. """
        # replace root structure
        mocked_val_mid = Mock()
        mocked_avg_mid = Mock()
        mocked_slope_mid = Mock()
        mocked_dev_mid = Mock()
        mocked_tr = Mock(**{'findmeld.side_effect': [mocked_val_mid, mocked_avg_mid,
                                                     mocked_val_mid, mocked_avg_mid,
                                                     mocked_slope_mid, mocked_dev_mid]})
        # in first call, test empty stats
        self.view._write_common_statistics(mocked_tr, [], 'val_mid', 'avg_mid', 'slope_mid', 'dev_mid')
        self.assertFalse(mocked_tr.findmeld.called)
        self.assertFalse(mocked_stats.called)
        self.assertFalse(mocked_class.called)
        self.assertFalse(mocked_val_mid.called)
        self.assertFalse(mocked_avg_mid.called)
        self.assertFalse(mocked_slope_mid.called)
        self.assertFalse(mocked_dev_mid.called)
        # in second call, test no rate, slope and standard deviation
        self.view._write_common_statistics(mocked_tr, [1.523, 2.456], 'val_mid', 'avg_mid', 'slope_mid', 'dev_mid')
        self.assertEqual([call('val_mid'), call('avg_mid')], mocked_tr.findmeld.call_args_list)
        self.assertEqual([call([1.523, 2.456])], mocked_stats.call_args_list)
        self.assertFalse(mocked_class.called)
        self.assertEqual([call('2.46')], mocked_val_mid.content.call_args_list)
        self.assertEqual([call('10.23')], mocked_avg_mid.content.call_args_list)
        self.assertFalse(mocked_slope_mid.called)
        self.assertFalse(mocked_dev_mid.called)
        mocked_stats.reset_mock()
        mocked_val_mid.content.reset_mock()
        mocked_avg_mid.content.reset_mock()
        # in third call, test no rate, slope and standard deviation
        self.view._write_common_statistics(mocked_tr, [1.523, 2.456], 'val_mid', 'avg_mid', 'slope_mid', 'dev_mid')
        self.assertEqual([call([1.523, 2.456])], mocked_stats.call_args_list)
        self.assertEqual([call(mocked_val_mid, 2)], mocked_class.call_args_list)
        self.assertEqual([call('val_mid'), call('avg_mid'),
                          call('val_mid'), call('avg_mid'), call('slope_mid'), call('dev_mid')],
                         mocked_tr.findmeld.call_args_list)
        self.assertEqual([call('2.46')], mocked_val_mid.content.call_args_list)
        self.assertEqual([call('9.00')], mocked_avg_mid.content.call_args_list)
        self.assertEqual([call('-1.10')], mocked_slope_mid.content.call_args_list)
        self.assertEqual([call('5.72')], mocked_dev_mid.content.call_args_list)
    @patch('supvisors.plot.StatisticsPlot.export_image')
    @patch('supvisors.plot.StatisticsPlot.add_plot')
    def test_write_cpu_image(self, mocked_add, mocked_export):
        """ Test the _write_cpu_image method. """
        from supvisors.viewcontext import ViewContext, CPU
        from supvisors.viewimage import address_cpu_img
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(parameters={CPU: 0},
                                  **{'cpu_id_to_string.return_value': ViewContext.cpu_id_to_string(0)})
        # just test calls to StatisticsPlot
        dummy_stats = ['#all stats', '#0 stats', '#1 stats']
        self.view._write_cpu_image(dummy_stats)
        self.assertEqual([call('CPU #all', '%', '#all stats')], mocked_add.call_args_list)
        self.assertEqual([call(address_cpu_img)], mocked_export.call_args_list)
    @patch('supvisors.plot.StatisticsPlot.export_image')
    @patch('supvisors.plot.StatisticsPlot.add_plot')
    def test_write_mem_image(self, mocked_add, mocked_export):
        """ Test the _write_mem_image method. """
        from supvisors.viewimage import address_mem_img
        # just test calls to StatisticsPlot
        dummy_stats = ['mem 1', 'mem 2']
        self.view._write_mem_image(dummy_stats)
        self.assertEqual([call('MEM', '%', dummy_stats)], mocked_add.call_args_list)
        self.assertEqual([call(address_mem_img)], mocked_export.call_args_list)
    @patch('supvisors.plot.StatisticsPlot.export_image')
    @patch('supvisors.plot.StatisticsPlot.add_plot')
    def test_write_io_image(self, mocked_add, mocked_export):
        """ Test the _write_io_image method. """
        from supvisors.viewcontext import INTF
        from supvisors.viewimage import address_io_img
        # set context (meant to be set through render)
        self.view.view_ctx = Mock(parameters={INTF: 'eth0'})
        # just test calls to StatisticsPlot
        dummy_stats = {'lo': ['lo recv', 'lo sent'], 'eth0': ['eth0 recv', 'eth0 sent']}
        self.view._write_io_image(dummy_stats)
        self.assertEqual([call('eth0 recv', 'kbits/s', 'eth0 recv'),
                          call('eth0 sent', 'kbits/s', 'eth0 sent')],
                         mocked_add.call_args_list)
        self.assertEqual([call(address_io_img)], mocked_export.call_args_list)
def test_suite():
    """Return the suite of all TestCase classes defined in this module.

    Uses TestLoader.loadTestsFromModule: unittest.findTestCases is a
    deprecated alias (removed in Python 3.13) for the same operation.
    """
    return unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
# Allow running this module directly: execute the suite built by test_suite().
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
# Python 2 and 3:
from six import with_metaclass
import os
from .modifiers import ModifierMixin
__all__ = ['Empty', 'BaseObject', 'Scad', 'Import']
INDENT = ' '
class MetaObject(type):
    """Metaclass wiring OpenSCAD statement metadata onto subclasses.

    ``object_definition`` maps a lowercased class name to a tuple of
    ``(scad_statement_name, accepted_parameter_names, accepts_children)``.
    Parameter names with one leading underscore become OpenSCAD ``$``
    special variables; two leading underscores mark positional arguments.
    """
    object_definition = {
        # Bool
        'union': ('union', (), True),
        'difference': ('difference', (), True),
        'intersection': ('intersection', (), True),
        # Transforms
        'translate': ('translate', ('v', ), True),
        'rotate': ('rotate', ('a', 'v'), True),
        'scale': ('scale', ('v', ), True),
        'resize': ('resize', ('newsize', 'auto'), True),
        'mirror': ('mirror', ('__axis', ), True),
        'color': ('color', ('__color', 'a'), True),
        'offset': ('offset', ('r', 'chamfer', '_fn'), True),
        'minkowski': ('minkowski', (), True),
        'hull': ('hull', (), True),
        'linear_extrude': ('linear_extrude', ('height', 'center',
                                              'convexity', 'twist', 'slices', 'scale'),
                           True),
        'rotate_extrude': ('rotate_extrude', ('angle', 'convexity',
                                              '_fn'), True),
        # 2D
        'circle': ('circle', ('r', 'd', '_fn'), False),
        'square': ('square', ('size', 'center'), False),
        'polygon': ('polygon', ('points', 'paths', 'convexity'), False),
        'text': ('text',
                 ('text', 'size', 'font', 'halign', 'valign', 'spacing',
                  'direction', 'language', 'script', '_fn'),
                 False),
        # 3D
        'sphere': ('sphere', ('r', 'd', 'center', '_fa', '_fs', '_fn'), False),
        'cube': ('cube', ('size', 'center'), False),
        'cylinder': ('cylinder',
                     ('h', 'r', 'r1', 'r2', 'd', 'd1', 'd2',
                      'center', '_fa', '_fs', '_fn'),
                     False
                     ),
        'scad': ('scad', ('scadfile', 'version'), False),
        'import': ('import', ('file', 'convexity'), False),
        'surface': ('surface', ('file', 'center', 'invert', 'convexity'), False),
        'polyhedron': ('polyhedron',
                       ('points', 'triangles', 'faces', 'convexity'),
                       False)
    }
    def __new__(mcs, name, bases, attr):
        # Classes whose name starts with '_' are abstract bases and get no
        # OpenSCAD definition wired in.
        if name[0] != '_':
            definition = MetaObject.object_definition[name.lower()]
            attr['_name'] = definition[0]
            attr['_properties'] = definition[1]
            # expose each accepted parameter as a class attribute (default None)
            for param in definition[1]:
                attr[param] = None
            attr['has_child'] = definition[2]
        return type.__new__(mcs, name, bases, attr)
class _BaseObject(with_metaclass(MetaObject, ModifierMixin, object)):
    # Shared behaviour for every OpenSCAD object: parameter storage, child
    # management, operator overloads and SCAD source emission.
    def __init__(self, *args, **kwargs):
        super(_BaseObject, self).__init__()
        self.modules = list()
        self._comment = None
        # keyword arguments map onto the properties declared by the metaclass
        for k, v in kwargs.items():
            if hasattr(self.__class__, k):
                setattr(self, k, v)
        # positional arguments are bound in _properties order
        if len(args) > 0:
            for i, k in enumerate(args):
                setattr(self, self._properties[i], args[i])
        self.children = []
    def _retrieve_value(self, name):
        # Render one property as an OpenSCAD literal (strings get quoted);
        # returns None when the property is unset.
        val = getattr(self, name)
        if val is None:
            return None
        try:
            if isinstance(val, str):
                return '"{}"'.format(val)
        except Exception as e:
            print(e)
        if isinstance(val, str):
            return '"{}"'.format(val)
        return '{}'.format(val)
    def _get_params(self, fp=None):
        # Build the argument list of this object's SCAD statement from all
        # properties that are currently set.
        valid_keys = list(filter(lambda x: getattr(self, x) is not None, self._properties))
        def is_no_keyword_args(arg_name):
            # properties prefixed with '__' are emitted positionally
            if arg_name[0] == '_' and arg_name[1] == '_':
                return True
            return False
        def is_keyword_args(arg_name):
            return not is_no_keyword_args(arg_name)
        def convert_special_args(arg_name):
            # a single leading '_' maps to OpenSCAD's '$' special variables
            if arg_name[0] == '_':
                if arg_name[1] != '_':
                    return '$' + arg_name[1:]
            return arg_name
        def _get_attr(self, x, fp):
            if x == 'scadfile':
                # inline the external .scad file as a named module and
                # reference it by a call; the module is also written to fp
                scadfile = getattr(self, x)
                def rename_scadfile(scadfile):
                    sf = ''.join(os.path.basename(scadfile).split('.')[:-1])
                    scadfile_renamed = sf.lower().strip('_').strip('-')
                    return(scadfile_renamed)
                with open(scadfile) as f:
                    content = f.readlines()
                content = ''.join(content).rstrip('\n')
                sc = rename_scadfile(scadfile)
                module = 'module {sc}() {{{content};}}\n'.format(**{'content': content, 'sc': sc})
                module = module.replace(';;', ';')
                self.modules.append(module)
                if fp is not None:
                    fp.write(module)
                content = '{}()'.format(sc)
                return(content)
            elif x == 'file':
                # 'file' values must be double-quoted in SCAD source
                content = getattr(self, x)
                if not content.startswith('"'):
                    content = '"' + content
                if not content.endswith('"'):
                    content = content + '"'
                return(content)
            else:
                return(getattr(self, x))
        args = ''
        # no-keyword args
        no_kw_args = list(filter(lambda x: is_no_keyword_args(x), valid_keys))
        args += ' '.join(map(lambda x: '{},'.format(self._retrieve_value(x)), no_kw_args))[:-1]
        # keyword args
        # NOTE(review): when both positional and keyword args are present no
        # separator is inserted between the two groups — confirm intended.
        kw_args = filter(lambda x: is_keyword_args(x), valid_keys)
        args += ' '.join(map(lambda x: '{}={},'.format(convert_special_args(x), _get_attr(self, x, fp)), kw_args))[:-1]
        args = args.replace('scadfile=', '')
        return args
    def _get_children_content(self, indent_level=0, fp=None):
        # Concatenated SCAD source of all children at the given indent level.
        _content = ''
        if len(self.children) > 0:
            for child in self.children:
                _content += child.dumps(indent_level, fp)
        return _content
    def _get_content(self, indent_level=0, fp=None):
        # Braced child block, or the empty string for leaf objects.
        if len(self.children) == 0:
            return ''
        else:
            return '{{\n{children}{indent}}}'.format(
                children=self._get_children_content(indent_level + 1, fp=fp),
                indent=INDENT * indent_level
            )
    def _is_2d(self):
        '''Defaults to False if no children, else
        returns true if all children are 2D. 2D shapes should override and return True.'''
        if len(self.children) == 0:
            return False
        return all([i._is_2d() for i in self.children])
    def _validate_append(self, obj):
        """Override if any validation in append operation is required.
        :param obj:
        """
    def append(self, obj):
        # Add a child (or each element of a list/tuple/set of children);
        # returns self so calls can be chained.
        if not self.has_child:
            raise TypeError('This object can not have any children.')
        else:
            self._validate_append(obj)
            if isinstance(obj, (list, tuple, set)):
                for o in obj:
                    self.append(o)
            else:
                self.children.append(obj)
        return self
    def dump(self, fp):
        # Write this object's SCAD source to the open file object fp.
        dumps = self.dumps(fp=fp)
        fp.write(dumps)
    def dumps(self, indent_level=0, fp=None):
        # Return this object's SCAD source; 'scad' objects emit no operator
        # name because their params already reference the generated module.
        if self._name == 'scad':
            return '{indent}{prefix}{params};{comment}\n'.format(
                indent=INDENT * indent_level,
                prefix=self.mod.get_prefix(),
                params=self._get_params(fp).replace('True', 'true'),
                comment='' if self._comment is None else ' // {}'.format(self._comment)
            )
        else:
            return '{indent}{prefix}{op_name}({params}){content};{comment}\n'.format(
                indent=INDENT * indent_level,
                prefix=self.mod.get_prefix(),
                op_name=self._name,
                params=self._get_params(fp).replace('True', 'true'),
                content=self._get_content(indent_level, fp=fp),
                comment='' if self._comment is None else ' // {}'.format(self._comment)
            )
    def write(self, filename, with_print=False):
        # Serialize to a file on disk; optionally echo the source to stdout.
        with open(filename, 'w') as fp:
            self.dump(fp)
        if with_print:
            print(self.dumps())
    def clone(self):
        # Deep copy of this object, children included.
        import copy
        return copy.deepcopy(self)
    def equals(self, other):
        # Equal when both have the same concrete class and property values.
        for prop in self._properties:
            if self.__class__.__name__ != other.__class__.__name__:
                return False
            if getattr(self, prop) != getattr(other, prop):
                return False
        return True
    def __str__(self):
        return self.dumps()
    def __add__(self, other):
        # a + b -> union(a, b); _Empty acts as the neutral element
        from .boolean import Union
        if isinstance(self, _Empty):
            return other.clone()
        elif isinstance(self, Union):
            cloned = self.clone()
            cloned.append(other)
            return cloned
        else:
            return Union().append(self).append(other)
    def __sub__(self, other):
        # a - b -> difference(a, b)
        from .boolean import Difference
        if isinstance(self, _Empty):
            return other.clone()
        elif isinstance(self, Difference):
            cloned = self.clone()
            cloned.append(other)
            return cloned
        else:
            return Difference().append(self).append(other)
    def __and__(self, other):
        # a & b -> intersection(a, b)
        from .boolean import Intersection
        if isinstance(self, _Empty):
            return other.clone()
        elif isinstance(self, Intersection):
            cloned = self.clone()
            cloned.append(other)
            return cloned
        else:
            return Intersection().append(self).append(other)
    # Fluent transform helpers: wrap self in the given transformation node.
    def translate(self, *args, **kwargs):
        from .transformations import Translate
        return Translate(*args, **kwargs).append(self)
    def rotate(self, *args, **kwargs):
        from .transformations import Rotate
        return Rotate(*args, **kwargs).append(self)
    def scale(self, *args, **kwargs):
        from .transformations import Scale
        return Scale(*args, **kwargs).append(self)
    def resize(self, *args, **kwargs):
        from .transformations import Resize
        return Resize(*args, **kwargs).append(self)
    def mirror(self, *args, **kwargs):
        from .transformations import Mirror
        return Mirror(*args, **kwargs).append(self)
    def color(self, *args, **kwargs):
        from .transformations import Color
        return Color(*args, **kwargs).append(self)
    def offset(self, *args, **kwargs):
        from .transformations import Offset
        return Offset(*args, **kwargs).append(self)
    def minkowski(self, *args, **kwargs):
        from .transformations import Minkowski
        return Minkowski(*args, **kwargs).append(self)
    def hull(self, *args, **kwargs):
        from .transformations import Hull
        return Hull(*args, **kwargs).append(self)
    def linear_extrude(self, *args, **kwargs):
        from .transformations import Linear_Extrude
        return Linear_Extrude(*args, **kwargs).append(self)
    def rotate_extrude(self, *args, **kwargs):
        from .transformations import Rotate_Extrude
        return Rotate_Extrude(*args, **kwargs).append(self)
    def comment(self, comment):
        # Attach a trailing `// comment` to the emitted statement.
        self._comment = comment
        return self
# Public alias for the concrete base class (see __all__).
BaseObject = _BaseObject
class _Empty(_BaseObject):
    # Neutral element for the +, - and & operators (see _BaseObject.__add__).
    pass
# Public alias (see __all__).
Empty = _Empty
class Scad(_BaseObject):
    # Inlines an external .scad file as a generated module (metadata 'scad').
    pass
class Import(_BaseObject):
    # OpenSCAD `import(file=...)` statement (metadata 'import').
    pass
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SubnetsOperations(object):
"""SubnetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Client API version — constant for this operations group.
        self.api_version = "2016-12-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request for a subnet and validate the
        response; the long-running-operation polling is handled by delete().
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id for request tracing on the service side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202/204 are the documented success codes for this operation
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def delete(
        self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, polling=True, **operation_config):
    """Deletes the specified subnet.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param subnet_name: The name of the subnet.
    :type subnet_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the operation; always ask for the raw response so the
    # poller can inspect status headers.
    raw_result = self._delete_initial(
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        subnet_name=subnet_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deletion has no body; only wrap the response when the caller
        # requested raw output.
        if raw:
            return ClientRawResponse(None, response)

    delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, disabled, or a
    # caller-supplied polling object.
    if polling is True:
        polling_method = ARMPolling(delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
def get(
        self, resource_group_name, virtual_network_name, subnet_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Gets the specified subnet by virtual network and resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param subnet_name: The name of the subnet.
    :type subnet_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Subnet or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2016_12_01.models.Subnet or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = self.get.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    if expand is not None:
        # $expand is only sent when the caller asked for it.
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)
    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Subnet', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
def _create_or_update_initial(
        self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, **operation_config):
    """Send the initial PUT request that starts the long-running subnet
    create-or-update.

    Returns the deserialized Subnet for 200/201 responses (or a
    ClientRawResponse when raw=True); raises CloudError otherwise.
    """
    # Construct URL
    url = self.create_or_update.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(subnet_parameters, 'Subnet')
    # Construct and send request
    request = self._client.put(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    if response.status_code not in [200, 201]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp
    deserialized = None
    # Both 200 (updated) and 201 (created) carry a Subnet body.
    if response.status_code == 200:
        deserialized = self._deserialize('Subnet', response)
    if response.status_code == 201:
        deserialized = self._deserialize('Subnet', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def create_or_update(
        self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Creates or updates a subnet in the specified virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param subnet_name: The name of the subnet.
    :type subnet_name: str
    :param subnet_parameters: Parameters supplied to the create or update
     subnet operation.
    :type subnet_parameters: ~azure.mgmt.network.v2016_12_01.models.Subnet
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Subnet or
     ClientRawResponse<Subnet> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.Subnet]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_12_01.models.Subnet]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the operation; always ask for the raw response so the
    # poller can inspect status headers.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        virtual_network_name=virtual_network_name,
        subnet_name=subnet_name,
        subnet_parameters=subnet_parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # The final response body deserializes to a Subnet model.
        subnet = self._deserialize('Subnet', response)
        if raw:
            return ClientRawResponse(subnet, response)
        return subnet

    delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, disabled, or a
    # caller-supplied polling object.
    if polling is True:
        polling_method = ARMPolling(delay, **operation_config)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
def list(
        self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
    """Gets all subnets in a virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Subnet
    :rtype:
     ~azure.mgmt.network.v2016_12_01.models.SubnetPaged[~azure.mgmt.network.v2016_12_01.models.Subnet]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    def internal_paging(next_link=None, raw=False):
        # Fetch one page: the first call builds the full URL; subsequent
        # calls follow the server-provided next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response: SubnetPaged drives internal_paging lazily.
    deserialized = models.SubnetPaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.SubnetPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'}
| |
#!/usr/bin/env python
"""
Copyright 2010-2017 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Python version of Ronnie Kamai Matlab scripts to generate a combined
bias plot. It collects information from the rd100 files for each
realization, groups the results by station (averaging) and then
generates a residuals file using the rd100 files from the recorded
data. This single residuals file uses the same resid2uncer_varN
program used in single bias plots to generate data for the combined
plot.
"""
# Import Python modules
import os
import sys
import glob
import numpy
import shutil
import optparse
import tempfile
# Import Broadband modules
import bband_utils
from PlotGOF import PlotGoF
from station_list import StationList
# Import Pynga and its utilities
import pynga.utils as putils
# --------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------
def summarize_rotd50(tmpdir, outdir, combined_resid_file,
                     comp_label, num_stations, num_realization,
                     codebase):
    """
    This function summarizes all rotd100 data and creates the combined
    rotd100/rotd50 ratio bias plot.

    tmpdir -- work directory holding intermediate files and the log
    outdir -- directory where the final plot is written
    combined_resid_file -- residuals file produced by create_resid_data_file
    comp_label -- event/comparison label used in titles and file names
    num_stations -- number of stations in the residuals file (nstat)
    num_realization -- number of realizations, shown in the plot title
    codebase -- simulation method name, used in file names and the title
    """
    # Figure out where out binaries are
    if "BBP_DIR" in os.environ:
        install_root = os.path.normpath(os.environ["BBP_DIR"])
    else:
        raise bband_utils.ProcessingError("BBP_DIR is not set!")
    gp_bin_dir = os.path.join(install_root, "src", "gp", "bin")
    logfile = os.path.join(tmpdir, "log.txt")
    # Run resid2uncer_varN once per component to produce the per-period
    # uncertainty files the plotter reads.
    for comp in ['rotd50', 'rotd100', 'ratio']:
        # Build paths and check lengths
        fileroot = os.path.join(tmpdir, "%s-%s-combined-rd100-%s" %
                                (codebase, comp_label, comp))
        bband_utils.check_path_lengths([combined_resid_file, fileroot],
                                       bband_utils.GP_MAX_FILENAME)
        cmd = ("%s/resid2uncer_varN " % (gp_bin_dir) +
               "residfile=%s fileroot=%s " % (combined_resid_file, fileroot) +
               "comp=%s nstat=%d nper=63 " % (comp, num_stations) +
               " >> %s 2>&1" % (logfile))
        bband_utils.runprog(cmd, abort_on_error=True)
    plottitle = ("Combined GOF Plot for %s\n%d Realizations\n%s Method" %
                 (comp_label, num_realization, codebase.upper()))
    fileroot = "%s-%s-combined-rd100" % (codebase, comp_label)
    plotter = PlotGoF()
    plotter.plot(plottitle, fileroot, tmpdir, outdir,
                 cutoff=0, mode="rd100", colorset="combined")
    print "Stations used: %s" % (num_stations)
def combine_station_data(station, input_dir, temp_dir):
    """
    This function combines data for a given station across multiple
    realizations, writing a single output file in temp_dir.

    Each realization contributes one <realization>.<station>.rd100 file.
    Rows are keyed by the first column (period); columns 4+ are averaged
    component-by-component across realizations. Raises
    bband_utils.ProcessingError if a realization is missing the station.
    """
    data = {}
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    for realization in realizations:
        basedir = os.path.join(input_dir, realization, "validations",
                               "baker_rd100")
        data_file = glob.glob("%s%s%s.%s.rd100" % (basedir, os.sep,
                                                   realization,
                                                   station))
        if len(data_file) != 1:
            raise bband_utils.ProcessingError("Data for station %s " %
                                              (station) +
                                              "not found for "
                                              "realization %s!" %
                                              (realization))
        data_file = data_file[0]
        in_data = open(data_file, 'r')
        # try/finally guarantees the input file is closed even if a
        # line fails to parse
        try:
            for line in in_data:
                line = line.strip()
                # Skip comments
                if line.startswith("#"):
                    continue
                pieces = [float(piece) for piece in line.split()]
                # Column 1 (period) is the key; columns 2-3 are dropped
                # and columns 4+ are the values to average
                key = pieces[0]
                pieces = pieces[3:]
                if key not in data:
                    # Key is new to dictionary
                    data[key] = [[] for _ in pieces]
                for idx, value in enumerate(pieces):
                    data[key][idx].append(value)
        finally:
            in_data.close()
    # Now, write the output file; the file is explicitly closed
    # (the previous version never closed it, risking unflushed data)
    out_file = open((os.path.join(temp_dir, "%s.rd100" % (station))), 'w')
    try:
        for key in sorted(data.keys()):
            out_file.write("%10.4f" % (key))
            for comp in data[key]:
                # Average this component across all realizations
                out_file.write(" %10.5e" % (numpy.mean(comp)))
            out_file.write("\n")
    finally:
        out_file.close()
def combine_realizations_data(input_dir, temp_dir):
    """
    This function creates a single file averaging the rd100 files for
    each of the stations across all realizations.

    Returns a (event_label, num_realizations, num_stations) tuple.
    The station set and event label are discovered from the first
    realization's baker_rd100 directory.
    """
    # Get realizations
    realizations = sorted(os.listdir(input_dir))
    one_realization = realizations[0]
    basedir = os.path.join(input_dir, one_realization,
                           "validations", "baker_rd100")
    # Figure out what our stations are
    rd100_files = glob.glob("%s%s%s.*.rd100" % (basedir,
                                                os.sep,
                                                one_realization))
    rd100_files = [os.path.basename(each_file) for each_file in rd100_files]
    # File names look like <realization>.<station>.rd100, so the station
    # is the second dot-separated token
    stations = [station.split(".")[1] for station in rd100_files]
    # Capture event_label
    bias_file = glob.glob("%s%s*.bias" % (basedir, os.sep))
    if len(bias_file) < 1:
        raise bband_utils.ProcessingError("Cannot find event label!")
    bias_file = bias_file[0]
    # Let's capture the event label (prefix before the first dash)
    event_label = os.path.basename(bias_file).split("-")[0]
    # Now walk through all realizations and combine stations data
    for station in stations:
        print "working on station: %s" % (station)
        combine_station_data(station, input_dir, temp_dir)
    return event_label, len(realizations), len(stations)
def trim_rd100_file(a_input_file, a_output_file):
    """
    Trims columns 2 and 3 of the input file so that the output file
    contains only the following: period, rotd50, rotd100, ratio.

    Comment lines (starting with '#') are copied through unchanged.
    """
    # Open input and output files
    in_file = open(a_input_file, 'r')
    out_file = open(a_output_file, 'w')
    # try/finally guarantees both files are closed even if processing
    # fails midway (the previous version leaked them on exceptions)
    try:
        for line in in_file:
            line = line.strip()
            # Pass comments through untouched
            if line.startswith('#'):
                out_file.write("%s\n" % (line))
                continue
            pieces = line.split()
            # Keep column 1 (period) and columns 4+ (rotd50/rotd100/ratio)
            pieces = pieces[0:1] + pieces[3:]
            out_file.write(" %s\n" % (" ".join(pieces)))
    finally:
        # Close everything
        in_file.close()
        out_file.close()
def create_resid_data_file(comp_label, input_indir, input_obsdir,
                           combined_file, temp_dir):
    """
    This function creates a file containing the combined residuals
    from the simulation data from all stations.

    comp_label -- event label for the residuals table
    input_indir -- cluster 'indata' directory (station list, src file)
    input_obsdir -- cluster 'outdata' directory (observed rd100 files)
    combined_file -- path of the residuals file to append to
    temp_dir -- work directory holding averaged per-station rd100 files
    """
    # Copy header for first file, set logfile
    copy_header = 1
    logfile = os.path.join(temp_dir, "log.txt")
    # Figure out where out binaries are
    if "BBP_DIR" in os.environ:
        install_root = os.path.normpath(os.environ["BBP_DIR"])
    else:
        raise bband_utils.ProcessingError("BBP_DIR is not set!")
    gp_bin_dir = os.path.join(install_root, "src", "gp", "bin")
    # Get realizations; station list and src file are taken from the
    # first realization (assumed identical across realizations --
    # TODO confirm)
    realizations = sorted(os.listdir(input_indir))
    one_realization = realizations[0]
    basedir = os.path.join(input_indir, one_realization)
    # Get the station list
    a_statfile = glob.glob("%s%s*.stl" % (basedir, os.sep))
    if len(a_statfile) != 1:
        raise bband_utils.ProcessingError("Cannot get station list!")
    a_statfile = a_statfile[0]
    slo = StationList(a_statfile)
    site_list = slo.getStationList()
    # Get source file
    a_srcfile = glob.glob("%s%s*.src" % (basedir, os.sep))
    if len(a_srcfile) != 1:
        raise bband_utils.ProcessingError("Cannot get src file!")
    a_srcfile = a_srcfile[0]
    # Parse it!
    src_keys = bband_utils.parse_src_file(a_srcfile)
    # Get the obsdir
    realizations = sorted(os.listdir(input_obsdir))
    one_realization = realizations[0]
    obs_dir = os.path.join(input_obsdir, one_realization,
                           "validations", "baker_rd100")
    obsfile = os.path.join(temp_dir, "rd100_obs.txt")
    # Go through all stations
    for site in site_list:
        slon = float(site.lon)
        slat = float(site.lat)
        stat = site.scode
        # Calculate Rrup (closest distance to the rupture surface)
        # from the fault geometry in the src file
        origin = (src_keys['lon_top_center'],
                  src_keys['lat_top_center'])
        dims = (src_keys['fault_length'], src_keys['dlen'],
                src_keys['fault_width'], src_keys['dwid'],
                src_keys['depth_to_top'])
        mech = (src_keys['strike'], src_keys['dip'],
                src_keys['rake'])
        site_geom = [float(site.lon), float(site.lat), 0.0]
        (fault_trace1, up_seis_depth,
         low_seis_depth, ave_dip,
         dummy1, dummy2) = putils.FaultTraceGen(origin, dims, mech)
        _, rrup, _ = putils.DistanceToSimpleFaultSurface(site_geom,
                                                         fault_trace1,
                                                         up_seis_depth,
                                                         low_seis_depth,
                                                         ave_dip)
        # Trim observation file (reused for each station)
        trim_rd100_file(os.path.join(obs_dir,
                                     "%s.rd100" % (stat)),
                        obsfile)
        simfile1 = os.path.join(temp_dir, "%s.rd100" % (stat))
        cmd = ("%s bbp_format=1 " %
               (os.path.join(gp_bin_dir, "gen_resid_tbl_3comp")) +
               "datafile1=%s simfile1=%s " % (obsfile, simfile1) +
               "comp1=rotd50 comp2=rotd100 comp3=ratio " +
               "eqname=%s mag=0.0 stat=%s lon=%.4f lat=%.4f " %
               (comp_label, stat, slon, slat) +
               "vs30=%d cd=%.2f " % (site.vs30, rrup) +
               "flo=%f fhi=%f " % (site.low_freq_corner,
                                   site.high_freq_corner) +
               "print_header=%d >> %s 2>> %s" %
               (copy_header, combined_file, logfile))
        bband_utils.runprog(cmd, abort_on_error=True)
        # Only the first station contributes the table header so the
        # combined file has a single header line
        if copy_header == 1:
            copy_header = 0
# --------------------------------------------------------------------------
# Main
# --------------------------------------------------------------------------
# Parse command-line options: top-level simulation dir, output dir, codebase
PARSER = optparse.OptionParser()
PARSER.add_option("-d", "--dir", dest="input_dir",
                  help="Input directory containing simulation results")
PARSER.add_option("-o", "--output_dir", dest="output_dir",
                  help="Output file")
PARSER.add_option("-c", "--codebase", dest="codebase",
                  help="Method used for the simulation")
(OPTIONS, ARGS) = PARSER.parse_args()
# Validate the input directory: must exist and contain the cluster
# script's "Sims" subdirectory
if OPTIONS.input_dir is None:
    PARSER.error("Please specify the input directory!")
TOP_INPUT_DIR = OPTIONS.input_dir
if not os.path.isdir(TOP_INPUT_DIR):
    PARSER.error("Invalid input directory!")
if not "Sims" in os.listdir(TOP_INPUT_DIR):
    PARSER.error("Please provide the top-level simulation directory!\n"
                 "This is the directory given to the cluster script")
INPUT_OUTDIR = os.path.join(TOP_INPUT_DIR, "Sims" , "outdata")
INPUT_TMPDIR = os.path.join(TOP_INPUT_DIR, "Sims" , "tmpdata")
INPUT_INDIR = os.path.join(TOP_INPUT_DIR, "Sims" , "indata")
# Validate the output directory
if OPTIONS.output_dir is None:
    PARSER.error("error specify output directory!")
else:
    OUTPUT_DIR = OPTIONS.output_dir
    if not os.path.isdir(OUTPUT_DIR):
        PARSER.error("Invalid output directory!")
if OPTIONS.codebase is None:
    PARSER.error("Please specify codebase!")
# Create temp dir for intermediate averaged/residual files
TMPDIR = tempfile.mkdtemp(prefix="bbp-")
COMBINED_FILE = os.path.join(TMPDIR,
                             "bbp-rd50-resid-combined.txt")
# Combine realizations' data (per-station averages across realizations)
(COMP_LABEL,
 NUM_REALIZATIONS,
 NUM_STAT) = combine_realizations_data(INPUT_OUTDIR,
                                       TMPDIR)
# Create data files with both gmpe and simulation data
create_resid_data_file(COMP_LABEL, INPUT_INDIR, INPUT_OUTDIR,
                       COMBINED_FILE, TMPDIR)
# Generate the combined bias plot from the residuals file
summarize_rotd50(TMPDIR, OUTPUT_DIR,
                 COMBINED_FILE,
                 COMP_LABEL,
                 NUM_STAT,
                 NUM_REALIZATIONS,
                 OPTIONS.codebase)
print "All Done!"
# Clean-up, all done!
shutil.rmtree(TMPDIR)
| |
"""
Support for Nest thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.nest/
"""
import logging
import voluptuous as vol
from homeassistant.components.nest import DATA_NEST
from homeassistant.components.climate import (
STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice,
PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW,
ATTR_TEMPERATURE)
from homeassistant.const import (
TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN)
# This platform requires the 'nest' component to be set up first.
DEPENDENCIES = ['nest']

_LOGGER = logging.getLogger(__name__)

# Allow overriding the polling interval; must be a positive integer.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_SCAN_INTERVAL):
        vol.All(vol.Coerce(int), vol.Range(min=1)),
})

# Nest-specific operation states not defined by the climate component.
STATE_ECO = 'eco'
STATE_HEAT_COOL = 'heat-cool'
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up Nest thermostat entities from discovery information."""
    # Only set up via discovery from the nest component.
    if discovery_info is None:
        return
    _LOGGER.debug("Setting up nest thermostat")

    unit = hass.config.units.temperature_unit
    thermostats = []
    for structure, device in hass.data[DATA_NEST].devices():
        thermostats.append(NestThermostat(structure, device, unit))
    # Second argument requests an immediate state update after adding.
    add_devices(thermostats, True)
class NestThermostat(ClimateDevice):
    """Representation of a Nest thermostat."""

    def __init__(self, structure, device, temp_unit):
        """Initialize the thermostat."""
        self._unit = temp_unit
        self.structure = structure
        self.device = device
        self._fan_list = [STATE_ON, STATE_AUTO]

        # Not all nest devices support cooling and heating remove unused
        self._operation_list = [STATE_OFF]

        # Add supported nest thermostat features
        if self.device.can_heat:
            self._operation_list.append(STATE_HEAT)
        if self.device.can_cool:
            self._operation_list.append(STATE_COOL)
        if self.device.can_heat and self.device.can_cool:
            self._operation_list.append(STATE_AUTO)
        self._operation_list.append(STATE_ECO)

        # feature of device
        self._has_fan = self.device.has_fan

        # data attributes, populated by update()
        self._away = None
        self._location = None
        self._name = None
        self._humidity = None
        self._target_temperature = None
        self._temperature = None
        self._temperature_scale = None
        self._mode = None
        self._fan = None
        self._eco_temperature = None
        self._is_locked = None
        self._locked_temperature = None
        self._min_temperature = None
        self._max_temperature = None

    @property
    def name(self):
        """Return the name of the nest, if any."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._temperature_scale

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._temperature

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        if self._mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
            return self._mode
        elif self._mode == STATE_HEAT_COOL:
            # The nest library reports 'heat-cool'; expose it as auto.
            return STATE_AUTO
        else:
            return STATE_UNKNOWN

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self._mode != STATE_HEAT_COOL and not self.is_away_mode_on:
            return self._target_temperature
        else:
            return None

    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        if (self.is_away_mode_on or self._mode == STATE_ECO) and \
                self._eco_temperature[0]:
            # eco_temperature is always a low, high tuple
            return self._eco_temperature[0]
        if self._mode == STATE_HEAT_COOL:
            return self._target_temperature[0]
        else:
            return None

    @property
    def target_temperature_high(self):
        """Return the upper bound temperature we try to reach."""
        if (self.is_away_mode_on or self._mode == STATE_ECO) and \
                self._eco_temperature[1]:
            # eco_temperature is always a low, high tuple
            return self._eco_temperature[1]
        if self._mode == STATE_HEAT_COOL:
            return self._target_temperature[1]
        else:
            return None

    @property
    def is_away_mode_on(self):
        """Return if away mode is on."""
        return self._away

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        # Initialize temp so a (low, high) pair passed while not in
        # heat-cool mode no longer raises UnboundLocalError.
        temp = None
        target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
        target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        if target_temp_low is not None and target_temp_high is not None:
            if self._mode == STATE_HEAT_COOL:
                # Heat-cool mode takes a (low, high) tuple target.
                temp = (target_temp_low, target_temp_high)
        else:
            temp = kwargs.get(ATTR_TEMPERATURE)
        _LOGGER.debug("Nest set_temperature-output-value=%s", temp)
        self.device.target = temp

    def set_operation_mode(self, operation_mode):
        """Set operation mode."""
        if operation_mode in [STATE_HEAT, STATE_COOL, STATE_OFF, STATE_ECO]:
            device_mode = operation_mode
        elif operation_mode == STATE_AUTO:
            device_mode = STATE_HEAT_COOL
        else:
            # Previously an unrecognized mode raised UnboundLocalError;
            # fall back to off and report the bad input instead.
            device_mode = STATE_OFF
            _LOGGER.error(
                "An error occurred while setting device mode. "
                "Invalid operation mode: %s", operation_mode)
        self.device.mode = device_mode

    @property
    def operation_list(self):
        """List of available operation modes."""
        return self._operation_list

    def turn_away_mode_on(self):
        """Turn away on."""
        self.structure.away = True

    def turn_away_mode_off(self):
        """Turn away off."""
        self.structure.away = False

    @property
    def current_fan_mode(self):
        """Return whether the fan is on."""
        if self._has_fan:
            # Return whether the fan is on
            return STATE_ON if self._fan else STATE_AUTO
        else:
            # No Fan available so disable slider
            return None

    @property
    def fan_list(self):
        """List of available fan modes."""
        return self._fan_list

    def set_fan_mode(self, fan):
        """Turn fan on/off."""
        self.device.fan = fan.lower()

    @property
    def min_temp(self):
        """Identify min_temp in Nest API or defaults if not available."""
        return self._min_temperature

    @property
    def max_temp(self):
        """Identify max_temp in Nest API or defaults if not available."""
        return self._max_temperature

    def update(self):
        """Cache value from Python-nest."""
        self._location = self.device.where
        self._name = self.device.name
        # NOTE: the stray trailing comma here previously stored a
        # one-element tuple instead of the humidity value -- fixed.
        self._humidity = self.device.humidity
        self._temperature = self.device.temperature
        self._mode = self.device.mode
        self._target_temperature = self.device.target
        self._fan = self.device.fan
        self._away = self.structure.away == 'away'
        self._eco_temperature = self.device.eco_temperature
        self._locked_temperature = self.device.locked_temperature
        self._min_temperature = self.device.min_temperature
        self._max_temperature = self.device.max_temperature
        self._is_locked = self.device.is_locked
        if self.device.temperature_scale == 'C':
            self._temperature_scale = TEMP_CELSIUS
        else:
            self._temperature_scale = TEMP_FAHRENHEIT
| |
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import *  # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
SECURITY_MIDDLEWARE = (
    'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
                    'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
# Security and Sentry middleware must run before everything inherited from
# common settings, so prepend rather than append.
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
    RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): SESSION_COOKIE_SECURE is False even though SSL redirect is
# enabled below, so session cookies could be sent over plain HTTP.
# Confirm this is intentional; True is the safe production value.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['mkalalive5.ru'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
    'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
# (7 days, in seconds)
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
# Static files share the S3 bucket (and URL) with uploaded media.
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='mkala_live <noreply@mkalalive5.ru>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[mkala_live] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Cache compiled templates in production (templates do not change at runtime).
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
# (the trailing "/0" below selects Redis database 0).
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            "IGNORE_EXCEPTIONS": True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Route everything WARNING+ to Sentry by default; keep noisy loggers on the
# console only so they do not flood the error tracker.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console'],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console'],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry'],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
| |
import glob
import imp
import os
import pkgutil
import re
import sys
import tarfile
from . import *
# True on Python 3; used to paper over py2/py3 text-type differences below.
PY3 = sys.version_info[0] == 3
if PY3:
    _text_type = str
else:
    _text_type = unicode
# Matches dev version strings such as "1.2.dev42" or "1.2.3.dev42";
# group 1 captures the dev revision count.
_DEV_VERSION_RE = re.compile(r'\d+\.\d+(?:\.\d+)?\.dev(\d+)')
# Minimal setup.py template for the throwaway "_eva_" test package; it
# exercises astropy_helpers' git machinery to append the dev string.
TEST_VERSION_SETUP_PY = """\
#!/usr/bin/env python
from setuptools import setup
NAME = '_eva_'
VERSION = {version!r}
RELEASE = 'dev' not in VERSION
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
if not RELEASE:
    VERSION += get_git_devstr(False)
generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE)
setup(name=NAME, version=VERSION, packages=['_eva_'])
"""
# __init__.py template mirroring astropy's generated version plumbing.
TEST_VERSION_INIT = """\
try:
    from .version import version as __version__
    from .version import githash as __githash__
except ImportError:
    __version__ = __githash__ = ''
"""
@pytest.fixture
def version_test_package(tmpdir, request):
    """Factory fixture building a throwaway, git-tracked '_eva_' package."""
    def _create(version='42.42.dev'):
        pkg = tmpdir.mkdir('test_package')
        pkg.join('setup.py').write(
            TEST_VERSION_SETUP_PY.format(version=version))
        pkg.mkdir('_eva_').join('__init__.py').write(TEST_VERSION_INIT)
        # Commit everything so the git dev-string machinery has history.
        with pkg.as_cwd():
            run_cmd('git', ['init'])
            run_cmd('git', ['add', '--all'])
            run_cmd('git', ['commit', '-m', 'test package'])
        # Ensure the current directory is importable, and listed only once.
        if '' in sys.path:
            sys.path.remove('')
        sys.path.insert(0, '')
        request.addfinalizer(lambda: cleanup_import('_eva_'))
        return pkg
    return _create
def test_update_git_devstr(version_test_package, capsys):
    """Tests that the commit number in the package's version string updates
    after git commits even without re-running setup.py.
    """
    # We have to call version_test_package to actually create the package
    test_pkg = version_test_package()
    with test_pkg.as_cwd():
        run_setup('setup.py', ['--version'])
        stdout, stderr = capsys.readouterr()
        version = stdout.strip()
        m = _DEV_VERSION_RE.match(version)
        assert m, (
            "Stdout did not match the version string pattern:"
            "\n\n{0}\n\nStderr:\n\n{1}".format(stdout, stderr))
        revcount = int(m.group(1))
        import _eva_
        assert _eva_.__version__ == version
        # Make a silly git commit
        with open('.test', 'w'):
            pass
        run_cmd('git', ['add', '.test'])
        run_cmd('git', ['commit', '-m', 'test'])
        # Reload the version module so it recomputes the git dev string.
        import _eva_.version
        imp.reload(_eva_.version)
        # Previously this checked packagename.__version__, but in order for that to
        # be updated we also have to re-import _astropy_init which could be tricky.
        # Checking directly that the packagename.version module was updated is
        # sufficient:
        m = _DEV_VERSION_RE.match(_eva_.version.version)
        assert m
        # One new commit should bump the dev revision count by exactly one.
        assert int(m.group(1)) == revcount + 1
        # This doesn't test astropy_helpers.get_helpers.update_git_devstr directly
        # since a copy of that function is made in packagename.version (so that it
        # can work without astropy_helpers installed). In order to get test
        # coverage on the actual astropy_helpers copy of that function just call it
        # directly and compare to the value in packagename
        from astropy_helpers.git_helpers import update_git_devstr
        newversion = update_git_devstr(version, path=str(test_pkg))
        assert newversion == _eva_.version.version
def test_version_update_in_other_repos(version_test_package, tmpdir):
    """
    Regression test for https://github.com/astropy/astropy-helpers/issues/114
    and for https://github.com/astropy/astropy-helpers/issues/107
    """
    test_pkg = version_test_package()
    with test_pkg.as_cwd():
        run_setup('setup.py', ['build'])
    # Add the path to the test package to sys.path for now
    sys.path.insert(0, str(test_pkg))
    try:
        import _eva_
        m = _DEV_VERSION_RE.match(_eva_.__version__)
        assert m
        correct_revcount = int(m.group(1))
        with tmpdir.as_cwd():
            testrepo = tmpdir.mkdir('testrepo')
            testrepo.chdir()
            # Create an empty git repo
            run_cmd('git', ['init'])
            # Reloading from inside an unrelated repo must NOT change the
            # reported revision count (that was the bug in issues 107/114).
            import _eva_.version
            imp.reload(_eva_.version)
            m = _DEV_VERSION_RE.match(_eva_.version.version)
            assert m
            assert int(m.group(1)) == correct_revcount
            correct_revcount = int(m.group(1))
            # Add several commits--more than the revcount for the _eva_ package
            for idx in range(correct_revcount + 5):
                test_filename = '.test' + str(idx)
                testrepo.ensure(test_filename)
                run_cmd('git', ['add', test_filename])
                run_cmd('git', ['commit', '-m', 'A message'])
            # Commits made in the unrelated repo must still not leak into the
            # package's dev revision count.
            import _eva_.version
            imp.reload(_eva_.version)
            m = _DEV_VERSION_RE.match(_eva_.version.version)
            assert m
            assert int(m.group(1)) == correct_revcount
            correct_revcount = int(m.group(1))
    finally:
        sys.path.remove(str(test_pkg))
@pytest.mark.parametrize('version', ['1.0.dev', '1.0'])
def test_installed_git_version(version_test_package, version, tmpdir, capsys):
    """
    Test for https://github.com/astropy/astropy-helpers/issues/87
    Ensures that packages installed with astropy_helpers have a correct copy
    of the git hash of the installed commit.
    """
    # To test this, it should suffice to build a source dist, unpack it
    # somewhere outside the git repository, and then do a build and import
    # from the build directory--no need to "install" as such
    test_pkg = version_test_package(version)
    with test_pkg.as_cwd():
        run_setup('setup.py', ['build'])
        try:
            import _eva_
            githash = _eva_.__githash__
            assert githash and isinstance(githash, _text_type)
            # Ensure that it does in fact look like a git hash and not some
            # other arbitrary string
            assert re.match(r'[0-9a-f]{40}', githash)
        finally:
            cleanup_import('_eva_')
        # Build a source distribution and unpack it outside the git repo.
        run_setup('setup.py', ['sdist', '--dist-dir=dist', '--formats=gztar'])
        tgzs = glob.glob(os.path.join('dist', '*.tar.gz'))
        assert len(tgzs) == 1
        tgz = test_pkg.join(tgzs[0])
    build_dir = tmpdir.mkdir('build_dir')
    tf = tarfile.open(str(tgz), mode='r:gz')
    tf.extractall(str(build_dir))
    with build_dir.as_cwd():
        pkg_dir = glob.glob('_eva_-*')[0]
        os.chdir(pkg_dir)
        run_setup('setup.py', ['build'])
        try:
            import _eva_
            loader = pkgutil.get_loader('_eva_')
            # Ensure we are importing the 'packagename' that was just unpacked
            # into the build_dir
            if sys.version_info[:2] != (3, 3):
                # Skip this test on Python 3.3 wherein the SourceFileLoader
                # has a bug where get_filename() does not return an absolute
                # path
                assert loader.get_filename().startswith(str(build_dir))
            # The unpacked copy must carry the same git hash as the repo build.
            assert _eva_.__githash__ == githash
        finally:
            cleanup_import('_eva_')
| |
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103,R0903,R0904
import pytest
import numpy as np
from mock import MagicMock
from ..data import (ComponentID, Component, Data,
DerivedComponent, pixel_label, CategoricalComponent)
from ..coordinates import Coordinates
from ..subset import Subset, SubsetState
from ..hub import Hub
from ..exceptions import IncompatibleAttribute
from ..component_link import ComponentLink
from ..registry import Registry
class TestCoordinates(Coordinates):
    """Toy coordinate system: world value = (axis index + 2) * pixel value."""

    def pixel2world(self, *args):
        return [(idx + 2.) * val for idx, val in enumerate(args)]

    def world2pixel(self, *args):
        return [val / (idx + 2.) for idx, val in enumerate(args)]
class TestData(object):
    """Unit tests for the core Data container.

    Python-3 compatibility fixes relative to the original: the bare
    ``print`` statements (debug leftovers, SyntaxError on py3) were removed
    and the ``basestring`` check (NameError on py3) now uses ``str``.
    """

    def setup_method(self, method):
        # Fresh 2x3 data set with a single component for every test.
        self.data = Data(label="Test Data")
        Registry().clear()
        comp = Component(np.random.random((2, 3)))
        self.comp = comp
        self.data.coords = TestCoordinates()
        self.comp_id = self.data.add_component(comp, 'Test Component')

    def test_2d_component_print(self):
        assert str(self.comp) == 'Component with shape (2, 3)'

    def test_shape_empty(self):
        d = Data()
        assert d.shape == ()

    def test_ndim_empty(self):
        d = Data()
        assert d.ndim == 0

    def test_shape(self):
        assert self.data.shape == (2, 3)

    def test_ndim(self):
        assert self.data.ndim == 2

    def test_size(self):
        assert self.data.size == 6

    def test_label(self):
        d = Data()
        assert d.label == ''
        assert self.data.label == "Test Data"

    def test_set_label(self):
        d = Data()
        d.label = 'test_set_label'
        assert d.label == 'test_set_label'

    def test_add_component_with_id(self):
        cid = ComponentID("test")
        comp = Component(np.random.random((2, 3)))
        cid2 = self.data.add_component(comp, cid)
        assert cid2 is cid

    def test_add_component_incompatible_shape(self):
        comp = MagicMock()
        comp.data.shape = (3, 2)
        with pytest.raises(TypeError) as exc:
            self.data.add_component(comp("junk label"))
        # ``exc.value`` was the bare message string on Python 2.6; checking
        # against ``str`` keeps that branch working without the Python-2-only
        # ``basestring`` builtin (which raised NameError under Python 3).
        if isinstance(exc.value, str):  # python 2.6
            assert exc.value == ("add_component() takes at least 3 "
                                 "arguments (2 given)")
        else:
            assert exc.value.args[0] == ("add_component() takes at least 3 "
                                         "arguments (2 given)")

    def test_get_getitem_incompatible_attribute(self):
        cid = ComponentID('bad')
        with pytest.raises(IncompatibleAttribute) as exc:
            self.data.__getitem__(cid)
        assert exc.value.args[0] is cid

    def test_get_component_incompatible_attribute(self):
        cid = ComponentID('bad')
        with pytest.raises(IncompatibleAttribute) as exc:
            self.data.get_component(cid)
        assert exc.value.args[0] is cid

    def test_get_component_name(self):
        d = Data(x=[1, 2, 3])
        assert isinstance(d.get_component('x'), Component)

    def test_component_ids(self):
        cid = self.data.component_ids()
        assert self.comp_id in cid

    def test_new_subset(self):
        sub = self.data.new_subset()
        assert sub in self.data.subsets

    def test_data_not_created_with_subsets(self):
        assert len(self.data.subsets) == 0

    def test_register(self):
        hub = MagicMock(spec_set=Hub)
        not_hub = MagicMock()
        self.data.register_to_hub(hub)
        assert hub is self.data.hub
        with pytest.raises(TypeError) as exc:
            self.data.register_to_hub(not_hub)
        assert exc.value.args[0].startswith("input is not a Hub object")

    def test_component_order(self):
        """Components should be returned in the order they were specified"""
        data = Data()
        comp = Component(np.array([1, 2, 3]))
        labels = 'asldfkjaAREGWoibasiwnsldkgajsldkgslkg'
        for label in labels:
            data.add_component(comp, label)
        ids = data.visible_components
        assert [cid.label for cid in ids] == list(labels)

    def test_broadcast(self):
        hub = MagicMock(spec_set=Hub)
        # make sure broadcasting with no hub is ok
        self.data.broadcast()
        # make sure broadcast with hub gets relayed
        self.data.register_to_hub(hub)
        self.data.broadcast()
        assert hub.broadcast.call_count == 1

    def test_double_hub_add(self):
        hub = MagicMock(spec_set=Hub)
        hub2 = MagicMock(spec_set=Hub)
        self.data.register_to_hub(hub)
        with pytest.raises(AttributeError) as exc:
            self.data.__setattr__('hub', hub2)
        assert exc.value.args[0] == ("Data has already been assigned "
                                     "to a different hub")

    def test_primary_components(self):
        compid = ComponentID('virtual')
        link = MagicMock(spec_set=ComponentLink)
        comp = DerivedComponent(self.data, link)
        self.data.add_component(comp, compid)
        pricomps = self.data.primary_components
        # Derived components must not count as primary.
        assert self.comp_id in pricomps
        assert compid not in pricomps

    def test_add_component_invalid_label(self):
        with pytest.raises(TypeError) as exc:
            self.data.add_component(self.comp, label=5)
        assert exc.value.args[0] == "label must be a ComponentID or string"

    def test_add_component_invalid_component(self):
        comp = Component(np.array([1]))
        with pytest.raises(ValueError) as exc:
            self.data.add_component(comp, label='bad')
        assert exc.value.args[0].startswith("The dimensions of component bad")

    def test_add_component_link(self):
        link = MagicMock(spec_set=ComponentLink)
        cid = ComponentID("new id")
        link.get_to_id.return_value = cid
        self.data.add_component_link(link)
        assert cid in self.data.derived_components

    def test_derived_components(self):
        compid = ComponentID('virtual')
        link = MagicMock(spec_set=ComponentLink)
        comp = DerivedComponent(self.data, link)
        self.data.add_component(comp, compid)
        pricomps = self.data.derived_components
        assert self.comp_id not in pricomps
        assert compid in pricomps

    def test_str_empty(self):
        d = Data()
        str(d)

    def test_str_(self):
        str(self.data)

    def test_add_derived_component(self):
        compid = ComponentID('virtual')
        link = MagicMock(spec_set=ComponentLink)
        comp = DerivedComponent(self.data, link)
        comp.data.shape = self.data.shape
        self.data.add_component(comp, compid)
        # Accessing the component should delegate to the link's compute().
        self.data[compid]
        link.compute.assert_called_with(self.data)

    def test_find_component_id(self):
        cid = self.data.find_component_id('Test Component')
        assert cid == self.comp_id
        assert self.data.find_component_id('does not exist') is None

    def test_add_subset(self):
        s = Subset(None)
        self.data.add_subset(s)
        assert s in self.data.subsets

    def test_add_subset_with_subset_state(self):
        """Passing a subset state auto-wraps into a subset object"""
        state = SubsetState()
        self.data.add_subset(state)
        added = self.data.subsets[-1]
        assert added.subset_state is state
        assert added.data is self.data

    def test_add_subset_reparents_subset(self):
        """add_subset method updates subset.data reference"""
        s = Subset(None)
        self.data.add_subset(s)
        assert s.data is self.data

    def test_add_subset_disambiguates_label(self):
        """adding subset should disambiguate label if needed"""
        s1 = Subset(None)
        self.data.add_subset(s1)
        s1.label = "test_subset_label"
        s2 = Subset(None)
        s2.label = "test_subset_label"
        assert s2.label == "test_subset_label"
        self.data.add_subset(s2)
        assert s2.label != "test_subset_label"

    def test_add_subset_with_hub(self):
        s = Subset(None)
        hub = MagicMock(spec_set=Hub)
        self.data.register_to_hub(hub)
        self.data.add_subset(s)
        assert s in self.data.subsets
        assert hub.broadcast.call_count == 1

    def test_remove_component(self):
        self.data.remove_component(self.comp_id)
        assert self.comp_id not in self.data.components

    def test_get_component(self):
        assert self.data.get_component(self.comp_id) is self.comp

    def test_get_None_component(self):
        with pytest.raises(IncompatibleAttribute):
            self.data.get_component(None)

    def test_get_item(self):
        assert self.data[self.comp_id] is self.comp.data

    def test_coordinate_links(self):
        links = self.data.coordinate_links
        w0 = self.data[self.data.get_world_component_id(0)]
        w1 = self.data[self.data.get_world_component_id(1)]
        p0 = self.data[self.data.get_pixel_component_id(0)]
        p1 = self.data[self.data.get_pixel_component_id(1)]
        w0prime = links[0].compute(self.data)
        p0prime = links[1].compute(self.data)
        w1prime = links[2].compute(self.data)
        p1prime = links[3].compute(self.data)
        np.testing.assert_array_equal(w0, w0prime)
        np.testing.assert_array_equal(w1, w1prime)
        np.testing.assert_array_equal(p0, p0prime)
        np.testing.assert_array_equal(p1, p1prime)

    def test_coordinate_links_empty_data(self):
        d = Data()
        d.coords = None
        assert d.coordinate_links == []

    def test_coordinate_links_idempotent(self):
        """Should only calculate links once, and
        return the same objects every time"""
        links = self.data.coordinate_links
        links2 = self.data.coordinate_links
        assert links == links2

    def test_fancy_view(self):
        result = self.data[self.comp_id, :, 2]
        np.testing.assert_array_equal(result, self.data[self.comp_id][:, 2])

    def test_get_by_string(self):
        result = self.data['Test Component']
        assert result is self.comp.data

    def test_get_by_missing_string(self):
        with pytest.raises(IncompatibleAttribute) as exc:
            self.data['xyz']
        assert exc.value.args[0] == 'xyz'

    def test_immutable(self):
        d = Data(x=[1, 2, 3])
        with pytest.raises(ValueError) as exc:
            d['x'][:] = 5
        assert 'read-only' in exc.value.args[0]
        assert not d['x'].flags['WRITEABLE']

    def test_categorical_immutable(self):
        d = Data()
        c = CategoricalComponent(['M', 'M', 'F'], categories=['M', 'F'])
        d.add_component(c, label='gender')
        with pytest.raises(ValueError) as exc:
            d['gender'][:] = 5
        assert 'read-only' in exc.value.args[0]
        assert not d['gender'].flags['WRITEABLE']
def test_component_id_item_access():
    """The ``id`` attribute maps component labels to their ComponentIDs."""
    data = Data()
    data.add_component(Component(np.array([1, 2, 3])), 'values')
    data.add_component(Component(np.array([4., 5., 6.])), 'Flux')
    assert data.id['values'] == data.find_component_id('values')
    assert data.id['Flux'] == data.find_component_id('Flux')
def test_component_id_item_access_missing():
    """The ``id`` attribute raises KeyError for an unknown component label."""
    with pytest.raises(KeyError):
        Data().id['not found']
class TestPixelLabel(object):

    def test(self):
        """Trailing axes are named z/y/x; anything else is 'Axis <i>'."""
        expected = [
            ((0, 2), "y"), ((1, 2), "x"),
            ((0, 3), "z"), ((1, 3), "y"), ((2, 3), "x"),
            ((1, 0), "Axis 1"), ((1, 4), "Axis 1"),
        ]
        for (axis, ndim), label in expected:
            assert pixel_label(axis, ndim) == label
@pytest.mark.parametrize(('kwargs'),
                         [{'x': [1, 2, 3]},
                          {'x': np.array([1, 2, 3])},
                          {'x': [[1, 2, 3], [2, 3, 4]]},
                          {'x': [1, 2], 'y': [2, 3]}])
def test_init_with_inputs(kwargs):
    """Array-like keywords passed to Data become Components whose
    labels match the keyword names."""
    d = Data(**kwargs)
    for label, expected in kwargs.items():
        np.testing.assert_array_equal(d[d.id[label]], expected)
def test_init_with_invalid_kwargs():
    """Components with mismatched shapes are rejected at construction."""
    with pytest.raises(ValueError) as exc:
        Data(x=[1, 2], y=[1, 2, 3])
    assert exc.value.args[0].startswith('The dimensions of component')
def test_getitem_with_component_link():
    """Indexing Data with a ComponentLink evaluates the link on the fly."""
    d = Data(x=[1, 2, 3, 4])
    scaled = d.id['x'] * 5
    np.testing.assert_array_equal(d[scaled], [5, 10, 15, 20])
def test_getitem_with_component_link_and_slice():
    """A ComponentLink lookup can be combined with a slice in one index."""
    d = Data(x=[1, 2, 3, 4])
    scaled = d.id['x'] * 5
    np.testing.assert_array_equal(d[scaled, ::2], [5, 15])
def test_add_link_with_binary_link():
    """Arithmetic on ComponentIDs yields a link addable as a new component."""
    d = Data(x=[1, 2, 3, 4], y=[4, 5, 6, 7])
    combined = d.id['x'] + d.id['y']
    d.add_component_link(combined, 'z')
    np.testing.assert_array_equal(d[d.id['z']], [5, 7, 9, 11])
def test_foreign_pixel_components_not_in_visible():
    """Pixel components from other data should not be visible"""
    # currently, this is trivially satisfied since all coordinates are hidden
    from ..link_helpers import LinkSame
    from ..data_collection import DataCollection
    d1 = Data(x=[1], y=[2])
    d2 = Data(w=[3], v=[4])
    dc = DataCollection([d1, d2])
    # Link an attribute and the world coordinates so d2's pixel component
    # becomes *computable* from d1 without becoming *visible* on d1.
    dc.add_link(LinkSame(d1.id['x'], d2.id['w']))
    dc.add_link(LinkSame(d1.get_world_component_id(0),
                         d2.get_world_component_id(0)))
    assert d2.get_pixel_component_id(0) not in d1.visible_components
    np.testing.assert_array_equal(d1[d2.get_pixel_component_id(0)], [0])
| |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any, Dict, List, TYPE_CHECKING
import numpy as np
from cirq import protocols
from cirq.value import big_endian_int_to_digits, linear_dict
if TYPE_CHECKING:
import cirq
class StabilizerState(metaclass=abc.ABCMeta):
    """Interface for quantum stabilizer state representations.

    This interface is used for CliffordTableau and StabilizerChForm quantum
    state representations, allowing simulators to act on them abstractly.
    """

    @abc.abstractmethod
    def apply_x(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Apply an X operation to the state.

        Args:
            axis: The axis to which the operation should be applied.
            exponent: The exponent of the X operation, must be a half-integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not half-integer.
        """

    @abc.abstractmethod
    def apply_y(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Apply a Y operation to the state.

        Args:
            axis: The axis to which the operation should be applied.
            exponent: The exponent of the Y operation, must be a half-integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not half-integer.
        """

    @abc.abstractmethod
    def apply_z(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Apply a Z operation to the state.

        Args:
            axis: The axis to which the operation should be applied.
            exponent: The exponent of the Z operation, must be a half-integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not half-integer.
        """

    @abc.abstractmethod
    def apply_h(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Apply an H operation to the state.

        Args:
            axis: The axis to which the operation should be applied.
            exponent: The exponent of the H operation, must be an integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not an integer.
        """

    @abc.abstractmethod
    def apply_cz(
        self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
    ):
        """Apply a CZ operation to the state.

        Args:
            control_axis: The control axis of the operation.
            target_axis: The axis to which the operation should be applied.
            exponent: The exponent of the CZ operation, must be an integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not an integer.
        """

    @abc.abstractmethod
    def apply_cx(
        self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
    ):
        """Apply a CX operation to the state.

        Args:
            control_axis: The control axis of the operation.
            target_axis: The axis to which the operation should be applied.
            exponent: The exponent of the CX operation, must be an integer.
            global_shift: The global phase shift of the raw operation, prior to
                exponentiation. Typically the value in `gate.global_shift`.
        Raises:
            ValueError: If the exponent is not an integer.
        """

    @abc.abstractmethod
    def apply_global_phase(self, coefficient: linear_dict.Scalar):
        """Apply a global phase to the state.

        Args:
            coefficient: The global phase to apply.
        """
class CliffordTableau(StabilizerState):
"""Tableau representation of a stabilizer state
(based on Aaronson and Gottesman 2006).
The tableau stores the stabilizer generators of
the state using three binary arrays: xs, zs, and rs.
Each row of the arrays represents a Pauli string, P, that is
an eigenoperator of the state vector with eigenvalue one: P|psi> = |psi>.
"""
    def __init__(self, num_qubits, initial_state: int = 0):
        """Initializes CliffordTableau

        Args:
            num_qubits: The number of qubits in the system.
            initial_state: The computational basis representation of the
                state as a big endian int.
        """
        self.n = num_qubits
        # The last row (`2n+1`-th row) is the scratch row used in _measurement
        # computation process only. It should not be exposed to external usage.
        self._rs = np.zeros(2 * self.n + 1, dtype=bool)
        # Rows n..2n-1 hold the stabilizer generators; seed their sign bits
        # from the bits of the requested computational basis state.
        for (i, val) in enumerate(
            big_endian_int_to_digits(initial_state, digit_count=num_qubits, base=2)
        ):
            self._rs[self.n + i] = bool(val)
        self._xs = np.zeros((2 * self.n + 1, self.n), dtype=bool)
        self._zs = np.zeros((2 * self.n + 1, self.n), dtype=bool)
        # Identity Clifford: destabilizer rows are X_i, stabilizer rows are Z_i.
        for i in range(self.n):
            self._xs[i, i] = True
            self._zs[self.n + i, i] = True
@property
def xs(self) -> np.array:
return self._xs[:-1, :]
@xs.setter
def xs(self, new_xs: np.array) -> None:
assert np.shape(new_xs) == (2 * self.n, self.n)
self._xs[:-1, :] = np.array(new_xs).astype(bool)
@property
def zs(self) -> np.array:
return self._zs[:-1, :]
@zs.setter
def zs(self, new_zs: np.array) -> None:
assert np.shape(new_zs) == (2 * self.n, self.n)
self._zs[:-1, :] = np.array(new_zs).astype(bool)
@property
def rs(self) -> np.array:
return self._rs[:-1]
@rs.setter
def rs(self, new_rs: np.array) -> None:
assert np.shape(new_rs) == (2 * self.n,)
self._rs[:-1] = np.array(new_rs).astype(bool)
def matrix(self) -> np.array:
"""Returns the 2n * 2n matrix representation of the Clifford tableau."""
return np.concatenate([self.xs, self.zs], axis=1)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['n', 'rs', 'xs', 'zs'])
@classmethod
def _from_json_dict_(cls, n, rs, xs, zs, **kwargs):
state = cls(n)
state.rs = np.array(rs).astype(bool)
state.xs = np.array(xs).astype(bool)
state.zs = np.array(zs).astype(bool)
return state
def _validate(self) -> bool:
"""Check if the Clifford Tabluea satisfies the symplectic property."""
table = np.concatenate([self.xs, self.zs], axis=1)
perm = list(range(self.n, 2 * self.n)) + list(range(self.n))
skew_eye = np.eye(2 * self.n, dtype=int)[perm]
return np.array_equal(np.mod(table.T.dot(skew_eye).dot(table), 2), skew_eye)
def __eq__(self, other):
if not isinstance(other, type(self)):
# coverage: ignore
return NotImplemented
return (
self.n == other.n
and np.array_equal(self.rs, other.rs)
and np.array_equal(self.xs, other.xs)
and np.array_equal(self.zs, other.zs)
)
def __copy__(self) -> 'CliffordTableau':
return self.copy()
def copy(self) -> 'CliffordTableau':
state = CliffordTableau(self.n)
state.rs = self.rs.copy()
state.xs = self.xs.copy()
state.zs = self.zs.copy()
return state
def __repr__(self) -> str:
stabilizers = ", ".join([repr(stab) for stab in self.stabilizers()])
return f'stabilizers: [{stabilizers}]'
def __str__(self) -> str:
string = ''
for i in range(self.n, 2 * self.n):
string += '- ' if self.rs[i] else '+ '
for k in range(0, self.n):
if self.xs[i, k] & (not self.zs[i, k]):
string += 'X '
elif (not self.xs[i, k]) & self.zs[i, k]:
string += 'Z '
elif self.xs[i, k] & self.zs[i, k]:
string += 'Y '
else:
string += 'I '
if i < 2 * self.n - 1:
string += '\n'
return string
    def _str_full_(self) -> str:
        """Two-column rendering: stabilizer | destabilizer, one qubit per row."""
        string = ''
        # Header and horizontal rule, padded to fit `n` qubit columns.
        string += 'stable' + ' ' * max(self.n * 2 - 3, 1)
        string += '| destable\n'
        string += '-' * max(7, self.n * 2 + 3) + '+' + '-' * max(10, self.n * 2 + 4) + '\n'
        for j in range(self.n):
            # Stabilizer row (j + n) first, then the matching destabilizer row j.
            for i in [j + self.n, j]:
                string += '- ' if self.rs[i] else '+ '
                for k in range(0, self.n):
                    if self.xs[i, k] & (not self.zs[i, k]):
                        string += 'X%d' % k
                    elif (not self.xs[i, k]) & self.zs[i, k]:
                        string += 'Z%d' % k
                    elif self.xs[i, k] & self.zs[i, k]:
                        string += 'Y%d' % k
                    else:
                        string += '  '
                if i == j + self.n:
                    # Column separator between the stabilizer and destabilizer.
                    string += ' ' * max(0, 4 - self.n * 2) + ' | '
            string += '\n'
        return string
    def then(self, second: 'CliffordTableau') -> 'CliffordTableau':
        """Returns a composed CliffordTableau of this tableau and the second tableau.

        Then composed tableau is equal to (up to global phase) the composed
        unitary operation of the two tableaux, i.e. equivalent to applying the unitary
        operation of this CliffordTableau then applying the second one.

        Args:
            second: The second CliffordTableau to compose with.

        Returns:
            The composed CliffordTableau.

        Raises:
            TypeError: If the type of second is not CliffordTableau.
            ValueError: If the number of qubits in the second tableau mismatch with
                this tableau.
        """
        if not isinstance(second, CliffordTableau):
            raise TypeError("The type for second tableau must be the CliffordTableau type")
        if self.n != second.n:
            raise ValueError(
                f"Mismatched number of qubits of two tableaux: {self.n} vs {second.n}."
            )

        # Convert the underlying data type from bool to int for easier numerical computation.
        m1 = self.matrix().astype(int)
        m2 = second.matrix().astype(int)

        # The following computation is based on Theorem 36 in
        # https://arxiv.org/pdf/2009.03218.pdf.
        # Any pauli string (one stabilizer) in Clifford Tableau should be able to be expressed as
        #     (1i)^p (-1)^s X^(mx) Z^(mz)
        # where p and s are binary scalar and mx and mz are binary vectors.
        # num_ys* counts Y operators per row (both X and Z bits set on a qubit).
        num_ys1 = np.sum(m1[:, : self.n] * m1[:, self.n :], axis=1)
        num_ys2 = np.sum(m2[:, : self.n] * m2[:, self.n :], axis=1)
        p1 = np.mod(num_ys1, 2)
        p2 = np.mod(num_ys2, 2)

        # Note the `s` is not equal to `r`, which depends on the number of Y gates.
        # For example, r * Y_1Y_2Y_3 can be expanded into i^3 * r * X_1Z_1 X_2Z_2 X_3Z_3.
        # The global phase is i * (-1) * r ==> s = r + 1 and p = 1.
        s1 = self.rs.astype(int) + np.mod(num_ys1, 4) // 2
        s2 = second.rs.astype(int) + np.mod(num_ys2, 4) // 2

        # lmbda: 2n x 2n block matrix with an n x n identity in the upper-right
        # block, used in the anti-commutation correction term below.
        lmbda = np.zeros((2 * self.n, 2 * self.n))
        lmbda[: self.n, self.n :] = np.eye(self.n)

        # Compose the binary symplectic matrices and the (p, s) phase data.
        m_12 = np.mod(m1 @ m2, 2)
        p_12 = np.mod(p1 + m1 @ p2, 2)
        s_12 = (
            s1
            + m1 @ s2
            + p1 * (m1 @ p2)
            + np.diag(m1 @ np.tril(np.outer(p2, p2.T) + m2 @ lmbda @ m2.T, -1) @ m1.T)
        )
        # Translate the composed (p, s) phase back into the tableau's sign bit.
        num_ys12 = np.sum(m_12[:, : self.n] * m_12[:, self.n :], axis=1)
        merged_sign = np.mod(p_12 + 2 * s_12 - num_ys12, 4) // 2

        merged_tableau = CliffordTableau(num_qubits=self.n)
        merged_tableau.xs = m_12[:, : self.n]
        merged_tableau.zs = m_12[:, self.n :]
        merged_tableau.rs = merged_sign
        return merged_tableau
    def inverse(self) -> 'CliffordTableau':
        """Returns the inverse Clifford tableau of this tableau."""
        ret_table = CliffordTableau(num_qubits=self.n)
        # It relies on the symplectic property of Clifford tableau.
        #   [A^T C^T   [0 I   [A B      [0 I
        #    B^T D^T]   I 0]   C D]  =   I 0]
        # So the inverse is [[D^T B^T], [C^T A^T]]
        ret_table.xs[: self.n] = self.zs[self.n :].T
        ret_table.zs[: self.n] = self.zs[: self.n].T
        ret_table.xs[self.n :] = self.xs[self.n :].T
        ret_table.zs[self.n :] = self.xs[: self.n].T

        # Update the sign -- rs.
        # The idea is noting the sign of tableau `a` contributes to the composed tableau
        # `a.then(b)` directly. (While the sign in `b` needs a very complicated
        # transformation.) Refer to the `then` implementation above for details.
        # Since inverse.then(self) is the identity (all-positive signs), this
        # recovers the inverse's signs.
        ret_table.rs = ret_table.then(self).rs
        return ret_table
def __matmul__(self, second: 'CliffordTableau'):
if not isinstance(second, CliffordTableau):
return NotImplemented
return second.then(self)
    def _rowsum(self, q1, q2):
        """Implements the "rowsum" routine defined by
        Aaronson and Gottesman.

        Multiplies the stabilizer in row q1 by the stabilizer in row q2."""

        def g(x1, z1, x2, z2):
            # Exponent of i (in {-1, 0, 1}) picked up when multiplying the
            # single-qubit Paulis encoded by (x1, z1) and (x2, z2).
            if not x1 and not z1:
                return 0
            elif x1 and z1:
                return int(z2) - int(x2)
            elif x1 and not z1:
                return int(z2) * (2 * int(x2) - 1)
            else:
                return int(x2) * (1 - 2 * int(z2))

        # Accumulate the total phase exponent mod 4; each sign bit contributes 2.
        r = 2 * int(self._rs[q1]) + 2 * int(self._rs[q2])
        for j in range(self.n):
            r += g(self._xs[q2, j], self._zs[q2, j], self._xs[q1, j], self._zs[q1, j])

        r %= 4

        # Per Aaronson-Gottesman, r should be 0 or 2 at this point, so bool()
        # maps 2 -> negative sign. (NOTE(review): relies on tableau validity.)
        self._rs[q1] = bool(r)
        # The product's X/Z bit vectors are the XOR of the two rows.
        self._xs[q1, :] ^= self._xs[q2, :]
        self._zs[q1, :] ^= self._zs[q2, :]
def _row_to_dense_pauli(self, i: int) -> 'cirq.DensePauliString':
"""Return a dense Pauli string for the given row in the tableau.
Args:
i: index of the row in the tableau.
Returns:
A DensePauliString representing the row. The length of the string
is equal to the total number of qubits and each character
represents the effective single Pauli operator on that qubit. The
overall phase is captured in the coefficient.
"""
from cirq.ops.dense_pauli_string import DensePauliString
coefficient = -1 if self.rs[i] else 1
pauli_mask = ""
for k in range(self.n):
if self.xs[i, k] & (not self.zs[i, k]):
pauli_mask += "X"
elif (not self.xs[i, k]) & self.zs[i, k]:
pauli_mask += "Z"
elif self.xs[i, k] & self.zs[i, k]:
pauli_mask += "Y"
else:
pauli_mask += "I"
return DensePauliString(pauli_mask, coefficient=coefficient)
def stabilizers(self) -> List['cirq.DensePauliString']:
"""Returns the stabilizer generators of the state. These
are n operators {S_1,S_2,...,S_n} such that S_i |psi> = |psi>"""
return [self._row_to_dense_pauli(i) for i in range(self.n, 2 * self.n)]
def destabilizers(self) -> List['cirq.DensePauliString']:
"""Returns the destabilizer generators of the state. These
are n operators {S_1,S_2,...,S_n} such that along with the stabilizer
generators above generate the full Pauli group on n qubits."""
return [self._row_to_dense_pauli(i) for i in range(0, self.n)]
    def _measure(self, q, prng: np.random.RandomState) -> int:
        """Performs a projective measurement on the q'th qubit.

        Returns: the result (0 or 1) of the measurement.
        """
        # If no stabilizer row has an X component on qubit q, Z_q commutes
        # with every stabilizer and the outcome is deterministic.
        is_commuting = True
        for i in range(self.n, 2 * self.n):
            if self.xs[i, q]:
                p = i  # first stabilizer row anti-commuting with Z_q
                is_commuting = False
                break

        if is_commuting:
            # Deterministic case: clear the scratch row (index 2n) and fold in
            # each stabilizer whose destabilizer partner has X on q; the
            # scratch row's sign bit is then the measurement result.
            self._xs[2 * self.n, :] = False
            self._zs[2 * self.n, :] = False
            self._rs[2 * self.n] = False
            for i in range(self.n):
                if self.xs[i, q]:
                    self._rowsum(2 * self.n, self.n + i)
            return int(self._rs[2 * self.n])

        # Random case: make all other rows with X on q commute with Z_q by
        # multiplying row p into them.
        for i in range(2 * self.n):
            if i != p and self.xs[i, q]:
                self._rowsum(i, p)

        # Row p's old contents become the new destabilizer partner...
        self.xs[p - self.n, :] = self.xs[p, :].copy()
        self.zs[p - self.n, :] = self.zs[p, :].copy()
        self.rs[p - self.n] = self.rs[p]

        # ...and row p itself becomes +/- Z_q with a uniformly random sign,
        # which is the measurement outcome.
        self.xs[p, :] = False
        self.zs[p, :] = False
        self.zs[p, q] = True
        self.rs[p] = bool(prng.randint(2))
        return int(self.rs[p])
    def apply_x(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Applies X**exponent (half-integer exponent) to the qubit column `axis`."""
        if exponent % 2 == 0:
            # X**0 / X**2 act as the identity on the tableau.
            return
        if exponent % 0.5 != 0.0:
            raise ValueError('X exponent must be half integer')  # coverage: ignore
        effective_exponent = exponent % 2
        if effective_exponent == 0.5:
            # sqrt(X): mix the Z column into X, then flip signs where both bits are set.
            self.xs[:, axis] ^= self.zs[:, axis]
            self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
        elif effective_exponent == 1:
            # X flips the sign of every row with a Z component on this qubit.
            self.rs[:] ^= self.zs[:, axis]
        elif effective_exponent == 1.5:
            # Inverse of the 0.5 case: the same two updates in reverse order.
            self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
            self.xs[:, axis] ^= self.zs[:, axis]
    def apply_y(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Applies Y**exponent (half-integer exponent) to the qubit column `axis`."""
        if exponent % 2 == 0:
            # Y**0 / Y**2 act as the identity on the tableau.
            return
        if exponent % 0.5 != 0.0:
            raise ValueError('Y exponent must be half integer')  # coverage: ignore
        effective_exponent = exponent % 2
        if effective_exponent == 0.5:
            # Flip signs for rows with X but not Z, then swap the X/Z columns.
            self.rs[:] ^= self.xs[:, axis] & (~self.zs[:, axis])
            (self.xs[:, axis], self.zs[:, axis]) = (
                self.zs[:, axis].copy(),
                self.xs[:, axis].copy(),
            )
        elif effective_exponent == 1:
            # Y flips the sign of rows with exactly one of X/Z on this qubit.
            self.rs[:] ^= self.xs[:, axis] ^ self.zs[:, axis]
        elif effective_exponent == 1.5:
            # Flip signs for rows with Z but not X, then swap the X/Z columns.
            self.rs[:] ^= ~(self.xs[:, axis]) & self.zs[:, axis]
            (self.xs[:, axis], self.zs[:, axis]) = (
                self.zs[:, axis].copy(),
                self.xs[:, axis].copy(),
            )
    def apply_z(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Applies Z**exponent (half-integer exponent) to the qubit column `axis`."""
        if exponent % 2 == 0:
            # Z**0 / Z**2 act as the identity on the tableau.
            return
        if exponent % 0.5 != 0.0:
            raise ValueError('Z exponent must be half integer')  # coverage: ignore
        effective_exponent = exponent % 2
        if effective_exponent == 0.5:
            # S gate: flip signs where both bits are set, then mix X into Z.
            self.rs[:] ^= self.xs[:, axis] & self.zs[:, axis]
            self.zs[:, axis] ^= self.xs[:, axis]
        elif effective_exponent == 1:
            # Z flips the sign of every row with an X component on this qubit.
            self.rs[:] ^= self.xs[:, axis]
        elif effective_exponent == 1.5:
            # S**-1: flip signs for rows with X but not Z, then mix X into Z.
            self.rs[:] ^= self.xs[:, axis] & (~self.zs[:, axis])
            self.zs[:, axis] ^= self.xs[:, axis]
    def apply_h(self, axis: int, exponent: float = 1, global_shift: float = 0):
        """Applies H**exponent (integer exponent) to the qubit column `axis`."""
        if exponent % 2 == 0:
            # H**2 acts as the identity on the tableau.
            return
        if exponent % 1 != 0:
            raise ValueError('H exponent must be integer')  # coverage: ignore
        # H decomposes (up to global phase) as Y**0.5 followed by X.
        self.apply_y(axis, 0.5)
        self.apply_x(axis)
    def apply_cz(
        self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
    ):
        """Applies CZ**exponent (integer exponent) between the two qubit columns."""
        if exponent % 2 == 0:
            # CZ**0 acts as the identity on the tableau.
            return
        if exponent % 1 != 0:
            raise ValueError('CZ exponent must be integer')  # coverage: ignore
        # Uses CZ = H(target) CX H(target): the X/Z column swap plus sign fix-up
        # below plays the role of the Hadamard on the target.
        (self.xs[:, target_axis], self.zs[:, target_axis]) = (
            self.zs[:, target_axis].copy(),
            self.xs[:, target_axis].copy(),
        )
        self.rs[:] ^= self.xs[:, target_axis] & self.zs[:, target_axis]
        # Middle section: the CX sign/bit update (identical to apply_cx).
        self.rs[:] ^= (
            self.xs[:, control_axis]
            & self.zs[:, target_axis]
            & (~(self.xs[:, target_axis] ^ self.zs[:, control_axis]))
        )
        self.xs[:, target_axis] ^= self.xs[:, control_axis]
        self.zs[:, control_axis] ^= self.zs[:, target_axis]
        # Undo the H conjugation on the target column.
        (self.xs[:, target_axis], self.zs[:, target_axis]) = (
            self.zs[:, target_axis].copy(),
            self.xs[:, target_axis].copy(),
        )
        self.rs[:] ^= self.xs[:, target_axis] & self.zs[:, target_axis]
    def apply_cx(
        self, control_axis: int, target_axis: int, exponent: float = 1, global_shift: float = 0
    ):
        """Applies CNOT**exponent (integer exponent) with the given control/target columns."""
        if exponent % 2 == 0:
            # CX**0 acts as the identity on the tableau.
            return
        if exponent % 1 != 0:
            raise ValueError('CX exponent must be integer')  # coverage: ignore
        # Sign rule: a row flips sign when it has X on the control and Z on the
        # target, unless exactly one of X(target)/Z(control) is also set.
        self.rs[:] ^= (
            self.xs[:, control_axis]
            & self.zs[:, target_axis]
            & (~(self.xs[:, target_axis] ^ self.zs[:, control_axis]))
        )
        # Propagate X from control to target and Z from target to control.
        self.xs[:, target_axis] ^= self.xs[:, control_axis]
        self.zs[:, control_axis] ^= self.zs[:, target_axis]
    def apply_global_phase(self, coefficient: linear_dict.Scalar):
        # A stabilizer tableau does not track global phase, so this is a no-op.
        pass
| |
# vim: set et ts=4 sw=4 fileencoding=utf-8:
'''
tests.test_yalp_grok
====================
'''
import os
import unittest
from yalp_grok import grok_match
class TestOnePattern(unittest.TestCase):
    '''Tests that exercise a single grok pattern at a time.'''

    def test_int_match(self):
        result = grok_match('1024', '%{INT:test_int}')
        self.assertIsNotNone(result)
        self.assertEqual(result['test_int'], '1024')

    def test_number_match(self):
        result = grok_match('1024', '%{NUMBER:test_num}')
        self.assertIsNotNone(result)
        self.assertEqual(result['test_num'], '1024')

    def test_word_match(self):
        sample = 'garyelephant '
        result = grok_match(sample, '%{WORD:name} ')
        self.assertIsNotNone(result)
        self.assertEqual(result['name'], sample.strip())

    def test_ip_match(self):
        sample = '192.168.1.1'
        result = grok_match(sample, '%{IP:ip}')
        self.assertIsNotNone(result)
        self.assertEqual(result['ip'], sample.strip())

    def test_host_match(self):
        sample = 'github.com'
        result = grok_match(sample, '%{HOST:website}')
        self.assertIsNotNone(result)
        self.assertEqual(result['website'], sample.strip())

    def test_timestamp_iso8601_match(self):
        sample = '1989-11-04 05:33:02+0800'
        result = grok_match(sample, '%{TIMESTAMP_ISO8601:ts}')
        self.assertIsNotNone(result)
        self.assertEqual(result['ts'], sample.strip())

    def test_missing_variable_name(self):
        '''
        test_missing_variable_name

        An empty dict is returned because no variable name is set;
        compare "%{WORD}" with "%{WORD:variable_name}".
        '''
        result = grok_match('github', '%{WORD}')
        self.assertIsNotNone(result)
        self.assertEqual(result, {})

    def test_no_match(self):
        # A plain word cannot match NUMBER, so grok_match returns None.
        self.assertIsNone(grok_match('github', '%{NUMBER:test_num}'))
class TestMutiplePatterns(unittest.TestCase):
    '''
    Test matching more complex patterns composed of several grok expressions.

    (The class name keeps its historical misspelling so existing test
    selections keep working.)
    '''

    def test_multiple_patterns(self):
        text = 'gary 25 "never quit"'
        pat = '%{WORD:name} %{INT:age} %{QUOTEDSTRING:motto}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['name'], 'gary')
        self.assertEqual(match['age'], '25')
        self.assertEqual(match['motto'], '"never quit"')

    def test_missing_variable_names(self):
        # Without variable names, nothing is captured and an empty dict results.
        text = 'gary 25 "never quit"'
        pat = '%{WORD} %{INT} %{QUOTEDSTRING}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match, {})

    def test_not_match(self):
        # "male" is not an INT, so the whole pattern fails to match.
        text = 'gary male "never quit"'
        pat = '%{WORD:name} %{INT:age} %{QUOTEDSTRING:motto}'
        match = grok_match(text, pat)
        self.assertIsNone(match)

    def test_qs(self):
        text = 'gary "25" "never quit" "blah"'
        pat = '%{WORD:name} "%{INT:age}" %{QS:motto} %{QS:blah}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['name'], 'gary')
        self.assertEqual(match['age'], '25')
        self.assertEqual(match['motto'], '"never quit"')
        # Previously unchecked capture: verify the second QS is captured too.
        self.assertEqual(match['blah'], '"blah"')

    def test_nginx_log_match(self):
        text = (
            'edge.v.iask.com.edge.sinastorage.com 14.18.243.65 6.032s - [21/Jul/2014:16:00:02 +0800]'
            ' "GET /edge.v.iask.com/125880034.hlv HTTP/1.0" 200 70528990 "-"'
            ' "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"'
        )
        # Raw string: '\[' is an invalid escape sequence in a normal string
        # literal and raises SyntaxWarning on modern Python.
        pat = (
            r'%{HOST:host} %{IP:client_ip} %{NUMBER:delay}s - \[%{HTTPDATE:time_stamp}\]'
            ' "%{WORD:verb} %{URIPATHPARAM:uri_path} HTTP/%{NUMBER:http_ver}" %{INT:http_status} %{INT:bytes} %{QS:referrer}'
            ' %{QS:agent}'
        )
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['host'], 'edge.v.iask.com.edge.sinastorage.com')
        self.assertEqual(match['client_ip'], '14.18.243.65')
        self.assertEqual(match['delay'], '6.032')
        self.assertEqual(match['time_stamp'], '21/Jul/2014:16:00:02 +0800')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['uri_path'], '/edge.v.iask.com/125880034.hlv')
        self.assertEqual(match['http_ver'], '1.0')
        self.assertEqual(match['http_status'], '200')
        self.assertEqual(match['bytes'], '70528990')
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'], '"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"')

    def test_compound_matches(self):
        text = (
            '127.0.0.1 - - [15/Sep/2015:13:41:35 -0400] "GET /index.html '
            'HTTP/1.1" 502 352 "-" '
            '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"'
        )
        pat = '%{COMBINEDAPACHELOG}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['clientip'], '127.0.0.1')
        self.assertEqual(match['ident'], '-')
        self.assertEqual(match['auth'], '-')
        self.assertEqual(match['timestamp'], '15/Sep/2015:13:41:35 -0400')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['request'], '/index.html')
        self.assertEqual(match['httpversion'], '1.1')
        self.assertEqual(match['response'], '502')
        self.assertEqual(match['bytes'], '352')
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'], '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"')
class TestCustomPatterns(unittest.TestCase):
    '''Tests for user-supplied (custom) grok patterns.'''

    def test_custom_patterns(self):
        # Custom pattern supplied directly as a dict.
        extra = {'ID': '%{WORD}-%{INT}'}
        line = 'Beijing-1104,gary 25 "never quit"'
        pattern = '%{ID:user_id},%{WORD:name} %{INT:age} %{QUOTEDSTRING:motto}'
        result = grok_match(line, pattern, custom_patterns=extra)
        self.assertIsNotNone(result)
        self.assertEqual(result['user_id'], 'Beijing-1104')
        self.assertEqual(result['name'], 'gary')
        self.assertEqual(result['age'], '25')
        self.assertEqual(result['motto'], '"never quit"')

    def test_custom_pat_files(self):
        # Custom patterns loaded from files under the test_patterns directory.
        pattern_dir = os.path.join(os.path.dirname(__file__), 'test_patterns')
        line = 'Beijing-1104,gary 25 "never quit"'
        pattern = '%{ID:user_id},%{WORD:name} %{INT:age} %{QUOTEDSTRING:motto}'
        result = grok_match(line, pattern, custom_patterns_dir=pattern_dir)
        self.assertIsNotNone(result)
        self.assertEqual(result['user_id'], 'Beijing-1104')
        self.assertEqual(result['name'], 'gary')
        self.assertEqual(result['age'], '25')
        self.assertEqual(result['motto'], '"never quit"')
class TestAutoMap(unittest.TestCase):
    '''
    Test type casting with the auto-generated type map (auto_map=True):
    INT/NONNEGINT captures become ints, NUMBER captures become floats.
    '''

    def test_nginx_log_match(self):
        text = (
            'edge.v.iask.com.edge.sinastorage.com 14.18.243.65 6.032s - [21/Jul/2014:16:00:02 +0800]'
            ' "GET /edge.v.iask.com/125880034.hlv HTTP/1.0" 200 70528990 "-"'
            ' "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"'
        )
        # Raw string: '\[' is an invalid escape sequence in a normal string
        # literal and raises SyntaxWarning on modern Python.
        pat = (
            r'%{HOST:host} %{IP:client_ip} %{NUMBER:delay}s - \[%{HTTPDATE:time_stamp}\]'
            ' "%{WORD:verb} %{URIPATHPARAM:uri_path} HTTP/%{NUMBER:http_ver}" %{INT:http_status} %{INT:bytes} %{QS:referrer}'
            ' %{QS:agent}'
        )
        match = grok_match(text, pat, auto_map=True)
        self.assertIsNotNone(match)
        self.assertEqual(match['host'], 'edge.v.iask.com.edge.sinastorage.com')
        self.assertEqual(match['client_ip'], '14.18.243.65')
        self.assertEqual(match['delay'], 6.032)
        self.assertEqual(match['time_stamp'], '21/Jul/2014:16:00:02 +0800')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['uri_path'], '/edge.v.iask.com/125880034.hlv')
        self.assertEqual(match['http_ver'], 1.0)
        self.assertEqual(match['http_status'], 200)
        self.assertEqual(match['bytes'], 70528990)
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'],
                         '"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"')

    def test_compound_matches(self):
        text = (
            '127.0.0.1 - - [15/Sep/2015:13:41:35 -0400] "GET /index.html '
            'HTTP/1.1" 502 352 "-" '
            '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"'
        )
        pat = '%{COMBINEDAPACHELOG}'
        match = grok_match(text, pat, auto_map=True)
        self.assertIsNotNone(match)
        self.assertEqual(match['clientip'], '127.0.0.1')
        self.assertEqual(match['ident'], '-')
        self.assertEqual(match['auth'], '-')
        self.assertEqual(match['timestamp'], '15/Sep/2015:13:41:35 -0400')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['request'], '/index.html')
        self.assertEqual(match['httpversion'], 1.1)
        self.assertEqual(match['response'], 502)
        self.assertEqual(match['bytes'], 352)
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'], '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"')

    def test_non_neg_int_match(self):
        text = '3245521'
        pat = '%{NONNEGINT:test_int}'
        match = grok_match(text, pat, auto_map=True)
        self.assertIsNotNone(match)
        self.assertEqual(match['test_int'], 3245521)

    def test_syslog_match(self):
        text = (
            '2016-03-25T05:55:32 <3245521.83838> host kernel: [1858.738417] [<ffffffff811e6c31>] SyS_ioctl+0x81/0xa0'
        )
        pat = '%{SYSLOGLINE}'
        match = grok_match(text, pat, auto_map=True)
        self.assertEqual(match['facility'], 3245521)
        # pid is an optional capture and absent from this line.
        self.assertEqual(match['pid'], None)
        self.assertEqual(match['priority'], 83838)
class TestDefinedType(unittest.TestCase):
    '''
    Test explicit type conversion declared in the pattern itself,
    e.g. "%{INT:age:int}" or via typed custom pattern files.
    '''

    def test_syslog_match(self):
        pats_dir = os.path.join(os.path.dirname(__file__), 'test_patterns')
        text = (
            '2016-03-25T05:55:32 <3245521.83838> host kernel: [1858.738417] [<ffffffff811e6c31>] SyS_ioctl+0x81/0xa0'
        )
        pat = '%{TYPEDSYSLOGLINE}'
        match = grok_match(text, pat, custom_patterns_dir=pats_dir)
        self.assertEqual(match['facility'], 3245521)
        # pid is an optional capture and absent from this line.
        self.assertEqual(match['pid'], None)
        self.assertEqual(match['priority'], 83838)

    def test_non_neg_int_match(self):
        text = '3245521'
        pat = '%{NONNEGINT:test_int:int}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['test_int'], 3245521)

    def test_conversion_failure(self):
        # An un-convertible value falls back to the original string.
        text = 'not_a_int'
        pat = '%{WORD:test_str:int}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['test_str'], 'not_a_int')

    def test_conversion_failure_float(self):
        text = 'not_a_float'
        pat = '%{WORD:test_str:float}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['test_str'], 'not_a_float')

    def test_unsupported_type(self):
        # An unknown type name leaves the captured string untouched.
        text = 'unknown_type'
        pat = '%{WORD:test_str:decimal}'
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['test_str'], 'unknown_type')

    def test_compound_matches(self):
        pats_dir = os.path.join(os.path.dirname(__file__), 'test_patterns')
        text = (
            '127.0.0.1 - - [15/Sep/2015:13:41:35 -0400] "GET /index.html '
            'HTTP/1.1" 502 352 "-" '
            '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"'
        )
        pat = '%{TYPEDCOMBINEDAPACHELOG}'
        match = grok_match(text, pat, custom_patterns_dir=pats_dir)
        self.assertIsNotNone(match)
        self.assertEqual(match['clientip'], '127.0.0.1')
        self.assertEqual(match['ident'], '-')
        self.assertEqual(match['auth'], '-')
        self.assertEqual(match['timestamp'], '15/Sep/2015:13:41:35 -0400')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['request'], '/index.html')
        self.assertEqual(match['httpversion'], 1.1)
        self.assertEqual(match['response'], 502)
        self.assertEqual(match['bytes'], 352)
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'], '"Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"')

    def test_nginx_log_match(self):
        text = (
            'edge.v.iask.com.edge.sinastorage.com 14.18.243.65 6.032s - [21/Jul/2014:16:00:02 +0800]'
            ' "GET /edge.v.iask.com/125880034.hlv HTTP/1.0" 200 70528990 "-"'
            ' "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"'
        )
        # Raw string: '\[' is an invalid escape sequence in a normal string
        # literal and raises SyntaxWarning on modern Python.
        pat = (
            r'%{HOST:host} %{IP:client_ip} %{NUMBER:delay:float}s - \[%{HTTPDATE:time_stamp}\]'
            ' "%{WORD:verb} %{URIPATHPARAM:uri_path} HTTP/%{NUMBER:http_ver:float}" %{INT:http_status:int} %{INT:bytes:int} %{QS:referrer}'
            ' %{QS:agent}'
        )
        match = grok_match(text, pat)
        self.assertIsNotNone(match)
        self.assertEqual(match['host'], 'edge.v.iask.com.edge.sinastorage.com')
        self.assertEqual(match['client_ip'], '14.18.243.65')
        self.assertEqual(match['delay'], 6.032)
        self.assertEqual(match['time_stamp'], '21/Jul/2014:16:00:02 +0800')
        self.assertEqual(match['verb'], 'GET')
        self.assertEqual(match['uri_path'], '/edge.v.iask.com/125880034.hlv')
        self.assertEqual(match['http_ver'], 1.0)
        self.assertEqual(match['http_status'], 200)
        self.assertEqual(match['bytes'], 70528990)
        self.assertEqual(match['referrer'], '"-"')
        self.assertEqual(match['agent'],
                         '"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36"')
| |
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script will check out llvm and clang, and then package the results up
to a tgz file."""
import argparse
import fnmatch
import itertools
import os
import shutil
import subprocess
import sys
import tarfile
# Path constants. All paths are relative to this script's own directory.
THIS_DIR = os.path.dirname(__file__)
THIRD_PARTY_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'third_party')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
# Directories used by the two-stage (bootstrap) build done by update.py.
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
                                          'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
# Stamp file naming the clang revision that was built (written by update.py).
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
def Tee(output, logfile):
  """Writes `output` to both stdout and `logfile`, like the Unix `tee` tool."""
  logfile.write(output)
  print output,
def TeeCmd(cmd, logfile, fail_hard=True):
  """Runs cmd and writes the output to both stdout and logfile.

  If fail_hard is true, a non-zero exit code from cmd terminates this
  script with exit status 1.
  """
  # Reading from PIPE can deadlock if one buffer is full but we wait on a
  # different one. To work around this, pipe the subprocess's stderr to
  # its stdout buffer and don't give it a stdin.
  # shell=True is required in cmd.exe since depot_tools has an svn.bat, and
  # bat files only work with shell=True set.
  proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
                          stdin=open(os.devnull), stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  # Stream the child's combined output line by line until it exits.
  for line in iter(proc.stdout.readline,''):
    Tee(line, logfile)
    if proc.poll() is not None:
      break
  exit_code = proc.wait()
  if exit_code != 0 and fail_hard:
    print 'Failed:', cmd
    sys.exit(1)
def PrintTarProgress(tarinfo):
  """tarfile `filter` callback: logs each added entry and keeps it unchanged."""
  print 'Adding', tarinfo.name
  return tarinfo
def main():
if sys.platform == 'win32':
try:
subprocess.check_output(['grep', '--help'], shell=True)
except subprocess.CalledProcessError:
print 'Add gnuwin32 to your PATH, then try again.'
return 1
parser = argparse.ArgumentParser(description='build and package clang')
parser.add_argument('--gcc-toolchain',
help="the prefix for the GCC version used for building. "
"For /opt/foo/bin/gcc, pass "
"'--gcc-toolchain '/opt/foo'")
args = parser.parse_args()
with open('buildlog.txt', 'w') as log:
Tee('Diff in llvm:\n', log)
TeeCmd(['svn', 'stat', LLVM_DIR], log, fail_hard=False)
TeeCmd(['svn', 'diff', LLVM_DIR], log, fail_hard=False)
Tee('Diff in llvm/tools/clang:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'tools', 'clang')],
log, fail_hard=False)
# TODO(thakis): compiler-rt is in projects/compiler-rt on Windows but
# llvm/compiler-rt elsewhere. So this diff call is currently only right on
# Windows.
Tee('Diff in llvm/compiler-rt:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'compiler-rt')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxx:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxx')],
log, fail_hard=False)
Tee('Diff in llvm/projects/libcxxabi:\n', log)
TeeCmd(['svn', 'stat', os.path.join(LLVM_DIR, 'projects', 'libcxxabi')],
log, fail_hard=False)
TeeCmd(['svn', 'diff', os.path.join(LLVM_DIR, 'projects', 'libcxxabi')],
log, fail_hard=False)
Tee('Starting build\n', log)
# Do a clobber build.
shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)
build_cmd = [sys.executable, os.path.join(THIS_DIR, 'update.py'),
'--bootstrap', '--force-local-build', '--run-tests']
if args.gcc_toolchain is not None:
build_cmd.extend(['--gcc-toolchain', args.gcc_toolchain])
TeeCmd(build_cmd, log)
stamp = open(STAMP_FILE).read().rstrip()
pdir = 'clang-' + stamp
print pdir
shutil.rmtree(pdir, ignore_errors=True)
# Copy a whitelist of files to the directory we're going to tar up.
# This supports the same patterns that the fnmatch module understands.
exe_ext = '.exe' if sys.platform == 'win32' else ''
want = ['bin/llvm-symbolizer' + exe_ext,
'lib/clang/*/asan_blacklist.txt',
'lib/clang/*/cfi_blacklist.txt',
# Copy built-in headers (lib/clang/3.x.y/include).
'lib/clang/*/include/*',
]
if sys.platform == 'win32':
want.append('bin/clang-cl.exe')
want.append('bin/lld-link.exe')
else:
so_ext = 'dylib' if sys.platform == 'darwin' else 'so'
want.extend(['bin/clang',
'lib/libFindBadConstructs.' + so_ext,
'lib/libBlinkGCPlugin.' + so_ext,
])
if sys.platform == 'darwin':
want.extend(['bin/libc++.1.dylib',
# Copy only the OSX (ASan and profile) and iossim (ASan)
# runtime libraries:
'lib/clang/*/lib/darwin/*asan_osx*',
'lib/clang/*/lib/darwin/*asan_iossim*',
'lib/clang/*/lib/darwin/*profile_osx*',
])
elif sys.platform.startswith('linux'):
# Copy only
# lib/clang/*/lib/linux/libclang_rt.{[atm]san,san,ubsan,profile}-*.a ,
# but not dfsan.
want.extend(['lib/clang/*/lib/linux/*[atm]san*',
'lib/clang/*/lib/linux/*ubsan*',
'lib/clang/*/lib/linux/*libclang_rt.san*',
'lib/clang/*/lib/linux/*profile*',
'lib/clang/*/msan_blacklist.txt',
])
elif sys.platform == 'win32':
want.extend(['lib/clang/*/lib/windows/clang_rt.asan*.dll',
'lib/clang/*/lib/windows/clang_rt.asan*.lib',
'lib/clang/*/include_sanitizer/*',
])
if args.gcc_toolchain is not None:
# Copy the stdlibc++.so.6 we linked Clang against so it can run.
want.append('lib/libstdc++.so.6')
for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
# root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
rel_root = root[len(LLVM_RELEASE_DIR)+1:]
rel_files = [os.path.join(rel_root, f) for f in files]
wanted_files = list(set(itertools.chain.from_iterable(
fnmatch.filter(rel_files, p) for p in want)))
if wanted_files:
# Guaranteed to not yet exist at this point:
os.makedirs(os.path.join(pdir, rel_root))
for f in wanted_files:
src = os.path.join(LLVM_RELEASE_DIR, f)
dest = os.path.join(pdir, f)
shutil.copy(src, dest)
# Strip libraries.
if sys.platform == 'darwin' and f.endswith('.dylib'):
# Fix LC_ID_DYLIB for the ASan dynamic libraries to be relative to
# @executable_path.
# TODO(glider): this is transitional. We'll need to fix the dylib
# name either in our build system, or in Clang. See also
# http://crbug.com/344836.
subprocess.call(['install_name_tool', '-id',
'@executable_path/' + os.path.basename(dest), dest])
subprocess.call(['strip', '-x', dest])
elif (sys.platform.startswith('linux') and
os.path.splitext(f)[1] in ['.so', '.a']):
subprocess.call(['strip', '-g', dest])
# Set up symlinks.
if sys.platform != 'win32':
os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))
if sys.platform == 'darwin':
os.symlink('libc++.1.dylib', os.path.join(pdir, 'bin', 'libc++.dylib'))
# Also copy libc++ headers.
shutil.copytree(os.path.join(LLVM_BOOTSTRAP_INSTALL_DIR, 'include', 'c++'),
os.path.join(pdir, 'include', 'c++'))
# Copy buildlog over.
shutil.copy('buildlog.txt', pdir)
# Create archive.
tar_entries = ['bin', 'lib', 'buildlog.txt']
if sys.platform == 'darwin':
tar_entries += ['include']
with tarfile.open(pdir + '.tgz', 'w:gz') as tar:
for entry in tar_entries:
tar.add(os.path.join(pdir, entry), arcname=entry, filter=PrintTarProgress)
if sys.platform == 'darwin':
platform = 'Mac'
elif sys.platform == 'win32':
platform = 'Win'
else:
platform = 'Linux_x64'
print 'To upload, run:'
print ('gsutil cp -a public-read %s.tgz '
'gs://chromium-browser-clang/%s/%s.tgz') % (pdir, platform, pdir)
# Zip up gold plugin on Linux.
if sys.platform.startswith('linux'):
golddir = 'llvmgold-' + stamp
shutil.rmtree(golddir, ignore_errors=True)
os.makedirs(os.path.join(golddir, 'lib'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'lib', 'LLVMgold.so'),
os.path.join(golddir, 'lib'))
with tarfile.open(golddir + '.tgz', 'w:gz') as tar:
tar.add(os.path.join(golddir, 'lib'), arcname='lib',
filter=PrintTarProgress)
print ('gsutil cp -a public-read %s.tgz '
'gs://chromium-browser-clang/%s/%s.tgz') % (golddir, platform,
golddir)
# FIXME: Warn if the file already exists on the server.
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
import os
import os.path
import shutil
import subprocess
import sys
from glob import glob
from pocs.utils import current_time
from pocs.utils.logger import get_root_logger
from pocs.utils.config import load_config
class Webcam(object):
""" Simple module to take a picture with the webcams
This class will capture images from any webcam entry in the config file.
The capturing is done on a loop, with defaults of 255 stacked images and
a minute cadence.
Note:
All parameters are optional.
Note:
This is a port of Olivier's `SKYCAM_start_webcamloop` function
in skycam.c
Note:
TODO: The images then have their flux measured and the gain and brightness
adjusted accordingly. Images analysis is stored in the (mongo) database
Args:
webcam (dict): Config options for the camera, required.
frames (int): Number of frames to capture per image. Default 255
resolution (str): Resolution for images. Default "1600x1200"
brightness (str): Initial camera brightness. Default "50%"
gain (str): Initial camera gain. Default "50%"
delay (int): Time to wait between captures. Default 60 (seconds)
"""
def __init__(self,
webcam_config,
frames=255,
resolution="1600x1200",
brightness="50%",
gain="50%"):
self.config = load_config(config_files='peas')
self.logger = get_root_logger()
self._today_dir = None
self.webcam_dir = self.config['directories'].get('webcam', '/var/panoptes/webcams/')
assert os.path.exists(self.webcam_dir), self.logger.warning(
"Webcam directory must exist: {}".format(self.webcam_dir))
self.logger.info("Creating webcams")
# Lookup the webcams
if webcam_config is None:
err_msg = "No webcams to connect. Please check config.yaml and all appropriate ports"
self.logger.warning(err_msg)
self.webcam_config = webcam_config
self.name = self.webcam_config.get('name', 'GenericWebCam')
self.port_name = self.webcam_config.get('port').split('/')[-1]
# Command for taking pics
self.cmd = shutil.which('fswebcam')
# Defaults
self._timestamp = "%Y-%m-%d %H:%M:%S"
self._thumbnail_resolution = '240x120'
# Create the string for the params
self.base_params = "-F {} -r {} --set brightness={} --set gain={} --jpeg 100 --timestamp \"{}\" ".format(
frames, resolution, brightness, gain, self._timestamp)
self.logger.info("{} created".format(self.name))
def capture(self):
    """Capture an image from a webcam using ``fswebcam``.

    Creates (and, on day rollover, archives) the day's image directory,
    shells out to fswebcam to save a full image plus a scaled thumbnail,
    then refreshes the "latest" symlinks for both.

    Returns:
        int: the fswebcam return code when the subprocess completed, or a
        dict ``{'out_fn': <path>}`` if spawning the subprocess raised
        ``OSError`` (return shape kept for backward compatibility).
    """
    webcam = self.webcam_config
    assert isinstance(webcam, dict)

    self.logger.debug("Capturing image for {}...".format(webcam.get('name')))
    camera_name = self.port_name

    # Create the directory for storing images
    timestamp = current_time(flatten=True)
    today_dir = timestamp.split('T')[0]
    today_path = "{}/{}".format(self.webcam_dir, today_dir)

    try:
        if today_path != self._today_dir:
            # Day rollover: archive yesterday's images into a timelapse
            # before switching to the new directory.
            if self._today_dir is not None:
                self.logger.debug("Making timelapse for webcam")
                self.create_timelapse(
                    self._today_dir, out_file="{}/{}_{}.mp4".format(
                        self.webcam_dir, today_dir, self.port_name),
                    remove_after=True)

            # If today doesn't exist, make it
            if not os.path.exists(today_path):
                self.logger.debug("Making directory for day's webcam")
                os.makedirs(today_path, exist_ok=True)

            self._today_dir = today_path
    except OSError as err:
        self.logger.warning("Cannot create new dir: {} \t {}".format(today_path, err))

    # Output file names
    out_file = '{}/{}_{}.jpeg'.format(today_path, camera_name, timestamp)

    # We also create a thumbnail and always link it to the same image
    # name so that it is always current.
    thumbnail_file = '{}/tn_{}.jpeg'.format(self.webcam_dir, camera_name)

    options = self.base_params
    if 'params' in webcam:
        for opt, val in webcam.get('params').items():
            options += "--{}={}".format(opt, val)

    # Assemble all the parameters
    params = " -d {} --title \"{}\" {} --save {} --scale {} {}".format(
        webcam.get('port'),
        webcam.get('name'),
        options,
        out_file,
        self._thumbnail_resolution,
        thumbnail_file
    )

    static_out_file = ''

    # Actually call the command.
    # NOTE: This is a blocking call (within this process). See `start_capturing`
    try:
        self.logger.debug("Webcam subproccess command: {} {}".format(self.cmd, params))

        with open(os.devnull, 'w') as devnull:
            retcode = subprocess.call(self.cmd + params, shell=True,
                                      stdout=devnull, stderr=devnull)

        if retcode < 0:
            # Negative return codes mean the child was terminated by a signal.
            # FIX: previously this logged `sys.stderr` (the stream object's
            # repr) as the "Error", which carried no information.
            self.logger.warning(
                "Image captured terminated for {}. Return code: {}".format(
                    webcam.get('name'),
                    retcode
                )
            )
        else:
            self.logger.debug("Image captured for {}".format(webcam.get('name')))

            # Static files (always point to most recent capture)
            static_out_file = '{}/{}.jpeg'.format(self.webcam_dir, camera_name)
            static_tn_out_file = '{}/tn_{}.jpeg'.format(self.webcam_dir, camera_name)

            # Symlink the latest image and thumbnail
            if os.path.lexists(static_out_file):
                os.remove(static_out_file)
            os.symlink(out_file, static_out_file)

            if os.path.lexists(static_tn_out_file):
                os.remove(static_tn_out_file)
            # BUG FIX: the thumbnail symlink previously pointed at the
            # full-size image (out_file) instead of the thumbnail.
            os.symlink(thumbnail_file, static_tn_out_file)

        return retcode
    except OSError as e:
        # BUG FIX: the original format string had no placeholder (and passed
        # `file=sys.stderr` as a format kwarg), so the exception was never
        # included in the log message.
        self.logger.warning("Execution failed: {}".format(e))

    return {'out_fn': static_out_file}
def create_timelapse(self, directory, fps=12, out_file=None, remove_after=False):
    """Create a timelapse movie from a day's worth of jpeg captures.

    Args:
        directory (str): Directory containing the images (no trailing slash
            expected; see `capture`).
        fps (int): Frames per second for the movie. Default 12.
        out_file (str): Output movie path; defaults to a file named after
            the camera port inside `directory`.
        remove_after (bool): Delete the source images after encoding.
            Default False.

    Raises:
        AssertionError: If `directory` does not exist.
    """
    if not os.path.exists(directory):
        # Was `assert ..., self.logger.warning(...)`: stripped under -O and
        # the AssertionError carried no message. Check explicitly.
        msg = "Directory does not exist: {}".format(directory)
        self.logger.warning(msg)
        raise AssertionError(msg)

    ffmpeg_cmd = shutil.which('ffmpeg')

    if out_file is None:
        out_file = '{}/{}.mp4'.format(directory, self.port_name)

    # BUG FIX: the original pattern concatenated the directory and the port
    # name with no path separator ('{}{}*.jpeg'), so neither ffmpeg nor the
    # cleanup glob ever matched the captured images.
    pattern = '{}/{}*.jpeg'.format(directory, self.port_name)

    cmd = [ffmpeg_cmd, '-f', 'image2', '-r', str(fps), '-pattern_type', 'glob',
           '-i', pattern, '-c:v', 'libx264', '-pix_fmt', 'yuv420p', out_file]
    self.logger.debug("Timelapse command: {}".format(cmd))

    try:
        # BUG FIX: without check=True, subprocess.run never raises
        # CalledProcessError, so the handler below was unreachable.
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as err:
        self.logger.warning("Problem making timelapse: {}".format(err))

    if remove_after:
        self.logger.debug("Removing all images files")
        for f in glob(pattern):
            os.remove(f)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
from mxnet.test_utils import *
from mxnet.base import MXNetError
import numpy as np
import os, gzip
import pickle as pickle
import time
try:
import h5py
except ImportError:
h5py = None
import sys
from common import assertRaises
import unittest
def test_MNISTIter():
    """Smoke-test MNISTIter: one-epoch batch count and reset() determinism."""
    # prepare data
    get_mnist_ubyte()

    batch_size = 100
    train_dataiter = mx.io.MNISTIter(
        image="data/train-images-idx3-ubyte",
        label="data/train-labels-idx1-ubyte",
        data_shape=(784,),
        batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)
    # One epoch must yield exactly 60000 / batch_size batches.
    # FIX: use integer division so the expected count is an int on
    # Python 3 (previously a float, relying on 600.0 == 600).
    nbatch = 60000 // batch_size
    batch_count = 0
    for batch in train_dataiter:
        batch_count += 1
    assert(nbatch == batch_count)

    # reset() must restart the (seeded) stream deterministically: the first
    # batch's labels after a reset must equal the original first batch's.
    train_dataiter.reset()
    train_dataiter.iter_next()
    label_0 = train_dataiter.getlabel().asnumpy().flatten()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.iter_next()
    train_dataiter.reset()
    train_dataiter.iter_next()
    label_1 = train_dataiter.getlabel().asnumpy().flatten()
    assert(sum(label_0 - label_1) == 0)
def test_Cifar10Rec():
    """Iterate the CIFAR-10 RecordIO training set and verify label balance."""
    get_cifar10()
    dataiter = mx.io.ImageRecordIter(
        path_imgrec="data/cifar/train.rec",
        mean_img="data/cifar/cifar10_mean.bin",
        rand_crop=False,
        # FIX: parameter was misspelled `and_mirror`, so mirroring was not
        # actually being disabled.
        rand_mirror=False,
        shuffle=False,
        data_shape=(3, 28, 28),
        batch_size=100,
        preprocess_threads=4,
        prefetch_buffer=1)
    # CIFAR-10 train split has 5000 examples of each of the 10 classes.
    labelcount = [0 for i in range(10)]
    batchcount = 0
    for batch in dataiter:
        npdata = batch.data[0].asnumpy().flatten().sum()
        sys.stdout.flush()
        batchcount += 1
        nplabel = batch.label[0].asnumpy()
        for i in range(nplabel.shape[0]):
            labelcount[int(nplabel[i])] += 1
    for i in range(10):
        assert(labelcount[i] == 5000)
def test_NDArrayIter():
    """Check NDArrayIter batch counts and data/label alignment with 'pad'."""
    data = np.ones([1000, 2, 2])
    label = np.ones([1000, 1])
    for idx in range(1000):
        data[idx] = idx / 100
        label[idx] = idx / 100

    # Shuffled pass: 1000 samples at batch size 128 pad out to 8 batches.
    dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')
    num_batches = 0
    for _ in dataiter:
        num_batches += 1
    assert(num_batches == 8)

    # Unshuffled pass: every data row must match its label, and the pad
    # wraps to the start, so label 0 appears 124 times and the rest 100.
    dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')
    labelcount = [0] * 10
    for batch in dataiter:
        flat_labels = batch.label[0].asnumpy().flatten()
        assert((batch.data[0].asnumpy()[:, 0, 0] == flat_labels).all())
        for value in flat_labels:
            labelcount[int(value)] += 1

    for idx in range(10):
        expected = 124 if idx == 0 else 100
        assert(labelcount[idx] == expected)
def test_NDArrayIter_h5py():
    """Mirror of test_NDArrayIter, but feeding NDArrayIter h5py datasets.

    Silently a no-op when h5py is not installed (see the guarded import at
    the top of the file).
    """
    if not h5py:
        return

    data = np.ones([1000, 2, 2])
    label = np.ones([1000, 1])
    for i in range(1000):
        data[i] = i / 100
        label[i] = i / 100

    # Start from a clean scratch file; a missing file is fine.
    try:
        os.remove("ndarraytest.h5")
    except OSError:
        pass
    with h5py.File("ndarraytest.h5") as f:
        f.create_dataset("data", data=data)
        f.create_dataset("label", data=label)

        # 1000 samples at batch size 128 with 'pad' -> exactly 8 batches.
        dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, True, last_batch_handle='pad')
        batchidx = 0
        for batch in dataiter:
            batchidx += 1
        assert(batchidx == 8)

        # Unshuffled: data and labels must stay aligned; the pad wraps to
        # the start, so label 0 is counted 124 times and the others 100.
        dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, False, last_batch_handle='pad')
        labelcount = [0 for i in range(10)]
        for batch in dataiter:
            label = batch.label[0].asnumpy().flatten()
            assert((batch.data[0].asnumpy()[:,0,0] == label).all())
            for i in range(label.shape[0]):
                labelcount[int(label[i])] += 1

    # Remove the scratch file before the assertions so it is cleaned up
    # even though a failed assert below would abort the test.
    try:
        os.remove("ndarraytest.h5")
    except OSError:
        pass

    for i in range(10):
        if i == 0:
            assert(labelcount[i] == 124)
        else:
            assert(labelcount[i] == 100)
def test_NDArrayIter_csr():
    """Exercise NDArrayIter with sparse (CSR) inputs.

    Verifies that non-'discard' last_batch_handle raises for CSR inputs,
    that shuffled 'discard' iteration yields the expected batch count, and
    that 'discard' batches reproduce the dense data exactly.
    """
    # creating toy data (rnd comes from mxnet.test_utils' star import)
    num_rows = rnd.randint(5, 15)
    num_cols = rnd.randint(1, 20)
    batch_size = rnd.randint(1, num_rows)
    shape = (num_rows, num_cols)
    csr, _ = rand_sparse_ndarray(shape, 'csr')
    dns = csr.asnumpy()

    # CSRNDArray or scipy.sparse.csr_matrix with last_batch_handle not equal to 'discard' will throw NotImplementedError
    assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': csr}, dns, batch_size)
    try:
        import scipy.sparse as spsp
        train_data = spsp.csr_matrix(dns)
        assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': train_data}, dns, batch_size)
    except ImportError:
        pass

    # CSRNDArray with shuffle: 'discard' drops the ragged final batch, so
    # the batch count is the floor division of rows by batch size.
    csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size,
                                      shuffle=True, last_batch_handle='discard'))
    num_batch = 0
    for batch in csr_iter:
        num_batch += 1

    assert(num_batch == num_rows // batch_size)

    # make iterators and compare each 'discard' batch against the dense
    # source, wrapping past the end of the data where a batch overruns it.
    csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))
    begin = 0
    for batch in csr_iter:
        expected = np.zeros((batch_size, num_cols))
        end = begin + batch_size
        expected[:num_rows - begin] = dns[begin:end]
        if end > num_rows:
            expected[num_rows - begin:] = dns[0:end - num_rows]
        assert_almost_equal(batch.data[0].asnumpy(), expected)
        begin += batch_size
def test_LibSVMIter():
    """LibSVMIter checks: tiny synthetic file, real news20 dataset, and a
    malformed (negative-index) file that must raise MXNetError."""

    def check_libSVMIter_synthetic():
        # Write a 4-row libsvm data/label pair, then verify the iterator
        # densifies it correctly over a full epoch (batch_size 3 wraps).
        cwd = os.getcwd()
        data_path = os.path.join(cwd, 'data.t')
        label_path = os.path.join(cwd, 'label.t')
        with open(data_path, 'w') as fout:
            fout.write('1.0 0:0.5 2:1.2\n')
            fout.write('-2.0\n')
            fout.write('-3.0 0:0.6 1:2.4 2:1.2\n')
            fout.write('4 2:-1.2\n')

        with open(label_path, 'w') as fout:
            fout.write('1.0\n')
            fout.write('-2.0 0:0.125\n')
            fout.write('-3.0 2:1.2\n')
            fout.write('4 1:1.0 2:-1.2\n')

        data_dir = os.path.join(cwd, 'data')
        data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
                                      data_shape=(3, ), label_shape=(3, ), batch_size=3)

        # Expected dense batches: rows 0-2, then row 3 plus wrap-around.
        first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])
        second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])
        i = 0
        for batch in iter(data_train):
            expected = first.asnumpy() if i == 0 else second.asnumpy()
            assert_almost_equal(data_train.getdata().asnumpy(), expected)
            i += 1

    def check_libSVMIter_news_data():
        # Download the news20 test split and check label sanity (1..20)
        # plus per-epoch batch counts over two epochs with a reset between.
        news_metadata = {
            'name': 'news20.t',
            'origin_name': 'news20.t.bz2',
            'url': "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/news20.t.bz2",
            'feature_dim': 62060,
            'num_classes': 20,
            'num_examples': 3993,
        }
        batch_size = 33
        num_examples = news_metadata['num_examples']
        data_dir = os.path.join(os.getcwd(), 'data')
        get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],
                     news_metadata['origin_name'])
        path = os.path.join(data_dir, news_metadata['name'])
        data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
                                      batch_size=batch_size)
        for epoch in range(2):
            num_batches = 0
            for batch in data_train:
                # check the range of labels
                assert(np.sum(batch.label[0].asnumpy() > 20) == 0)
                assert(np.sum(batch.label[0].asnumpy() <= 0) == 0)
                num_batches += 1
            expected_num_batches = num_examples / batch_size
            assert(num_batches == int(expected_num_batches)), num_batches
            data_train.reset()

    def check_libSVMIter_exception():
        cwd = os.getcwd()
        data_path = os.path.join(cwd, 'data.t')
        label_path = os.path.join(cwd, 'label.t')
        with open(data_path, 'w') as fout:
            fout.write('1.0 0:0.5 2:1.2\n')
            fout.write('-2.0\n')
            # Below line has a neg indice. Should throw an exception
            fout.write('-3.0 -1:0.6 1:2.4 2:1.2\n')
            fout.write('4 2:-1.2\n')

        with open(label_path, 'w') as fout:
            fout.write('1.0\n')
            fout.write('-2.0 0:0.125\n')
            fout.write('-3.0 2:1.2\n')
            fout.write('4 1:1.0 2:-1.2\n')
        data_dir = os.path.join(cwd, 'data')
        data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
                                      data_shape=(3, ), label_shape=(3, ), batch_size=3)
        # Force a full read so the backend parses the bad row.
        for batch in iter(data_train):
            data_train.get_data().asnumpy()

    check_libSVMIter_synthetic()
    check_libSVMIter_news_data()
    assertRaises(MXNetError, check_libSVMIter_exception)
def test_DataBatch():
    """Check DataBatch's repr for data-only and data+label batches.

    The `L?` in the patterns tolerates Python 2 long-int shape reprs.
    """
    from nose.tools import ok_
    from mxnet.io import DataBatch
    import re
    batch = DataBatch(data=[mx.nd.ones((2, 3))])
    # FIX: use raw strings for the regex patterns; '\[' and '\(' in plain
    # string literals are invalid escape sequences (DeprecationWarning on
    # Python 3.6+, future SyntaxError).
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)))
    batch = DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones((7, 8))], label=[mx.nd.ones((4, 5))])
    ok_(re.match(r'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)))
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826")
def test_CSVIter():
    """Round-trip a synthetic all-ones CSV through CSVIter.

    Currently skipped (flaky); see the tracking issue in the decorator.
    """
    def check_CSVIter_synthetic():
        cwd = os.getcwd()
        data_path = os.path.join(cwd, 'data.t')
        label_path = os.path.join(cwd, 'label.t')
        # 1000 rows of 64 ones for data, 1000 zero labels.
        with open(data_path, 'w') as fout:
            for i in range(1000):
                fout.write(','.join(['1' for _ in range(8*8)]) + '\n')
        with open(label_path, 'w') as fout:
            for i in range(1000):
                fout.write('0\n')

        data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),
                                   label_csv=label_path, batch_size=100)
        expected = mx.nd.ones((100, 8, 8))
        for batch in iter(data_train):
            assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())

    check_CSVIter_synthetic()
if __name__ == "__main__":
    # Manual test driver (no test framework runner needed).
    test_NDArrayIter()
    # The h5py variant only runs when h5py imported successfully.
    if h5py:
        test_NDArrayIter_h5py()
    test_MNISTIter()
    test_Cifar10Rec()
    test_LibSVMIter()
    test_NDArrayIter_csr()
    # NOTE(review): test_CSVIter is decorated with @unittest.skip, so this
    # direct call raises unittest.SkipTest — confirm that is intended.
    test_CSVIter()
| |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Import ArcPy site-package and os modules
#
import arcpy
import os
import sys, traceback
import time
from datetime import datetime
from datetime import date
from decimal import *
# Running count of lines read from the geonames file; doubles as the
# feature count used in progress and error messages.
# NOTE: this script uses the Python 2-only `<>` operator below, so it must
# run under the Python 2 interpreter shipped with ArcGIS Desktop.
lineCount = 0

## ======================================================
## Read in parameters
## ======================================================
featClass = arcpy.GetParameterAsText(0)
geonameFilePath = arcpy.GetParameterAsText(1)
countryCodeTable = arcpy.GetParameterAsText(2)
admin1CodeTable = arcpy.GetParameterAsText(3)
featureCodeTable = arcpy.GetParameterAsText(4)

# Upper-cased field names of the target feature class; used to skip file
# columns that have no matching field.
desc = arcpy.Describe(featClass)
fieldNameList = [field.name.upper() for field in desc.Fields]

## ======================================================
## Read geoname file and insert feature
## ======================================================
try:
    # Defines how many features have to be inserted before it will
    # report status
    reportNum = 10000

    # Pre-declare cursor/row handles so the finally block can delete
    # whichever ones were actually created.
    row, rows, pntObj = None, None, None
    rowCC, rowsCC = None, None
    rowADM1, rowsADM1 = None, None
    rowFeatCode, rowsFeatCode = None, None

    pntObj = arcpy.Point()

    # ======================================================
    # Create CountryCode/CountryName dictionary
    # ======================================================
    arcpy.AddMessage("- Reading CountryCode table " + countryCodeTable + "...")

    # Create dictionary
    countryCodeDict = {}

    # Create search cursor
    rowsCC = arcpy.SearchCursor(countryCodeTable)

    # Loop through cursor and extract values into dictionary
    for rowCC in rowsCC:
        countryCodeDict[rowCC.Code] = rowCC.Name

    # ======================================================
    # Create Primary Administrative Code/Administrative Name dictionary and
    # Primary Administrative Code/Administrative Class dictionary
    # ======================================================
    arcpy.AddMessage("- Reading Primary Administrative Code table " + admin1CodeTable + "...")

    # Create dictionaries
    admin1CodeDict = {}
    admin1ClassDict = {}

    # Create search cursor
    rowsADM1 = arcpy.SearchCursor(admin1CodeTable)

    # Loop through cursor and extract values into dictionary
    for rowADM1 in rowsADM1:
        admin1CodeDict[rowADM1.Code] = rowADM1.Name
        admin1ClassDict[rowADM1.Code] = rowADM1.AdminDivisionClass

    # ======================================================
    # Create Feature Code/Feature Name dictionary
    # ======================================================
    arcpy.AddMessage("- Reading Feature Code table " + featureCodeTable + "...")

    # Create dictionary
    featCodeDict = {}

    # Create search cursor
    rowsFeatCode = arcpy.SearchCursor(featureCodeTable)

    # Loop through cursor and extract values into dictionary
    for rowFeatCode in rowsFeatCode:
        featCodeDict[rowFeatCode.Code] = rowFeatCode.Name

    # ======================================================
    # Open geonames text file
    # ======================================================
    # Open geoname file
    arcpy.AddMessage("- Opening geoname file " + geonameFilePath + "...")
    fileGeoname = open(geonameFilePath, "r")

    # Get list of fields in geoname file (header row only; the break exits
    # after the first line, leaving the file handle positioned on data rows).
    for lineGeoname in fileGeoname:
        lineCount = lineCount + 1
        fileFieldValueList = lineGeoname.split("\t")
        fileFieldList = [field.rstrip('\n').upper() for field in fileFieldValueList]
        break

    # Find any fields in geonames txt file which do not exist in the feature class
    fieldsExtra = list(set(fileFieldList) - set(fieldNameList))
    if len(fieldsExtra) > 0:
        arcpy.AddWarning("Warning: The following fields exist in geonames " \
                         "file, but do not exist in feature class; these " \
                         "fields will not be populated: " + str(fieldsExtra))

    # ======================================================
    # Loop through geonames file and insert features
    # ======================================================
    # Create insert cursor
    rows = arcpy.InsertCursor(featClass)

    arcpy.AddMessage("- Creating features (report progress every " + str(reportNum) + " features)...")
    for lineGeoname in fileGeoname:
        lineCount = lineCount + 1

        # Reset variables
        lat = ''
        long_ = ''
        ufi = ''
        uni = ''
        adm1 = ''
        countryCode1 = ''
        placeName = ''
        featDSGCode = ''
        adm1NameAll = ''
        adm1Name = ''
        adm1ClassAll = ''
        adm1Class = ''
        mgrs = ''
        userValue = ''

        # Geoname file is Tab delimited so split line by Tab
        fileFieldValueList = lineGeoname.split("\t")

        # Create new row
        row = rows.newRow()

        # Populate feature class fields from text file
        fieldIndex = 0
        for fieldValue in fileFieldValueList:
            # Remove any trailing newline character from field name
            # and field value
            fieldName = fileFieldList[fieldIndex].rstrip('\n')
            fieldValue = fieldValue.rstrip('\n')

            if fieldValue <> '':
                # Format date value
                if fieldName.upper() in ["MODIFY_DATE", "NM_MODIFY_DATE"]:
                    fieldValue = fieldValue + " 00:00:00 AM"

                # CC1 may hold a comma-separated list; only the first
                # country code is used.
                if fieldName.upper() == "CC1":
                    country1List = fieldValue.split(",")
                    countryCode1 = country1List[0]
                    row.setValue("COUNTRYCODE1", countryCode1)

                    # Populate country name field
                    countryName = countryCodeDict.get(countryCode1)
                    if countryName is None:
                        row.setNull("COUNTRYNAME1")
                    else:
                        row.setValue("COUNTRYNAME1", countryName)

                # Extract Latitude and Longitude values to create
                # point geometry
                if fieldName.upper() == "LAT":
                    lat = float(fieldValue)
                if fieldName.upper() == "LONG":
                    long_ = float(fieldValue)
                if fieldName.upper() == "UFI":
                    ufi = fieldValue
                if fieldName.upper() == "UNI":
                    uni = fieldValue
                if fieldName.upper() == "ADM1":
                    adm1 = fieldValue
                if fieldName.upper() == "FULL_NAME_ND_RO":
                    placeName = fieldValue
                if fieldName.upper() == "DSG":
                    featDSGCode = fieldValue
                if fieldName.upper() == "MGRS":
                    mgrs = fieldValue

                # Populate geodatabase field with text file value
                try:
                    if fieldName.upper() not in ["LAT", "LONG"]:
                        if fieldName in fieldNameList:
                            row.setValue(fieldName, fieldValue)
                except:
                    arcpy.AddWarning("Warning: exception setting field: " \
                                     + fieldName + " to value " + fieldValue + \
                                     " (Input geoname file row number: " \
                                     + str(lineCount) + ")")
            else:
                if fieldName in fieldNameList:
                    row.setNull(fieldName)

            fieldIndex = fieldIndex + 1

        # Set CountryCode/First-order Administrative Class field value
        if countryCode1 <> '' and adm1 <> '':
            row.setValue("ADM1CODE", countryCode1 + adm1)

            # Populate primary admin field value
            adm1NameAll = admin1CodeDict.get(countryCode1 + adm1)
            if adm1NameAll is None:
                row.setNull("ADM1NAMEALL")
            else:
                row.setValue("ADM1NAMEALL", adm1NameAll)

                ## Populate ADM1NAME field with first name in list
                #
                # Extract first element:
                #   some admin name have multiple "versions" for
                #   example BE11 is
                #   "Brussels-Capital Region [conventional] /
                #    Brussels Hoofdstedelijk [Dutch] /
                #    Bruxelles-Capitale [French]"
                #
                # Extract first value minus "/", "[", "]" and
                # contents within brackets
                adm1Name = adm1NameAll.split("/")[0].split("[")[0].strip()
                row.setValue("ADM1NAME", adm1Name)
                userValue = "Principal Admin Division: " + adm1Name

            ## Populate Admin Division Class field (ADM1CLASS)
            adm1ClassAll = admin1ClassDict.get(countryCode1 + adm1)
            if adm1ClassAll is None:
                row.setNull("ADM1CLASSALL")
                row.setNull("ADM1CLASS")
            else:
                row.setValue("ADM1CLASSALL", adm1ClassAll)

                # 'Assemble' the Admin1 Class value
                i = adm1ClassAll.find("(")
                if i > -1:
                    # Extract characters before "("
                    adm1Class = adm1ClassAll[:i].strip()
                    # Extract characters after ")"
                    adm1Type = adm1ClassAll[i:].strip()
                else:
                    adm1Class = adm1ClassAll
                    adm1Type = ''
                adm1Class = adm1Class.split("/")[0].split("[")[0].strip()
                adm1Class = adm1Class + " " + adm1Type
                # Remove trailing space that exists if adm1Type does
                # not have a value
                adm1Class = adm1Class.strip()
                row.setValue("ADM1CLASS", adm1Class)
                userValue = userValue + " [" + adm1Class + "]"

            row.setValue("USER_FLD", userValue)

        # Set Feature Designation Name field value
        if featDSGCode <> '':
            featDSGName = featCodeDict.get(featDSGCode)
            if featDSGName is None:
                row.setNull("DSGNAME")
            else:
                row.setValue("DSGNAME", featDSGName)

        # Populate Place name field using the reading order non-diatrictic
        row.setValue("PLACENAME", placeName)

        # Set lat/long values on point object
        pntObj.Y = lat
        pntObj.X = long_

        # Populate geometry
        row.Shape = pntObj

        # Insert feature
        try:
            rows.insertRow(row)
        except:
            arcpy.AddWarning("Error inserting row: " + str(lineCount))

        # Print progress
        if lineCount % reportNum == 0:
            arcpy.AddMessage("\tCreating feature number: " + str(lineCount))

    # Close met file
    fileGeoname.close()

    # Set Output parameter (required so that script
    # tool output can be connected to other model tools)
    arcpy.SetParameter(5, featClass)

    arcpy.AddMessage("Completed, " + str(lineCount) + " records completed")
except:
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]

    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
    msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

    # Return python error messages for use in script tool or Python Window
    arcpy.AddError(pymsg)
    arcpy.AddError(msgs)
finally:
    # Regardless of whether the script succeeds or not, delete
    # the row and cursor (releases the geodatabase locks held by
    # the arcpy cursors).
    if row:
        del row
    if rows:
        del rows
    if pntObj:
        del pntObj

    # Close met file
    # NOTE(review): if opening the file itself failed, fileGeoname is
    # undefined here and this raises NameError — confirm acceptable.
    if fileGeoname:
        fileGeoname.close()
| |
'''
LICENSING
-------------------------------------------------
Loopa: Arduino-esque event loop app framework.
Copyright (C) 2016 Muterra, Inc.
Contributors
------------
Nick Badger
badg@muterra.io | badg@nickbadger.com | nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the
Free Software Foundation, Inc.,
51 Franklin Street,
Fifth Floor,
Boston, MA 02110-1301 USA
------------------------------------------------------
'''
import unittest
import threading
import queue
import asyncio
import atexit
from loopa.core import _ThreadHelper
from loopa.core import ManagedTask
from loopa.core import TaskLooper
from loopa.core import TaskCommander
# ###############################################
# Cleanup stuff
# ###############################################
def cleanup():
    """Close the default event loop so the process exits without warnings."""
    asyncio.get_event_loop().close()


# Close the loop when the interpreter shuts down.
atexit.register(cleanup)
# ###############################################
# "Paragon of adequacy" test fixtures
# ###############################################
def make_target():
    """Build a (flag, queue, target) triple for exercising thread helpers.

    The returned callable records its two arguments on the queue, then sets
    the event so a test thread can synchronise on completion.
    """
    done = threading.Event()
    captured = queue.Queue()

    def target(args, kwargs):
        captured.put(args)
        captured.put(kwargs)
        done.set()

    return done, captured, target
class ManagedTaskTester1(ManagedTask):
    """ManagedTask stub: records its start arguments and flags completion."""
    # Holds (args, kwargs) once task_run has executed; None until then.
    reoutput = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.flag = threading.Event()

    async def task_run(self, *args, **kwargs):
        # Stash the call signature, then signal that it is safe to read.
        self.reoutput = args, kwargs
        self.flag.set()
class ManagedTaskTester2(ManagedTask):
    """ManagedTask stub that blocks until cancelled.

    flag1 signals that the task started (output is readable); flag2 signals
    that the task has been torn down, however it exited.
    """
    # Holds (args, kwargs) once task_run has started; None until then.
    output = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.flag1 = threading.Event()
        self.flag2 = threading.Event()

    async def task_run(self, *args, **kwargs):
        try:
            self.output = args, kwargs
            self.flag1.set()
            # Park until an external stop cancels us.
            await asyncio.sleep(30)
        finally:
            self.flag2.set()
class TaskLooperTester1(TaskLooper):
    """TaskLooper stub that stops itself after exactly `limit` iterations."""
    # Snapshots of (args, kwargs) taken at init and stop; None until set.
    initter = None
    runner = None
    stopper = None

    async def loop_init(self, *args, limit=10, **kwargs):
        self.limit = int(limit)
        self.initter = (args, kwargs)
        self.runner = 0

    async def loop_run(self):
        # Count every pass, even past the limit, so over-running is visible.
        self.runner += 1
        if self.runner == self.limit:
            # Stop exactly once, at the limit.
            self.stop()
        elif self.runner >= self.limit * 2:
            # Safety valve: bail out if stop() failed to take effect.
            raise asyncio.CancelledError()

    async def loop_stop(self):
        self.stopper = self.initter
class TaskLooperTester2(TaskLooper):
    ''' Same as above, but cancelled from a different thread.
    '''
    # Snapshots of (args, kwargs) taken at init and stop; None until set.
    initter = None
    runner = None
    stopper = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # _dime: set when the loop reaches its limit (tells the watcher to
        # stop us); _nickel: set once the watcher has issued the stop.
        self._dime = threading.Event()
        self._nickel = threading.Event()

    def stop_on_dime(self):
        # Runs on a separate thread: wait for the loop to hit its limit,
        # then stop the looper from outside the event loop thread.
        try:
            self._dime.wait()
            self.stop_threadsafe_nowait()
        finally:
            self._nickel.set()

    async def loop_init(self, *args, limit=10, **kwargs):
        # Watcher thread that will perform the cross-thread stop.
        self._breakerworker = threading.Thread(
            target=self.stop_on_dime,
            daemon=True
        )
        self._breakerworker.start()
        self.limit = int(limit)
        self.initter = (args, kwargs)
        self.runner = 0

    async def loop_run(self):
        # We want to make sure it runs exactly the correct number of times.
        # Therefore, always increment, even if above limit.
        self.runner += 1
        # Save exactly one change for the last one, to ensure that we don't
        # re-enter the while loop after calling stop.
        if self.runner == self.limit:
            self._dime.set()
            self._nickel.wait()
        # If we exceed it sufficiently, raise to exit.
        elif self.runner >= (2 * self.limit):
            raise asyncio.CancelledError()

    async def loop_stop(self):
        self.stopper = self.initter
class TaskCommanderTester1(TaskLooper):
    ''' TaskLooper for testing the TaskCommander.

    NOTE(review): currently an empty subclass with no loop_* overrides, and
    it is not referenced by the tests below — confirm before removing.
    '''
# ###############################################
# Testing
# ###############################################
class ThreadhelperTest(unittest.TestCase):
    """Exercise _ThreadHelper's deferred target invocation."""

    def test_thelper(self):
        done, results, target = make_target()
        call_args = [1, 2, 3]
        call_kwargs = {'foo': 'bar'}

        helper = _ThreadHelper(daemon=True)
        helper.set_target(target, call_args, call_kwargs)
        helper.start()
        # Wait for the target to run on the helper thread, then verify it
        # received exactly the args/kwargs we handed to set_target.
        done.wait(timeout=30)
        self.assertEqual(results.get(), call_args)
        self.assertEqual(results.get(), call_kwargs)
class ManagedTaskTest(unittest.TestCase):
    """ManagedTask lifecycle tests: foreground, background, and explicit stop."""

    def test_foreground(self):
        # Keep the loop open in case we do any other tests in the foreground
        lm = ManagedTaskTester1(threaded=False, reusable_loop=True, debug=True)
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        # Foreground start blocks until task_run completes, so reoutput is
        # safe to read immediately afterwards.
        lm.start(*args, **kwargs)
        args2, kwargs2 = lm.reoutput

        self.assertEqual(args2, args)
        self.assertEqual(kwargs2, kwargs)

    def test_background(self):
        lm = ManagedTaskTester1(threaded=True, reusable_loop=False, debug=True)
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        lm.start(*args, **kwargs)
        # Background start returns immediately; wait for the task to run.
        lm.flag.wait(timeout=30)
        args2, kwargs2 = lm.reoutput

        self.assertEqual(args2, args)
        self.assertEqual(kwargs2, kwargs)

        # Don't call stop, because we want to make sure the loop closes itself
        # appropriately. Instead, wait for the shutdown flag.
        lm._shutdown_complete_flag.wait(timeout=30)
        self.assertTrue(lm._loop.is_closed())

    def test_background_stop(self):
        lm = ManagedTaskTester2(threaded=True, reusable_loop=False, debug=True)
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        lm.start(*args, **kwargs)
        lm.flag1.wait(timeout=30)
        # Ensure it stops before the end of the sleep call
        lm.stop_threadsafe(timeout=5)
        lm.flag2.wait(timeout=5)
        args2, kwargs2 = lm.output

        self.assertEqual(args2, args)
        self.assertEqual(kwargs2, kwargs)
        self.assertTrue(lm._loop.is_closed())
class TaskLooperTest(unittest.TestCase):
    ''' Test the TaskLooper.
    '''

    def test_self_stop(self):
        # Keep the loop open in case we do any other tests in the foreground
        lm = TaskLooperTester1(threaded=False, reusable_loop=True, debug=True)
        limit = 10
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        # Foreground start blocks until the looper stops itself at `limit`.
        lm.start(limit=limit, *args, **kwargs)
        args2, kwargs2 = lm.initter
        args3, kwargs3 = lm.stopper

        # loop_init and loop_stop must both have seen the same arguments,
        # and loop_run must have executed exactly `limit` times.
        self.assertEqual(args2, args)
        self.assertEqual(args3, args)
        self.assertEqual(kwargs2, kwargs)
        self.assertEqual(kwargs3, kwargs)
        self.assertEqual(lm.runner, limit)

    def test_threaded_stop(self):
        # Keep the loop open in case we do any other tests in the foreground
        lm = TaskLooperTester2(threaded=True, reusable_loop=False, debug=True)
        limit = 10
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        lm.start(limit=limit, *args, **kwargs)
        # NOTE(review): this joins the watcher logic on the test thread as
        # well; it waits on the same _dime event — confirm intended.
        lm.stop_on_dime()
        lm._shutdown_complete_flag.wait(timeout=10)
        args2, kwargs2 = lm.initter
        args3, kwargs3 = lm.stopper

        self.assertEqual(args2, args)
        self.assertEqual(args3, args)
        self.assertEqual(kwargs2, kwargs)
        self.assertEqual(kwargs3, kwargs)
        self.assertEqual(lm.runner, limit)
class TaskCommanderTest(unittest.TestCase):
    """TaskCommander tests: running managed tasks and loopers under one commander."""

    def test_simple_nostop(self):
        # Two self-completing tasks; the commander should shut down on its own.
        tm1 = ManagedTaskTester1()
        tm2 = ManagedTaskTester1()
        com = TaskCommander(reusable_loop=True, debug=True)
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        com.register_task(tm1, *args, **kwargs)
        com.register_task(tm2, *args, **kwargs)
        com.start()
        tm1.flag.wait(timeout=30)
        tm2.flag.wait(timeout=30)

        # Both tasks must have received the registration arguments.
        args2, kwargs2 = tm1.reoutput
        self.assertEqual(args2, args)
        self.assertEqual(kwargs2, kwargs)
        args3, kwargs3 = tm2.reoutput
        self.assertEqual(args3, args)
        self.assertEqual(kwargs3, kwargs)

        # Don't call stop, because we want to make sure the loop closes itself
        # appropriately. Instead, wait for the shutdown flag.
        com._shutdown_complete_flag.wait(timeout=30)

    def test_simple_stop(self):
        # Two blocking tasks; stopping the commander must cancel both.
        tm1 = ManagedTaskTester2()
        tm2 = ManagedTaskTester2()
        com = TaskCommander(threaded=True, reusable_loop=False, debug=True)
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        com.register_task(tm1, *args, **kwargs)
        com.register_task(tm2, *args, **kwargs)
        com.start()
        tm1.flag1.wait(timeout=30)
        tm2.flag1.wait(timeout=30)
        # Ensure it stops before the end of the sleep call
        com.stop_threadsafe(timeout=5)
        tm1.flag2.wait(timeout=5)
        tm2.flag2.wait(timeout=5)

        args2, kwargs2 = tm1.output
        self.assertEqual(args2, args)
        self.assertEqual(kwargs2, kwargs)
        args3, kwargs3 = tm2.output
        self.assertEqual(args3, args)
        self.assertEqual(kwargs3, kwargs)

        # Don't call stop, because we want to make sure the loop closes itself
        # appropriately. Instead, wait for the shutdown flag.
        com._shutdown_complete_flag.wait(timeout=30)

    def test_looper_nostop(self):
        # Two self-stopping loopers under one commander: each must run its
        # loop exactly `limit` times and see the registration arguments.
        tm1 = TaskLooperTester1()
        tm2 = TaskLooperTester1()
        com = TaskCommander(reusable_loop=True, debug=True)
        limit = 10
        args = (1, 2, 3)
        kwargs = {'foo': 'bar'}
        com.register_task(tm1, *args, limit=limit, **kwargs)
        com.register_task(tm2, *args, limit=limit, **kwargs)
        com.start()

        args2, kwargs2 = tm1.initter
        args3, kwargs3 = tm1.stopper
        self.assertEqual(args2, args)
        self.assertEqual(args3, args)
        self.assertEqual(kwargs2, kwargs)
        self.assertEqual(kwargs3, kwargs)
        self.assertEqual(tm1.runner, limit)

        args2, kwargs2 = tm2.initter
        args3, kwargs3 = tm2.stopper
        self.assertEqual(args2, args)
        self.assertEqual(args3, args)
        self.assertEqual(kwargs2, kwargs)
        self.assertEqual(kwargs3, kwargs)
        self.assertEqual(tm2.runner, limit)

        # Don't call stop, because we want to make sure the loop closes itself
        # appropriately. Instead, wait for the shutdown flag.
        com._shutdown_complete_flag.wait(timeout=30)
# Allow running this test module directly (python <file>.py) in addition
# to discovery via a test runner.
if __name__ == "__main__":
    unittest.main()
| |
""" miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
    """
    For the particular label_list, gets the offsets into the hypothetical list
    representing the totally ordered cartesian product of all possible label
    combinations, *as long as* this space fits within int64 bounds;
    otherwise, though group indices identify unique combinations of
    labels, they cannot be deconstructed.

    - If `sort`, rank of returned ids preserve lexical ranks of labels.
      i.e. returned id's can be used to do lexical sort on labels;
    - If `xnull` nulls (-1 labels) are passed through.

    Parameters
    ----------
    labels: sequence of arrays
        Integers identifying levels at each location
    shape: sequence of ints same length as labels
        Number of unique levels at each location
    sort: boolean
        If the ranks of returned ids should match lexical ranks of labels
    xnull: boolean
        If true nulls are excluded. i.e. -1 values in the labels are
        passed through

    Returns
    -------
    An array of type int64 where two elements are equal if their corresponding
    labels are equal at all location.
    """

    def _int64_cut_off(shape):
        # Number of leading levels whose cartesian product still fits
        # within int64 bounds.
        acc = long(1)
        for i, mul in enumerate(shape):
            acc *= long(mul)
            if not acc < _INT64_MAX:
                return i
        return len(shape)

    def maybe_lift(lab, size):
        # promote nan values (assigned -1 label in lab array)
        # so that all output values are non-negative
        return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)

    labels = map(ensure_int64, labels)
    if not xnull:
        # Shift labels up where -1 (null) occurs so every label is a valid
        # non-negative offset; the level widths grow by one accordingly.
        labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))

    labels = list(labels)
    shape = list(shape)

    # Iteratively process all the labels in chunks sized so less
    # than _INT64_MAX unique int ids will be required for each chunk
    while True:
        # how many levels can be done without overflow:
        nlev = _int64_cut_off(shape)

        # compute flat ids for the first `nlev` levels
        # (mixed-radix encoding: most-significant level first)
        stride = np.prod(shape[1:nlev], dtype='i8')
        out = stride * labels[0].astype('i8', subok=False, copy=False)

        for i in range(1, nlev):
            if shape[i] == 0:
                stride = 0
            else:
                stride //= shape[i]
            out += labels[i] * stride

        if xnull:  # exclude nulls
            mask = labels[0] == -1
            for lab in labels[1:nlev]:
                mask |= lab == -1
            out[mask] = -1

        if nlev == len(shape):  # all levels done!
            break

        # compress what has been done so far in order to avoid overflow
        # to retain lexical ranks, obs_ids should be sorted
        comp_ids, obs_ids = compress_group_index(out, sort=sort)

        # loop again on the compressed ids plus the remaining levels
        labels = [comp_ids] + labels[nlev:]
        shape = [len(obs_ids)] + shape[nlev:]

    return out
def get_compressed_ids(labels, sizes):
    """
    Compress the cartesian-product group index for ``labels``.

    The flat group index space can be huge, so it is compressed into
    dense offsets (comp_ids) into the list of observed unique labels
    (obs_group_ids).

    Parameters
    ----------
    labels : list of label arrays
    sizes : list of size of the levels

    Returns
    -------
    tuple of (comp_ids, obs_group_ids)
    """
    group_index = get_group_index(labels, sizes, sort=True, xnull=False)
    return compress_group_index(group_index, sort=True)
def is_int64_overflow_possible(shape):
    """
    Return True when the cartesian product of the level sizes in ``shape``
    could overflow int64 (i.e. the product reaches ``_INT64_MAX``).
    """
    total = long(1)
    for dim in shape:
        total *= long(dim)
    return total >= _INT64_MAX
def decons_group_index(comp_labels, shape):
    # Reconstruct the per-level labels from flat group indices -- the
    # inverse of get_group_index's mixed-radix encoding (non-overflow case).
    if is_int64_overflow_possible(shape):
        # at some point group indices are factorized,
        # and may not be deconstructed here! wrong path!
        raise ValueError('cannot deconstruct factorized group indices!')

    label_list = []
    factor = 1        # product of the widths of the levels peeled off so far
    y = 0             # contribution of the previously extracted level
    x = comp_labels
    for i in reversed(range(len(shape))):
        # peel off the least-significant remaining level
        labels = (x - y) % (factor * shape[i]) // factor
        # negative flat ids mark nulls; propagate -1 into every level
        np.putmask(labels, comp_labels < 0, -1)
        label_list.append(labels)
        y = labels * factor
        factor *= shape[i]
    # levels were recovered last-to-first; restore the original order
    return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
    """
    reconstruct labels from observed group ids

    Parameters
    ----------
    xnull: boolean,
        if nulls are excluded; i.e. -1 labels are passed through
    """
    if not xnull:
        # mirror the "lift" applied in get_group_index: levels containing a
        # -1 label had both their labels and widths shifted up by one
        lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
        shape = np.asarray(shape, dtype='i8') + lift

    if not is_int64_overflow_possible(shape):
        # obs ids are deconstructable! take the fast route!
        out = decons_group_index(obs_ids, shape)
        # undo the lift where it was applied; when xnull is True the
        # short-circuit avoids referencing the (unbound) `lift`
        return out if xnull or not lift.any() \
            else [x - y for x, y in zip(out, lift)]

    # overflow path: flat ids cannot be decomposed arithmetically, so pick
    # one representative location per observed group id and read the
    # original labels back from there
    i = unique_label_indices(comp_ids)
    i8copy = lambda a: a.astype('i8', subok=False, copy=True)
    return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
    """
    Return an indexer that sorts the rows described by factorized
    ``labels``/``shape``; optionally compress the flat group index first.
    """
    group_index = get_group_index(labels, shape, sort=True, xnull=False)

    if compress:
        group_index, obs = compress_group_index(group_index, sort=True)
        ngroups = len(obs)
    else:
        ngroups = (group_index.size and group_index.max()) + 1

    return get_group_index_sorter(group_index, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
    """
    Return an indexer that lexically sorts by ``keys``.

    Parameters
    ----------
    keys : sequence of array-likes, highest-priority key first
    orders : bool, sequence of bools, or None
        Ascending (True) / descending (False) per key; a scalar bool or
        None applies uniformly (None means all ascending).
    na_position : {'last', 'first'}
        Where NaN codes are placed within each key's ordering.
    """
    from pandas.core.arrays import Categorical

    labels = []
    shape = []
    if isinstance(orders, bool):
        orders = [orders] * len(keys)
    elif orders is None:
        orders = [True] * len(keys)

    for key, order in zip(keys, orders):

        # we are already a Categorical
        if is_categorical_dtype(key):
            c = key

        # create the Categorical
        else:
            c = Categorical(key, ordered=True)

        # NOTE(review): validation happens inside the loop, so an invalid
        # na_position is silently accepted when ``keys`` is empty.
        if na_position not in ['last', 'first']:
            raise ValueError('invalid na_position: {!r}'.format(na_position))

        n = len(c.categories)
        codes = c.codes.copy()

        mask = (c.codes == -1)  # NaNs are coded as -1
        if order:  # ascending
            if na_position == 'last':
                codes = np.where(mask, n, codes)
            elif na_position == 'first':
                # shift everything up by one so the NaN code (-1) becomes 0
                codes += 1
        else:  # not order means descending
            if na_position == 'last':
                codes = np.where(mask, n, n - codes - 1)
            elif na_position == 'first':
                codes = np.where(mask, 0, n - codes)
        if mask.any():
            # reserve the extra slot consumed by the NaN code
            n += 1

        shape.append(n)
        labels.append(codes)

    return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
    """
    This is intended to be a drop-in replacement for np.argsort which
    handles NaNs. It adds ascending and na_position parameters.
    GH #6399, #5231
    """
    # Categoricals sort their own codes; NaNs (coded -1) need repositioning.
    if is_categorical_dtype(items):
        if na_position not in {'first', 'last'}:
            raise ValueError('invalid na_position: {!r}'.format(na_position))

        null_count = isna(items).sum()
        sorted_idx = items.argsort(ascending=ascending, kind=kind)
        if ascending and na_position == 'last':
            # NaN is coded as -1 and is listed in front after sorting
            sorted_idx = np.roll(sorted_idx, -null_count)
        elif not ascending and na_position == 'first':
            # NaN is coded as -1 and is listed in the end after sorting
            sorted_idx = np.roll(sorted_idx, null_count)
        return sorted_idx

    items = np.asanyarray(items)
    mask = isna(items)
    positions = np.arange(len(items))

    # split positions into the NaN ones and the sortable remainder
    nan_idx = positions[mask]
    non_nan_idx = positions[~mask]
    non_nans = items[~mask]

    if not ascending:
        non_nans = non_nans[::-1]
        non_nan_idx = non_nan_idx[::-1]

    indexer = non_nan_idx[non_nans.argsort(kind=kind)]
    if not ascending:
        indexer = indexer[::-1]

    # finally, place the NaNs at the end or the beginning per na_position
    if na_position == 'last':
        return np.concatenate([indexer, nan_idx])
    if na_position == 'first':
        return np.concatenate([nan_idx, indexer])
    raise ValueError('invalid na_position: {!r}'.format(na_position))
class _KeyMapper(object):
    """
    Ease my suffering. Map compressed group id -> key tuple.
    """

    def __init__(self, comp_ids, ngroups, levels, labels):
        # levels/labels: one entry per grouping key (self.k of them)
        self.levels = levels
        self.labels = labels
        self.comp_ids = comp_ids.astype(np.int64)

        self.k = len(labels)
        # one hash table per key, mapping comp_id -> that key's label code
        self.tables = [hashtable.Int64HashTable(ngroups)
                       for _ in range(self.k)]

        self._populate_tables()

    def _populate_tables(self):
        # fill each table from the parallel comp_ids / label arrays
        for labs, table in zip(self.labels, self.tables):
            table.map(self.comp_ids, labs.astype(np.int64))

    def get_key(self, comp_id):
        # translate a compressed group id into the tuple of level values
        return tuple(level[table.get_item(comp_id)]
                     for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
    """Return the key tuple of each of the ``ngroups`` groups, in order."""
    key_mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
    return [key_mapper.get_key(group_id) for group_id in range(ngroups)]
def get_indexer_dict(label_list, keys):
    """ return a dictionary of {labels} -> {indexers} """
    shape = list(map(len, keys))

    group_index = get_group_index(label_list, shape, sort=True, xnull=True)
    # when overflow is possible the ids are already dense, so the group
    # count is max+1; otherwise it is the full cartesian-product size
    ngroups = ((group_index.size and group_index.max()) + 1) \
        if is_int64_overflow_possible(shape) \
        else np.prod(shape, dtype='i8')

    sorter = get_group_index_sorter(group_index, ngroups)

    sorted_labels = [lab.take(sorter) for lab in label_list]
    group_index = group_index.take(sorter)

    return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
    """
    Return an indexer that stably sorts ``group_index``.

    Two stable algorithms are available and the cheaper one is chosen:

    - ``algos.groupsort_indexer`` implements counting sort, which is at
      least O(ngroups) where ``ngroups = prod(shape)`` -- potentially huge
      for multi-key groupbys;
    - ``np.argsort(kind='mergesort')`` is O(count * log(count)) in the
      frame length.

    Stability is required for the correctness of groupby operations, e.g.
    ``df.groupby(key)[col].transform('first')``.
    """
    count = len(group_index)
    # complexity constants taken literally; some room for tuning remains
    alpha = 0.0
    beta = 1.0
    prefer_counting_sort = count > 0 and \
        (alpha + beta * ngroups) < (count * np.log(count))
    if not prefer_counting_sort:
        return group_index.argsort(kind='mergesort')
    sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups)
    return ensure_platform_int(sorter)
def compress_group_index(group_index, sort=True):
    """
    Group_index is offsets into cartesian product of all possible labels. This
    space can be huge, so this function compresses it, by computing offsets
    (comp_ids) into the list of unique labels (obs_group_ids).

    Returns
    -------
    comp_ids : int64 ndarray
        Dense group id per input element.
    obs_group_ids : int64 ndarray
        The observed (unique) flat group indices.
    """
    size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
    table = hashtable.Int64HashTable(size_hint)

    group_index = ensure_int64(group_index)

    # note, group labels come out ascending (ie, 1,2,3 etc)
    comp_ids, obs_group_ids = table.get_labels_groupby(group_index)

    if sort and len(obs_group_ids) > 0:
        # reorder so obs_group_ids is ascending and comp_ids follow suit
        obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)

    return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
    # Sort `uniques` ascending and remap `labels` onto the sorted
    # positions, leaving negative (null) labels untouched.

    # sorter is index where elements ought to go
    sorter = uniques.argsort()

    # reverse_indexer is where elements came from
    reverse_indexer = np.empty(len(sorter), dtype=np.int64)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = labels < 0

    # move labels to right locations (ie, unsort ascending labels)
    labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
    np.putmask(labels, mask, -1)

    # sort observed ids
    uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)

    return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
    """
    Sort ``values`` and reorder corresponding ``labels``.
    ``values`` should be unique if ``labels`` is not None.
    Safe for use with mixed types (int, str), orders ints before strs.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    values : list-like
        Sequence; must be unique if ``labels`` is not None.
    labels : list_like
        Indices to ``values``. All out of bound indices are treated as
        "not found" and will be masked with ``na_sentinel``.
    na_sentinel : int, default -1
        Value in ``labels`` to mark "not found".
        Ignored when ``labels`` is None.
    assume_unique : bool, default False
        When True, ``values`` are assumed to be unique, which can speed up
        the calculation. Ignored when ``labels`` is None.

    Returns
    -------
    ordered : ndarray
        Sorted ``values``
    new_labels : ndarray
        Reordered ``labels``; returned when ``labels`` is not None.

    Raises
    ------
    TypeError
        * If ``values`` is not list-like or if ``labels`` is neither None
        nor list-like
        * If ``values`` cannot be sorted
    ValueError
        * If ``labels`` is not None and ``values`` contain duplicates.
    """
    if not is_list_like(values):
        # BUG FIX: the implicitly-concatenated literals used to render as
        # "...passed tosafe_sort as values" (missing space).
        raise TypeError("Only list-like objects are allowed to be passed to "
                        "safe_sort as values")

    if not isinstance(values, np.ndarray):
        # don't convert to string types
        dtype, _ = infer_dtype_from_array(values)
        values = np.asarray(values, dtype=dtype)

    def sort_mixed(values):
        # order ints before strings, safe in py3
        str_pos = np.array([isinstance(x, string_types) for x in values],
                           dtype=bool)
        nums = np.sort(values[~str_pos])
        strs = np.sort(values[str_pos])
        return np.concatenate([nums, np.asarray(strs, dtype=object)])

    sorter = None
    if PY3 and lib.infer_dtype(values) == 'mixed-integer':
        # unorderable in py3 if mixed str/int
        ordered = sort_mixed(values)
    else:
        try:
            sorter = values.argsort()
            ordered = values.take(sorter)
        except TypeError:
            # try this anyway
            ordered = sort_mixed(values)

    # labels:
    if labels is None:
        return ordered

    if not is_list_like(labels):
        # BUG FIX: same missing-space defect ("...to bepassed...").
        raise TypeError("Only list-like objects or None are allowed to be "
                        "passed to safe_sort as labels")
    labels = ensure_platform_int(np.asarray(labels))

    from pandas import Index
    if not assume_unique and not Index(values).is_unique:
        raise ValueError("values should be unique if labels is not None")

    if sorter is None:
        # mixed types: positions must be recovered with a hash table since
        # `ordered` was not produced via argsort
        (hash_klass, _), values = algorithms._get_data_algo(
            values, algorithms._hashtables)
        t = hash_klass(len(values))
        t.map_locations(values)
        sorter = ensure_platform_int(t.lookup(ordered))

    reverse_indexer = np.empty(len(sorter), dtype=np.int_)
    reverse_indexer.put(sorter, np.arange(len(sorter)))

    mask = (labels < -len(values)) | (labels >= len(values)) | \
        (labels == na_sentinel)

    # (Out of bound indices will be masked with `na_sentinel` next, so we may
    # deal with them here without performance loss using `mode='wrap'`.)
    new_labels = reverse_indexer.take(labels, mode='wrap')
    np.putmask(new_labels, mask, na_sentinel)

    return ordered, ensure_platform_int(new_labels)
| |
# -*- coding: utf-8 -*-
'''
Service support for RHEL-based systems, including support for both upstart and sysvinit
'''
# Import python libs
import glob
import logging
import os
import stat
# Import salt libs
import salt.utils
from salt.modules import state_std
log = logging.getLogger(__name__)
__func_alias__ = {
'reload_': 'reload'
}
# Define the module's virtual name
__virtualname__ = 'service'
# Import upstart module if needed
HAS_UPSTART = False
if salt.utils.which('initctl'):
try:
# Don't re-invent the wheel, import the helper functions from the
# upstart module.
from salt.modules.upstart \
import _upstart_enable, _upstart_disable, _upstart_is_enabled
except Exception as exc:
log.error('Unable to import helper functions from '
'salt.modules.upstart: {0}'.format(exc))
else:
HAS_UPSTART = True
def __virtual__():
    '''
    Only work on RHEL-family systems that still use sysvinit/upstart
    (i.e. not systemd). Returns the virtual module name when this module
    should load, otherwise False.
    '''
    # Enable on these platforms only.
    enable = set((
        'RedHat',
        'CentOS',
        'ScientificLinux',
        'CloudLinux',
        'Amazon',
        'Fedora',
        'ALT',
        'OEL',
        'SUSE Enterprise Server',
        'McAfee OS Server'
    ))
    if 'os' in __grains__ and __grains__['os'] in enable:
        try:
            osrelease = float(__grains__.get('osrelease', 0))
        except ValueError:
            return False
        if __grains__['os'] == 'Fedora':
            # BUG FIX: the raw grain (usually a string) was compared to the
            # int 15, which is not a numeric comparison (py2: type-based
            # ordering; py3: TypeError). Use the float parsed above.
            if osrelease > 15:
                # Fedora >= 16 uses systemd
                return False
        if __grains__['os'] == 'RedHat':
            if osrelease >= 7:
                # RHEL >= 7 uses systemd
                return False
        return __virtualname__
    return False
def _runlevel():
    '''
    Return the current runlevel
    '''
    out = __salt__['cmd.run']('/sbin/runlevel')
    # 'unknown' is reported inside kickstart environments; since that is
    # usually a server deployment, assume runlevel 3 so service-related
    # states don't fail with out-of-range errors downstream.
    return '3' if 'unknown' in out else out.split()[1]
def _chkconfig_add(name):
    '''
    Run 'chkconfig --add' for a service whose script is installed in
    /etc/init.d. The service is initially configured to be disabled at all
    run-levels.
    '''
    cmd = '/sbin/chkconfig --add {0}'.format(name)
    if __salt__['cmd.retcode'](cmd) != 0:
        log.error('Unable to add initscript "{0}" to chkconfig'.format(name))
        return False
    log.info('Added initscript "{0}" to chkconfig'.format(name))
    return True
def _service_is_upstart(name):
    '''
    Return True if the service is an upstart service, otherwise return False.
    '''
    conf_path = '/etc/init/{0}.conf'.format(name)
    return HAS_UPSTART and os.path.exists(conf_path)
def _service_is_sysv(name):
'''
Return True if the service is a System V service (includes those managed by
chkconfig); otherwise return False.
'''
try:
# Look for user-execute bit in file mode.
return bool(os.stat(
os.path.join('/etc/init.d', name)).st_mode & stat.S_IXUSR)
except OSError:
return False
def _service_is_chkconfig(name):
    '''
    Return True if the service is managed by chkconfig.
    '''
    # a zero exit status from --list means chkconfig knows the service
    return __salt__['cmd.retcode'](
        '/sbin/chkconfig --list {0}'.format(name), ignore_retcode=True) == 0
def _sysv_is_enabled(name, runlevel=None):
    '''
    Return True if the sysv (or chkconfig) service is enabled for the
    specified runlevel; otherwise return False. If `runlevel` is None, the
    current runlevel is used.
    '''
    # chkconfig is authoritative when it knows the service
    if _chkconfig_is_enabled(name, runlevel):
        return True

    if runlevel is None:
        runlevel = _runlevel()
    # otherwise look for an S?? start symlink in the runlevel's rc dir
    pattern = '/etc/rc.d/rc{0}.d/S??{1}'.format(runlevel, name)
    return len(glob.glob(pattern)) > 0
def _chkconfig_is_enabled(name, runlevel=None):
    '''
    Return True if the service is enabled according to chkconfig; otherwise
    return False. If `runlevel` is None, then use the current runlevel.
    '''
    cmdline = '/sbin/chkconfig --list {0}'.format(name)
    result = __salt__['cmd.run_all'](cmdline)

    if result['retcode'] == 0:
        # Only the first output line is examined. Typical shapes:
        #   "sshd  0:off 1:off 2:on ..."  (sysv-style, many columns)
        #   "svc   on"                    (xinetd-style, two columns)
        cols = result['stdout'].splitlines()[0].split()
        try:
            if cols[0].strip(':') == name:
                if runlevel is None:
                    runlevel = _runlevel()
                # NOTE(review): a line of exactly 3 columns matches neither
                # branch and falls through to False -- confirm intended.
                if len(cols) > 3 and '{0}:on'.format(runlevel) in cols:
                    return True
                elif len(cols) < 3 and cols[1] and cols[1] == 'on':
                    return True
        except IndexError:
            # malformed/short output; treat as not enabled
            pass
    return False
def _sysv_enable(name):
    '''
    Enable the named sysv service to start at boot. The service will be
    enabled using chkconfig with default run-levels if the service is
    chkconfig compatible. If chkconfig is not available, this fails.
    '''
    # make sure chkconfig knows the service, registering it if necessary
    if not (_service_is_chkconfig(name) or _chkconfig_add(name)):
        return False
    retcode = __salt__['cmd.retcode']('/sbin/chkconfig {0} on'.format(name))
    return retcode == 0
def _sysv_disable(name):
    '''
    Disable the named sysv service from starting at boot. The service will be
    disabled using chkconfig with default run-levels if the service is
    chkconfig compatible; otherwise, the service will be disabled for the
    current run-level only.
    '''
    # make sure chkconfig knows the service, registering it if necessary
    if not (_service_is_chkconfig(name) or _chkconfig_add(name)):
        return False
    retcode = __salt__['cmd.retcode']('/sbin/chkconfig {0} off'.format(name))
    return retcode == 0
def _upstart_services():
    '''
    Return list of upstart services.
    '''
    if not HAS_UPSTART:
        return []
    # every /etc/init/*.conf file defines one upstart job; strip ".conf"
    return [os.path.basename(path)[:-5]
            for path in glob.glob('/etc/init/*.conf')]
def _sysv_services():
    '''
    Return list of sysv services (executable scripts under /etc/init.d).
    '''
    # FIX: dropped the unused local ``ret = []`` left over from an earlier
    # loop-based implementation.
    return [name for name in os.listdir('/etc/init.d')
            if _service_is_sysv(name)]
def get_enabled(limit=''):
    '''
    Return the enabled services. Use the ``limit`` param to restrict results
    to services of that type.

    CLI Examples:

    .. code-block:: bash

        salt '*' service.get_enabled
        salt '*' service.get_enabled limit=upstart
        salt '*' service.get_enabled limit=sysvinit
    '''
    limit = limit.lower()
    if limit == 'upstart':
        return sorted(name for name in _upstart_services()
                      if _upstart_is_enabled(name))
    if limit == 'sysvinit':
        runlevel = _runlevel()
        return sorted(name for name in _sysv_services()
                      if _sysv_is_enabled(name, runlevel))
    # no limit: merge both service families
    runlevel = _runlevel()
    enabled_names = [name for name in _upstart_services()
                     if _upstart_is_enabled(name)]
    enabled_names += [name for name in _sysv_services()
                      if _sysv_is_enabled(name, runlevel)]
    return sorted(enabled_names)
def get_disabled(limit=''):
    '''
    Return the disabled services. Use the ``limit`` param to restrict results
    to services of that type.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
        salt '*' service.get_disabled limit=upstart
        salt '*' service.get_disabled limit=sysvinit
    '''
    limit = limit.lower()
    if limit == 'upstart':
        return sorted(name for name in _upstart_services()
                      if not _upstart_is_enabled(name))
    if limit == 'sysvinit':
        runlevel = _runlevel()
        return sorted(name for name in _sysv_services()
                      if not _sysv_is_enabled(name, runlevel))
    # no limit: merge both service families
    runlevel = _runlevel()
    disabled_names = [name for name in _upstart_services()
                      if not _upstart_is_enabled(name)]
    disabled_names += [name for name in _sysv_services()
                       if not _sysv_is_enabled(name, runlevel)]
    return sorted(disabled_names)
def get_all(limit=''):
    '''
    Return all installed services. Use the ``limit`` param to restrict results
    to services of that type.

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
        salt '*' service.get_all limit=upstart
        salt '*' service.get_all limit=sysvinit
    '''
    limit = limit.lower()
    if limit == 'upstart':
        return sorted(_upstart_services())
    if limit == 'sysvinit':
        return sorted(_sysv_services())
    return sorted(_sysv_services() + _upstart_services())
def available(name, limit=''):
    '''
    Return True if the named service is available. Use the ``limit`` param to
    restrict results to services of that type.

    CLI Examples:

    .. code-block:: bash

        salt '*' service.available sshd
        salt '*' service.available sshd limit=upstart
        salt '*' service.available sshd limit=sysvinit
    '''
    if limit == 'upstart':
        return _service_is_upstart(name)
    if limit == 'sysvinit':
        return _service_is_sysv(name)
    return (_service_is_upstart(name) or _service_is_sysv(name)
            or _service_is_chkconfig(name))
def missing(name, limit=''):
    '''
    The inverse of service.available.
    Return True if the named service is not available. Use the ``limit``
    param to restrict results to services of that type.

    CLI Examples:

    .. code-block:: bash

        salt '*' service.missing sshd
        salt '*' service.missing sshd limit=upstart
        salt '*' service.missing sshd limit=sysvinit
    '''
    if limit == 'upstart':
        return not _service_is_upstart(name)
    if limit == 'sysvinit':
        return not _service_is_sysv(name)
    # FIX: also consult chkconfig so this is a true inverse of
    # service.available -- previously a chkconfig-only service was
    # reported as both available AND missing.
    return not (_service_is_upstart(name) or _service_is_sysv(name)
                or _service_is_chkconfig(name))
def start(name, **kwargs):
    '''
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    '''
    # upstart jobs use "start <name>"; sysv services go through /sbin/service
    template = 'start {0}' if _service_is_upstart(name) \
        else '/sbin/service {0} start'
    result = __salt__['cmd.run_stdall'](template.format(name))
    state_std(kwargs, result)
    return result['retcode'] == 0
def stop(name, **kwargs):
    '''
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    # upstart jobs use "stop <name>"; sysv services go through /sbin/service
    template = 'stop {0}' if _service_is_upstart(name) \
        else '/sbin/service {0} stop'
    result = __salt__['cmd.run_stdall'](template.format(name))
    state_std(kwargs, result)
    return result['retcode'] == 0
def restart(name, **kwargs):
    '''
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    # upstart jobs use "restart <name>"; sysv via /sbin/service
    template = 'restart {0}' if _service_is_upstart(name) \
        else '/sbin/service {0} restart'
    result = __salt__['cmd.run_stdall'](template.format(name))
    state_std(kwargs, result)
    return result['retcode'] == 0
def reload_(name, **kwargs):
    '''
    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    '''
    # upstart jobs use "reload <name>"; sysv via /sbin/service
    template = 'reload {0}' if _service_is_upstart(name) \
        else '/sbin/service {0} reload'
    result = __salt__['cmd.run_stdall'](template.format(name))
    state_std(kwargs, result)
    return result['retcode'] == 0
def status(name, sig=None):
    '''
    Return the status for a service, returns a bool whether the service is
    running.

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name>
    '''
    if _service_is_upstart(name):
        out = __salt__['cmd.run']('status {0}'.format(name))
        return 'start/running' in out
    if sig:
        # match a process signature instead of asking the init system
        return bool(__salt__['status.pid'](sig))
    cmd = '/sbin/service {0} status'.format(name)
    return __salt__['cmd.retcode'](cmd, ignore_retcode=True) == 0
def enable(name, **kwargs):
    '''
    Enable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    '''
    enabler = _upstart_enable if _service_is_upstart(name) else _sysv_enable
    return enabler(name)
def disable(name, **kwargs):
    '''
    Disable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    '''
    disabler = _upstart_disable if _service_is_upstart(name) else _sysv_disable
    return disabler(name)
def enabled(name):
    '''
    Check to see if the named service is enabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    '''
    if _service_is_upstart(name):
        return _upstart_is_enabled(name)
    return _sysv_is_enabled(name)
def disabled(name):
    '''
    Check to see if the named service is disabled to start on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    '''
    if _service_is_upstart(name):
        return not _upstart_is_enabled(name)
    return not _sysv_is_enabled(name)
| |
'''
VKeyboard
=========
.. image:: images/vkeyboard.jpg
:align: right
.. versionadded:: 1.0.8
VKeyboard is an onscreen keyboard for Kivy. Its operation is intended to be
transparent to the user. Using the widget directly is NOT recommended. Read the
section `Request keyboard`_ first.
Modes
-----
This virtual keyboard has a docked and free mode:
* docked mode (:attr:`VKeyboard.docked` = True)
Generally used when only one person is using the computer, like a tablet or
personal computer etc.
* free mode: (:attr:`VKeyboard.docked` = False)
Mostly for multitouch surfaces. This mode allows multiple virtual
keyboards to be used on the screen.
If the docked mode changes, you need to manually call
:meth:`VKeyboard.setup_mode` otherwise the change will have no impact.
During that call, the VKeyboard, implemented on top of a
:class:`~kivy.uix.scatter.Scatter`, will change the
behavior of the scatter and position the keyboard near the target (if target
and docked mode is set).
Layouts
-------
The virtual keyboard is able to load a custom layout. If you create a new
layout and put the JSON in :file:`<kivy_data_dir>/keyboards/<layoutid>.json`,
you can load it by setting :attr:`VKeyboard.layout` to your layoutid.
The JSON must be structured like this::
{
"title": "Title of your layout",
"description": "Description of your layout",
"cols": 15,
"rows": 5,
...
}
Then, you need to describe the keys in each row, for either a "normal",
"shift" or a "special" (added in version 1.9.0) mode. Keys for this row
data must be named `normal_<row>`, `shift_<row>` and `special_<row>`.
Replace `row` with the row number.
Inside each row, you will describe the key. A key is a 4 element list in
the format::
[ <text displayed on the keyboard>, <text to put when the key is pressed>,
<text that represents the keycode>, <size of cols> ]
Here are example keys::
# f key
["f", "f", "f", 1]
# tab (1.5 columns wide)
["\u21B9", "\t", "tab", 1.5]
Finally, complete the JSON::
{
...
"normal_1": [
["`", "`", "`", 1], ["1", "1", "1", 1], ["2", "2", "2", 1],
["3", "3", "3", 1], ["4", "4", "4", 1], ["5", "5", "5", 1],
["6", "6", "6", 1], ["7", "7", "7", 1], ["8", "8", "8", 1],
["9", "9", "9", 1], ["0", "0", "0", 1], ["+", "+", "+", 1],
["=", "=", "=", 1], ["\u232b", null, "backspace", 2]
],
"shift_1": [ ... ],
"normal_2": [ ... ],
"special_2": [ ... ],
...
}
Request Keyboard
----------------
The instantiation of the virtual keyboard is controlled by the configuration.
Check `keyboard_mode` and `keyboard_layout` in the :doc:`api-kivy.config`.
If you intend to create a widget that requires a keyboard, do not use the
virtual keyboard directly, but prefer to use the best method available on
the platform. Check the :meth:`~kivy.core.window.WindowBase.request_keyboard`
method in the :doc:`api-kivy.core.window`.
If you want a specific layout when you request the keyboard, you should write
something like this (from 1.8.0, numeric.json can be in the same directory as
your main.py)::
keyboard = Window.request_keyboard(
self._keyboard_close, self)
if keyboard.widget:
vkeyboard = self._keyboard.widget
vkeyboard.layout = 'numeric.json'
'''
__all__ = ('VKeyboard', )
from kivy import kivy_data_dir
from kivy.vector import Vector
from kivy.config import Config
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, NumericProperty, StringProperty, \
BooleanProperty, DictProperty, OptionProperty, ListProperty
from kivy.logger import Logger
from kivy.graphics import Color, BorderImage, Canvas
from kivy.core.image import Image
from kivy.resources import resource_find
from kivy.clock import Clock
from os.path import join, splitext, basename
from os import listdir
from json import loads
default_layout_path = join(kivy_data_dir, 'keyboards')
class VKeyboard(Scatter):
'''
VKeyboard is an onscreen keyboard with multitouch support.
Its layout is entirely customizable and you can switch between available
layouts using a button in the bottom right of the widget.
:Events:
`on_key_down`: keycode, internal, modifiers
Fired when the keyboard received a key down event (key press).
`on_key_up`: keycode, internal, modifiers
Fired when the keyboard received a key up event (key release).
'''
target = ObjectProperty(None, allownone=True)
'''Target widget associated with the VKeyboard. If set, it will be used to
send keyboard events. If the VKeyboard mode is "free", it will also be used
to set the initial position.
:attr:`target` is an :class:`~kivy.properties.ObjectProperty` instance and
defaults to None.
'''
callback = ObjectProperty(None, allownone=True)
'''Callback can be set to a function that will be called if the
VKeyboard is closed by the user.
:attr:`callback` is an :class:`~kivy.properties.ObjectProperty` instance and
defaults to None.
'''
layout = StringProperty(None)
'''Layout to use for the VKeyboard. By default, it will be the
layout set in the configuration, according to the `keyboard_layout`
in `[kivy]` section.
.. versionchanged:: 1.8.0
If layout is a .json filename, it will be loaded and added to the
available_layouts.
:attr:`layout` is a :class:`~kivy.properties.StringProperty` and defaults
to None.
'''
layout_path = StringProperty(default_layout_path)
'''Path from which layouts are read.
:attr:`layout_path` is a :class:`~kivy.properties.StringProperty` and
defaults to :file:`<kivy_data_dir>/keyboards/`
'''
available_layouts = DictProperty({})
'''Dictionary of all available layouts. Keys are the layout ID, and the
value is the JSON (translated into a Python object).
:attr:`available_layouts` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
docked = BooleanProperty(False)
'''Indicate whether the VKeyboard is docked on the screen or not. If you
change it, you must manually call :meth:`setup_mode` otherwise it will have
no impact. If the VKeyboard is created by the Window, the docked mode will
be automatically set by the configuration, using the `keyboard_mode` token
in `[kivy]` section.
:attr:`docked` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
margin_hint = ListProperty([.05, .06, .05, .06])
'''Margin hint, used as spacing between keyboard background and keys
content. The margin is composed of four values, between 0 and 1::
margin_hint = [top, right, bottom, left]
The margin hints will be multiplied by width and height, according to their
position.
:attr:`margin_hint` is a :class:`~kivy.properties.ListProperty` and
defaults to [.05, .06, .05, .06]
'''
key_margin = ListProperty([2, 2, 2, 2])
'''Key margin, used to create space between keys. The margin is composed of
four values, in pixels::
key_margin = [top, right, bottom, left]
:attr:`key_margin` is a :class:`~kivy.properties.ListProperty` and defaults
to [2, 2, 2, 2]
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a). If a background is
set, the color will be combined with the background texture.
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
background = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_background')
'''Filename of the background image.
:attr:`background` a :class:`~kivy.properties.StringProperty` and defaults
to :file:`atlas://data/images/defaulttheme/vkeyboard_background`.
'''
background_disabled = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_disabled_background')
'''Filename of the background image when vkeyboard is disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled` is a
:class:`~kivy.properties.StringProperty` and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_disabled_background`.
'''
key_background_color = ListProperty([1, 1, 1, 1])
'''Key background color, in the format (r, g, b, a). If a key background is
set, the color will be combined with the key background texture.
:attr:`key_background_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [1, 1, 1, 1].
'''
key_background_normal = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_normal')
'''Filename of the key background image for use when no touches are active
on the widget.
:attr:`key_background_normal` a :class:`~kivy.properties.StringProperty`
and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_key_normal`.
'''
key_disabled_background_normal = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_normal')
'''Filename of the key background image for use when no touches are active
on the widget and vkeyboard is disabled.
.. versionadded:: 1.8.0
:attr:`key_disabled_background_normal` a
:class:`~kivy.properties.StringProperty` and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_key_normal`.
'''
key_background_down = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_down')
'''Filename of the key background image for use when a touch is active
on the widget.
:attr:`key_background_down` a :class:`~kivy.properties.StringProperty`
and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_key_down`.
'''
background_border = ListProperty([16, 16, 16, 16])
'''Background image border. Used for controlling the
:attr:`~kivy.graphics.vertex_instructions.BorderImage.border` property of
the background.
:attr:`background_border` is a :class:`~kivy.properties.ListProperty` and
defaults to [16, 16, 16, 16]
'''
key_border = ListProperty([8, 8, 8, 8])
'''Key image border. Used for controlling the
:attr:`~kivy.graphics.vertex_instructions.BorderImage.border` property of
the key.
:attr:`key_border` is a :class:`~kivy.properties.ListProperty` and
defaults to [8, 8, 8, 8]
'''
# XXX internal variables
layout_mode = OptionProperty('normal',
options=('normal', 'shift', 'special'))
layout_geometry = DictProperty({})
have_capslock = BooleanProperty(False)
have_shift = BooleanProperty(False)
have_special = BooleanProperty(False)
active_keys = DictProperty({})
font_size = NumericProperty('20dp')
font_name = StringProperty('data/fonts/DejaVuSans.ttf')
repeat_touch = ObjectProperty(allownone=True)
__events__ = ('on_key_down', 'on_key_up', 'on_textinput')
    def __init__(self, **kwargs):
        """Build the keyboard: wire property triggers, load the available
        layouts and create the canvas layers used for drawing keys.
        """
        # XXX move to style.kv
        kwargs.setdefault('size_hint', (None, None))
        kwargs.setdefault('scale_min', .4)
        kwargs.setdefault('scale_max', 1.6)
        kwargs.setdefault('size', (700, 200))
        kwargs.setdefault('docked', False)
        # Clock triggers coalesce several property changes into a single
        # rebuild on the next frame.
        layout_mode = self._trigger_update_layout_mode = Clock.create_trigger(
            self._update_layout_mode)
        layouts = self._trigger_load_layouts = Clock.create_trigger(
            self._load_layouts)
        layout = self._trigger_load_layout = Clock.create_trigger(
            self._load_layout)
        fbind = self.fast_bind
        fbind('docked', self.setup_mode)
        fbind('have_shift', layout_mode)
        fbind('have_capslock', layout_mode)
        fbind('have_special', layout_mode)
        fbind('layout_path', layouts)
        fbind('layout', layout)
        super(VKeyboard, self).__init__(**kwargs)
        # load all the layouts found in the layout_path directory
        self._load_layouts()
        # ensure we have default layouts
        available_layouts = self.available_layouts
        if not available_layouts:
            Logger.critical('VKeyboard: unable to load default layouts')
        # load the default layout from configuration
        if self.layout is None:
            self.layout = Config.get('kivy', 'keyboard_layout')
        else:
            # ensure the current layout is found on the available layout
            self._trigger_load_layout()
        # update layout mode (shift or normal)
        self._trigger_update_layout_mode()
        # create a top layer to draw active keys on
        with self.canvas:
            self.background_key_layer = Canvas()
            self.active_keys_layer = Canvas()
def on_disabled(self, intance, value):
self.refresh_keys()
def _update_layout_mode(self, *l):
# update mode according to capslock and shift key
mode = self.have_capslock != self.have_shift
mode = 'shift' if mode else 'normal'
if self.have_special:
mode = "special"
if mode != self.layout_mode:
self.layout_mode = mode
self.refresh(False)
    def _load_layout(self, *largs):
        """Resolve :attr:`layout`: load it from a .json file if needed and
        fall back to 'qwerty' when the requested layout is unknown.
        """
        # ensure new layouts are loaded first
        if self._trigger_load_layouts.is_triggered:
            self._load_layouts()
            self._trigger_load_layouts.cancel()
        value = self.layout
        available_layouts = self.available_layouts
        # it's a filename, try to load it directly
        if self.layout[-5:] == '.json':
            if value not in available_layouts:
                fn = resource_find(self.layout)
                self._load_layout_fn(fn, self.layout)
        if not available_layouts:
            return
        if value not in available_layouts and value != 'qwerty':
            Logger.error(
                'Vkeyboard: <%s> keyboard layout mentioned in '
                'conf file was not found, fallback on qwerty' %
                value)
            # NOTE: this assignment re-triggers _load_layout via the binding
            # set up in __init__.
            self.layout = 'qwerty'
        self.refresh(True)
def _load_layouts(self, *largs):
# first load available layouts from json files
# XXX fix to be able to reload layout when path is changing
value = self.layout_path
for fn in listdir(value):
self._load_layout_fn(join(value, fn),
basename(splitext(fn)[0]))
def _load_layout_fn(self, fn, name):
available_layouts = self.available_layouts
if fn[-5:] != '.json':
return
with open(fn, 'r') as fd:
json_content = fd.read()
layout = loads(json_content)
available_layouts[name] = layout
def setup_mode(self, *largs):
'''Call this method when you want to readjust the keyboard according to
options: :attr:`docked` or not, with attached :attr:`target` or not:
* If :attr:`docked` is True, it will call :meth:`setup_mode_dock`
* If :attr:`docked` is False, it will call :meth:`setup_mode_free`
Feel free to overload these methods to create new
positioning behavior.
'''
if self.docked:
self.setup_mode_dock()
else:
self.setup_mode_free()
    def setup_mode_dock(self, *largs):
        '''Setup the keyboard in docked mode.
        Dock mode will reset the rotation, disable translation, rotation and
        scale. Scale and position will be automatically adjusted to attach the
        keyboard to the bottom of the screen.
        .. note::
            Don't call this method directly, use :meth:`setup_mode` instead.
        '''
        self.do_translation = False
        self.do_rotation = False
        self.do_scale = False
        self.rotation = 0
        win = self.get_parent_window()
        # stretch the keyboard to span the full window width
        scale = win.width / float(self.width)
        self.scale = scale
        self.pos = 0, 0
        # keep scale/position up to date when the window is resized
        win.bind(on_resize=self._update_dock_mode)
def _update_dock_mode(self, win, *largs):
scale = win.width / float(self.width)
self.scale = scale
self.pos = 0, 0
    def setup_mode_free(self):
        '''Setup the keyboard in free mode.
        Free mode is designed to let the user control the position and
        orientation of the keyboard. The only real usage is for a multiuser
        environment, but you might found other ways to use it.
        If a :attr:`target` is set, it will place the vkeyboard under the
        target.
        .. note::
            Don't call this method directly, use :meth:`setup_mode` instead.
        '''
        self.do_translation = True
        self.do_rotation = True
        self.do_scale = True
        target = self.target
        if not target:
            return
        # NOTE all math will be done in window point of view
        # determine rotation of the target
        a = Vector(1, 0)
        b = Vector(target.to_window(0, 0))
        c = Vector(target.to_window(1, 0)) - b
        # align the keyboard's rotation with the target's x axis
        self.rotation = -a.angle(c)
        # determine the position of center/top of the keyboard
        dpos = Vector(self.to_window(self.width / 2., self.height))
        # determine the position of center/bottom of the target
        cpos = Vector(target.to_window(target.center_x, target.y))
        # the goal now is to map both point, calculate the diff between them
        diff = dpos - cpos
        # we still have an issue, self.pos represent the bounding box,
        # not the 0,0 coordinate of the scatter. we need to apply also
        # the diff between them (inside and outside coordinate matrix).
        # It's hard to explain, but do a scheme on a paper, write all
        # the vector i'm calculating, and you'll understand. :)
        diff2 = Vector(self.x + self.width / 2., self.y + self.height) - \
            Vector(self.to_parent(self.width / 2., self.height))
        diff -= diff2
        # now we have a good "diff", set it as a pos.
        self.pos = -diff
    def change_layout(self):
        # XXX implement popup with all available layouts
        # Intentionally a no-op for now; triggered by the special 'layout'
        # key (see process_key_on).
        pass
    def refresh(self, force=False):
        '''(internal) Recreate the entire widget and graphics according to the
        selected layout.

        If `force` is True, the relative geometry hints are recomputed as
        well (needed when the layout itself changed).
        '''
        self.clear_widgets()
        if force:
            self.refresh_keys_hint()
        self.refresh_keys()
        self.refresh_active_keys_layer()
    def refresh_active_keys_layer(self):
        """Redraw the 'pressed' texture over every currently active key."""
        self.active_keys_layer.clear()
        active_keys = self.active_keys
        layout_geometry = self.layout_geometry
        background = resource_find(self.key_background_down)
        texture = Image(background, mipmap=True).texture
        with self.active_keys_layer:
            Color(1, 1, 1)
            # active_keys maps a touch uid -> (line number, key index)
            for line_nb, index in active_keys.values():
                pos, size = layout_geometry['LINE_%d' % line_nb][index]
                BorderImage(texture=texture, pos=pos, size=size,
                            border=self.key_border)
    def refresh_keys_hint(self):
        """Compute the relative (0..1) position/size of every key and store
        them in :attr:`layout_geometry` under 'LINE_HINT_%d' keys.
        """
        layout = self.available_layouts[self.layout]
        layout_cols = layout['cols']
        layout_rows = layout['rows']
        layout_geometry = self.layout_geometry
        mtop, mright, mbottom, mleft = self.margin_hint
        # get relative EFFICIENT surface of the layout without external margins
        el_hint = 1. - mleft - mright
        eh_hint = 1. - mtop - mbottom
        ex_hint = 0 + mleft
        ey_hint = 0 + mbottom
        # get relative unit surface
        uw_hint = (1. / layout_cols) * el_hint
        uh_hint = (1. / layout_rows) * eh_hint
        layout_geometry['U_HINT'] = (uw_hint, uh_hint)
        # calculate individual key RELATIVE surface and pos (without key
        # margin)
        current_y_hint = ey_hint + eh_hint
        for line_nb in range(1, layout_rows + 1):
            # lines are laid out top to bottom
            current_y_hint -= uh_hint
            # get line_name
            line_name = '%s_%d' % (self.layout_mode, line_nb)
            line_hint = 'LINE_HINT_%d' % line_nb
            layout_geometry[line_hint] = []
            current_x_hint = ex_hint
            # go through the list of keys (tuples of 4)
            for key in layout[line_name]:
                # calculate relative pos, size; key[3] is the key width in
                # layout columns
                layout_geometry[line_hint].append([
                    (current_x_hint, current_y_hint),
                    (key[3] * uw_hint, uh_hint)])
                current_x_hint += key[3] * uw_hint
        self.layout_geometry = layout_geometry
    def refresh_keys(self):
        """Convert the relative key hints into pixel positions/sizes (stored
        under 'LINE_%d' keys) and redraw the keys.
        """
        layout = self.available_layouts[self.layout]
        layout_rows = layout['rows']
        layout_geometry = self.layout_geometry
        w, h = self.size
        kmtop, kmright, kmbottom, kmleft = self.key_margin
        uw_hint, uh_hint = layout_geometry['U_HINT']
        for line_nb in range(1, layout_rows + 1):
            llg = layout_geometry['LINE_%d' % line_nb] = []
            llg_append = llg.append
            for key in layout_geometry['LINE_HINT_%d' % line_nb]:
                x_hint, y_hint = key[0]
                w_hint, h_hint = key[1]
                # scale the relative hints to widget pixels
                kx = x_hint * w
                ky = y_hint * h
                kw = w_hint * w
                kh = h_hint * h
                # now adjust, considering the key margin
                kx = int(kx + kmleft)
                ky = int(ky + kmbottom)
                kw = int(kw - kmleft - kmright)
                kh = int(kh - kmbottom - kmtop)
                pos = (kx, ky)
                size = (kw, kh)
                llg_append((pos, size))
        self.layout_geometry = layout_geometry
        self.draw_keys()
def draw_keys(self):
layout = self.available_layouts[self.layout]
layout_rows = layout['rows']
layout_geometry = self.layout_geometry
layout_mode = self.layout_mode
# draw background
w, h = self.size
background = resource_find(self.background_disabled
if self.disabled else
self.background)
texture = Image(background, mipmap=True).texture
self.background_key_layer.clear()
with self.background_key_layer:
Color(*self.background_color)
BorderImage(texture=texture, size=self.size,
border=self.background_border)
# XXX seperate drawing the keys and the fonts to avoid
# XXX reloading the texture each time
# first draw keys without the font
key_normal = resource_find(self.key_background_disabled_normal
if self.disabled else
self.key_background_normal)
texture = Image(key_normal, mipmap=True).texture
with self.background_key_layer:
for line_nb in range(1, layout_rows + 1):
for pos, size in layout_geometry['LINE_%d' % line_nb]:
BorderImage(texture=texture, pos=pos, size=size,
border=self.key_border)
# then draw the text
# calculate font_size
font_size = int(w) / 46
# draw
for line_nb in range(1, layout_rows + 1):
key_nb = 0
for pos, size in layout_geometry['LINE_%d' % line_nb]:
# retrieve the relative text
text = layout[layout_mode + '_' + str(line_nb)][key_nb][0]
l = Label(text=text, font_size=font_size, pos=pos, size=size,
font_name=self.font_name)
self.add_widget(l)
key_nb += 1
    def on_key_down(self, *largs):
        """Default handler for the `on_key_down` event (no-op)."""
        pass
    def on_key_up(self, *largs):
        """Default handler for the `on_key_up` event (no-op)."""
        pass
    def on_textinput(self, *largs):
        """Default handler for the `on_textinput` event (no-op)."""
        pass
def get_key_at_pos(self, x, y):
w, h = self.size
x_hint = x / w
# focus on the surface without margins
layout_geometry = self.layout_geometry
layout = self.available_layouts[self.layout]
layout_rows = layout['rows']
mtop, mright, mbottom, mleft = self.margin_hint
# get the line of the layout
e_height = h - (mbottom + mtop) * h # efficient height in pixels
line_height = e_height / layout_rows # line height in px
y = y - mbottom * h
line_nb = layout_rows - int(y / line_height)
if line_nb > layout_rows:
line_nb = layout_rows
if line_nb < 1:
line_nb = 1
# get the key within the line
key_index = ''
current_key_index = 0
for key in layout_geometry['LINE_HINT_%d' % line_nb]:
if x_hint >= key[0][0] and x_hint < key[0][0] + key[1][0]:
key_index = current_key_index
break
else:
current_key_index += 1
if key_index == '':
return None
# get the full character
key = layout['%s_%d' % (self.layout_mode, line_nb)][key_index]
return [key, (line_nb, key_index)]
def collide_margin(self, x, y):
'''Do a collision test, and return True if the (x, y) is inside the
vkeyboard margin.
'''
mtop, mright, mbottom, mleft = self.margin_hint
x_hint = x / self.width
y_hint = y / self.height
if x_hint > mleft and x_hint < 1. - mright \
and y_hint > mbottom and y_hint < 1. - mtop:
return False
return True
    def process_key_on(self, touch):
        """Handle a touch landing on a key: update modifier state, dispatch
        the key-down (or textinput) event and mark the key as active.
        """
        x, y = self.to_local(*touch.pos)
        key = self.get_key_at_pos(x, y)
        if not key:
            return
        key_data = key[0]
        displayed_char, internal, special_char, size = key_data
        line_nb, key_index = key[1]
        # save pressed key on the touch
        ud = touch.ud[self.uid] = {}
        ud['key'] = key
        # for caps lock or shift only:
        uid = touch.uid
        if special_char is not None:
            # Do not repeat special keys
            if special_char in ('capslock', 'shift', 'layout', 'special'):
                Clock.unschedule(self._start_repeat_key)
                self.repeat_touch = None
            if special_char == 'capslock':
                self.have_capslock = not self.have_capslock
                # capslock stays highlighted under the fixed uid -1 until
                # released (see process_key_up)
                uid = -1
            elif special_char == 'shift':
                self.have_shift = True
            elif special_char == 'special':
                self.have_special = True
            elif special_char == 'layout':
                self.change_layout()
        # send info to the bus
        b_keycode = special_char
        b_modifiers = self._get_modifiers()
        # SDL2 windows receive printable characters through on_textinput
        # instead of on_key_down
        if self.get_parent_window().__class__.__module__ == \
                'kivy.core.window.window_sdl2' and internal:
            self.dispatch('on_textinput', internal)
        else:
            self.dispatch('on_key_down', b_keycode, internal, b_modifiers)
        # save key as an active key for drawing
        self.active_keys[uid] = key[1]
        self.refresh_active_keys_layer()
    def process_key_up(self, touch):
        """Handle a touch being released: dispatch key-up, clear modifier
        state and un-highlight the key.
        """
        uid = touch.uid
        if self.uid not in touch.ud:
            return
        # save pressed key on the touch
        # ud['key'] is [key_data, (line_nb, key_index)] (see process_key_on)
        key_data, key = touch.ud[self.uid]['key']
        displayed_char, internal, special_char, size = key_data
        # send info to the bus
        b_keycode = special_char
        b_modifiers = self._get_modifiers()
        self.dispatch('on_key_up', b_keycode, internal, b_modifiers)
        if special_char == 'capslock':
            # capslock was stored under the fixed uid -1
            uid = -1
        if uid in self.active_keys:
            self.active_keys.pop(uid, None)
            if special_char == 'shift':
                self.have_shift = False
            elif special_char == 'special':
                self.have_special = False
            if special_char == 'capslock' and self.have_capslock:
                # keep capslock highlighted while it is latched
                self.active_keys[-1] = key
        self.refresh_active_keys_layer()
def _get_modifiers(self):
ret = []
if self.have_shift:
ret.append('shift')
if self.have_capslock:
ret.append('capslock')
return ret
    def _start_repeat_key(self, *kwargs):
        # fire the held key every 50ms until the touch is released
        Clock.schedule_interval(self._repeat_key, 0.05)
    def _repeat_key(self, *kwargs):
        # re-emit the key currently held by the repeat touch
        self.process_key_on(self.repeat_touch)
    def on_touch_down(self, touch):
        """Grab touches landing on keys (starting key repeat); touches on
        the margin are passed to Scatter for move/scale/rotate.
        """
        x, y = touch.pos
        if not self.collide_point(x, y):
            return
        if self.disabled:
            return True
        x, y = self.to_local(x, y)
        if not self.collide_margin(x, y):
            if self.repeat_touch is None:
                # after holding a key for 0.5s, start repeating it
                Clock.schedule_once(self._start_repeat_key, 0.5)
            self.repeat_touch = touch
            self.process_key_on(touch)
            touch.grab(self, exclusive=True)
        else:
            super(VKeyboard, self).on_touch_down(touch)
        return True
    def on_touch_up(self, touch):
        """Release a grabbed key touch and stop any pending key repeat."""
        if touch.grab_current is self:
            self.process_key_up(touch)
            Clock.unschedule(self._start_repeat_key)
            if touch == self.repeat_touch:
                Clock.unschedule(self._repeat_key)
                self.repeat_touch = None
        return super(VKeyboard, self).on_touch_up(touch)
if __name__ == '__main__':
    from kivy.base import runTouchApp
    # manual smoke test: display a standalone keyboard with the azerty layout
    vk = VKeyboard(layout='azerty')
    runTouchApp(vk)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import tempfile
import threading
import zlib
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.contrib.tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.platform import resource_loader
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
from tensorflow.tensorboard.plugins import REGISTERED_PLUGINS
class TensorboardServerTest(tf.test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
    def setUp(self):
        """Generate test data, start a TensorBoard server on an unused port
        and open an HTTP connection to it.
        """
        temp_dir = self._GenerateTestData()
        self._multiplexer = event_multiplexer.EventMultiplexer(
            size_guidance=server.TENSORBOARD_SIZE_GUIDANCE)
        server.ReloadMultiplexer(self._multiplexer, {temp_dir: None})
        # 0 to pick an unused port.
        self._server = server.BuildServer(
            self._multiplexer, 'localhost', 0, '/foo/logdir/argument')
        # daemon thread so a hung server cannot keep the test process alive
        self._server_thread = threading.Thread(target=self._server.serve_forever)
        self._server_thread.daemon = True
        self._server_thread.start()
        self._connection = http_client.HTTPConnection(
            'localhost', self._server.server_address[1])
    def tearDown(self):
        """Close the client connection and shut the server down cleanly."""
        self._connection.close()
        self._server.shutdown()
        self._server.server_close()
def _get(self, path, headers={}):
"""Perform a GET request for the given path."""
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
    def _getJson(self, path):
        """Perform a GET request and decode the result as JSON."""
        self._connection.request('GET', path)
        response = self._connection.getresponse()
        self.assertEqual(response.status, 200)
        data = response.read()
        # the server may gzip responses; transparently decompress
        if response.getheader('Content-Encoding') == 'gzip':
            data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
        return json.loads(data.decode('utf-8'))
    def testBasicStartup(self):
        """Start the server up and then shut it down immediately."""
        # setUp/tearDown do all the work; an empty body is sufficient.
        pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': '/foo/logdir/argument'})
    def testRuns(self):
        """Test the format of the /data/runs endpoint."""
        # run1 and its tags are created by _GenerateTestData.
        run_json = self._getJson('/data/runs')
        # Don't check the actual timestamp since it's time-dependent.
        self.assertTrue(isinstance(run_json['run1']['firstEventTimestamp'],
                                   numbers.Number))
        del run_json['run1']['firstEventTimestamp']
        self.assertEqual(run_json, {'run1': {
            'compressedHistograms': ['histogram'],
            'scalars': ['simple_values'],
            'histograms': ['histogram'],
            'images': ['image'],
            'audio': ['audio'],
            # if only_use_meta_graph, the graph is extracted from the metagraph
            'graph': True,
            'meta_graph': self._only_use_meta_graph,
            'run_metadata': ['test run']}})
    def testApplicationPaths_getCached(self):
        """Application (non-data) paths should be served with a one-hour
        private Cache-Control header.
        """
        for path in ('/',):  # TODO(jart): '/app.js' in open source
            connection = http_client.HTTPConnection(
                'localhost', self._server.server_address[1])
            connection.request('GET', path)
            response = connection.getresponse()
            self.assertEqual(response.status, 200, msg=path)
            self.assertEqual(response.getheader('Cache-Control'),
                             'private, max-age=3600', msg=path)
            connection.close()
    def testDataPaths_disableAllCaching(self):
        """Data paths should be served with caching disabled (Expires: 0)."""
        for path in ('/data/runs',
                     '/data/logdir',
                     '/data/scalars?run=run1&tag=simple_values',
                     '/data/scalars?run=run1&tag=simple_values&format=csv',
                     '/data/images?run=run1&tag=image',
                     '/data/individualImage?run=run1&tag=image&index=0',
                     '/data/audio?run=run1&tag=audio',
                     '/data/run_metadata?run=run1&tag=test%20run'):
            connection = http_client.HTTPConnection(
                'localhost', self._server.server_address[1])
            connection.request('GET', path)
            response = connection.getresponse()
            self.assertEqual(response.status, 200, msg=path)
            self.assertEqual(response.getheader('Expires'), '0', msg=path)
            # drain the body before closing the connection
            response.read()
            connection.close()
    def testHistograms(self):
        """Test the format of /data/histograms."""
        # Each entry is [wall_time, step, histogram] with the histogram
        # values matching the HistogramProto written by _GenerateTestData.
        self.assertEqual(
            self._getJson('/data/histograms?tag=histogram&run=run1'),
            [[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
    def testSampleScalars(self):
        """Test the sample_count parameter of /data/scalars."""
        for i in xrange(10, self._SCALAR_COUNT, 10):
            samples = self._getJson('/data/scalars?sample_count=%d' % i)
            values = samples['run1']['simple_values']
            # Verify that we got the right amount of values and that we got the
            # endpoints.
            self.assertEqual(len(values), i)
            # each value is [wall_time, step, value] (see _GenerateTestData)
            self.assertEqual(values[0], [100, 10, 1])
            self.assertEqual(values[-1], [9900, 990, 99])
def testSampleScalarsWithLargeSampleCount(self):
"""Test using a large sample_count."""
samples = self._getJson('/data/scalars?sample_count=999999')
values = samples['run1']['simple_values']
self.assertEqual(len(values), self._SCALAR_COUNT)
    def testImages(self):
        """Test listing images and retrieving an individual image."""
        image_json = self._getJson('/data/images?tag=image&run=run1')
        image_query = image_json[0]['query']
        # We don't care about the format of the image query.
        del image_json[0]['query']
        self.assertEqual(image_json, [{
            'wall_time': 0,
            'step': 0,
            'height': 1,
            'width': 1
        }])
        # the query string must be usable to fetch the actual image bytes
        response = self._get('/data/individualImage?%s' % image_query)
        self.assertEqual(response.status, 200)
    def testAudio(self):
        """Test listing audio and retrieving an individual audio clip."""
        audio_json = self._getJson('/data/audio?tag=audio&run=run1')
        audio_query = audio_json[0]['query']
        # We don't care about the format of the audio query.
        del audio_json[0]['query']
        self.assertEqual(audio_json, [{
            'wall_time': 0,
            'step': 0,
            'content_type': 'audio/wav'
        }])
        # the query string must be usable to fetch the actual audio bytes
        response = self._get('/data/individualAudio?%s' % audio_query)
        self.assertEqual(response.status, 200)
    def testGraph(self):
        """Test retrieving the graph definition."""
        response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                             '&large_attrs_key=_very_large_attrs')
        self.assertEqual(response.status, 200)
        graph_pbtxt = response.read()
        # Parse the graph from pbtxt into a graph message.
        graph = tf.GraphDef()
        graph = text_format.Parse(graph_pbtxt, graph)
        # _GenerateTestData writes a two-node graph ('a' and 'b').
        self.assertEqual(len(graph.node), 2)
        self.assertEqual(graph.node[0].name, 'a')
        self.assertEqual(graph.node[1].name, 'b')
        # Make sure the second node has an attribute that was filtered out because
        # it was too large and was added to the "too large" attributes list.
        self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
        self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
                         [b'very_large_attr'])
def testProjectorRunsWithEmbeddings(self):
"""Test the format of /runs endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
run_json = self._getJson('/data/plugin/projector/runs')
self.assertEqual(run_json, ['run1'])
    def testProjectorInfo(self):
        """Test the format of /info endpoint in projector."""
        if 'projector' not in REGISTERED_PLUGINS:
            return
        info_json = self._getJson('/data/plugin/projector/info?run=run1')
        # assertItemsEqual: the order of embeddings is not significant
        self.assertItemsEqual(info_json['embeddings'], [
            {
                'tensorShape': [1, 2],
                'tensorName': 'var1'
            },
            {
                'tensorShape': [10, 10],
                'tensorName': 'var2'
            },
            {
                'tensorShape': [100, 100],
                'tensorName': 'var3'
            }
        ])
def testProjectorTensor(self):
"""Test the format of /tensor endpoint in projector."""
if 'projector' not in REGISTERED_PLUGINS:
return
tensor_tsv = (self._get('/data/plugin/projector/tensor?run=run1&name=var1')
.read())
self.assertEqual(tensor_tsv, b'6.0\t6.0')
    def testAcceptGzip_compressesResponse(self):
        """Graph responses should be gzipped when the client accepts gzip."""
        response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                             '&large_attrs_key=_very_large_attrs',
                             {'Accept-Encoding': 'gzip'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
        # the decompressed body must still be a parseable two-node graph
        pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
        graph = text_format.Parse(pbtxt, tf.GraphDef())
        self.assertEqual(len(graph.node), 2)
    def testAcceptAnyEncoding_compressesResponse(self):
        """`Accept-Encoding: *` should also produce a gzipped response."""
        response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                             '&large_attrs_key=_very_large_attrs',
                             {'Accept-Encoding': '*'})
        self.assertEqual(response.status, 200)
        self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
        pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
        graph = text_format.Parse(pbtxt, tf.GraphDef())
        self.assertEqual(len(graph.node), 2)
    def testAcceptDoodleEncoding_doesNotCompressResponse(self):
        """An unknown Accept-Encoding should yield an uncompressed response."""
        response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                             '&large_attrs_key=_very_large_attrs',
                             {'Accept-Encoding': 'doodle'})
        self.assertEqual(response.status, 200)
        self.assertIsNone(response.getheader('Content-Encoding'))
        graph = text_format.Parse(response.read(), tf.GraphDef())
        self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
    def testRunMetadata(self):
        """Test retrieving the run metadata information."""
        response = self._get('/data/run_metadata?run=run1&tag=test%20run')
        self.assertEqual(response.status, 200)
        run_metadata_pbtxt = response.read()
        # Parse from pbtxt into a message.
        run_metadata = tf.RunMetadata()
        text_format.Parse(run_metadata_pbtxt, run_metadata)
        # _GenerateTestData records a single device named 'test device'
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
        self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
  def _GenerateTestData(self):
    """Generates the test data directory.

    The test data has a single run named run1 which contains:
     - a histogram
     - an image at timestamp and step 0
     - an (empty) audio clip at timestamp and step 0
     - scalar events containing the value i at step 10 * i and wall time
       100 * i, for i in [1, _SCALAR_COUNT]
     - a graph definition (served as a MetaGraphDef instead when
       self._only_use_meta_graph is set)
     - run metadata recorded under the tag 'test run'
     - projector test data, when the projector plugin is registered

    Returns:
      temp_dir: The directory the test data is generated under.
    """
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir)
    run1_path = os.path.join(temp_dir, 'run1')
    os.makedirs(run1_path)
    writer = tf.train.SummaryWriter(run1_path)
    histogram_value = tf.HistogramProto(min=0,
                                        max=2,
                                        num=3,
                                        sum=6,
                                        sum_squares=5,
                                        bucket_limit=[0, 1, 2],
                                        bucket=[1, 1, 1])
    # Add a simple graph event. Node 'b' carries a deliberately large
    # attribute so the limit_attr_size/large_attrs_key handling is exercised.
    graph_def = tf.GraphDef()
    node1 = graph_def.node.add()
    node1.name = 'a'
    node2 = graph_def.node.add()
    node2.name = 'b'
    node2.attr['very_large_attr'].s = b'a' * 2048  # 2 KB attribute
    meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
    if self._only_use_meta_graph:
      writer.add_meta_graph(meta_graph_def)
    else:
      writer.add_graph(graph_def)
    # Add a simple run metadata event.
    run_metadata = tf.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')
    # 1x1 transparent GIF.
    encoded_image = base64.b64decode(
        'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
    image_value = tf.Summary.Image(height=1,
                                   width=1,
                                   colorspace=1,
                                   encoded_image_string=encoded_image)
    audio_value = tf.Summary.Audio(sample_rate=44100,
                                   length_frames=22050,
                                   num_channels=2,
                                   encoded_audio_string=b'',
                                   content_type='audio/wav')
    writer.add_event(tf.Event(wall_time=0,
                              step=0,
                              summary=tf.Summary(value=[
                                  tf.Summary.Value(tag='histogram',
                                                   histo=histogram_value),
                                  tf.Summary.Value(tag='image',
                                                   image=image_value),
                                  tf.Summary.Value(tag='audio',
                                                   audio=audio_value)
                              ])))
    # Write _SCALAR_COUNT simple scalar values.
    for i in xrange(1, self._SCALAR_COUNT + 1):
      writer.add_event(tf.Event(
          # We use different values for wall time, step, and the value so we can
          # tell them apart.
          wall_time=100 * i,
          step=10 * i,
          summary=tf.Summary(value=[tf.Summary.Value(tag='simple_values',
                                                     simple_value=i)])))
    writer.flush()
    writer.close()
    if 'projector' in REGISTERED_PLUGINS:
      self._GenerateProjectorTestData(run1_path)
    return temp_dir
def _GenerateProjectorTestData(self, run_path):
# Write a projector config file in run1.
config_path = os.path.join(run_path, 'projector_config.pbtxt')
config = ProjectorConfig()
config_pbtxt = text_format.MessageToString(config)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_pbtxt)
# Write a checkpoint with some dummy variables.
with tf.Graph().as_default():
sess = tf.Session()
checkpoint_path = os.path.join(run_path, 'model')
tf.get_variable(
'var1', [1, 2], initializer=tf.constant_initializer(6.0))
tf.get_variable('var2', [10, 10])
tf.get_variable('var3', [100, 100])
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
saver.save(sess, checkpoint_path)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
  """Re-runs every TensorboardServerTest case with only a MetaGraphDef.

  Flipping this flag makes _GenerateTestData call add_meta_graph instead of
  add_graph, so the whole suite exercises the MetaGraphDef-only code path.
  """
  # Tests new ability to use only the MetaGraphDef
  _only_use_meta_graph = True  # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
  """Unit tests for server.ParseEventFilesSpec logdir-string parsing."""

  def _assertParsesTo(self, spec, want):
    """Asserts that parsing `spec` yields exactly the mapping `want`."""
    self.assertEqual(server.ParseEventFilesSpec(spec), want)

  def testRunName(self):
    """A leading 'name:' prefix becomes the run name for the path."""
    self._assertParsesTo('lol:/cat', {'/cat': 'lol'})

  def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
    """A colon embedded after a slash stays part of the path itself."""
    self._assertParsesTo('/lol:/cat', {'/lol:/cat': None})

  def testMultipleDirectories(self):
    """Comma-separated entries each become their own key."""
    self._assertParsesTo('/a,/b', {'/a': None, '/b': None})

  def testNormalizesPaths(self):
    """'.' and '..' segments are collapsed for filesystem paths."""
    self._assertParsesTo('/lol/.//cat/../cat', {'/lol/cat': None})

  def testAbsolutifies(self):
    """Relative paths are resolved against the current directory."""
    self._assertParsesTo('lol/cat', {os.path.realpath('lol/cat'): None})

  def testRespectsGCSPath(self):
    """gs:// URLs pass through untouched."""
    self._assertParsesTo('gs://foo/path', {'gs://foo/path': None})

  def testDoesNotExpandUserInGCSPath(self):
    """'~' inside a gs:// URL is literal, not a home-directory reference."""
    self._assertParsesTo('gs://~/foo/path', {'gs://~/foo/path': None})

  def testDoesNotNormalizeGCSPath(self):
    """'.' and '..' segments inside gs:// URLs are preserved verbatim."""
    self._assertParsesTo('gs://foo/./path//..', {'gs://foo/./path//..': None})
class TensorBoardAssetsTest(tf.test.TestCase):
  """Checks that static build assets are bundled with the package."""

  def testTagFound(self):
    """The tensorboard/TAG resource must exist and be non-empty."""
    contents = resource_loader.load_resource('tensorboard/TAG')
    self.assertTrue(contents)
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  tf.test.main()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Relax all survey columns of ``Mitigacion`` to allow NULL values.

    Originally auto-generated by South; the 34 near-identical
    ``db.alter_column`` calls have been collapsed into a field table plus
    loops. Each alteration touches a distinct column of the same table, so
    iteration order has no inter-dependency and behavior is unchanged.
    """

    # (column name, field class path) for every altered column. All are
    # IntegerFields except 'puntaje', which is a FloatField. Order matches
    # the original generated migration.
    _TABLE = u'vulnerabilidades_finca_mitigacion'
    _FIELDS = (
        ('forma_organizada', 'django.db.models.fields.IntegerField'),
        ('almacenamiento', 'django.db.models.fields.IntegerField'),
        ('registro_monitoreo', 'django.db.models.fields.IntegerField'),
        ('elaborar', 'django.db.models.fields.IntegerField'),
        ('plan_manejo', 'django.db.models.fields.IntegerField'),
        ('plan_negocio', 'django.db.models.fields.IntegerField'),
        ('infraestructura', 'django.db.models.fields.IntegerField'),
        ('como_realiza', 'django.db.models.fields.IntegerField'),
        ('puntaje', 'django.db.models.fields.FloatField'),
        ('cada_cuanto', 'django.db.models.fields.IntegerField'),
        ('recursos', 'django.db.models.fields.IntegerField'),
        ('contrato', 'django.db.models.fields.IntegerField'),
        ('plan_inversion', 'django.db.models.fields.IntegerField'),
        ('monitoreo_plagas', 'django.db.models.fields.IntegerField'),
        ('tipo_certificado', 'django.db.models.fields.IntegerField'),
        ('reconocida', 'django.db.models.fields.IntegerField'),
        ('certificado', 'django.db.models.fields.IntegerField'),
    )

    def forwards(self, orm):
        """Makes every listed Mitigacion column nullable (null=True)."""
        for name, field_path in self._FIELDS:
            db.alter_column(self._TABLE, name,
                            self.gf(field_path)(null=True))

    def backwards(self, orm):
        """Reverts the columns to non-nullable (default=None, as generated)."""
        for name, field_path in self._FIELDS:
            db.alter_column(self._TABLE, name,
                            self.gf(field_path)(default=None))

    # Frozen ORM snapshot generated by South; kept verbatim.
    models = {
        u'encuesta.duenofinca': {
            'Meta': {'object_name': 'DuenoFinca'},
            'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.encuesta': {
            'Meta': {'object_name': 'Encuesta'},
            'altitud': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'beneficiarios': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['encuesta.Organizacion']", 'null': 'True', 'blank': 'True'}),
            'cedula': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'dueno': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.DuenoFinca']"}),
            'fecha': ('django.db.models.fields.DateField', [], {}),
            'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Entrevistado']"}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42', 'null': 'True', 'blank': 'True'}),
            'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}),
            'sexo': ('django.db.models.fields.IntegerField', [], {})
        },
        u'encuesta.entrevistado': {
            'Meta': {'object_name': 'Entrevistado'},
            'fecha_nacimiento': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.organizacion': {
            'Meta': {'object_name': 'Organizacion'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'encuesta.recolector': {
            'Meta': {'object_name': 'Recolector'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'lugar.comunidad': {
            'Meta': {'object_name': 'Comunidad'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        u'lugar.departamento': {
            'Meta': {'object_name': 'Departamento'},
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.municipio': {
            'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
            'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
            'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
        },
        u'lugar.pais': {
            'Meta': {'object_name': 'Pais'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'vulnerabilidades_finca.causa': {
            'Meta': {'object_name': 'Causa'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'vulnerabilidades_finca.elclima': {
            'Meta': {'object_name': 'ElClima'},
            'clima': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vulnerabilidades_finca.TipoClima']"}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'fecha': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'fecha'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.TipoYear']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'vulnerabilidades_finca.faltarecurso': {
            'Meta': {'object_name': 'FaltaRecurso'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        },
        u'vulnerabilidades_finca.lasplagas': {
            'Meta': {'object_name': 'LasPlagas'},
            'antracnosis': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'antracnosis'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"}),
            'broca': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'broca'", 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'fecha': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'gallo': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'gallo'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"}),
            'hierro': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'hierro'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nematodos': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'nematodos'", 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"}),
            'roya': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'roya'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Plagas']"})
        },
        u'vulnerabilidades_finca.mitigacion': {
            'Meta': {'object_name': 'Mitigacion'},
            'almacenamiento': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'cada_cuanto': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'certificado': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'como_realiza': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'contrato': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'elaborar': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'falta_recurso': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['vulnerabilidades_finca.FaltaRecurso']", 'symmetrical': 'False'}),
            'forma_organizada': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'infraestructura': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'monitoreo_plagas': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'plan_inversion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'plan_manejo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'plan_negocio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'puntaje': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'reconocida': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'recursos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'registro_monitoreo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'tipo_certificado': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'vulnerabilidades_finca.opciones': {
            'Meta': {'object_name': 'Opciones'},
            'causa': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vulnerabilidades_finca.Causa']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'vulnerabilidades_finca.otroriesgos': {
            'Meta': {'object_name': 'OtroRiesgos'},
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'motivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vulnerabilidades_finca.Opciones']"}),
            'respuesta': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['vulnerabilidades_finca.Respuestas']", 'null': 'True', 'blank': 'True'})
        },
        u'vulnerabilidades_finca.plagas': {
            'Meta': {'object_name': 'Plagas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        },
        u'vulnerabilidades_finca.respuestas': {
            'Meta': {'object_name': 'Respuestas'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'vulnerabilidades_finca.suelofertilidad': {
            'Meta': {'object_name': 'SueloFertilidad'},
            'abundancia': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'conservacion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'degrados': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'drenaje': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
            'fertil': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'fertilidad': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'fertilizacion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'foliar': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'materia_organica': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'obra_conservacion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'pendiente': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'preparan': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'presencia': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'profundidad': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'textura': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'vulnerabilidades_finca.tipoclima': {
            'Meta': {'object_name': 'TipoClima'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        },
        u'vulnerabilidades_finca.tipoyear': {
            'Meta': {'object_name': 'TipoYear'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '150'})
        }
    }

    complete_apps = ['vulnerabilidades_finca']
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic placeholder for deserialized response bodies.
T = TypeVar('T')
# Alias for arbitrary JSON-serializable request payloads.
JSONType = Any
# Optional per-call callback invoked with (pipeline response, deserialized
# body, response headers); mirrors the azure-core `cls` convention.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer used to format/validate URL, query and header parameters.
_SERIALIZER = Serializer()
# Client-side constraint validation is performed by the generated operations
# themselves, so it is disabled on the serializer.
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Builds the GET request that lists the agent pools of a managed cluster.

    Path arguments are serialized and validated; caller-supplied ``params``
    and ``headers`` kwargs are merged in before the required ones are set.
    """
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    })

    # Query string and headers: required values override caller-supplied ones.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Builds the GET request for a single agent pool of a managed cluster."""
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    })

    # Query string and headers: required values override caller-supplied ones.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Builds the initial PUT request that creates or updates an agent pool.

    The body is passed through either ``json`` or ``content``; a Content-Type
    header is emitted only when the caller supplied ``content_type``.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    })

    # Query string and headers: required values override caller-supplied ones.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, json=json, content=content, **kwargs)
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Builds the initial DELETE request for an agent pool of a managed cluster."""
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    })

    # Query string and headers: required values override caller-supplied ones.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
def build_get_upgrade_profile_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Builds the GET request for an agent pool's default upgrade profile."""
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against the validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    })

    # Query string and headers: required values override caller-supplied ones.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_available_agent_pool_versions_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing available agent-pool Kubernetes versions.

    :param subscription_id: The target Azure subscription id.
    :param resource_group_name: The name of the resource group.
    :param resource_name: The name of the managed cluster resource.
    :return: An :class:`~azure.core.rest.HttpRequest` ready for the pipeline.
    """
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against serializer-validated path arguments.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    _url = _format_url_section(_url, **_path_args)

    # Query string: only the api-version parameter.
    _query = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: the service replies with JSON.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_query,
        headers=_headers,
        **kwargs
    )
def build_upgrade_node_image_version_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that starts a node-image-version upgrade LRO.

    :param subscription_id: The target Azure subscription id.
    :param resource_group_name: The name of the resource group.
    :param resource_name: The name of the managed cluster resource.
    :param agent_pool_name: The name of the agent pool.
    :return: An :class:`~azure.core.rest.HttpRequest` ready for the pipeline.
    """
    api_version = "2022-01-02-preview"
    accept = "application/json"

    # Resolve the URL template against serializer-validated path arguments.
    _url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion')
    _path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    _url = _format_url_section(_url, **_path_args)

    # Query string: only the api-version parameter.
    _query = kwargs.pop("params", {})  # type: Dict[str, Any]
    _query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: the service replies with JSON.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=_url,
        params=_query,
        headers=_headers,
        **kwargs
    )
class AgentPoolsOperations(object):
    """AgentPoolsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2022_01_02_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client, serializers, and client configuration are injected
        # by the generated service client; never constructed here.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> Iterable["_models.AgentPoolListResult"]:
        """Gets a list of agent pools in the specified managed cluster.

        Gets a list of agent pools in the specified managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPoolListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # reuse the service-provided nextLink verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # nextLink is a complete URL; force a plain GET on it.
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Split one page into (next-page link, iterator of items).
            deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page and fail fast on anything other than HTTP 200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> "_models.AgentPool":
        """Gets the specified managed cluster agent pool.

        Gets the specified managed cluster agent pool.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AgentPool, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPool"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AgentPool', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'}  # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        parameters: "_models.AgentPool",
        **kwargs: Any
    ) -> "_models.AgentPool":
        # Issues the initial PUT of the create-or-update LRO; the poller in
        # begin_create_or_update drives the operation to completion.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPool"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'AgentPool')

        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry an AgentPool body.
        if response.status_code == 200:
            deserialized = self._deserialize('AgentPool', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('AgentPool', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'}  # type: ignore

    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        parameters: "_models.AgentPool",
        **kwargs: Any
    ) -> LROPoller["_models.AgentPool"]:
        """Creates or updates an agent pool in the specified managed cluster.

        Creates or updates an agent pool in the specified managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool.
        :type agent_pool_name: str
        :param parameters: The agent pool to create or update.
        :type parameters: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPool"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x makes the initial call hand back the raw
            # pipeline response, which the poller needs to track the LRO.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('AgentPool', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the initial DELETE of the delete LRO; no response body is
        # expected (202 accepted / 204 already gone).
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'}  # type: ignore

    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes an agent pool in the specified managed cluster.

        Deletes an agent pool in the specified managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x returns the raw pipeline response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; implicitly returns None unless a custom
            # cls callback was supplied.
            if cls:
                return cls(pipeline_response, None, {})


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'}  # type: ignore

    @distributed_trace
    def get_upgrade_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> "_models.AgentPoolUpgradeProfile":
        """Gets the upgrade profile for an agent pool.

        Gets the upgrade profile for an agent pool.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AgentPoolUpgradeProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolUpgradeProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPoolUpgradeProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_upgrade_profile_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            template_url=self.get_upgrade_profile.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'}  # type: ignore

    @distributed_trace
    def get_available_agent_pool_versions(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.AgentPoolAvailableVersions":
        """Gets a list of supported Kubernetes versions for the specified agent pool.

        See `supported Kubernetes versions
        <https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
        the version lifecycle.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AgentPoolAvailableVersions, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPoolAvailableVersions
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPoolAvailableVersions"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_available_agent_pool_versions_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            template_url=self.get_available_agent_pool_versions.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'}  # type: ignore

    def _upgrade_node_image_version_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> Optional["_models.AgentPool"]:
        # Issues the initial POST of the node-image-upgrade LRO.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.AgentPool"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_upgrade_node_image_version_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            agent_pool_name=agent_pool_name,
            template_url=self._upgrade_node_image_version_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        response_headers = {}
        if response.status_code == 202:
            # 202: the upgrade was accepted; record the async-operation URL
            # and deserialize the returned pool snapshot.
            response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))

            deserialized = self._deserialize('AgentPool', pipeline_response)
            # NOTE(review): on a plain 200 response `deserialized` stays None
            # here — confirm this matches the service's 200 payload contract.

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    _upgrade_node_image_version_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'}  # type: ignore

    @distributed_trace
    def begin_upgrade_node_image_version(
        self,
        resource_group_name: str,
        resource_name: str,
        agent_pool_name: str,
        **kwargs: Any
    ) -> LROPoller["_models.AgentPool"]:
        """Upgrades the node image version of an agent pool to the latest.

        Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
        the nodes. AKS provides one new image per week with the latest updates. For more details on
        node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param agent_pool_name: The name of the agent pool.
        :type agent_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either AgentPool or the result of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2022_01_02_preview.models.AgentPool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AgentPool"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda x,y,z: x returns the raw pipeline response for the poller.
            raw_result = self._upgrade_node_image_version_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                agent_pool_name=agent_pool_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response, surfacing the
            # Azure-AsyncOperation header alongside the body.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))

            deserialized = self._deserialize('AgentPool', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_upgrade_node_image_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'}  # type: ignore
| |
# pylint: disable=too-many-lines
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import collections
from contextlib import suppress
from datetime import timedelta
import logging
import hashlib
from random import SystemRandom
import aiohttp
from aiohttp import web
import async_timeout
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)

DOMAIN = 'camera'
# Served over the HTTP component's aiohttp app.
DEPENDENCIES = ['http']
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + '.{}'

# Possible camera states.
STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'

# URL template for the image proxy; token grants unauthenticated access.
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'

# How often each camera's access token is rotated.
TOKEN_CHANGE_INTERVAL = timedelta(minutes=5)
# Cryptographically strong RNG for generating access tokens.
_RND = SystemRandom()
@asyncio.coroutine
def async_get_image(hass, entity_id, timeout=10):
    """Fetch an image from a camera entity.

    :param hass: Home Assistant core instance.
    :param entity_id: The camera entity to grab an image from.
    :param timeout: Seconds to wait for the proxy response.
    :return: Raw image bytes.
    :raises HomeAssistantError: if the entity is missing, the proxy
        returns a non-200 status, or the request times out / fails.
    """
    websession = async_get_clientsession(hass)
    state = hass.states.get(entity_id)

    if state is None:
        # Fixed grammar of the user-facing error ("for grab a image").
        raise HomeAssistantError(
            "No entity '{0}' to grab an image from".format(entity_id))

    # The entity picture attribute points at the camera proxy endpoint
    # (including the current access token).
    url = "{0}{1}".format(
        hass.config.api.base_url,
        state.attributes.get(ATTR_ENTITY_PICTURE)
    )

    try:
        with async_timeout.timeout(timeout, loop=hass.loop):
            response = yield from websession.get(url)

            if response.status != 200:
                raise HomeAssistantError("Error {0} on {1}".format(
                    response.status, url))

            image = yield from response.read()
            return image

    except (asyncio.TimeoutError, aiohttp.ClientError):
        raise HomeAssistantError("Can't connect to {0}".format(url))
@asyncio.coroutine
def async_setup(hass, config):
    """Set up the camera component and its HTTP endpoints."""
    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)

    # Register the proxy views before platforms are loaded.
    for view in (CameraImageView(component.entities),
                 CameraMjpegStream(component.entities)):
        hass.http.register_view(view)

    yield from component.async_setup(config)

    @callback
    def _rotate_tokens(now):
        """Refresh the access token of every camera entity."""
        for camera in component.entities.values():
            camera.async_update_token()
            hass.async_add_job(camera.async_update_ha_state())

    async_track_time_interval(hass, _rotate_tokens, TOKEN_CHANGE_INTERVAL)

    return True
class Camera(Entity):
    """The base class for camera entities."""

    def __init__(self):
        """Initialize a camera."""
        self.is_streaming = False
        # Keep the current and previous token so in-flight requests using
        # the old token still authenticate for one rotation interval.
        self.access_tokens = collections.deque([], 2)
        self.async_update_token()

    @property
    def should_poll(self):
        """No need to poll cameras."""
        return False

    @property
    def entity_picture(self):
        """Return a link to the camera feed as entity picture."""
        # The newest token authorizes frontend access to the proxy URL.
        return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return False

    @property
    def brand(self):
        """Return the camera brand."""
        return None

    @property
    def model(self):
        """Return the camera model."""
        return None

    def camera_image(self):
        """Return bytes of camera image.

        Platforms must override this synchronous method.
        """
        raise NotImplementedError()

    def async_camera_image(self):
        """Return bytes of camera image.

        This method must be run in the event loop and returns a coroutine.
        """
        # Default implementation offloads the (blocking) camera_image call
        # to the executor pool.
        return self.hass.loop.run_in_executor(None, self.camera_image)

    @asyncio.coroutine
    def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from camera images.

        This method must be run in the event loop.
        """
        response = web.StreamResponse()
        response.content_type = ('multipart/x-mixed-replace; '
                                 'boundary=--jpegboundary')
        yield from response.prepare(request)

        def write(img_bytes):
            """Write image to stream."""
            response.write(bytes(
                '--jpegboundary\r\n'
                'Content-Type: image/jpeg\r\n'
                'Content-Length: {}\r\n\r\n'.format(
                    len(img_bytes)), 'utf-8') + img_bytes + b'\r\n')

        last_image = None
        try:
            while True:
                img_bytes = yield from self.async_camera_image()

                # An empty frame signals the end of the stream.
                if not img_bytes:
                    break

                # Only push a frame when the picture actually changed.
                if img_bytes and img_bytes != last_image:
                    write(img_bytes)

                    # Chrome seems to always ignore first picture,
                    # print it twice.
                    if last_image is None:
                        write(img_bytes)

                    last_image = img_bytes
                    yield from response.drain()

                yield from asyncio.sleep(.5)

        except asyncio.CancelledError:
            # Client disconnected; don't try to finish the response.
            _LOGGER.debug("Stream closed by frontend.")
            response = None

        finally:
            if response is not None:
                yield from response.write_eof()

    @property
    def state(self):
        """Return the camera state."""
        if self.is_recording:
            return STATE_RECORDING
        elif self.is_streaming:
            return STATE_STREAMING
        else:
            return STATE_IDLE

    @property
    def state_attributes(self):
        """Return the camera state attributes."""
        attr = {
            'access_token': self.access_tokens[-1],
        }

        if self.model:
            attr['model_name'] = self.model

        if self.brand:
            attr['brand'] = self.brand

        return attr

    @callback
    def async_update_token(self):
        """Update the used token."""
        # 256 random bits hashed to a hex digest; deque keeps only the two
        # most recent tokens.
        self.access_tokens.append(
            hashlib.sha256(
                _RND.getrandbits(256).to_bytes(32, 'little')).hexdigest())
class CameraView(HomeAssistantView):
    """Base CameraView."""

    requires_auth = False

    def __init__(self, entities):
        """Initialize a basic camera view."""
        self.entities = entities

    @asyncio.coroutine
    def get(self, request, entity_id):
        """Handle a GET request for a single camera entity."""
        authenticated_request = request[KEY_AUTHENTICATED]
        camera = self.entities.get(entity_id)

        # Unknown entity: only reveal the 404 to authenticated callers.
        if camera is None:
            return web.Response(status=404 if authenticated_request else 401)

        # A valid per-camera access token substitutes for authentication.
        token_ok = request.GET.get('token') in camera.access_tokens
        if not (authenticated_request or token_ok):
            return web.Response(status=401)

        return (yield from self.handle(request, camera))

    @asyncio.coroutine
    def handle(self, request, camera):
        """Handle the camera request; implemented by subclasses."""
        raise NotImplementedError()
class CameraImageView(CameraView):
    """Camera view to serve an image."""

    url = '/api/camera_proxy/{entity_id}'
    name = 'api:camera:image'

    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve a single still frame from the camera."""
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            with async_timeout.timeout(10, loop=request.app['hass'].loop):
                frame = yield from camera.async_camera_image()

            if frame:
                return web.Response(body=frame, content_type='image/jpeg')

        # Timeout, cancellation, or an empty frame all end up here.
        return web.Response(status=500)
class CameraMjpegStream(CameraView):
    """Camera View to serve an MJPEG stream."""
    url = '/api/camera_proxy_stream/{entity_id}'
    name = 'api:camera:stream'
    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve camera image."""
        # Delegates the long-lived multipart response to the camera entity.
        yield from camera.handle_async_mjpeg_stream(request)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from dingus import Dingus
from nose.tools import eq_
from build_pack_utils import utils
import tempfile
class TestGeoipConfig(object):
    """Tests for the geoip build-pack extension's GeoipConfig helper.

    Loads the extension module once per test instance; the download-info
    tests rely on fixture JSON files under tests/data/geoip/.
    """
    def __init__(self):
        self.extension_module = utils.load_extension('extensions/geoip')
    def test_should_compile_yes(self):
        geoip = self.extension_module.GeoipConfig({
            'PHP_EXTENSIONS': ['geoip']
        })
        eq_(True, geoip._should_compile())
    def test_should_compile_no(self):
        geoip = self.extension_module.GeoipConfig({
            'PHP_EXTENSIONS': []
        })
        eq_(False, geoip._should_compile())
    def test_should_download_no(self):
        # A preset GEOIP_LOCATION_KEY means the database is user-provided.
        geoip = self.extension_module.GeoipConfig({
            self.extension_module.GeoipConfig.GEOIP_LOCATION_KEY:
            'value doesnt matter'
        })
        eq_(False, geoip._should_download())
    def test_should_download_yes(self):
        geoip = self.extension_module.GeoipConfig({})
        eq_(True, geoip._should_download())
    def test_geoip_key_default(self):
        geoip = self.extension_module.GeoipConfig({})
        eq_(geoip.DEFAULT_GEOIP_TRIGGER, geoip._geoip_key())
    def test_geoip_key_custom(self):
        geoip = self.extension_module.GeoipConfig({
            self.extension_module.GeoipConfig.CUSTOM_GEOIP_KEY_NAME:
            'custom_geoip_service'
        })
        eq_('custom_geoip_service', geoip._geoip_key())
    def test_extract_download_info_no_vcap_services(self):
        geoip = self.extension_module.GeoipConfig({})
        eq_(None, geoip._extract_download_info())
    def test_extract_download_info_no_geoip_service(self):
        ctx = json.load(open('tests/data/geoip/vcap_services_no_geoip.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        eq_(None, geoip._extract_download_info())
    def test_extract_download_info_geoip_service_present(self):
        # Expected tuple order is (user, license, products).
        ctx = json.load(open('tests/data/geoip/vcap_services_geoip.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        expected = ('TEST',
                    'asdfghjkl;',
                    'GeoLite-Legacy-IPv6-City GeoLite-Legacy-IPv6-Country')
        actual = geoip._extract_download_info()
        for exp, act in zip(expected, actual):
            eq_(exp, act)
    def test_extract_download_info_just_products(self):
        ctx = json.load(open(
            'tests/data/geoip/vcap_services_geoip_products.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        expected = (None,
                    None,
                    'GeoLite-Legacy-IPv6-City GeoLite-Legacy-IPv6-Country')
        actual = geoip._extract_download_info()
        for exp, act in zip(expected, actual):
            eq_(exp, act)
    def test_extract_download_info_just_user(self):
        ctx = json.load(open('tests/data/geoip/vcap_services_geoip_user.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        expected = ('TEST', None, None)
        actual = geoip._extract_download_info()
        for exp, act in zip(expected, actual):
            eq_(exp, act)
    def test_extract_download_info_just_license(self):
        ctx = json.load(open(
            'tests/data/geoip/vcap_services_geoip_license.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        expected = (None, 'asdfghjkl;', None)
        actual = geoip._extract_download_info()
        for exp, act in zip(expected, actual):
            eq_(exp, act)
    def test_extract_download_info_user_and_license(self):
        ctx = json.load(open(
            'tests/data/geoip/vcap_services_geoip_user_and_license.json'))
        geoip = self.extension_module.GeoipConfig(ctx)
        expected = ('TEST', 'asdfghjkl;', None)
        actual = geoip._extract_download_info()
        for exp, act in zip(expected, actual):
            eq_(exp, act)
    def test_build_download_cmd(self):
        ctx = json.load(open('tests/data/geoip/vcap_services_geoip.json'))
        ctx['BUILD_DIR'] = '/test/build_dir'
        ctx['BP_DIR'] = '/test/bp_dir'
        geoip = self.extension_module.GeoipConfig(ctx)
        cmd = geoip._build_download_cmd()
        eq_('/test/build_dir/php/geoipdb/bin/download_geoip_db.rb '
            '--output_dir="/test/build_dir/php/geoipdb/dbs" '
            '--user="TEST" '
            '--license="asdfghjkl;" '
            '--products='
            '"GeoLite-Legacy-IPv6-City GeoLite-Legacy-IPv6-Country"', cmd)
    def test_build_download_cmd_user_and_license(self):
        # Without products the --products flag should be omitted entirely.
        ctx = json.load(open(
            'tests/data/geoip/vcap_services_geoip_user_and_license.json'))
        ctx['BUILD_DIR'] = '/test/build_dir'
        ctx['BP_DIR'] = '/test/bp_dir'
        geoip = self.extension_module.GeoipConfig(ctx)
        cmd = geoip._build_download_cmd()
        eq_('/test/build_dir/php/geoipdb/bin/download_geoip_db.rb '
            '--output_dir="/test/build_dir/php/geoipdb/dbs" '
            '--user="TEST" '
            '--license="asdfghjkl;"', cmd)
    def test_build_download_cmd_products(self):
        ctx = json.load(open(
            'tests/data/geoip/vcap_services_geoip_products.json'))
        ctx['BUILD_DIR'] = '/test/build_dir'
        ctx['BP_DIR'] = '/test/bp_dir'
        geoip = self.extension_module.GeoipConfig(ctx)
        cmd = geoip._build_download_cmd()
        eq_('/test/build_dir/php/geoipdb/bin/download_geoip_db.rb '
            '--output_dir="/test/build_dir/php/geoipdb/dbs" '
            '--products='
            '"GeoLite-Legacy-IPv6-City GeoLite-Legacy-IPv6-Country"', cmd)
    def test_link_geoip_dat_geoip_dat_exists(self):
        # If GeoIP.dat already exists it must not be overwritten.
        ctx = {}
        ctx['BUILD_DIR'] = tempfile.mkdtemp()
        geoip_dir = os.path.join(ctx['BUILD_DIR'], 'php', 'geoipdb', 'dbs')
        os.makedirs(geoip_dir)
        with open(os.path.join(geoip_dir, "GeoIP.dat"), 'w') as f:
            f.write('xxx')
        with open(os.path.join(geoip_dir, "GeoLiteCountry.dat"), 'w') as f:
            f.write('yyy')
        geoip = self.extension_module.GeoipConfig(ctx)
        geoip._link_geoip_dat()
        contents = ''
        with open(os.path.join(geoip_dir, "GeoIP.dat"), 'r') as f:
            contents = f.read()
        eq_(contents, 'xxx')
        shutil.rmtree(ctx['BUILD_DIR'])
    def test_link_geoip_dat_geoip_dat_does_not_exist(self):
        # Missing GeoIP.dat gets linked to GeoLiteCountry.dat's content.
        ctx = {}
        ctx['BUILD_DIR'] = tempfile.mkdtemp()
        geoip_dir = os.path.join(ctx['BUILD_DIR'], 'php', 'geoipdb', 'dbs')
        os.makedirs(geoip_dir)
        with open(os.path.join(geoip_dir, "GeoLiteCountry.dat"), 'w') as f:
            f.write('yyy')
        geoip = self.extension_module.GeoipConfig(ctx)
        geoip._link_geoip_dat()
        contents = ''
        with open(os.path.join(geoip_dir, "GeoIP.dat"), 'r') as f:
            contents = f.read()
        eq_(contents, 'yyy')
        shutil.rmtree(ctx['BUILD_DIR'])
    def test_link_geoip_dat_geolitecountry_dat_does_not_exist(self):
        # With neither file present, no link should be created.
        ctx = {}
        ctx['BUILD_DIR'] = tempfile.mkdtemp()
        geoip_dir = os.path.join(ctx['BUILD_DIR'], 'php', 'geoipdb', 'dbs')
        os.makedirs(geoip_dir)
        geoip = self.extension_module.GeoipConfig(ctx)
        geoip._link_geoip_dat()
        eq_(os.path.isfile(os.path.join(geoip_dir, "GeoIP.dat")), False)
        eq_(os.path.isfile(os.path.join(geoip_dir, "GeoLiteCountry.dat")), False)
        shutil.rmtree(ctx['BUILD_DIR'])
| |
from guardian.models import GroupObjectPermission
from rest_framework import serializers as ser
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import (
BaseAPISerializer, JSONAPISerializer, JSONAPIRelationshipSerializer,
DateByVersion, DevOnly, HideIfDisabled, IDField,
Link, LinksField, ListDictField, TypeField, RelationshipField,
WaterbutlerLink, ShowIfCurrentUser
)
from api.base.utils import absolute_reverse, get_user_auth
from api.files.serializers import QuickFilesSerializer
from osf.exceptions import ValidationValueError, ValidationError
from osf.models import OSFUser, QuickFilesNode
from website import util as website_utils
class QuickFilesRelationshipField(RelationshipField):
    """RelationshipField that also exposes Waterbutler upload/download links
    for the user's QuickFiles node."""
    def to_representation(self, value):
        relationship_links = super(QuickFilesRelationshipField, self).to_representation(value)
        # Look up the guid of the user's (single) QuickFilesNode.
        quickfiles_guid = value.created.filter(type=QuickFilesNode._typedmodels_type).values_list('guids___id', flat=True).get()
        upload_url = website_utils.waterbutler_api_url_for(quickfiles_guid, 'osfstorage')
        relationship_links['links']['upload'] = {
            'href': upload_url,
            'meta': {}
        }
        # '?zip=' requests the whole storage as a zip archive.
        relationship_links['links']['download'] = {
            'href': '{}?zip='.format(upload_url),
            'meta': {}
        }
        return relationship_links
class UserSerializer(JSONAPISerializer):
    """JSON-API serializer for OSF users.

    Fields wrapped in HideIfDisabled are suppressed for deactivated
    accounts; ShowIfCurrentUser fields are only visible to the user
    themselves.
    """
    filterable_fields = frozenset([
        'full_name',
        'given_name',
        'middle_names',
        'family_name',
        'id'
    ])
    non_anonymized_fields = ['type']
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface')
    given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
    date_registered = HideIfDisabled(DateByVersion(read_only=True))
    active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))
    timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC"))
    locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g.  'en_US'"))
    social = ListDictField(required=False)
    can_view_reviews = ShowIfCurrentUser(ser.SerializerMethodField(help_text='Whether the current user has the `view_submissions` permission to ANY reviews provider.'))

    links = HideIfDisabled(LinksField(
        {
            'html': 'absolute_url',
            'profile_image': 'profile_image_url',
        }
    ))

    nodes = HideIfDisabled(RelationshipField(
        related_view='users:user-nodes',
        related_view_kwargs={'user_id': '<_id>'},
        related_meta={'projects_in_common': 'get_projects_in_common'},
    ))

    quickfiles = HideIfDisabled(QuickFilesRelationshipField(
        related_view='users:user-quickfiles',
        related_view_kwargs={'user_id': '<_id>'},
    ))

    registrations = DevOnly(HideIfDisabled(RelationshipField(
        related_view='users:user-registrations',
        related_view_kwargs={'user_id': '<_id>'},
    )))

    institutions = HideIfDisabled(RelationshipField(
        related_view='users:user-institutions',
        related_view_kwargs={'user_id': '<_id>'},
        self_view='users:user-institutions-relationship',
        self_view_kwargs={'user_id': '<_id>'},
    ))

    actions = ShowIfCurrentUser(RelationshipField(
        related_view='users:user-action-list',
        related_view_kwargs={'user_id': '<_id>'},
    ))

    class Meta:
        type_ = 'users'

    def get_projects_in_common(self, obj):
        """Number of projects shared between obj and the requesting user."""
        user = get_user_auth(self.context['request']).user
        if obj == user:
            return user.contributor_to.count()
        return obj.n_projects_in_common(user)

    def absolute_url(self, obj):
        if obj is not None:
            return obj.absolute_url
        return None

    def get_absolute_url(self, obj):
        return absolute_reverse('users:user-detail', kwargs={
            'user_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })

    def get_can_view_reviews(self, obj):
        """True when obj has `view_submissions` on any reviews provider."""
        group_qs = GroupObjectPermission.objects.filter(group__user=obj, permission__codename='view_submissions')
        # BUG FIX: the second clause previously returned the raw QuerySet
        # (truthy/falsy, but not a bool and not JSON-serializable);
        # .exists() makes the method consistently return a boolean.
        return group_qs.exists() or obj.userobjectpermission_set.filter(permission__codename='view_submissions').exists()

    def profile_image_url(self, user):
        size = self.context['request'].query_params.get('profile_image_size')
        return user.profile_image_url(size=size)

    def update(self, instance, validated_data):
        """Apply validated field changes; 'social' entries are unwrapped
        from single-element lists except for profileWebsites."""
        assert isinstance(instance, OSFUser), 'instance must be a User'
        for attr, value in validated_data.items():
            if 'social' == attr:
                for key, val in value.items():
                    # currently only profileWebsites are a list, the rest of the social key only has one value
                    if key == 'profileWebsites':
                        instance.social[key] = val
                    else:
                        if len(val) > 1:
                            raise InvalidModelValueError(
                                detail='{} only accept a list of one single value'.format(key)
                            )
                        instance.social[key] = val[0]
            else:
                setattr(instance, attr, value)
        try:
            instance.save()
        except ValidationValueError as e:
            raise InvalidModelValueError(detail=e.message)
        except ValidationError as e:
            raise InvalidModelValueError(e)
        return instance
class UserAddonSettingsSerializer(JSONAPISerializer):
    """
    Serializer for a user's settings for a single add-on provider.
    (The previous docstring, "Overrides UserSerializer to make id
    required", was copy-pasted from UserDetailSerializer.)
    """
    id = ser.CharField(source='config.short_name', read_only=True)
    user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
        'accounts': 'account_links'
    })
    class Meta:
        type_ = 'user_addons'
    def get_absolute_url(self, obj):
        return absolute_reverse(
            'users:user-addon-detail',
            kwargs={
                'provider': obj.config.short_name,
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version']
            }
        )
    def account_links(self, obj):
        """Map each linked external account id to its detail URL and the
        nodes it is attached to."""
        # TODO: [OSF-4933] remove this after refactoring Figshare
        if hasattr(obj, 'external_accounts'):
            return {
                account._id: {
                    'account': absolute_reverse('users:user-external_account-detail', kwargs={
                        'user_id': obj.owner._id,
                        'provider': obj.config.short_name,
                        'account_id': account._id,
                        'version': self.context['request'].parser_context['kwargs']['version']
                    }),
                    'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)]
                }
                for account in obj.external_accounts.all()
            }
        return {}
class UserDetailSerializer(UserSerializer):
    """
    Overrides UserSerializer to make id required.
    """
    id = IDField(source='_id', required=True)
class UserQuickFilesSerializer(QuickFilesSerializer):
    """QuickFiles serializer with file-level info/upload/download links."""
    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'download': WaterbutlerLink(must_be_file=True),
    })
class ReadEmailUserDetailSerializer(UserDetailSerializer):
    """UserDetailSerializer that additionally exposes the account email
    (the username) as a read-only field."""
    email = ser.CharField(source='username', read_only=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
    """Minimal relationship payload for an institution (id + type)."""
    id = ser.CharField(required=False, allow_null=True, source='_id')
    class Meta:
        type_ = 'institutions'
    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(BaseAPISerializer):
    """Relationship serializer linking a user to their institutions."""
    data = ser.ListField(child=RelatedInstitution())
    links = LinksField({'self': 'get_self_url',
                        'html': 'get_related_url'})
    def get_self_url(self, obj):
        # obj['self'] is the user that owns this relationship.
        return absolute_reverse('users:user-institutions-relationship', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
    def get_related_url(self, obj):
        return absolute_reverse('users:user-institutions', kwargs={
            'user_id': obj['self']._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url
    class Meta:
        type_ = 'institutions'
| |
# encoding: utf-8
"""
check.py
Created by Thomas Mangin on 2013-03-18.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
class TYPE (object):
    """Bit flags describing which JSON value types a check accepts."""
    NULL = 0x01
    BOOLEAN = 0x02
    INTEGER = 0x04
    STRING = 0x08
    ARRAY = 0x10
    HASH = 0x20
class PRESENCE (object):
    """Bit flags stating whether a configuration key must be present."""
    OPTIONAL = 0x01
    MANDATORY = 0x02
# TYPE CHECK
def null(data):
    """True when *data* is the JSON null value (Python None)."""
    return data is None
def boolean(data):
    """True when *data* is a genuine bool (ints are rejected)."""
    return type(data) is bool
def integer(data):
    """True when *data* is an int; the exact type test excludes bool."""
    return type(data) is int
def string(data):
    """True when *data* is a text (or, on Python 2, byte) string."""
    return type(data) in (type(u''), type(''))
def array(data):
    """True when *data* is a list (JSON array)."""
    return type(data) is list
def hashtable(data):
    """True when *data* is a dict (JSON object)."""
    return type(data) is dict
# XXX: Not very good to redefine the keyword object, but this class uses no OO ...
# Dispatch table mapping each TYPE bit to its type-checking predicate.
CHECK_TYPE = {
    TYPE.NULL: null,
    TYPE.BOOLEAN: boolean,
    TYPE.INTEGER: integer,
    TYPE.STRING: string,
    TYPE.ARRAY: array,
    TYPE.HASH: hashtable,
}
def kind(kind, data):
    """True when *data* matches at least one TYPE bit set in *kind*.

    NOTE: the first parameter deliberately shadows the function name.
    """
    return any(check(data) for bit, check in CHECK_TYPE.items() if kind & bit)
# DATA CHECK
def nop (data):
    # Placeholder check: accepts any value.
    return True
def uint8(data):
    """True for integers representable in 8 unsigned bits."""
    return 0 <= data < 2 ** 8
def uint16(data):
    """True for integers representable in 16 unsigned bits."""
    return 0 <= data < 2 ** 16
def uint32(data):
    """True for integers representable in 32 unsigned bits."""
    return 0 <= data < 2 ** 32
def float(data):
    """True for non-negative values below the IEEE-754 single-precision
    maximum (approximation).

    NOTE: deliberately shadows the builtin ``float``; kept for
    configuration-schema compatibility.
    """
    return 0 <= data < 3.4 * 10 ** 38  # approximation of max from wikipedia
def ip (data):
    # Valid if the value parses as either an IPv4 or an IPv6 address.
    return ipv4(data) or ipv6(data)
def ipv4 (data):  # XXX: improve
    # Very loose: any string with exactly three dots passes.
    return string(data) and data.count('.') == 3
def ipv6 (data):  # XXX: improve
    # Very loose: any string containing a colon passes.
    return string(data) and ':' in data
def range4(data):
    """True for a valid IPv4 prefix length (1..32)."""
    return 0 < data <= 32
def range6(data):
    """True for a valid IPv6 prefix length (1..128)."""
    return 0 < data <= 128
def ipv4_range(data):
    """True when *data* is '<ipv4>/<len>' with a 1..32 prefix length."""
    if data.count('/') != 1:
        return False
    addr, length = data.split('/')
    # int() is only reached when isdigit() guarantees it cannot raise.
    return ipv4(addr) and length.isdigit() and range4(int(length))
def port(data):
    """True for a valid TCP/UDP port number (0..65535)."""
    return 0 <= data < 2 ** 16
def asn16(data):
    """True for a valid 2-byte ASN (1 .. 65535; zero is reserved)."""
    return 1 <= data < 2 ** 16
def asn32(data):
    """True for a valid 4-byte ASN (1 .. 2**32-1; zero is reserved)."""
    return 1 <= data < 2 ** 32

# Default ASN check: 4-byte ASNs subsume the 2-byte range.
asn = asn32
def md5(data):
    """True when the password fits the length limit.

    NOTE(review): 18 appears to be the TCP-MD5 password limit used by the
    underlying implementation — confirm against the session code.
    """
    return len(data) <= 18
def localpreference (data):
    # LOCAL_PREF is validated as an unsigned 32-bit value.
    return uint32(data)
def med (data):
    # MED (multi-exit discriminator) is validated as an unsigned 32-bit value.
    return uint32(data)
def aigp (data):
    # AIGP metric is validated as an unsigned 32-bit value.
    return uint32(data)
def originator (data):
    # ORIGINATOR_ID is written as a dotted-quad IPv4 value.
    return ipv4(data)
def distinguisher(data):
    """True for a route distinguisher written '<asn16>:<ipv4>' or
    '<ipv4>:<asn16>'."""
    parts = data.split(':')
    if len(parts) != 2:
        return False
    left, right = parts
    # A digits-only left side can never also be a dotted-quad, so the two
    # original or-ed clauses are mutually exclusive.
    if left.isdigit():
        return asn16(int(left)) and ipv4(right)
    return ipv4(left) and right.isdigit() and asn16(int(right))
def pathinformation (data):
    # Add-path path-id: either a 32-bit integer or an IPv4-formatted string.
    if integer(data):
        return uint32(data)
    if string(data):
        return ipv4(data)
    return False
def watchdog(data):
    """True when the watchdog name contains no space."""
    # TODO: improve
    return data.find(' ') < 0
def split (data):
    # A split size is validated like an IPv6 prefix length (1..128).
    return range6(data)
# LIST DATA CHECK
# Those function need to perform type checks before using the data
def aspath (data):
    # An AS-path entry is an integer below 2^32 (4-byte ASN space).
    return integer(data) and data < pow(2,32)
def assequence (data):
    # An AS-sequence entry is an integer below 2^32 (4-byte ASN space).
    return integer(data) and data < pow(2,32)
def community (data):
    # A community is a raw 32-bit value, a well-known name, or an
    # [asn16, value16] pair.
    if integer(data):
        return uint32(data)
    if string(data) and data.lower() in ('no-export', 'no-advertise', 'no-export-subconfed', 'nopeer', 'no-peer'):
        return True
    return array(data) and len(data) == 2 and \
        integer(data[0]) and integer(data[1]) and \
        asn16(data[0]) and uint16(data[1])
def extendedcommunity (data):  # TODO: improve, incomplete see http://tools.ietf.org/rfc/rfc4360.txt
    """True for an extended community given as a raw integer or as
    'origin:<x>:<y>' / 'target:<x>:<y>' where one of x/y is a 2-byte ASN
    and the other an IPv4 address."""
    if integer(data):
        return True
    if string(data) and data.count(':') == 2:
        prefix, first, second = data.split(':')
        if prefix.lower() not in ('origin', 'target'):
            return False
        # BUG FIX: asn16() was previously called with the *string* form
        # (TypeError on Python 3, always False on Python 2); convert to
        # int first, mirroring distinguisher() and redirect().
        return (first.isdigit() and asn16(int(first)) and ipv4(second)) or \
            (ipv4(first) and second.isdigit() and asn16(int(second)))
    return False
def label (data):
    # MPLS label: 20-bit unsigned value.
    return integer(data) and 0 <= data < pow(2, 20)  # XXX: SHOULD be taken from Label class
def clusterlist (data):
    # NOTE(review): entries are checked as uint8 here, but CLUSTER_LIST
    # ids are 32-bit on the wire — confirm this limit is intentional.
    return integer(data) and uint8(data)
def aggregator(data):
    """True for an empty aggregator or an [ASN, 'ipv4'] pair."""
    if not array(data):
        return False
    if not data:
        # An empty list is an acceptable "no aggregator" value.
        return True
    if len(data) != 2:
        return False
    asnum, address = data
    return integer(asnum) and string(address) and asn(asnum) and ipv4(address)
def dscp (data):
    # NOTE(review): DSCP is a 6-bit field; uint8 is looser — confirm intent.
    return integer(data) and uint8(data)
# FLOW DATA CHECK
#
def flow_ipv4_range (data):
    """True when *data* is one ipv4 CIDR string, or a list in which every
    entry is a valid ipv4 CIDR string; False for anything else."""
    if array(data):
        for r in data:
            if not ipv4_range(r):
                return False
        # BUG FIX: a list whose entries were all valid previously fell
        # through to the string branch and was rejected.
        return True
    if string(data):
        return ipv4_range(data)
    return False
def _flow_numeric(data, check):
    """True when *data* is a list of [operator, value] pairs whose
    operator is one of > < = >= <= and whose integer value passes
    *check*."""
    if not array(data):
        return False
    for pair in data:
        if not array(pair) or len(pair) != 2:
            return False
        op, value = pair
        if op not in ('>', '<', '=', '>=', '<='):
            return False
        if not integer(value) or not check(value):
            return False
    return True
def flow_port (data):
    # Flow-spec port match: list of [operator, port] pairs.
    return _flow_numeric(data,port)
def _length (data):
    # Packet length values are 16-bit unsigned.
    return uint16(data)
def flow_length (data):
    # Flow-spec packet-length match: list of [operator, length] pairs.
    return _flow_numeric(data,_length)
def redirect (data):  # TODO: check that we are not too restrictive with our asn() calls
    """True for a flow-spec redirect target written '<asn|ipv4>:<asn16>'."""
    parts = data.split(':')
    if len(parts) != 2:
        return False
    target, suffix = parts
    # BUG FIX: `if not suffix.isdigit() and asn16(int(suffix))` was
    # parenthesised wrongly — a non-digit suffix reached int() and raised
    # ValueError, while an out-of-range ASN suffix was never rejected.
    if not (suffix.isdigit() and asn16(int(suffix))):
        return False
    return ipv4(target) or (target.isdigit() and asn16(int(target)))
| |
import logging
from isobus.common import NumericValue
from isobus.ibsinterface import IBSInterface
from isobus.common import IBSID
from isobus.constants import *
from isobus.log import log
from isobus.common import IBSException
class IBSVTInterface(IBSInterface):
    """ Implements ISOBUS part 6 functionality (Version 3)
    Extends the ISOBUS general interface
    Send* methods transmit one VT command (the first CAN byte is the
    function code); Wait* methods block for the matching VT response.
    """
    def WaitForStatusMessage(self, vtsa):
        # VT status message: function code 0xFE, sent to the global address.
        return self._WaitForIBSMessage(PGN_VT2ECU, vtsa, 0xFF, 0xFE)
    def SendChangeActiveMask(self, wsid, maskid, sa, da):
        # Change Active Mask command (0xAD).
        candata = ([0xAD]
                + NumericValue(wsid).AsLEBytes(2)
                + NumericValue(maskid).AsLEBytes(2)
                + [0xFF, 0xFF, 0xFF])
        self._SendIBSMessage(PGN_ECU2VT, da, sa, candata)
    def WaitForChangeActiveMaskResponse(self, vtsa, ecusa):
        # Returns (received, new mask id, error code byte).
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xAD)
        return received, NumericValue.FromLEBytes(data[1:3]).Value(), data[3]
    def SendChangeSKMask(self, maskid, skmaskid, alarm, vtsa, ecusa):
        # Change Soft Key Mask command (0xAE); second byte 0x02 selects an
        # alarm mask, 0x01 a data mask.
        candata = [0xFF] * 8
        if alarm:
            candata = ([0xAE]
                    + [0x02]
                    + NumericValue(maskid).AsLEBytes(2)
                    + NumericValue(skmaskid).AsLEBytes(2)
                    + [0xFF, 0xFF])
        else:
            candata = ([0xAE]
                    + [0x01]
                    + NumericValue(maskid).AsLEBytes(2)
                    + NumericValue(skmaskid).AsLEBytes(2)
                    + [0xFF, 0xFF])
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForChangeSKMaskResponse(self, vtsa, ecusa):
        """ Wait for the Change Soft Key Mask response message
        Return True for received, error code, and new SK mask ID
        """
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xAE)
        return received, data[5], NumericValue.FromLEBytes(data[3:5]).Value()
    def SendChangeAttribute(self, objid, attrid, value, vtsa, ecusa):
        # Change Attribute command (0xAF).
        candata = ([0xAF]
                + NumericValue(objid).AsLEBytes(2)
                + NumericValue(attrid).AsLEBytes(1)
                + NumericValue(value).AsLEBytes(4))
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitChangeAttributeResponse(self, vtsa, ecusa):
        """
        Wait for a response for the change attribute command
        Return True for received and Error code
        """
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xAF)
        return received, data[4]
    def SendEscCommand(self, vtsa, ecusa):
        # ESC command (0x92): abort operator input.
        candata = [0x92] + (7 * [0xFF])
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForESCResponse(self, vtsa, ecusa):
        """
        Wait for ESC response
        @return True for received, error code and aborted input object ID
        """
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0x92)
        return received, data[3], NumericValue.FromLEBytes(data[1:3]).Value()
    def SendWSMaintenance(self, initiating, sa, da):
        # Working Set Maintenance message; bit 0 of byte 2 flags the first
        # (initiating) message, byte 3 = 0x3 announces VT version 3.
        initBit = 0
        if (initiating) :
            initBit = 1
        candata = [0xFF, (initBit & 0x1), 0x3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
        self._SendIBSMessage(PGN_ECU2VT, da, sa, candata)
    def StartWSMaintenace(self, sa, da):
        # Start the periodic (1 s) working set maintenance transmission.
        candata = [0xFF, 0x00, 0x3, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
        ibsid = IBSID(sa = sa, da = da, pgn = PGN_ECU2VT, prio = 6)
        self.AddPeriodicMessage(ibsid, candata, 1.0)
    def StopWSMaintenance(self, sa, da):
        # For socketcan_native, bit 32 (MSb) needs to be set for extended ID
        # Is fixed in latest python-can though!
        ibsid = IBSID(sa = sa, da = da, pgn = PGN_ECU2VT, prio = 6)
        self.StopPeriodicMessage(ibsid)
    def SendLoadVersionCommand(self, version, sa, da):
        # Load Version command (0xD1); the version label must be exactly 7
        # characters.
        if len(version) == 7:
            candata = [0xD1] + [ord(x) for x in version]
            self._SendIBSMessage(PGN_ECU2VT, da, sa, candata)
        else :
            raise IBSException("Version {0} is not 7 characters".format(version))
    def SendStoreVersioncommand(self, version, da, sa):
        # Store Version command (0xD0).
        # NOTE(review): parameter order (da, sa) is swapped relative to
        # SendLoadVersionCommand(sa, da) — confirm callers pass correctly.
        if len(version) == 7:
            candata = [0xD0] + [ord(x) for x in version]
            self._SendIBSMessage(PGN_ECU2VT, da, sa, candata)
        else :
            raise IBSException("Version {0} is not 7 characters".format(version))
    def WaitLoadVersionResponse(self, vtsa, ecusa):
        #TODO: Should wait 3 status messages w/parsing bit=0 i.o. 3 seconds
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xD1)
        return received, data[5]
    def WaitStoreVersionResponse(self, vtsa, ecusa):
        # Returns (received, error code byte).
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xD0)
        return received, data[5]
    def SendGetMemory(self, memRequired, vtsa, ecusa):
        # Get Memory command (0xC0): ask whether memRequired bytes fit.
        candata = ( [0xC0, 0xFF]
                + NumericValue(memRequired).AsLEBytes(4)
                + [0xFF, 0xFF])
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForGetMemoryResponse(self, vtsa, ecusa):
        # Returns (received, VT version byte, enough-memory flag);
        # data[2] == 0x01 signals "not enough memory".
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xC0)
        version = data[1]
        enoughMemory = True
        if data[2] == 0x01:
            enoughMemory = False
        return received, version, enoughMemory
    def SendChangeNumericValue(self, objid, value, vtsa, ecusa):
        # Change Numeric Value command (0xA8).
        candata =([0xA8]
                + NumericValue(objid).AsLEBytes(2)
                + [0xFF]
                + NumericValue(value).AsLEBytes(4))
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForChangeNumericValueResponse(self, vtsa, ecusa):
        """
        Return true for received, error code
        """
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xA8)
        return received, data[3]
    def SendChangeStringValue(self, objid, value, vtsa, ecusa):
        # Change String Value command (0xB3); payload is padded with
        # RESERVED bytes up to the 3-byte minimum.
        # TODO: Check for too large strings!
        stringData = [ord(x) for x in value]
        if len(stringData) < 3:
            stringData = stringData + list([RESERVED] * (3 - len(stringData)))
        candata =([0xB3]
                + NumericValue(objid).AsLEBytes(2)
                + NumericValue(len(value)).AsLEBytes(2)
                + stringData)
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForChangeStringValueResponse(self, vtsa, ecusa):
        """
        Return true for received, error code
        """
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xB3)
        return received, data[5]
    def SendPoolUpload(self, vtsa, ecusa, pooldata):
        # Object Pool Transfer (0x11) carrying the raw pool bytes.
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, [0x11] + pooldata)
    def SendEndOfObjectPool(self, vtsa, ecusa):
        # End Of Object Pool command (0x12).
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, [0x12] + [0xFF] * 7)
    def WaitEndOfObjectPoolResponse(self, vtsa, ecusa):
        # Longer (5 s) timeout: pool parsing can take a while on the VT.
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0x12, 5.0)
        return received, data[1]
    # TODO: Return error codes + faulty objects?
    def SendDeleteObjectPool(self, vtsa, ecusa):
        # Delete Object Pool command (0xB2).
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, [0xB2] + (7 * [0xFF]))
    def WaitDeleteObjectPoolResponse(self, vtsa, ecusa):
        # Returns (received, error code byte).
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xB2)
        return received, data[1]
    def SendChangeListItemCommand(self, vtsa, ecusa, objectid, index, newid):
        # Change List Item command (0xB1).
        candata = ([0xB1]
                + NumericValue(objectid).AsLEBytes(2)
                + [index & 0xFF]
                + NumericValue(newid).AsLEBytes(2)
                + [RESERVED] * 2)
        self._SendIBSMessage(PGN_ECU2VT, vtsa, ecusa, candata)
    def WaitForChangeListItemResponse(self, vtsa, ecusa):
        # Returns (received, error code byte).
        [received, data] = self._WaitForIBSMessage(PGN_VT2ECU, vtsa, ecusa, 0xB1)
        return received, data[6]
    def SendIdentifyVT(self, sa):
        # Identify VT command (0xBB), broadcast to the global address.
        log.debug('Sending identify VT')
        self._SendIBSMessage(PGN_ECU2VT, 0xFF, sa, [0xBB] + (7 * [0xFF]))
| |
import numpy
from PyQt4 import QtGui, QtCore
from ilastik.modules.classification.core import classificationMgr
from ilastik.core import overlayMgr
import ilastik.core.overlays.thresholdOverlay as tho
#*******************************************************************************
# F e a t u r e C o m p u t a t i o n *
#*******************************************************************************
class FeatureComputation(QtCore.QObject):
    """Runs feature computation in the background and drives a
    status-bar progress bar via a polling timer."""

    def __init__(self, parent):
        QtCore.QObject.__init__(self)
        self.ilastik = self.parent = parent
        self.featureCompute()

    def featureCompute(self):
        """Disable conflicting UI actions, start the computation and the
        200 ms progress-polling timer."""
        self.parent.setTabBusy(True)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(False)
        self.parent.project.dataMgr.featureLock.acquire()
        self.myTimer = QtCore.QTimer(self)
        self.parent.connect(self.myTimer, QtCore.SIGNAL("timeout()"), self.updateFeatureProgress)
        self.parent.project.dataMgr.module["Classification"]["classificationMgr"].clearFeaturesAndTraining()
        numberOfJobs = self.ilastik.project.dataMgr.Classification.featureMgr.prepareCompute(self.parent.project.dataMgr)
        self.initFeatureProgress(numberOfJobs)
        self.ilastik.project.dataMgr.Classification.featureMgr.triggerCompute()
        self.myTimer.start(200)

    def initFeatureProgress(self, numberOfJobs):
        """Create and show the status-bar progress bar."""
        statusBar = self.parent.statusBar()
        self.myFeatureProgressBar = QtGui.QProgressBar()
        self.myFeatureProgressBar.setMinimum(0)
        self.myFeatureProgressBar.setMaximum(numberOfJobs)
        self.myFeatureProgressBar.setFormat(' Features... %p%')
        statusBar.addWidget(self.myFeatureProgressBar)
        statusBar.show()

    def updateFeatureProgress(self):
        """Timer callback: refresh the bar; finish once the worker stops."""
        val = self.ilastik.project.dataMgr.Classification.featureMgr.getCount()
        self.myFeatureProgressBar.setValue(val)
        if not self.ilastik.project.dataMgr.Classification.featureMgr.featureProcess.isRunning():
            self.terminateFeatureProgressBar()
            self.ilastik.project.dataMgr.Classification.featureMgr.joinCompute(self.parent.project.dataMgr)

    def terminateFeatureProgressBar(self):
        """Tear down the progress UI, rebuild the training matrix and
        re-enable the ribbon buttons."""
        self.myTimer.stop()
        del self.myTimer
        self.parent.statusBar().removeWidget(self.myFeatureProgressBar)
        self.parent.statusBar().hide()
        self.parent.project.dataMgr.module["Classification"]["classificationMgr"].buildTrainingMatrix()
        self.parent.project.dataMgr.featureLock.release()
        if hasattr(self.parent, "classificationInteractive"):
            self.parent.classificationInteractive.updateThreadQueues()
        # BUG FIX: btnSelectFeatures was previously enabled twice; the
        # redundant duplicate call has been removed.
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnStartLive.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(True)
        self.parent.setTabBusy(False)

    def featureShow(self, item):
        """Placeholder; intentionally does nothing."""
        pass
#*******************************************************************************
# C l a s s i f i c a t i o n T r a i n *
#*******************************************************************************
class ClassificationTrain(QtCore.QObject):
    """Runs classifier training in a worker thread with a status-bar
    progress bar; emits trainingFinished() when done."""
    def __init__(self, parent):
        QtCore.QObject.__init__(self)
        self.parent = parent
        self.ilastik = parent
        self.start()
    def start(self):
        # Disable conflicting UI actions for the duration of training.
        self.parent.setTabBusy(True)
        #process all unaccounted label changes
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(False)
        self.parent.ribbon.getTab('Automate').btnBatchProcess.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(False)
        newLabels = self.parent.labelWidget.getPendingLabels()
        if len(newLabels) > 0:
            self.parent.project.dataMgr.Classification.classificationMgr.updateTrainingMatrix(newLabels)
        self.classificationTimer = QtCore.QTimer(self)
        self.parent.connect(self.classificationTimer, QtCore.SIGNAL("timeout()"), self.updateClassificationProgress)
        # Fixed job count used only to scale the progress bar.
        numberOfJobs = 10
        self.initClassificationProgress(numberOfJobs)
        self.classificationProcess = classificationMgr.ClassifierTrainThread(numberOfJobs, self.parent.project.dataMgr, classifier = self.parent.project.dataMgr.module["Classification"].classifier)
        self.classificationProcess.start()
        self.classificationTimer.start(500)
    def initClassificationProgress(self, numberOfJobs):
        # Build and show the status-bar progress widget.
        statusBar = self.parent.statusBar()
        self.myClassificationProgressBar = QtGui.QProgressBar()
        self.myClassificationProgressBar.setMinimum(0)
        self.myClassificationProgressBar.setMaximum(numberOfJobs)
        self.myClassificationProgressBar.setFormat(' Training... %p%')
        statusBar.addWidget(self.myClassificationProgressBar)
        statusBar.show()
    def updateClassificationProgress(self):
        # Timer callback: poll the worker thread's progress counter.
        val = self.classificationProcess.count
        self.myClassificationProgressBar.setValue(val)
        if not self.classificationProcess.isRunning():
            self.finalize()
    def finalize(self):
        # Stop polling, join the worker, restore the UI and notify listeners.
        self.classificationTimer.stop()
        del self.classificationTimer
        self.classificationProcess.wait()
        self.terminateClassificationProgressBar()
        self.parent.setTabBusy(False)
        self.emit(QtCore.SIGNAL("trainingFinished()"))
    def terminateClassificationProgressBar(self):
        # Remove the progress widget and re-enable the ribbon buttons.
        self.parent.statusBar().removeWidget(self.myClassificationProgressBar)
        self.parent.statusBar().hide()
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(True)
        self.parent.ribbon.getTab('Automate').btnBatchProcess.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(True)
#*******************************************************************************
# C l a s s i f i c a t i o n I n t e r a c t i v e *
#*******************************************************************************
class ClassificationInteractive(object):
    """Runs an interactive ("live") classification session for the active image.

    Disables conflicting ribbon actions, ensures the prediction, uncertainty
    and segmentation overlays exist, then starts a
    ClassifierInteractiveThread that re-trains/predicts whenever new labels
    arrive or the displayed slice changes.
    """
    def __init__(self, parent):
        self.parent = parent
        self.stopped = False
        # Lock out conflicting actions while the live session runs.
        # NOTE(review): btnTrainPredict is disabled twice (here and two lines
        # below); the duplicate is harmless but probably unintended.
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(False)
        self.parent.ribbon.getTab('Automate').btnBatchProcess.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(False)
        # Re-kick the worker whenever labels change or the slice moves.
        self.parent.labelWidget.connect(self.parent.labelWidget, QtCore.SIGNAL('newLabelsPending()'), self.updateThreadQueues)
        self.parent.labelWidget.connect(self.parent.labelWidget, QtCore.SIGNAL('changedSlice(int, int)'), self.updateThreadQueues)
        self.temp_cnt = 0
        descriptions = self.parent.project.dataMgr.module["Classification"]["labelDescriptions"]
        activeImage = self.parent._activeImage
        foregrounds = []
        for p_num,pd in enumerate(descriptions):
            #create Overlay for _prediction if not there:
            # NOTE(review): the body indexes descriptions[p_num-1] (starting
            # with the LAST description) instead of using `pd`. Every entry is
            # still visited exactly once, just in rotated order -- confirm
            # this is intentional.
            if activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name] is None:
                data = numpy.zeros(activeImage.shape[0:-1] + (1,), 'float32')
                ov = overlayMgr.OverlayItem(data, color = QtGui.QColor.fromRgba(long(descriptions[p_num-1].color)), alpha = 0.4, colorTable = None, autoAdd = True, autoVisible = True, min = 0, max = 1.0)
                ov.setColorGetter(descriptions[p_num-1].getColor, descriptions[p_num-1])
                activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name] = ov
            ov = activeImage.overlayMgr["Classification/Prediction/" + descriptions[p_num-1].name]
            foregrounds.append(ov)
        #create Overlay for uncertainty:
        if activeImage.overlayMgr["Classification/Uncertainty"] is None:
            data = numpy.zeros(activeImage.shape[0:-1] + (1,), 'float32')
            ov = overlayMgr.OverlayItem(data, color = QtGui.QColor(255, 0, 0), alpha = 1.0, colorTable = None, autoAdd = True, autoVisible = False, min = 0, max = 1)
            activeImage.overlayMgr["Classification/Uncertainty"] = ov
        # A combined segmentation overlay only makes sense with >= 2 classes.
        if len(foregrounds) > 1:
            if activeImage.overlayMgr["Classification/Segmentation"] is None:
                ov = tho.ThresholdOverlay(foregrounds, [], autoAdd = True, autoVisible = False)
                activeImage.overlayMgr["Classification/Segmentation"] = ov
            else:
                ov = activeImage.overlayMgr["Classification/Segmentation"]
                ov.setForegrounds(foregrounds)
        self.start()
    def updateThreadQueues(self, a = 0, b = 0):
        # Slot for both newLabelsPending() and changedSlice(int, int); the
        # unused a/b parameters absorb the slice arguments of the latter.
        if self.classificationInteractive is not None:
            self.myInteractionProgressBar.setVisible(True)
            self.classificationInteractive.dataPending.set()
    def updateLabelWidget(self):
        # Worker signalled resultsPending(): hide the busy bar and repaint.
        try:
            self.myInteractionProgressBar.setVisible(False)
            self.parent.labelWidget.repaint()
        except IndexError:
            pass
    def initInteractiveProgressBar(self):
        # Indeterminate busy indicator (min == max == 0) in the status bar.
        statusBar = self.parent.statusBar()
        self.myInteractionProgressBar = QtGui.QProgressBar()
        self.myInteractionProgressBar.setVisible(False)
        self.myInteractionProgressBar.setMinimum(0)
        self.myInteractionProgressBar.setMaximum(0)
        statusBar.addWidget(self.myInteractionProgressBar)
        statusBar.show()
    def terminateClassificationProgressBar(self):
        # Remove the busy indicator again.
        self.parent.statusBar().removeWidget(self.myInteractionProgressBar)
        self.parent.statusBar().hide()
    def start(self):
        # Launch the interactive classifier thread and hook up its signal.
        self.parent.setTabBusy(True)
        self.initInteractiveProgressBar()
        self.classificationInteractive = classificationMgr.ClassifierInteractiveThread(self.parent, self.parent.project.dataMgr.module["Classification"]["classificationMgr"],classifier = self.parent.project.dataMgr.module["Classification"].classifier)
        self.parent.connect(self.classificationInteractive, QtCore.SIGNAL("resultsPending()"), self.updateLabelWidget)
        self.classificationInteractive.start()
        self.updateThreadQueues()
    def stop(self):
        # Ask the worker to finish, join it, then restore the GUI state.
        self.classificationInteractive.stopped = True
        self.classificationInteractive.dataPending.set() #wake up the thread one last time before it exits
        self.classificationInteractive.wait()
        self.finalize()
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(True)
        self.terminateClassificationProgressBar()
        self.parent.setTabBusy(False)
    def finalize(self):
        # Keep the classifiers trained during the session and drop the thread.
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(True)
        self.parent.ribbon.getTab('Automate').btnBatchProcess.setEnabled(True)
        self.parent.project.dataMgr.Classification.classificationMgr.classifiers = list(self.classificationInteractive.classifiers)
        self.classificationInteractive = None
#*******************************************************************************
# C l a s s i f i c a t i o n P r e d i c t *
#*******************************************************************************
class ClassificationPredict(QtCore.QObject):
    # Runs batch prediction with the trained classifier. A QTimer polls the
    # worker thread's progress every 200 ms and tears everything down when
    # the thread stops.  (Python 2 code: note the `except E, e` syntax below.)
    def __init__(self, parent):
        QtCore.QObject.__init__(self)
        self.parent = parent
        self.start()
    def start(self):
        # Lock the GUI, launch the prediction thread and the poll timer.
        self.parent.setTabBusy(True)
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnStartLive.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(False)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(False)
        self.classificationTimer = QtCore.QTimer(self)
        self.parent.connect(self.classificationTimer, QtCore.SIGNAL("timeout()"), self.updateClassificationProgress)
        self.classificationPredict = classificationMgr.ClassifierPredictThread(self.parent.project.dataMgr)
        numberOfJobs = self.classificationPredict.numberOfJobs
        self.initClassificationProgress(numberOfJobs)
        self.classificationPredict.start()
        self.classificationTimer.start(200)
    def initClassificationProgress(self, numberOfJobs):
        # Determinate progress bar in the status bar, one tick per job.
        statusBar = self.parent.statusBar()
        self.myClassificationProgressBar = QtGui.QProgressBar()
        self.myClassificationProgressBar.setMinimum(0)
        self.myClassificationProgressBar.setMaximum(numberOfJobs)
        self.myClassificationProgressBar.setFormat(' Prediction... %p%')
        statusBar.addWidget(self.myClassificationProgressBar)
        statusBar.show()
    def updateClassificationProgress(self):
        # Timer slot: mirror the worker's job counter; tear down once the
        # prediction thread has stopped running.
        val = self.classificationPredict.count
        self.myClassificationProgressBar.setValue(val)
        if not self.classificationPredict.isRunning():
            self.classificationTimer.stop()
            self.classificationPredict.wait()
            self.finalize()
            self.terminateClassificationProgressBar()
    def finalize(self):
        # Build the prediction overlays. A MemoryError is reported to the
        # user (best effort) instead of crashing the application.
        self.classificationTimer.stop()
        del self.classificationTimer
        try:
            self.classificationPredict.generateOverlays(self.parent._activeImage)
            self.parent.labelWidget.repaint()
        except MemoryError,e:
            print "Out of memory:", e
            QtGui.QErrorMessage.qtHandler().showMessage("Not enough memory to create all classification results")
        self.parent.setTabBusy(False)
    def terminateClassificationProgressBar(self):
        # Remove the bar and re-enable the ribbon buttons.
        self.parent.statusBar().removeWidget(self.myClassificationProgressBar)
        self.parent.statusBar().hide()
        self.parent.ribbon.getTab('Classification').btnTrainPredict.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnStartLive.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnExportClassifier.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnClassifierOptions.setEnabled(True)
        self.parent.ribbon.getTab('Classification').btnSelectFeatures.setEnabled(True)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, open
# This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format.
# Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms.
#
# Limitations:
# * List outputs are not supported.
# * Default values are not extracted from the documentation of the Nipype interface.
# * The following input types must be ignored for the output path template creation (see option -t):
# ** String restrictions, i.e. String inputs that accept only a restricted set of values.
# ** mutually exclusive inputs.
# * Path-templates are wrong when output files are not created in the execution directory (e.g. when a sub-directory is created).
# * Optional outputs, i.e. outputs that are not always produced, may not be detected.
import os
import argparse
import sys
import tempfile
import simplejson as json
from ..scripts.instance import import_module
def generate_boutiques_descriptor(module, interface_name, ignored_template_inputs, docker_image, docker_index, verbose, ignore_template_numbers):
    '''
    Returns a JSON string containing a JSON Boutiques description of a Nipype interface.

    Arguments:
    * module: module (object, or importable module name as a string) where the Nipype interface is declared.
    * interface_name: name of the Nipype interface class within the module.
    * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates.
    * docker_image / docker_index: optional Docker metadata copied into the descriptor.
    * verbose: if True, print progress information.
    * ignore_template_numbers: True if numbers must be ignored in output path creations.

    Raises:
    * Exception if `module` is falsy or the tool produces no output.
    '''
    if not module:
        raise Exception("Undefined module.")

    # Retrieves Nipype interface
    if isinstance(module, str):
        import_module(module)
        module_name = str(module)
        module = sys.modules[module]
    else:
        # BUGFIX: module_name was previously assigned only in the string
        # branch, raising a NameError below when a module object was passed.
        module_name = module.__name__
    interface = getattr(module, interface_name)()
    inputs = interface.input_spec()
    outputs = interface.output_spec()

    # Tool description
    tool_desc = {}
    tool_desc['name'] = interface_name
    tool_desc['command-line'] = "nipype_cmd " + module_name + " " + interface_name + " "
    tool_desc['description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")."
    tool_desc['inputs'] = []
    tool_desc['outputs'] = []
    tool_desc['tool-version'] = interface.version
    tool_desc['schema-version'] = '0.2-snapshot'
    if docker_image:
        tool_desc['docker-image'] = docker_image
    if docker_index:
        tool_desc['docker-index'] = docker_index

    # Generates tool inputs (sorted for a deterministic descriptor).
    # `inp` (not `input`) to avoid shadowing the Python builtin.
    for name, spec in sorted(interface.inputs.traits(transient=None).items()):
        inp = get_boutiques_input(inputs, interface, name, spec, ignored_template_inputs, verbose, ignore_template_numbers)
        tool_desc['inputs'].append(inp)
        tool_desc['command-line'] += inp['command-line-key'] + " "
        if verbose:
            print("-> Adding input " + inp['name'])

    # Generates tool outputs; outputs without a recoverable path template
    # are skipped.
    for name, spec in sorted(outputs.traits(transient=None).items()):
        output = get_boutiques_output(name, interface, tool_desc['inputs'], verbose)
        if output['path-template'] != "":
            tool_desc['outputs'].append(output)
            if verbose:
                print("-> Adding output " + output['name'])
        elif verbose:
            print("xx Skipping output " + output['name'] + " with no path template.")
    if tool_desc['outputs'] == []:
        raise Exception("Tool has no output.")

    # Removes all temporary values from inputs (otherwise they will
    # appear in the JSON output)
    for inp in tool_desc['inputs']:
        del inp['tempvalue']

    return json.dumps(tool_desc, indent=4, separators=(',', ': '))
def get_boutiques_input(inputs, interface, input_name, spec, ignored_template_inputs, verbose, ignore_template_numbers):
    """
    Returns a dictionary containing the Boutiques input corresponding to a Nipype input.

    Args:
    * inputs: inputs of the Nipype interface.
    * interface: Nipype interface.
    * input_name: name of the Nipype input.
    * spec: Nipype input spec.
    * ignored_template_inputs: input names for which no temporary value must be generated.
    * verbose: if True, print progress information.
    * ignore_template_numbers: True if numbers must be ignored in output path creations.

    Assumes that:
    * Input names are unique.

    Side effects: may overwrite `spec.desc`, and may set a temporary value on
    `interface.inputs` (used later by get_boutiques_output to recover output
    path templates).
    """
    if not spec.desc:
        spec.desc = "No description provided."
    spec_info = spec.full_info(inputs, input_name, None)

    # NOTE(review): `input` shadows the Python builtin of the same name.
    input = {}
    input['id'] = input_name
    input['name'] = input_name.replace('_', ' ').capitalize()
    # Type and list-ness are recovered by parsing the human-readable spec info.
    input['type'] = get_type_from_spec_info(spec_info)
    input['list'] = is_list(spec_info)
    input['command-line-key'] = "[" + input_name.upper() + "]" # assumes that input names are unique
    input['command-line-flag'] = ("--%s" % input_name + " ").strip()
    input['tempvalue'] = None
    input['description'] = spec_info.capitalize() + ". " + spec.desc.capitalize()
    if not input['description'].endswith('.'):
        input['description'] += '.'
    # An input is optional unless the spec explicitly marks it mandatory.
    if not (hasattr(spec, "mandatory") and spec.mandatory):
        input['optional'] = True
    else:
        input['optional'] = False
    if spec.usedefault:
        input['default-value'] = spec.default_value()[1]

    # Create unique, temporary value.
    temp_value = must_generate_value(input_name, input['type'], ignored_template_inputs, spec_info, spec, ignore_template_numbers)
    if temp_value:
        tempvalue = get_unique_value(input['type'], input_name)
        # Setting the value on the interface lets _list_outputs() embed it in
        # output paths, which get_boutiques_output() then turns into templates.
        setattr(interface.inputs, input_name, tempvalue)
        input['tempvalue'] = tempvalue
        if verbose:
            print("oo Path-template creation using " + input['id'] + "=" + str(tempvalue))

    # Now that temp values have been generated, set Boolean types to
    # Number (there is no Boolean type in Boutiques)
    if input['type'] == "Boolean":
        input['type'] = "Number"

    return input
def get_boutiques_output(name, interface, tool_inputs, verbose=False):
    """
    Returns a dictionary containing the Boutiques output corresponding to a Nipype output.

    Args:
    * name: name of the Nipype output.
    * interface: Nipype interface.
    * tool_inputs: list of tool inputs (as produced by method get_boutiques_input).
    * verbose: accepted for symmetry with the caller; not used here.

    Assumes that:
    * Output names are unique.
    * Input values involved in the path template are defined.
    * Output files are written in the current directory.
    * There is a single output value (output lists are not supported).
    """
    output = {}
    output['name'] = name.replace('_', ' ').capitalize()
    output['id'] = name
    output['type'] = "File"
    output['path-template'] = ""
    output['optional'] = True # no real way to determine if an output is always produced, regardless of the input values.

    # Path template creation.
    # _list_outputs() reflects the unique temporary values previously set by
    # get_boutiques_input(), so the output path embeds those markers.
    output_value = interface._list_outputs()[name]
    if output_value != "" and isinstance(output_value, str): # FIXME: this crashes when there are multiple output values.
        # Go find from which input value it was built
        for input in tool_inputs:
            if not input['tempvalue']:
                continue
            input_value = input['tempvalue']
            if input['type'] == "File":
                # Take the base name
                input_value = os.path.splitext(os.path.basename(input_value))[0]
            if str(input_value) in output_value:
                # Substitute the temporary marker with the input's
                # command-line key to obtain the template.
                output_value = os.path.basename(output_value.replace(input_value, input['command-line-key'])) # FIXME: this only works if output is written in the current directory
                output['path-template'] = os.path.basename(output_value)
    return output
def get_type_from_spec_info(spec_info):
    '''
    Maps a Nipype spec-info string to a Boutiques input type.

    Markers are checked in priority order (File, Number, Boolean); anything
    unrecognized falls back to String. Parsing the human-readable spec info
    is, unfortunately, the only practical way to recover the type.
    '''
    markers = (
        ("File", ("an existing file name", "input volumes")),
        ("Number", ("an integer", "a float")),
        ("Boolean", ("a boolean",)),
    )
    for type_name, needles in markers:
        if any(needle in spec_info for needle in needles):
            return type_name
    return "String"
def is_list(spec_info):
    '''
    Returns True if the spec info looks like it describes a list
    parameter. There must be a better way in Nipype to check if an input
    is a list.
    '''
    # Idiom: a membership test already yields a bool; the previous
    # if/return True/return False chain was redundant.
    return "a list" in spec_info
def get_unique_value(type, id):
    '''
    Returns a unique value of type 'type', for input with id 'id',
    assuming id is unique.

    Raises KeyError for an unknown type (matching the previous
    dict-lookup behavior).
    '''
    # BUGFIX: the old implementation built a dict of candidate values, which
    # eagerly called create_tempfile() -- leaking one temporary file on EVERY
    # call, even for Boolean/Number/String inputs. Dispatch lazily instead.
    if type == "File":
        return os.path.abspath(create_tempfile())
    if type == "Boolean":
        return True
    if type == "Number":
        return abs(hash(id))  # abs in case input param must be positive...
    if type == "String":
        return id
    raise KeyError(type)
def create_tempfile():
    '''
    Creates a temp file, writes a small placeholder payload, and returns
    its name. The caller is responsible for deleting the file.
    '''
    fileTemp = tempfile.NamedTemporaryFile(delete=False)
    try:
        # BUGFIX: NamedTemporaryFile opens in binary mode ('w+b') by default,
        # so writing the text string "hello" raises TypeError on Python 3.
        # Write bytes, which works on both Python 2 and 3.
        fileTemp.write(b"hello")
    finally:
        fileTemp.close()
    return fileTemp.name
def must_generate_value(name, type, ignored_template_inputs, spec_info, spec, ignore_template_numbers):
    '''
    Return True if a temporary value must be generated for this input.

    Arguments:
    * name: input name.
    * type: input_type.
    * ignored_template_inputs: a list of inputs names for which no value must be generated.
    * spec_info: spec info of the Nipype input.
    * spec: Nipype input spec (its `xor` attribute is consulted).
    * ignore_template_numbers: True if numbers must be ignored.
    '''
    # Numbers are skipped entirely when the caller asked to ignore them.
    if ignore_template_numbers and type == "Number":
        return False
    # Of a mutually-exclusive (xor) group, only the first member gets a value.
    if spec.xor and spec.xor[0] != name:
        return False
    # Specs we cannot fabricate a value for: directories, sequences,
    # dictionaries, and restricted-choice strings (best-guess detection).
    unsupported_markers = (
        "an existing directory name",
        "a list",
        "a tuple",
        "a dictionary",
        "' or '",
    )
    for marker in unsupported_markers:
        if marker in spec_info:
            return False
    # Finally honour the explicit ignore list (which may be None or empty).
    if not ignored_template_inputs:
        return True
    return name not in ignored_template_inputs
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
  """Abstract optimizer base class.

  Note: this is the parent class of all optimizers, not an actual optimizer
  that can be used for training models.

  All Keras optimizers support the following keyword arguments:

      clipnorm: float >= 0. Gradients will be clipped
          when their L2 norm exceeds this value.
      clipvalue: float >= 0. Gradients will be clipped
          when their absolute value exceeds this value.
  """

  def __init__(self, **kwargs):
    allowed_kwargs = {'clipnorm', 'clipvalue'}
    # Validate every keyword before accepting any of them; both clipping
    # arguments must be non-negative to be meaningful.
    for k, v in kwargs.items():
      if k not in allowed_kwargs:
        raise TypeError('Unexpected keyword argument '
                        'passed to optimizer: ' + str(k))
      if v < 0:
        raise ValueError('Expected {} >= 0, received: {}'.format(k, v))
    self.__dict__.update(kwargs)
    self.updates = []
    self.weights = []

  # Set this to False, indicating `apply_gradients` does not take the
  # `experimental_aggregate_gradients` argument.
  _HAS_AGGREGATE_GRAD = False

  def _create_all_weights(self, params):
    """Creates and sets all optimizer weights.

    Args:
      params: list or tuple of `Variable` objects that will be minimized
        using this optimizer.

    Returns:
      Specific weight values that are used in `get_updates`
    """
    raise NotImplementedError

  def get_updates(self, loss, params):
    raise NotImplementedError

  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.

    Arguments:
        loss: Loss tensor.
        params: List of variables.

    Returns:
        List of gradient tensors.

    Raises:
        ValueError: In case any gradient cannot be computed (e.g. if gradient
          function not implemented).
    """
    grads = K.gradients(loss, params)
    if any(g is None for g in grads):
      raise ValueError('An operation has `None` for gradient. '
                       'Please make sure that all of your ops have a '
                       'gradient defined (i.e. are differentiable). '
                       'Common ops without gradient: '
                       'K.argmax, K.round, K.eval.')
    # Optional clipping: first per-tensor norm, then by absolute value.
    if hasattr(self, 'clipnorm'):
      grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    if hasattr(self, 'clipvalue'):
      bound = self.clipvalue
      grads = [clip_ops.clip_by_value(g, -bound, bound) for g in grads]
    return grads

  def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: a list of Numpy arrays. The number of arrays and their shape
            must match number of the dimensions of the weights of the optimizer
            (i.e. it should match the output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    if len(params) != len(weights):
      raise ValueError('Length of the specified weight list (' +
                       str(len(weights)) +
                       ') does not match the number of weights '
                       'of the optimizer (' + str(len(params)) + ')')
    # Validate all shapes first, then push the values in one batched call.
    current_values = K.batch_get_value(params)
    for cv, w in zip(current_values, weights):
      if cv.shape != w.shape:
        raise ValueError('Optimizer weight shape ' + str(cv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
    K.batch_set_value(list(zip(params, weights)))

  def get_weights(self):
    """Returns the current value of the weights of the optimizer.

    Returns:
        A list of numpy arrays.
    """
    return K.batch_get_value(self.weights)

  def get_config(self):
    """Returns the optimizer configuration as a JSON-serializable dict."""
    config = {}
    for attr in ('clipnorm', 'clipvalue'):
      if hasattr(self, attr):
        config[attr] = getattr(self, attr)
    return config

  @classmethod
  def from_config(cls, config):
    """Reconstructs an optimizer from a `get_config()` dictionary."""
    return cls(**config)
class SGD(Optimizer):
  """Stochastic gradient descent optimizer.

  Includes support for momentum,
  learning rate decay, and Nesterov momentum.

  Arguments:
      lr: float >= 0. Learning rate.
      momentum: float >= 0. Parameter that accelerates SGD in the relevant
        direction and dampens oscillations.
      decay: float >= 0. Learning rate decay over each update.
      nesterov: boolean. Whether to apply Nesterov momentum.
  """

  def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
    super(SGD, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      # Backend variables so hyperparameters can be tuned at runtime.
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.momentum = K.variable(momentum, name='momentum')
      self.decay = K.variable(decay, name='decay')
    # Python-side copy used to decide whether decay is applied at all.
    self.initial_decay = decay
    self.nesterov = nesterov

  def _create_all_weights(self, params):
    # One momentum accumulator per trainable parameter.
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments
    return moments

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # momentum
    moments = self._create_all_weights(params)
    for p, g, m in zip(params, grads, moments):
      v = self.momentum * m - lr * g  # velocity
      self.updates.append(state_ops.assign(m, v))

      if self.nesterov:
        # Look-ahead step evaluated after applying momentum.
        new_p = p + self.momentum * v - lr * g
      else:
        new_p = p + v

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'momentum': float(K.get_value(self.momentum)),
        'decay': float(K.get_value(self.decay)),
        'nesterov': self.nesterov
    }
    base_config = super(SGD, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
  """RMSProp optimizer.

  It is recommended to leave the parameters of this optimizer
  at their default values
  (except the learning rate, which can be freely tuned).

  Arguments:
      lr: float >= 0. Learning rate.
      rho: float >= 0. Discounting factor for the moving average of
        squared gradients.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
    super(RMSprop, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.rho = K.variable(rho, name='rho')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python-side copy used to decide whether decay is applied at all.
    self.initial_decay = decay

  def _create_all_weights(self, params):
    # One accumulator (moving average of squared gradients) per parameter.
    accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = accumulators
    return accumulators

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    accumulators = self._create_all_weights(params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a in zip(params, grads, accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))
      # RMS-normalized gradient step.
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': float(K.get_value(self.rho)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(RMSprop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
  """Adagrad optimizer.

  Adagrad is an optimizer with parameter-specific learning rates,
  which are adapted relative to how frequently a parameter gets
  updated during training. The more updates a parameter receives,
  the smaller the updates.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  # Arguments
      lr: float >= 0. Initial learning rate.
      epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.

  # References
      - [Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
  """

  def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
    super(Adagrad, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Python-side copy used to decide whether decay is applied at all.
    self.initial_decay = decay

  def _create_all_weights(self, params):
    # One sum-of-squared-gradients accumulator per parameter.
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators
    return accumulators

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    accumulators = self._create_all_weights(params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a in zip(params, grads, accumulators):
      new_a = a + math_ops.square(g)  # update accumulator
      self.updates.append(state_ops.assign(a, new_a))
      # Step scaled by the accumulated gradient history of this parameter.
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
  """Adadelta optimizer.

  Adadelta is a more robust extension of Adagrad
  that adapts learning rates based on a moving window of gradient updates,
  instead of accumulating all past gradients. This way, Adadelta continues
  learning even when many updates have been done. Compared to Adagrad, in the
  original version of Adadelta you don't have to set an initial learning
  rate. In this version, initial learning rate and decay factor can
  be set, as in most other Keras optimizers.

  It is recommended to leave the parameters of this optimizer
  at their default values.

  # Arguments
      lr: float >= 0. Initial learning rate, defaults to 1.
          It is recommended to leave it at the default value.
      rho: float >= 0. Adadelta decay factor, corresponding to fraction of
          gradient to keep at each time step.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Initial learning rate decay.

  # References
      - [Adadelta - an adaptive learning rate
        method](http://arxiv.org/abs/1212.5701)
  """

  def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
    super(Adadelta, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    # Unlike lr/decay, rho is kept as a plain Python float rather than a
    # backend variable.
    self.rho = rho
    self.epsilon = epsilon
    # Python-side copy used to decide whether decay is applied at all.
    self.initial_decay = decay

  def _create_all_weights(self, params):
    # Two accumulators per parameter: squared gradients and squared updates.
    shapes = [K.int_shape(p) for p in params]
    accumulators = [K.zeros(shape) for shape in shapes]
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    return accumulators, delta_accumulators

  def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    accumulators, delta_accumulators = self._create_all_weights(params)

    lr = self.lr
    if self.initial_decay > 0:
      # Inverse-time decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))

    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))

      # use the new accumulator and the *old* delta_accumulator
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
      new_p = p - lr * update

      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)

      self.updates.append(state_ops.assign(p, new_p))

      # update delta_accumulator
      new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
      self.updates.append(state_ops.assign(d_a, new_d_a))
    return self.updates

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': self.rho,
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adadelta, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
  """Adam optimizer.

  Default parameters follow those provided in the original paper.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1: float, 0 < beta < 1. Generally close to 1.
      beta_2: float, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
      amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm
        from the paper "On the Convergence of Adam and Beyond".
  """

  def __init__(self,
               lr=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               amsgrad=False,
               **kwargs):
    super(Adam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      # Step counter and hyperparameters live in backend variables so they
      # can be mutated (e.g. by schedules/callbacks) without a rebuild.
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Plain-float copy of `decay`: gates whether the decay schedule is
    # applied at all in `get_updates`.
    self.initial_decay = decay
    self.amsgrad = amsgrad

  def _create_all_weights(self, params):
    """Create slot variables for `params`.

    Returns:
      Tuple `(ms, vs, vhats)` of first-moment, second-moment and (when
      `amsgrad` is set) max-second-moment slots, one per parameter.
    """
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    if self.amsgrad:
      vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    else:
      # Dummy one-element slots keep the `weights` layout identical
      # whether or not AMSGrad is enabled.
      vhats = [K.zeros(1) for _ in params]
    self.weights = [self.iterations] + ms + vs + vhats
    return ms, vs, vhats

  def get_updates(self, loss, params):
    """Build the Adam update ops for `params` given `loss`."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      # Time-based decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # Increment the step first; the read of `iterations` below must observe
    # the incremented value.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Bias-corrected step size (combines the 1/(1-beta^t) corrections of
    # both moment estimates into the learning rate).
    lr_t = lr * (
        K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
        (1. - math_ops.pow(self.beta_1, t)))
    ms, vs, vhats = self._create_all_weights(params)
    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
      # EMAs of the gradient and of its square.
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
      if self.amsgrad:
        # AMSGrad: normalize by the running maximum of v_t instead of v_t.
        vhat_t = math_ops.maximum(vhat, v_t)
        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
        self.updates.append(state_ops.assign(vhat, vhat_t))
      else:
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Return the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad
    }
    base_config = super(Adam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
  """Adamax optimizer from Adam paper's Section 7.

  It is a variant of Adam based on the infinity norm.
  Default parameters follow those provided in the paper.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               **kwargs):
    super(Adamax, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      # Hyperparameters stored as backend variables so they can be mutated
      # without rebuilding the graph.
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    # Plain-float copy of `decay`; gates the schedule in `get_updates`.
    self.initial_decay = decay

  def _create_all_weights(self, params):
    """Create slot variables: 1st moment and weighted infinity norm."""
    shapes = [K.int_shape(p) for p in params]
    # zero init of 1st moment
    ms = [K.zeros(shape) for shape in shapes]
    # zero init of exponentially weighted infinity norm
    us = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + us
    return ms, us

  def get_updates(self, loss, params):
    """Build the Adamax update ops for `params` given `loss`."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      # Time-based decay: lr / (1 + decay * iterations).
      lr = lr * (  # pylint: disable=g-no-augmented-assignment
          1. /
          (1. +
           self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))
    # Increment the step first; the read of `iterations` below must observe
    # the incremented value.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Bias correction applies to the first moment only here.
    lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
    ms, us = self._create_all_weights(params)
    for p, g, m, u in zip(params, grads, ms, us):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      # Exponentially weighted infinity norm of the gradients.
      u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
      p_t = p - lr_t * m_t / (u_t + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(u, u_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Return the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adamax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
  """Nesterov Adam optimizer.

  Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam RMSprop with Nesterov momentum.

  Default parameters follow those provided in the paper.
  It is recommended to leave the parameters of this optimizer
  at their default values.

  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
  """

  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               schedule_decay=0.004,
               **kwargs):
    super(Nadam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      # Running product of the warming momentum schedule (see get_updates).
      self.m_schedule = K.variable(1., name='m_schedule')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.schedule_decay = schedule_decay

  def _create_all_weights(self, params):
    """Create 1st- and 2nd-moment slot variables for `params`."""
    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations, self.m_schedule] + ms + vs
    return ms, vs

  def get_updates(self, loss, params):
    """Build the Nadam update ops for `params` given `loss`."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    # Increment the step first; the read of `iterations` below must observe
    # the incremented value.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))
    ms, vs = self._create_all_weights(params)
    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
      v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
      # Nesterov-style combination of the current gradient with the
      # look-ahead momentum term.
      m_t_bar = (1. -
                 momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates

  def get_config(self):
    """Return the optimizer configuration as a JSON-serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'epsilon': self.epsilon,
        'schedule_decay': self.schedule_decay
    }
    base_config = super(Nadam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, trackable.Trackable):
  """Wrapper class for native TensorFlow optimizers."""

  def __init__(self, optimizer, iterations=None):  # pylint: disable=super-init-not-called
    """Wrap `optimizer`; create an iteration counter unless one is given."""
    self.optimizer = optimizer
    self._track_trackable(optimizer, name='optimizer')
    if iterations is None:
      with K.name_scope(self.__class__.__name__):
        self.iterations = K.variable(0, dtype='int64', name='iterations')
    else:
      self.iterations = iterations
    self._track_trackable(self.iterations, name='global_step')

  def _clip_gradients(self, grads):
    """Clip gradients according to the clipnorm and clipvalue attributes."""
    # TFOptimizer wrapper has no gradient clipping options.
    return grads

  def apply_gradients(self, grads):
    """Apply `grads` via the wrapped optimizer, stepping `iterations`."""
    self.optimizer.apply_gradients(grads, global_step=self.iterations)

  def get_grads(self, loss, params):
    """Return (gradient, variable) pairs from the wrapped optimizer."""
    return self.optimizer.compute_gradients(loss, params)

  def get_updates(self, loss, params):
    """Build update ops using the wrapped optimizer.

    Under a distribution strategy the global step from `training_util` is
    used; otherwise the wrapper's own `iterations` variable is stepped.
    """
    if distribution_strategy_context.has_strategy():
      self.updates = []
      if not params:
        # After the model vars have been created, the second call to get_updates
        # is called with params as an empty list. This ensures that we call
        # compute_gradients with params=None.
        grads = self.optimizer.compute_gradients(loss)
      else:
        grads = self.optimizer.compute_gradients(loss, params)
      global_step = training_util.get_global_step()
      opt_update = self.optimizer.apply_gradients(grads, global_step)
    else:
      if not params:
        # Nothing to optimize: just advance the iteration counter.
        self.updates = [state_ops.assign_add(self.iterations, 1)]
        return self.updates
      # Updates list starts out empty because the iterations variable is
      # incremented in optimizer.apply_gradients()
      self.updates = []
      grads = self.optimizer.compute_gradients(loss, params)
      opt_update = self.optimizer.apply_gradients(
          grads, global_step=self.iterations)
    self.updates.append(opt_update)
    return self.updates

  @property
  def weights(self):
    """Not supported for wrapped native optimizers."""
    raise NotImplementedError

  def get_config(self):
    """Not supported: wrapped native optimizers are not serializable."""
    raise NotImplementedError

  def from_config(self, config):
    """Not supported: wrapped native optimizers are not serializable."""
    raise NotImplementedError
# Aliases.
# Lowercase module-level aliases for the optimizer classes; presumably kept
# for backwards compatibility with code that referred to optimizers by these
# lowercase names -- TODO confirm against the public API exports.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
  """Serialize an optimizer instance into a Keras config structure."""
  serialized = serialize_keras_object(optimizer)
  return serialized
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping names (strings) to custom
        objects (classes and functions) to be considered during
        deserialization.

  Returns:
      A Keras Optimizer instance.
  """
  # loss_scale_optimizer has a direct dependency of optimizer, import here
  # rather than top to avoid the cyclic dependency.
  from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer  # pylint: disable=g-import-not-at-top
  builtin_optimizers = {
      'adadelta': adadelta_v2.Adadelta,
      'adagrad': adagrad_v2.Adagrad,
      'adam': adam_v2.Adam,
      'adamax': adamax_v2.Adamax,
      'nadam': nadam_v2.Nadam,
      'rmsprop': rmsprop_v2.RMSprop,
      'sgd': gradient_descent_v2.SGD,
      'ftrl': ftrl.Ftrl,
      'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  lowered = config['class_name'].lower()
  if lowered in builtin_optimizers:
    config['class_name'] = lowered
  return deserialize_keras_object(
      config,
      module_objects=builtin_optimizers,
      custom_objects=custom_objects,
      printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
  """Retrieves a Keras Optimizer instance.

  Arguments:
      identifier: Optimizer identifier, one of
          - String: name of an optimizer
          - Dictionary: configuration dictionary.
          - Keras Optimizer instance (it will be returned unchanged).
          - TensorFlow Optimizer instance (it will be wrapped as a Keras
            Optimizer).

  Returns:
      A Keras Optimizer instance.

  Raises:
      ValueError: If `identifier` cannot be interpreted.
  """
  # Already a Keras optimizer: hand it back untouched.
  if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
    return identifier
  # Wrap TF optimizer instances
  if isinstance(identifier, tf_optimizer_module.Optimizer):
    wrapped = TFOptimizer(identifier)
    K.track_tf_optimizer(wrapped)
    return wrapped
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if isinstance(identifier, six.string_types):
    return deserialize({'class_name': str(identifier), 'config': {}})
  raise ValueError('Could not interpret optimizer identifier:', identifier)
| |
#!/usr/bin/python2
'''
The MIT License (MIT)
Copyright (c) 2013 Quentin Gibert
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This product includes GeoLite2 data created by MaxMind, available from http://www.maxmind.com
'''
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time

import Image
import geoip2.database
import geoip2.errors
import geoip2.models
from gi.repository import Gtk, Gdk, GLib
# Per-run scratch directory for traceroute/GMT intermediate files.
# tempfile.mkdtemp() yields a unique, private (0700) directory, unlike the
# previous predictable "/tmp/pyvisualtrace_<time>" name, which could collide
# or be pre-created by another user (symlink attack).
const_work_dir = tempfile.mkdtemp(prefix="pyvisualtrace_") + "/"
os.chdir(const_work_dir)
class MyWindow(Gtk.Window):
    """Main window: node log (left), world map + trace controls (right)."""

    def __init__(self):
        Gtk.Window.__init__(self, title="Traceroute")
        # Left pane: scrollable, read-only text view listing located nodes.
        self.frame_log = Gtk.Frame(label='Nodes')
        self.frame_log.set_label_align(0.5, 0.5)
        self.frame_log.set_shadow_type(Gtk.ShadowType.IN)
        self.hbox0 = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
        self.add(self.hbox0)
        self.hbox0.pack_start(self.frame_log, True, True, 0)
        self.scrolled_window = Gtk.ScrolledWindow()
        self.frame_log.add(self.scrolled_window)
        self.scrolled_window.set_size_request(400, -1)
        self.textview = Gtk.TextView()
        self.textview.set_editable(False)
        self.textview.set_cursor_visible(False)
        self.scrolled_window.add(self.textview)
        # Right pane: map image above the IPv4/IPv6 choice and controls.
        self.vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.hbox0.pack_start(self.vbox, False, False, 6)
        # Placeholder map shown until a trace produces result.bmp.
        self.img = Gtk.Image.new_from_file(str(sys.path[0]) + "/default.bmp")
        self.frame_rgb = Gtk.Frame(label='Map')
        self.frame_rgb.set_label_align(0.5, 0.5)
        self.frame_rgb.set_shadow_type(Gtk.ShadowType.IN)
        self.frame_rgb.add(self.img)
        self.vbox.pack_start(self.frame_rgb, True, True, 0)
        self.frame_rgb.set_size_request(1024, 530)
        # IP-version radio buttons (IPv4 is the default).
        self.hbox1 = Gtk.Box(spacing=0)
        self.vbox.pack_start(self.hbox1, False, False, 0)
        self.check_v4 = Gtk.RadioButton(None, "Use ipv4")
        self.check_v6 = Gtk.RadioButton.new_from_widget(self.check_v4)
        self.check_v6.set_label("Use ipv6")
        self.hbox1.pack_start(self.check_v4, False, False, 6)
        self.hbox1.pack_start(self.check_v6, False, False, 0)
        # Spinner + host entry + Trace button.
        self.hbox = Gtk.Box(spacing=0)
        self.vbox.pack_start(self.hbox, False, False, 0)
        self.spinner = Gtk.Spinner()
        self.hbox.pack_start(self.spinner, False, True, 6)
        self.entry = Gtk.Entry()
        self.entry.set_text("www.example.com")
        self.hbox.pack_start(self.entry, True, True, 6)
        self.button1 = Gtk.Button(label="Trace")
        self.button1.connect("clicked", self.on_button_clicked)
        self.hbox.pack_start(self.button1, False, False, 6)
        self.statusbar = Gtk.Statusbar()
        self.vbox.pack_start(self.statusbar, False, False, 0)

    def on_button_clicked(self, widget):
        """Start tracing the entered host in two background threads."""
        if self.check_v6.get_active():
            ipv = 6
        else:
            ipv = 4
        text = str(self.entry.get_text())
        # Disable the button until the trace finishes or fails.
        self.button1.set_sensitive(False)
        self.spinner.start()
        self.statusbar.push(self.statusbar.get_context_id("statusbar"), "tracing " + str(text) + "... (IPv" + str(ipv) + ")")
        # NOTE(review): both workers update GTK widgets from non-main
        # threads; GTK normally requires UI calls on the main thread (or
        # guarded by Gdk.threads_enter/leave) -- confirm this is safe here.
        threading.Thread(target=self.worker_trace, args=(text, ipv)).start()
        threading.Thread(target=self.update_image, args=(text, )).start()

    def worker_trace(self, text, ipv):
        """Background worker: trace, geolocate hops, render the map."""
        gtxtbuff = Gtk.TextBuffer()
        try:
            try:
                # Remove any stale result so update_image waits for fresh data.
                os.remove("result.bmp")
            except Exception as e:
                print str(e)
            ip_list = trace_route(str(text), ipv)
            nodes_log = locate_nodes(ip_list)
            gtxtbuff.set_text(nodes_log)
            trace_map()
            # Intermediate files are unneeded once result.bmp exists.
            garbage = ["points.dat", "start_stop.dat", "map.ps", "map.bmp"]
            for f in garbage:
                os.remove(f)
        except Exception as e:
            print str(e)
            # Show the failure placeholder and re-enable the UI.
            shutil.copyfile(str(sys.path[0]) + "/failure.bmp", "./result.bmp")
            self.statusbar.push(self.statusbar.get_context_id("statusbar"), "Failed to trace " + str(text))
            self.button1.set_sensitive(True)
            self.spinner.stop()
            return
        self.textview.set_buffer(gtxtbuff)
        self.statusbar.push(self.statusbar.get_context_id("statusbar"), "done tracing " + str(text) + " ")
        return

    def update_image(self, text):
        """Poll for result.bmp, display it, clean up, and stop the spinner."""
        while True:
            time.sleep(1)
            try:
                # open() doubles as an existence check; keep waiting until
                # worker_trace (or the failure path) has produced the file.
                with open("result.bmp"):
                    # Extra grace period so the file is fully written.
                    time.sleep(1)
                self.img.set_from_file("result.bmp")
                self.button1.set_sensitive(True)
                self.spinner.stop()
                os.remove("result.bmp")
                return
            except Exception:
                continue
def trace_route(host, ipv):
    """Run traceroute against *host* and return the hop IP addresses.

    Args:
        host: hostname or address to trace.
        ipv: 4 or 6; selects traceroute/traceroute6 and the matching
            address-extraction pattern.

    Returns:
        List of IP address strings.  The first two matched addresses are
        skipped (presumably the target echoed in the header line plus the
        first hop -- TODO confirm against actual traceroute output).

    Raises:
        subprocess.CalledProcessError: if traceroute exits non-zero.
    """
    if ipv == 6:
        pattern = r"\((.*:.*)\)"
        traceroute = "traceroute6"
    else:
        pattern = r"\((\d+\.\d+\.\d+\.\d+)\)"
        traceroute = "traceroute"
    output = subprocess.check_output([traceroute, host])
    ip_list = re.findall(pattern, output)
    # Skip the first two matches via slicing: unlike the previous double
    # `del ip_list[0]`, this does not raise IndexError when traceroute
    # produced fewer than two addresses.
    return ip_list[2:]
def locate_nodes(ip_list):
output = ""
points = open("points.dat", "w")
start_stop = open("start_stop.dat", "w")
points_list = []
reader = geoip2.database.Reader(str(sys.path[0]) + "/GeoLite2-City.mmdb")
for ip in ip_list:
try:
response = reader.city(ip)
except Exception as e:
print(str(e))
if response != None:
output += '''
%s
Country: %s
City: %s
Latitude: %s
Longitude: %s
''' % (ip, response.country.iso_code,
response.city.name,
response.location.latitude,
response.location.longitude)
pts_output = '''%s %s 0.1\n''' % (response.location.longitude, response.location.latitude)
points.write(pts_output)
points_list.append(pts_output)
stop = points_list.pop()
start = points_list.pop(0)
start_stop.write(str(start) + "\n" + str(stop))
start_stop.close()
points.close()
print output
return output
def trace_map():
    """Render the traced route onto a world map and save it as result.bmp.

    Uses the GMT tools: pscoast draws the base map, three psxy passes add
    the route lines, hop points, and start/stop markers; the combined
    PostScript is rasterized with ps2raster and resized to fit the UI.
    """
    ps_file = open("map.ps", "w")
    # GMT binaries may live outside the default PATH.
    os.environ['PATH'] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/gmt/bin"
    # Base map: coastlines, borders, gray land ('-K' keeps the PS open so
    # the overlays below can be appended).
    ps_map = subprocess.check_output(['pscoast', '-W', '-N1/thinner',
        '-N2/faint', '-Rd', '-JN0/20c', '-Bg30', '-Dc',
        '-A10000', '-Ggray', '-P', '-X0.5c', '-Y10c', '-K'])
    ps_lines = subprocess.check_output(['psxy', 'points.dat', '-O', '-Rd', '-JN', '-Dc', '-A10000', '-P', '-Wthick/red', '-K'])
    ps_points = subprocess.check_output(['psxy', 'points.dat', '-O', '-Rd', '-JN', '-Dc', '-A10000', '-P', '-Sc', '-G0', '-K'])
    ps_start_stop = subprocess.check_output(['psxy', 'start_stop.dat', '-O', '-Rd', '-JN', '-Dc', '-A10000', '-P', '-Sc0.05', '-G255/0/0'])
    # Concatenate the layers into a single PostScript document.
    ps_file.write(ps_map)
    ps_file.write(ps_lines)
    ps_file.write(ps_points)
    ps_file.write(ps_start_stop)
    ps_file.close()
    # Rasterize ('-Tb': BMP, '-A': crop to content), then scale for the UI.
    subprocess.call(['ps2raster', 'map.ps', '-Tb', '-A'])
    image = Image.open("map.bmp")
    resized_img = image.resize((1024, 520), Image.ANTIALIAS)
    resized_img.save("result.bmp")
# Thread support must be initialized before any other GTK/GDK calls in a
# program that uses threads; previously these ran after the window was
# built, which is too late to be reliable.
GLib.threads_init()
Gdk.threads_init()
win = MyWindow()
win.set_resizable(False)
win.connect("delete-event", Gtk.main_quit)
win.show_all()
# Run the main loop under the GDK lock, matching the worker threads.
Gdk.threads_enter()
Gtk.main()
Gdk.threads_leave()
| |
"""ML-Ensemble
:author: Sebastian Flennerhag
:copyright: 2017-2018
:license: MIT
Estimator wrappers around base classes.
"""
from .. import config
from .base import BaseParallel, OutputMixin
from .backend import ParallelProcessing
from ..utils.exceptions import ParallelProcessingError, NotFittedError
class EstimatorMixin(object):
    """Estimator mixin

    Mixin class that turns a :mod:`mlens.parallel` backend class into an
    estimator. The backend instance should be assigned to the ``_backend``
    attribute during a ``fit`` call via a ``_build`` method. E.g::

        Foo(EstimatorMixin, Learner):

            def __init__(self, ...):
                self._backend = None

            def _build(self):
                self._backend = Learner(...)

    It is recommended to combine :class:`EstimatorMixin` with
    `parallel.base.ParamMixin`.
    """

    def _rebuild_backend(self):
        # Refresh the backend through the optional ``_build`` hook.
        build = getattr(self, '_build', None)
        if build is not None:
            build()

    def _assert_fitted(self):
        # Guard for predict/transform on instances tracking a
        # ``__fitted__`` flag; instances without the flag pass through.
        if hasattr(self, '__fitted__') and not self.__fitted__:
            raise NotFittedError(
                "Instance not fitted (with current params).")

    def fit(self, X, y, proba=False, refit=True):
        """Fit

        Fit estimator.

        Parameters
        ----------
        X: array of size [n_samples, n_features]
            input data

        y: array of size [n_features,]
            targets

        proba: bool, optional
            whether to fit for later predict_proba calls. Will register
            number of classes to expect in later predict and transform
            calls.

        refit: bool (default = True)
            Whether to refit already fitted sub-learners.

        Returns
        -------
        self: instance
            fitted estimator.
        """
        self._rebuild_backend()
        run(get_backend(self), 'fit', X, y,
            proba=proba, refit=refit, return_preds=False)
        return self

    def fit_transform(self, X, y, proba=False, refit=True):
        """Fit

        Fit estimator and return cross-validated predictions.

        Parameters
        ----------
        X: array of size [n_samples, n_features]
            input data

        y: array of size [n_features,]
            targets

        proba: bool, optional
            whether to fit for later predict_proba calls. Will register
            number of classes to expect in later predict and transform
            calls.

        refit: bool (default = True)
            Whether to refit already fitted sub-learners.

        Returns
        -------
        P: array of size [n_samples, n_prediction_features]
            prediction generated by cross-validation.
        """
        self._rebuild_backend()
        return run(get_backend(self), 'fit', X, y, proba=proba,
                   refit=refit, return_preds=True)

    def predict(self, X, proba=False):
        """Predict

        Predict using full-fold estimator (fitted on all data).

        Parameters
        ----------
        X: array of size [n_samples, n_features]
            input data

        proba: bool, optional
            whether to predict class probabilities

        Returns
        -------
        P: array of size [n_samples, n_prediction_features]
            prediction with full-fold estimator.
        """
        self._assert_fitted()
        return run(
            get_backend(self), 'predict', X, proba=proba, return_preds=True)

    def transform(self, X, proba=False):
        """Transform

        Use cross-validated estimators to generate predictions.

        Parameters
        ----------
        X: array of size [n_samples, n_features]
            input data

        proba: bool, optional
            whether to predict class probabilities

        Returns
        -------
        P: array of size [n_samples, n_prediction_features]
            prediction generated by cross-validation.
        """
        self._assert_fitted()
        return run(get_backend(self), 'transform', X, proba=proba,
                   return_preds=True)
def get_backend(instance):
    """Return the parallel backend behind ``instance``.

    A truthy ``_backend`` attribute takes precedence over the instance
    itself; whichever is selected must derive from :class:`BaseParallel`.
    """
    candidate = getattr(instance, '_backend', None) or instance
    if issubclass(candidate.__class__, BaseParallel):
        return candidate
    raise ParallelProcessingError(
        "The estimator does not have a backend. Cannot process.")
def set_flags(backend, flags):
    """Temporarily apply ``flags`` to a backend (or list of backends).

    Every attribute in ``flags`` that already exists on a target object is
    overwritten, and its previous value is recorded so that
    :func:`reset_flags` can restore it later.

    Returns
    -------
    resets: list
        ``(object, dict)`` pairs holding the overwritten values.
    """
    cls_name = backend.__class__.__name__.lower()
    if 'layer' in cls_name:
        targets = [backend] + backend.learners
    elif 'group' in cls_name:
        targets = backend.learners
    elif isinstance(backend, list):
        targets = backend
    else:
        targets = [backend]

    resets = []
    for target in targets:
        previous = {}
        for attr, new_val in flags.items():
            if hasattr(target, attr):
                previous[attr] = getattr(target, attr)
                setattr(target, attr, new_val)
        resets.append((target, previous))
    return resets
def reset_flags(resets):
    """Restore the attribute values recorded by :func:`set_flags`."""
    for target, saved in resets:
        for attr, val in saved.items():
            setattr(target, attr, val)
def set_predict(kwargs):
    """Pop ``proba`` from ``kwargs`` and derive the prediction flags.

    Returns a dict holding the ``attr`` to call (``'predict_proba'`` when
    ``proba`` is truthy, ``'predict'`` otherwise); ``proba`` itself is
    included only when truthy.
    """
    proba = kwargs.pop('proba', False)
    if proba:
        return {'proba': proba, 'attr': 'predict_proba'}
    return {'attr': 'predict'}
def set_output(kwargs, job, map):
    """Derive the ``__no_output__`` flag and default ``return_preds``.

    When the caller specified ``return_preds``, output is suppressed
    exactly when predictions were not requested. Otherwise ``'fit'`` jobs
    default to producing no output while all other jobs default to
    returning predictions (and ``kwargs['return_preds']`` is set
    accordingly). Stacking jobs (``map=False``) always produce output.
    """
    if 'return_preds' in kwargs:
        no_output = not kwargs['return_preds']
    else:
        no_output = (job == 'fit')
        kwargs['return_preds'] = not no_output
    if not map:
        # Need to ensure outputs always generated for stacking
        return False
    return no_output
def run(caller, job, X, y=None, map=True, **kwargs):
    """Utility for running a ParallelProcessing job on a set of callers.

    Run is a utility mapping for setting up a ParallelProcessing job and
    executing across a set of callers. By default run executes::

        out = mgr.map(caller, job, X, y, **kwargs)

    :func:`run` handles temporary parameter changes, for instance running
    a learner with ``proba=True`` that has ``proba=False`` as default.
    Similarly, instances destined to not produce output can be forced to
    yield predictions by passing ``return_preds=True`` as a keyword argument.

    .. note:: To run a learner with a ``preprocessing`` dependency, the
        instances need to be wrapped in a :class:`Group` ::

            run(Group(learner, transformer), 'predict', X, y)

    Parameters
    ----------
    caller: instance, list
        A runnable instance, or a list of instances.

    job: str
        type of job to run. One of ``'fit'``, ``'transform'``, ``'predict'``.

    X: array-like
        input

    y: array-like, optional
        targets

    map: bool (default=True)
        whether to run a :func:`ParallelProcessing.map` job. If ``False``,
        will instead run a :func:`ParallelProcessing.stack` job.

    **kwargs: optional
        Keyword arguments. :func:`run` searches for
        ``proba`` and ``return_preds`` to temporarily update callers to run
        desired job and return desired output. Other ``kwargs`` are passed
        to either ``map`` or ``stack``.

    Returns
    -------
    out: any
        whatever ``mgr.map``/``mgr.stack`` produced for the job
        (predictions when ``return_preds`` is in effect).
    """
    # Temporarily apply prediction/output flags to the caller(s); the
    # previous values are captured in `resets` for restoration below.
    flags = set_predict(kwargs)
    flags['__no_output__'] = set_output(kwargs, job, map)
    resets = set_flags(caller, flags)
    try:
        # Dampen verbosity for the backend manager.
        verbose = max(getattr(caller, 'verbose', 0) - 4, 0)
        _backend = getattr(caller, 'backend', config.get_backend())
        n_jobs = getattr(caller, 'n_jobs', -1)
        with ParallelProcessing(_backend, n_jobs, verbose) as mgr:
            if map:
                out = mgr.map(caller, job, X, y, **kwargs)
            else:
                out = mgr.stack(caller, job, X, y, **kwargs)
    finally:
        # Always restore the caller's original flag values, even on error.
        reset_flags(resets)
    return out
| |
# -*- coding: utf-8 -*-
"""
sync_wikimedia_commons_categories.py
superlachaise_api
Created by Maxime Le Moine on 31/05/2015.
Copyright (c) 2015 Maxime Le Moine.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json, os, re, requests, sys, traceback
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone, translation
from django.utils.translation import ugettext as _
from superlachaise_api.models import *
def print_unicode(str):
    """Print a unicode string encoded as UTF-8 bytes (Python 2).

    Note: the parameter shadows the builtin ``str``; kept as-is to
    preserve the existing interface.
    """
    print str.encode('utf-8')
class Command(BaseCommand):
def request_wikimedia_commons_categories(self, wikimedia_commons_categories):
pages = {}
last_continue = {
'continue': '',
}
categories = '|'.join(wikimedia_commons_categories).encode('utf8')
while True:
# Request properties
params = {
'action': 'query',
'prop': 'title|revisions',
'rvprop': 'content',
'format': 'json',
'titles': categories,
}
params.update(last_continue)
if settings.MEDIAWIKI_USER_AGENT:
headers = {"User-Agent" : settings.MEDIAWIKI_USER_AGENT}
else:
raise 'no USER_AGENT defined in settings.py'
json_result = requests.get('https://commons.wikimedia.org/w/api.php', params=params, headers=headers).json()
if 'pages' in json_result['query']:
pages.update(json_result['query']['pages'])
if 'continue' not in json_result: break
last_continue = json_result['continue']
return pages
def request_category_members(self, wikimedia_commons_category):
category_members = []
last_continue = {
'continue': '',
}
category = wikimedia_commons_category.encode('utf8')
while True:
# Request properties
params = {
'action': 'query',
'list': 'categorymembers',
'cmtype': 'file',
'format': 'json',
'cmtitle': category,
}
params.update(last_continue)
if settings.MEDIAWIKI_USER_AGENT:
headers = {"User-Agent" : settings.MEDIAWIKI_USER_AGENT}
else:
raise 'no USER_AGENT defined in settings.py'
json_result = requests.get('https://commons.wikimedia.org/w/api.php', params=params, headers=headers).json()
if 'categorymembers' in json_result['query']:
category_members.extend(json_result['query']['categorymembers'])
if 'continue' not in json_result: break
last_continue = json_result['continue']
return [category_member['title'] for category_member in category_members]
def get_main_image(self, page):
try:
if len(page['revisions']) != 1:
raise BaseException
wikitext = page['revisions'][0]['*']
main_image = u''
for line in wikitext.split('\n'):
match_obj = re.search(r'^.*[iI]mage.*\=[\s]*(.*)[\s]*$', line)
if match_obj:
main_image = match_obj.group(1).strip()
break
if main_image:
main_image = u'File:' + main_image
return main_image
except:
return u''
def get_redirect(self, page):
try:
if len(page['revisions']) != 1:
raise BaseException
wikitext = page['revisions'][0]['*']
for line in wikitext.split('\n'):
match_obj = re.search(r'^[\s]*{{Category redirect\|(.*)}}[\s]*$', line)
if match_obj:
redirect = match_obj.group(1).strip()
self.errors.append(_('{title} is a redirection for {redirect}').format(title=page['title'], redirect=redirect))
pages = self.request_wikimedia_commons_categories([redirect])
if len(pages.values()) != 1:
raise BaseException
redirect_page = pages.values()[0]
return redirect_page
return page
except:
print_unicode(traceback.format_exc())
return page
def handle_wikimedia_commons_category(self, page):
computed_page = self.get_redirect(page)
# Get values
values_dict = {
'main_image': self.get_main_image(computed_page),
'category_members': '|'.join(self.request_category_members(computed_page['title'])),
}
# Get or create object in database
target_object_id_dict = {"wikimedia_commons_id": page['title']}
wikimedia_commons_category, created = WikimediaCommonsCategory.objects.get_or_create(**target_object_id_dict)
self.fetched_objects_pks.append(wikimedia_commons_category.pk)
modified = False
if created:
self.created_objects = self.created_objects + 1
else:
# Search for modifications
for field, value in values_dict.iteritems():
if value != getattr(wikimedia_commons_category, field):
modified = True
self.modified_objects = self.modified_objects + 1
break
if created or modified:
for field, value in values_dict.iteritems():
setattr(wikimedia_commons_category, field, value)
wikimedia_commons_category.save()
def sync_wikimedia_commons_categories(self, param_wikimedia_commons_categories):
# Get wikimedia commons categories
wikimedia_commons_categories = []
if param_wikimedia_commons_categories:
wikimedia_commons_categories = param_wikimedia_commons_categories.split('|')
else:
for openstreetmap_element in OpenStreetMapElement.objects.filter(wikimedia_commons__startswith='Category:'):
link = openstreetmap_element.wikimedia_commons
if not link in wikimedia_commons_categories:
wikimedia_commons_categories.append(link)
for wikidata_entry in WikidataEntry.objects.exclude(wikimedia_commons_category__exact=''):
sync_category = False
for instance_of in wikidata_entry.instance_of.split(';'):
if instance_of in self.synced_instance_of:
sync_category = True
break
if sync_category:
link = 'Category:' + wikidata_entry.wikimedia_commons_category
if not link in wikimedia_commons_categories:
wikimedia_commons_categories.append(link)
for wikidata_entry in WikidataEntry.objects.exclude(wikimedia_commons_grave_category=''):
link = 'Category:' + wikidata_entry.wikimedia_commons_grave_category
if not link in wikimedia_commons_categories:
wikimedia_commons_categories.append(link)
print_unicode(_('Requesting Wikimedia Commons...'))
wikimedia_commons_categories = list(set(wikimedia_commons_categories))
total = len(wikimedia_commons_categories)
count = 0
max_count_per_request = 25
self.fetched_objects_pks = []
for chunk in [wikimedia_commons_categories[i:i+max_count_per_request] for i in range(0,len(wikimedia_commons_categories),max_count_per_request)]:
print_unicode(str(count) + u'/' + str(total))
count += len(chunk)
pages = self.request_wikimedia_commons_categories(chunk)
for page in pages.values():
self.handle_wikimedia_commons_category(page)
print_unicode(str(count) + u'/' + str(total))
if not param_wikimedia_commons_categories:
# Look for deleted elements
for wikimedia_commons_category in WikimediaCommonsCategory.objects.exclude(pk__in=self.fetched_objects_pks):
self.deleted_objects = self.deleted_objects + 1
wikimedia_commons_category.delete()
def add_arguments(self, parser):
    """Register the command-line options accepted by this command."""
    # Optional '|'-separated list of category links; defaults to None,
    # which triggers a full synchronization.
    parser.add_argument(
        '--wikimedia_commons_categories',
        action='store',
        dest='wikimedia_commons_categories')
def handle(self, *args, **options):
    """Run the synchronization and record its outcome on the model.

    Raises:
        CommandError: if the Synchronization record cannot be loaded, or
            if the sync itself failed (after the failure is recorded).
    """
    # The synchronization record is named after this file ('sync_<name>.py').
    try:
        self.synchronization = Synchronization.objects.get(name=os.path.basename(__file__).split('.')[0].split('sync_')[-1])
    except Exception as exc:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed and rewrapped.
        raise CommandError(exc)
    error = None
    try:
        translation.activate(settings.LANGUAGE_CODE)
        self.synced_instance_of = json.loads(Setting.objects.get(key=u'wikimedia_commons:synced_instance_of').value)
        # Counters updated by the sync routines below.
        self.created_objects = 0
        self.modified_objects = 0
        self.deleted_objects = 0
        self.errors = []
        print_unicode(_('== Start %s ==') % self.synchronization.name)
        self.sync_wikimedia_commons_categories(options['wikimedia_commons_categories'])
        print_unicode(_('== End %s ==') % self.synchronization.name)
        self.synchronization.created_objects = self.created_objects
        self.synchronization.modified_objects = self.modified_objects
        self.synchronization.deleted_objects = self.deleted_objects
        self.synchronization.errors = ', '.join(self.errors)
        translation.deactivate()
    except Exception:
        # Record the failure on the synchronization object but still fall
        # through to save() below, so last_executed is always updated.
        print_unicode(traceback.format_exc())
        error = sys.exc_info()[1]
        self.synchronization.errors = traceback.format_exc()
    self.synchronization.last_executed = timezone.now()
    self.synchronization.save()
    if error:
        raise CommandError(error)
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes and methods relating to user rights."""
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class ExplorationRightsTests(test_utils.GenericTestBase):
    """Test that rights for actions on explorations work as expected."""
    # Exploration id used by tests that create their own exploration.
    EXP_ID = 'exp_id'
    def setUp(self):
        """Sign up five ordinary users (A-E) plus one admin."""
        super(ExplorationRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.signup('c@example.com', 'C')
        self.signup('d@example.com', 'D')
        self.signup('e@example.com', 'E')
        self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_id_c = self.get_user_id_from_email('c@example.com')
        self.user_id_d = self.get_user_id_from_email('d@example.com')
        self.user_id_e = self.get_user_id_from_email('e@example.com')
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_EMAIL])
    def test_get_exploration_rights_for_nonexistent_exploration(self):
        """Strict lookup raises; non-strict lookup returns None."""
        non_exp_id = 'this_exp_does_not_exist_id'
        with self.assertRaisesRegexp(
            Exception,
            'Entity for class ExplorationRightsModel with id '
            'this_exp_does_not_exist_id not found'
            ):
            rights_manager.get_exploration_rights(non_exp_id)
        self.assertIsNone(
            rights_manager.get_exploration_rights(non_exp_id, strict=False))
    def test_demo_exploration(self):
        """A released demo exploration is playable/viewable/editable by
        anyone, but only an admin may delete it."""
        exp_services.load_demo('1')
        rights_manager.release_ownership_of_exploration(
            feconf.SYSTEM_COMMITTER_ID, '1')
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '1'))
    def test_non_splash_page_demo_exploration(self):
        """Demo explorations off the splash page have the same rights."""
        # Note: there is no difference between permissions for demo
        # explorations, whether or not they are on the splash page.
        exp_services.load_demo('3')
        rights_manager.release_ownership_of_exploration(
            feconf.SYSTEM_COMMITTER_ID, '3')
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(rights_manager.Actor(
            self.user_id_a).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertFalse(rights_manager.Actor(
            self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(rights_manager.Actor(
            self.user_id_admin).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, '3'))
    def test_ownership_of_exploration(self):
        """Only the creator is an owner; editors and admins are not."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).is_owner(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).is_owner(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).is_owner(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_newly_created_exploration(self):
        """Creator has full rights on a new (private) exploration; admins
        can play/view but not edit/delete; other users have no rights."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_inviting_collaborator_to_exploration(self):
        """Assigning ROLE_EDITOR grants play/view/edit but not delete."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        # User B starts with no rights on the private exploration.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_inviting_playtester_to_exploration(self):
        """Assigning ROLE_VIEWER grants play/view but not edit/delete."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        # User B starts with no rights on the private exploration.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_setting_rights_of_exploration(self):
        """Viewers and editors cannot assign roles; owners can, and can
        create further owners who can themselves assign roles."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        # B is only a viewer, so cannot assign roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_exploration(
                self.user_id_b, self.EXP_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        # Even as an editor, B cannot assign roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_exploration(
                self.user_id_b, self.EXP_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role_for_exploration(
            self.user_id_a, self.EXP_ID, self.user_id_b,
            rights_manager.ROLE_OWNER)
        # As an owner, B can now assign any role.
        rights_manager.assign_role_for_exploration(
            self.user_id_b, self.EXP_ID, self.user_id_c,
            rights_manager.ROLE_OWNER)
        rights_manager.assign_role_for_exploration(
            self.user_id_b, self.EXP_ID, self.user_id_d,
            rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_exploration(
            self.user_id_b, self.EXP_ID, self.user_id_e,
            rights_manager.ROLE_VIEWER)
    def test_publishing_and_unpublishing_exploration(self):
        """Publishing makes an exploration playable by everyone; only an
        admin may unpublish, which restores owner-only access."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        # The owner cannot unpublish; only admins can.
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_unpublish(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_can_only_delete_unpublished_explorations(self):
        """Deletion is allowed while private, blocked once published, and
        allowed again after an admin unpublishes."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_can_publicize_exploration(self):
        """Only an admin may publicize a published exploration."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_publicize(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_publicize(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
    def test_changing_viewability_of_exploration(self):
        """Owners/admins may toggle viewable_if_private on a private
        exploration; the setting cannot be changed once published."""
        exp = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.user_id_a, exp)
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(rights_manager.Actor(
            self.user_id_b).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_admin).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        # Setting viewability to its current value is rejected.
        with self.assertRaisesRegexp(Exception, 'already the current value'):
            rights_manager.set_private_viewability_of_exploration(
                self.user_id_a, self.EXP_ID, False)
        # Non-owners cannot change viewability at all.
        with self.assertRaisesRegexp(Exception, 'cannot be changed'):
            rights_manager.set_private_viewability_of_exploration(
                self.user_id_b, self.EXP_ID, True)
        rights_manager.set_private_viewability_of_exploration(
            self.user_id_a, self.EXP_ID, True)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.set_private_viewability_of_exploration(
            self.user_id_a, self.EXP_ID, False)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        # Viewability cannot be changed while the exploration is public.
        rights_manager.publish_exploration(self.user_id_a, self.EXP_ID)
        self.assertFalse(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        rights_manager.unpublish_exploration(self.user_id_admin, self.EXP_ID)
        self.assertTrue(rights_manager.Actor(
            self.user_id_a).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertFalse(rights_manager.Actor(
            self.user_id_b).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
        self.assertTrue(rights_manager.Actor(
            self.user_id_admin).can_change_private_viewability(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID))
class CollectionRightsTests(test_utils.GenericTestBase):
    """Test that rights for actions on collections work as expected."""
    # Collection id used by tests that create their own collection.
    COLLECTION_ID = 'collection_id'
    # Exploration id for the exploration contained in a valid collection.
    EXP_ID_FOR_COLLECTION = 'exp_id_for_collection'
    def setUp(self):
        """Sign up five ordinary users (A-E) plus one admin."""
        super(CollectionRightsTests, self).setUp()
        self.signup('a@example.com', 'A')
        self.signup('b@example.com', 'B')
        self.signup('c@example.com', 'C')
        self.signup('d@example.com', 'D')
        self.signup('e@example.com', 'E')
        self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
        self.user_id_a = self.get_user_id_from_email('a@example.com')
        self.user_id_b = self.get_user_id_from_email('b@example.com')
        self.user_id_c = self.get_user_id_from_email('c@example.com')
        self.user_id_d = self.get_user_id_from_email('d@example.com')
        self.user_id_e = self.get_user_id_from_email('e@example.com')
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_EMAIL])
    def test_get_collection_rights_for_nonexistent_collection(self):
        """Strict lookup raises; non-strict lookup returns None."""
        non_col_id = 'this_collection_does_not_exist_id'
        with self.assertRaisesRegexp(
            Exception,
            'Entity for class CollectionRightsModel with id '
            'this_collection_does_not_exist_id not found'
            ):
            rights_manager.get_collection_rights(non_col_id)
        self.assertIsNone(
            rights_manager.get_collection_rights(non_col_id, strict=False))
    def test_demo_collection(self):
        """A released demo collection is playable/viewable/editable by
        anyone, but only an admin may delete it."""
        collection_services.load_demo('0')
        rights_manager.release_ownership_of_collection(
            feconf.SYSTEM_COMMITTER_ID, '0')
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, '0'))
    def test_ownership_of_collection(self):
        """Only the creator is an owner; editors and admins are not."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).is_owner(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).is_owner(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).is_owner(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
    def test_newly_created_collection(self):
        """Creator has full rights on a new (private) collection; admins
        can play/view but not edit/delete; other users have no rights."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_admin).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
    def test_inviting_collaborator_to_collection(self):
        """Collection editor rights do not extend to the exploration
        contained in the collection."""
        self.save_new_valid_collection(
            self.COLLECTION_ID, self.user_id_a,
            exploration_id=self.EXP_ID_FOR_COLLECTION)
        # Verify initial editor permissions for the collection.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        # Verify initial editor permissions for the exploration within the
        # collection.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        # User A adds user B to the collection as an editor.
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        # Ensure User B is now an editor of the collection.
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        # Ensure User B is not an editor of the exploration within the
        # collection.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
    def test_inviting_playtester_to_collection(self):
        """Collection viewer rights do not extend to the exploration
        contained in the collection."""
        self.save_new_valid_collection(
            self.COLLECTION_ID, self.user_id_a,
            exploration_id=self.EXP_ID_FOR_COLLECTION)
        # Verify initial viewer permissions for the collection.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        # Verify initial viewer permissions for the exploration within the
        # collection.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        # User A adds user B to the collection as a viewer.
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        # Ensure User B is now a viewer of the collection.
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        # Ensure User B cannot view the exploration just because he/she has
        # access to the collection containing it.
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_edit(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_delete(
                rights_manager.ACTIVITY_TYPE_EXPLORATION,
                self.EXP_ID_FOR_COLLECTION))
    def test_setting_rights_of_collection(self):
        """Viewers and editors cannot assign roles; owners can, and can
        create further owners who can themselves assign roles."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_VIEWER)
        # B is only a viewer, so cannot assign roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_collection(
                self.user_id_b, self.COLLECTION_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_EDITOR)
        # Even as an editor, B cannot assign roles.
        with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
            rights_manager.assign_role_for_collection(
                self.user_id_b, self.COLLECTION_ID, self.user_id_c,
                rights_manager.ROLE_VIEWER)
        rights_manager.assign_role_for_collection(
            self.user_id_a, self.COLLECTION_ID, self.user_id_b,
            rights_manager.ROLE_OWNER)
        # As an owner, B can now assign any role.
        rights_manager.assign_role_for_collection(
            self.user_id_b, self.COLLECTION_ID, self.user_id_c,
            rights_manager.ROLE_OWNER)
        rights_manager.assign_role_for_collection(
            self.user_id_b, self.COLLECTION_ID, self.user_id_d,
            rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_collection(
            self.user_id_b, self.COLLECTION_ID, self.user_id_e,
            rights_manager.ROLE_VIEWER)
    def test_publishing_and_unpublishing_collection(self):
        """Publishing makes a collection playable by everyone; only an
        admin may unpublish, which restores owner-only access."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        # The owner cannot unpublish; only admins can.
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_unpublish(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        rights_manager.unpublish_collection(
            self.user_id_admin, self.COLLECTION_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_play(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertFalse(
            rights_manager.Actor(self.user_id_b).can_view(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
    def test_can_only_delete_unpublished_collections(self):
        """Deletion is allowed while private, blocked once published, and
        allowed again after an admin unpublishes."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        rights_manager.unpublish_collection(
            self.user_id_admin, self.COLLECTION_ID)
        self.assertTrue(
            rights_manager.Actor(self.user_id_a).can_delete(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
    def test_can_publicize_collection(self):
        """Only an admin may publicize a published collection."""
        self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
        rights_manager.publish_collection(self.user_id_a, self.COLLECTION_ID)
        self.assertFalse(
            rights_manager.Actor(self.user_id_a).can_publicize(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
        self.assertTrue(
            rights_manager.Actor(self.user_id_admin).can_publicize(
                rights_manager.ACTIVITY_TYPE_COLLECTION, self.COLLECTION_ID))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
import prison
from sqlalchemy.sql import func
from superset import db, security_manager
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Database
from superset.utils.core import get_example_database, get_main_database
from tests.base_tests import SupersetTestCase
from tests.fixtures.certificates import ssl_certificate
from tests.test_app import app
class TestDatabaseApi(SupersetTestCase):
def insert_database(
    self,
    database_name: str,
    sqlalchemy_uri: str,
    extra: str = "",
    encrypted_extra: str = "",
    server_cert: str = "",
    expose_in_sqllab: bool = False,
) -> Database:
    """Create and commit a Database row, returning the persisted model.

    Test helper: the row is committed immediately, so callers are
    responsible for deleting it in their own cleanup.
    """
    database = Database(
        database_name=database_name,
        sqlalchemy_uri=sqlalchemy_uri,
        extra=extra,
        encrypted_extra=encrypted_extra,
        server_cert=server_cert,
        expose_in_sqllab=expose_in_sqllab,
    )
    db.session.add(database)
    db.session.commit()
    return database
def test_get_items(self):
"""
Database API: Test get items
"""
self.login(username="admin")
uri = "api/v1/database/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
expected_columns = [
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_cost_estimate",
"allows_subquery",
"allows_virtual_table_explore",
"backend",
"changed_on",
"changed_on_delta_humanized",
"created_by",
"database_name",
"explore_database_id",
"expose_in_sqllab",
"force_ctas_schema",
"function_names",
"id",
]
self.assertEqual(response["count"], 2)
self.assertEqual(list(response["result"][0].keys()), expected_columns)
def test_get_items_filter(self):
"""
Database API: Test get items with filter
"""
example_db = get_example_database()
test_database = self.insert_database(
"test-database", example_db.sqlalchemy_uri_decrypted, expose_in_sqllab=True
)
dbs = db.session.query(Database).filter_by(expose_in_sqllab=True).all()
self.login(username="admin")
arguments = {
"keys": ["none"],
"filters": [{"col": "expose_in_sqllab", "opr": "eq", "value": True}],
"order_columns": "database_name",
"order_direction": "asc",
"page": 0,
"page_size": -1,
}
uri = f"api/v1/database/?q={prison.dumps(arguments)}"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(response["count"], len(dbs))
# Cleanup
db.session.delete(test_database)
db.session.commit()
def test_get_items_not_allowed(self):
    """
    Database API: Test get items not allowed

    A gamma user has no database access: the endpoint still answers 200,
    but the listing must be empty (no 403).
    """
    self.login(username="gamma")
    # Fixed: was an f-string with no placeholders (ruff F541).
    uri = "api/v1/database/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 200)
    response = json.loads(rv.data.decode("utf-8"))
    self.assertEqual(response["count"], 0)
def test_create_database(self):
"""
Database API: Test create
"""
extra = {
"metadata_params": {},
"engine_params": {},
"metadata_cache_timeout": {},
"schemas_allowed_for_csv_upload": [],
}
self.login(username="admin")
example_db = get_example_database()
if example_db.backend == "sqlite":
return
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
"server_cert": ssl_certificate,
"extra": json.dumps(extra),
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 201)
# Cleanup
model = db.session.query(Database).get(response.get("id"))
db.session.delete(model)
db.session.commit()
def test_create_database_server_cert_validate(self):
"""
Database API: Test create server cert validation
"""
example_db = get_example_database()
if example_db.backend == "sqlite":
return
self.login(username="admin")
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
"server_cert": "INVALID CERT",
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": {"server_cert": ["Invalid certificate"]}}
self.assertEqual(rv.status_code, 400)
self.assertEqual(response, expected_response)
def test_create_database_json_validate(self):
"""
Database API: Test create encrypted extra and extra validation
"""
example_db = get_example_database()
if example_db.backend == "sqlite":
return
self.login(username="admin")
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
"encrypted_extra": '{"A": "a", "B", "C"}',
"extra": '["A": "a", "B", "C"]',
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": {
"encrypted_extra": [
"Field cannot be decoded by JSON. Expecting ':' "
"delimiter: line 1 column 15 (char 14)"
],
"extra": [
"Field cannot be decoded by JSON. Expecting ','"
" delimiter: line 1 column 5 (char 4)"
],
}
}
self.assertEqual(rv.status_code, 400)
self.assertEqual(response, expected_response)
def test_create_database_extra_metadata_validate(self):
"""
Database API: Test create extra metadata_params validation
"""
example_db = get_example_database()
if example_db.backend == "sqlite":
return
extra = {
"metadata_params": {"wrong_param": "some_value"},
"engine_params": {},
"metadata_cache_timeout": {},
"schemas_allowed_for_csv_upload": [],
}
self.login(username="admin")
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
"extra": json.dumps(extra),
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": {
"extra": [
"The metadata_params in Extra field is not configured correctly."
" The key wrong_param is invalid."
]
}
}
self.assertEqual(rv.status_code, 400)
self.assertEqual(response, expected_response)
def test_create_database_unique_validate(self):
"""
Database API: Test create database_name already exists
"""
example_db = get_example_database()
if example_db.backend == "sqlite":
return
self.login(username="admin")
database_data = {
"database_name": "examples",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": {"database_name": "A database with the same name already exists"}
}
self.assertEqual(rv.status_code, 422)
self.assertEqual(response, expected_response)
def test_create_database_uri_validate(self):
"""
Database API: Test create fail validate sqlalchemy uri
"""
self.login(username="admin")
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": "wrong_uri",
}
uri = "api/v1/database/"
rv = self.client.post(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 400)
expected_response = {
"message": {
"sqlalchemy_uri": [
"Invalid connection string, a valid string usually "
"follows:'DRIVER://USER:PASSWORD@DB-HOST/DATABASE-NAME'"
"<p>Example:'postgresql://user:password@your-postgres-db/database'"
"</p>"
]
}
}
self.assertEqual(response, expected_response)
def test_create_database_fail_sqllite(self):
"""
Database API: Test create fail with sqllite
"""
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": "sqlite:////some.db",
}
uri = "api/v1/database/"
self.login(username="admin")
response = self.client.post(uri, json=database_data)
response_data = json.loads(response.data.decode("utf-8"))
expected_response = {
"message": {
"sqlalchemy_uri": [
"SQLite database cannot be used as a data source "
"for security reasons."
]
}
}
self.assertEqual(response.status_code, 400)
self.assertEqual(response_data, expected_response)
def test_create_database_conn_fail(self):
"""
Database API: Test create fails connection
"""
example_db = get_example_database()
if example_db.backend in ("sqlite", "hive", "presto"):
return
example_db.password = "wrong_password"
database_data = {
"database_name": "test-database",
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
}
uri = "api/v1/database/"
self.login(username="admin")
response = self.client.post(uri, json=database_data)
response_data = json.loads(response.data.decode("utf-8"))
expected_response = {"message": "Could not connect to database."}
self.assertEqual(response.status_code, 422)
self.assertEqual(response_data, expected_response)
def test_update_database(self):
"""
Database API: Test update
"""
example_db = get_example_database()
test_database = self.insert_database(
"test-database", example_db.sqlalchemy_uri_decrypted
)
self.login(username="admin")
database_data = {"database_name": "test-database-updated"}
uri = f"api/v1/database/{test_database.id}"
rv = self.client.put(uri, json=database_data)
self.assertEqual(rv.status_code, 200)
# Cleanup
model = db.session.query(Database).get(test_database.id)
db.session.delete(model)
db.session.commit()
def test_update_database_conn_fail(self):
"""
Database API: Test update fails connection
"""
example_db = get_example_database()
if example_db.backend in ("sqlite", "hive", "presto"):
return
test_database = self.insert_database(
"test-database1", example_db.sqlalchemy_uri_decrypted
)
example_db.password = "wrong_password"
database_data = {
"sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
}
uri = f"api/v1/database/{test_database.id}"
self.login(username="admin")
rv = self.client.put(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {"message": "Could not connect to database."}
self.assertEqual(rv.status_code, 422)
self.assertEqual(response, expected_response)
# Cleanup
model = db.session.query(Database).get(test_database.id)
db.session.delete(model)
db.session.commit()
def test_update_database_uniqueness(self):
"""
Database API: Test update uniqueness
"""
example_db = get_example_database()
test_database1 = self.insert_database(
"test-database1", example_db.sqlalchemy_uri_decrypted
)
test_database2 = self.insert_database(
"test-database2", example_db.sqlalchemy_uri_decrypted
)
self.login(username="admin")
database_data = {"database_name": "test-database2"}
uri = f"api/v1/database/{test_database1.id}"
rv = self.client.put(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
expected_response = {
"message": {"database_name": "A database with the same name already exists"}
}
self.assertEqual(rv.status_code, 422)
self.assertEqual(response, expected_response)
# Cleanup
db.session.delete(test_database1)
db.session.delete(test_database2)
db.session.commit()
def test_update_database_invalid(self):
    """
    Database API: Test update invalid request

    PUT on a non-numeric id must 404.
    """
    self.login(username="admin")
    database_data = {"database_name": "test-database-updated"}
    # Fixed: was an f-string with no placeholders (ruff F541).
    uri = "api/v1/database/invalid"
    rv = self.client.put(uri, json=database_data)
    self.assertEqual(rv.status_code, 404)
def test_update_database_uri_validate(self):
"""
Database API: Test update sqlalchemy_uri validate
"""
example_db = get_example_database()
test_database = self.insert_database(
"test-database", example_db.sqlalchemy_uri_decrypted
)
self.login(username="admin")
database_data = {
"database_name": "test-database-updated",
"sqlalchemy_uri": "wrong_uri",
}
uri = f"api/v1/database/{test_database.id}"
rv = self.client.put(uri, json=database_data)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 400)
expected_response = {
"message": {
"sqlalchemy_uri": [
"Invalid connection string, a valid string usually "
"follows:'DRIVER://USER:PASSWORD@DB-HOST/DATABASE-NAME'"
"<p>Example:'postgresql://user:password@your-postgres-db/database'"
"</p>"
]
}
}
self.assertEqual(response, expected_response)
def test_delete_database(self):
"""
Database API: Test delete
"""
database_id = self.insert_database("test-database", "test_uri").id
self.login(username="admin")
uri = f"api/v1/database/{database_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 200)
model = db.session.query(Database).get(database_id)
self.assertEqual(model, None)
def test_delete_database_not_found(self):
"""
Database API: Test delete not found
"""
max_id = db.session.query(func.max(Database.id)).scalar()
self.login(username="admin")
uri = f"api/v1/database/{max_id + 1}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 404)
def test_delete_database_with_datasets(self):
"""
Database API: Test delete fails because it has depending datasets
"""
database_id = (
db.session.query(Database).filter_by(database_name="examples").one()
).id
self.login(username="admin")
uri = f"api/v1/database/{database_id}"
rv = self.delete_assert_metric(uri, "delete")
self.assertEqual(rv.status_code, 422)
def test_get_table_metadata(self):
"""
Database API: Test get table metadata info
"""
example_db = get_example_database()
self.login(username="admin")
uri = f"api/v1/database/{example_db.id}/table/birth_names/null/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(response["name"], "birth_names")
self.assertIsNone(response["comment"])
self.assertTrue(len(response["columns"]) > 5)
self.assertTrue(response.get("selectStar").startswith("SELECT"))
def test_get_invalid_database_table_metadata(self):
    """
    Database API: Test get invalid database from table metadata

    Both an unknown numeric id and a non-numeric id must 404.
    """
    database_id = 1000
    self.login(username="admin")
    uri = f"api/v1/database/{database_id}/table/some_table/some_schema/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)

    # Fixed: was an f-string with no placeholders (ruff F541).
    uri = "api/v1/database/some_database/table/some_table/some_schema/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)
def test_get_invalid_table_table_metadata(self):
    """
    Database API: Test get invalid table from table metadata

    Fixed: the URI was missing the `/table/` segment (compare
    test_get_table_metadata), so the request 404'd on routing rather
    than exercising the unknown-table path of the endpoint.
    """
    example_db = get_example_database()
    uri = f"api/v1/database/{example_db.id}/table/wrong_table/null/"
    self.login(username="admin")
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)
def test_get_table_metadata_no_db_permission(self):
    """
    Database API: Test get table metadata from not permitted db

    Fixed: the URI was missing the `/table/` segment (compare
    test_get_table_metadata), so the 404 came from routing instead of
    from the permission check.
    """
    self.login(username="gamma")
    example_db = get_example_database()
    uri = f"api/v1/database/{example_db.id}/table/birth_names/null/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 404)
def test_get_select_star(self):
"""
Database API: Test get select star
"""
self.login(username="admin")
example_db = get_example_database()
uri = f"api/v1/database/{example_db.id}/select_star/birth_names/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
self.assertIn("gender", response["result"])
def test_get_select_star_not_allowed(self):
"""
Database API: Test get select star not allowed
"""
self.login(username="gamma")
example_db = get_example_database()
uri = f"api/v1/database/{example_db.id}/select_star/birth_names/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_get_select_star_datasource_access(self):
    """
    Database API: Test get select star with datasource access

    Grants a gamma user datasource access to a temp table and verifies
    select_star is then reachable.
    """
    session = db.session
    table = SqlaTable(
        schema="main", table_name="ab_permission", database=get_main_database()
    )
    session.add(table)
    session.commit()

    tmp_table_perm = security_manager.find_permission_view_menu(
        "datasource_access", table.get_perm()
    )
    gamma_role = security_manager.find_role("Gamma")
    security_manager.add_permission_role(gamma_role, tmp_table_perm)

    self.login(username="gamma")
    main_db = get_main_database()
    uri = f"api/v1/database/{main_db.id}/select_star/ab_permission/"
    rv = self.client.get(uri)
    self.assertEqual(rv.status_code, 200)

    # rollback changes: remove only what this test created.
    # Fixed: the previous cleanup also executed db.session.delete(main_db),
    # deleting the shared main-database record that this test did not
    # create and that other tests rely on.
    security_manager.del_permission_role(gamma_role, tmp_table_perm)
    db.session.delete(table)
    db.session.commit()
def test_get_select_star_not_found_database(self):
"""
Database API: Test get select star not found database
"""
self.login(username="admin")
max_id = db.session.query(func.max(Database.id)).scalar()
uri = f"api/v1/database/{max_id + 1}/select_star/birth_names/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_get_select_star_not_found_table(self):
"""
Database API: Test get select star not found database
"""
self.login(username="admin")
example_db = get_example_database()
# sqllite will not raise a NoSuchTableError
if example_db.backend == "sqlite":
return
uri = f"api/v1/database/{example_db.id}/select_star/table_does_not_exist/"
rv = self.client.get(uri)
# TODO(bkyryliuk): investigate why presto returns 500
self.assertEqual(rv.status_code, 404 if example_db.backend != "presto" else 500)
def test_database_schemas(self):
"""
Database API: Test database schemas
"""
self.login("admin")
database = db.session.query(Database).first()
schemas = database.get_all_schema_names()
rv = self.client.get(f"api/v1/database/{database.id}/schemas/")
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(schemas, response["result"])
rv = self.client.get(
f"api/v1/database/{database.id}/schemas/?q={prison.dumps({'force': True})}"
)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(schemas, response["result"])
def test_database_schemas_not_found(self):
"""
Database API: Test database schemas not found
"""
self.logout()
self.login(username="gamma")
example_db = get_example_database()
uri = f"api/v1/database/{example_db.id}/schemas/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_database_schemas_invalid_query(self):
"""
Database API: Test database schemas with invalid query
"""
self.login("admin")
database = db.session.query(Database).first()
rv = self.client.get(
f"api/v1/database/{database.id}/schemas/?q={prison.dumps({'force': 'nop'})}"
)
self.assertEqual(rv.status_code, 400)
def test_test_connection(self):
    """
    Database API: Test test connection

    Exercises the endpoint with both the password-masked and the
    decrypted sqlalchemy URI of the example database.
    """
    extra = {
        "metadata_params": {},
        "engine_params": {},
        "metadata_cache_timeout": {},
        "schemas_allowed_for_csv_upload": [],
    }
    # need to temporarily allow sqlite dbs, teardown will undo this
    app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
    self.login("admin")
    example_db = get_example_database()
    # validate that the endpoint works with the password-masked sqlalchemy uri
    data = {
        "database_name": "examples",
        "encrypted_extra": "{}",
        "extra": json.dumps(extra),
        "impersonate_user": False,
        "sqlalchemy_uri": example_db.safe_sqlalchemy_uri(),
        "server_cert": ssl_certificate,
    }
    # Fixed: was an f-string with no placeholders (ruff F541).
    url = "api/v1/database/test_connection"
    rv = self.post_assert_metric(url, data, "test_connection")
    self.assertEqual(rv.status_code, 200)
    self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8")

    # validate that the endpoint works with the decrypted sqlalchemy uri
    data = {
        "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
        "database_name": "examples",
        "impersonate_user": False,
        "extra": json.dumps(extra),
        "server_cert": None,
    }
    rv = self.post_assert_metric(url, data, "test_connection")
    self.assertEqual(rv.status_code, 200)
    self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8")
def test_test_connection_failed(self):
    """
    Database API: Test test connection failed

    Unknown drivers must produce a 400 with a driver-specific message.
    """
    self.login("admin")

    data = {
        "sqlalchemy_uri": "broken://url",
        "database_name": "examples",
        "impersonate_user": False,
        "server_cert": None,
    }
    # Fixed: was an f-string with no placeholders (ruff F541).
    url = "api/v1/database/test_connection"
    rv = self.post_assert_metric(url, data, "test_connection")
    self.assertEqual(rv.status_code, 400)
    self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8")
    response = json.loads(rv.data.decode("utf-8"))
    expected_response = {
        "driver_name": "broken",
        "message": "Could not load database driver: broken",
    }
    self.assertEqual(response, expected_response)

    data = {
        "sqlalchemy_uri": "mssql+pymssql://url",
        "database_name": "examples",
        "impersonate_user": False,
        "server_cert": None,
    }
    rv = self.post_assert_metric(url, data, "test_connection")
    self.assertEqual(rv.status_code, 400)
    self.assertEqual(rv.headers["Content-Type"], "application/json; charset=utf-8")
    response = json.loads(rv.data.decode("utf-8"))
    expected_response = {
        "driver_name": "mssql+pymssql",
        "message": "Could not load database driver: mssql+pymssql",
    }
    self.assertEqual(response, expected_response)
def test_test_connection_unsafe_uri(self):
    """
    Database API: Test test connection with unsafe uri

    With PREVENT_UNSAFE_DB_CONNECTIONS on, sqlite URIs are rejected.
    """
    self.login("admin")

    app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True
    data = {
        "sqlalchemy_uri": "sqlite:///home/superset/unsafe.db",
        "database_name": "unsafe",
        "impersonate_user": False,
        "server_cert": None,
    }
    # Fixed: was an f-string with no placeholders (ruff F541).
    url = "api/v1/database/test_connection"
    rv = self.post_assert_metric(url, data, "test_connection")
    self.assertEqual(rv.status_code, 400)
    response = json.loads(rv.data.decode("utf-8"))
    expected_response = {
        "message": {
            "sqlalchemy_uri": [
                "SQLite database cannot be used as a data source for security reasons."
            ]
        }
    }
    self.assertEqual(response, expected_response)
def test_get_database_related_objects(self):
"""
Database API: Test get chart and dashboard count related to a database
:return:
"""
self.login(username="admin")
database = get_example_database()
uri = f"api/v1/database/{database.id}/related_objects/"
rv = self.get_assert_metric(uri, "related_objects")
self.assertEqual(rv.status_code, 200)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(response["charts"]["count"], 33)
self.assertEqual(response["dashboards"]["count"], 6)
def test_get_database_related_objects_not_found(self):
"""
Database API: Test related objects not found
"""
max_id = db.session.query(func.max(Database.id)).scalar()
# id does not exist and we get 404
invalid_id = max_id + 1
uri = f"api/v1/database/{invalid_id}/related_objects/"
self.login(username="admin")
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
self.logout()
self.login(username="gamma")
database = get_example_database()
uri = f"api/v1/database/{database.id}/related_objects/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
| |
import sublime
import os
from .regex import RE
from .state_property import StateProperty
from .settings import Settings
class JavaClass:
    """Wraps a Java class name; an absent name is stored as ""."""

    def __init__(self, jclass=None):
        # Collapse None / "" / other falsy values to the empty string.
        self.jclass = jclass if jclass else ""

    def is_empty(self):
        """Return True when no class name is stored."""
        return not self.jclass

    def get(self):
        """Return the stored class name ("" when empty)."""
        return self.jclass
class JavaPackage:
    """Represents a Java package as an ordered list of name components."""

    def __init__(self, jpackage=None):
        # Accept a raw package string ("com.example") or an iterable of
        # components; anything else yields an empty package.
        self.package_paths = []
        if isinstance(jpackage, str):
            if jpackage:
                match = RE().search("package_path_match", jpackage)
                if match:
                    normalized = JavaUtils().normalize_package_path(
                        match.group(0)
                    )
                    self.package_paths = normalized.split(".")
        elif isinstance(jpackage, (list, tuple)):
            # Drop empty components while preserving order.
            self.package_paths = [
                component for component in jpackage if component
            ]

    def join(self, package):
        """Return a new package: this package's components then *package*'s.

        @param package: a package to join with
        """
        return JavaPackage(self.package_paths + package.package_paths)

    def is_empty(self):
        """Return True when the package has no components."""
        return not self.package_paths

    def as_list(self):
        """Return the package as a list of components."""
        return self.package_paths

    def as_path(self):
        """Return the package as a relative file path ("" when empty)."""
        return os.path.join(*self.package_paths) if self.package_paths else ""

    def as_class_path(self):
        """Return the dotted class-path form, e.g. "com.example"."""
        return ".".join(self.package_paths)
class JavaClassPath:
    """Represents a full Java class path: a package plus a class name."""

    def __init__(self, class_path=None):
        # Start from an empty package/class; only non-empty strings are parsed.
        self.package = JavaPackage()
        self.jclass = JavaClass()
        if isinstance(class_path, str) and class_path:
            match = RE().match("class_path_match", class_path)
            if match:
                package_part = JavaUtils().normalize_package_path(
                    match.group(1)
                )
                self.package = JavaPackage(package_part.split("."))
                class_part = JavaUtils().normalize_package_path(
                    match.group(3)
                )
                self.jclass = JavaClass(class_part)

    def get_package(self):
        """Return the package component."""
        return self.package

    def get_class(self):
        """Return the class component."""
        return self.jclass

    def as_path(self):
        """Return the class path as a file path."""
        return os.path.join(self.package.as_path(), self.jclass.get())

    def as_class_path(self):
        """Return the dotted class-path string."""
        if self.package.is_empty():
            return self.jclass.get()
        if self.jclass.is_empty():
            return self.package.as_class_path()
        parts = [self.package.as_class_path(), self.jclass.get()]
        return ".".join(part for part in parts if part)
class _JavaUtils:

    """
    Java-related utilities (singleton; access via JavaUtils()).
    """

    # Status codes returned by create_package_path
    CREATE_SUCCESS = 0
    CREATE_EXISTS = 1
    CREATE_ERROR = 2

    @classmethod
    def instance(cls):
        """Return the lazily-created shared instance."""
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance

    def to_readable_class_path(self, path, as_class_path=False):
        """
        Returns a class path that can be read easily by human
        @param path: an original path to be parsed
        @param as_class_path: a boolean indicated if the path is already
            a class path or not
        """
        if not as_class_path:
            path = self.to_package(path).as_class_path()
        if not path:
            if StateProperty().is_project():
                return "(Default Package)"
            else:
                return "(Unknown Package)"
        return path

    def is_java(self, view=None):
        """
        Returns whether specified view is a Java file or not
        @param view: a view to be validated
        """
        view = view or sublime.active_window().active_view()
        if not view:
            return False
        if view.file_name():
            return self.is_java_file(view.file_name())
        # Fixed: find_by_selector returns a list of regions; coerce to bool
        # so the method returns a boolean as documented.
        return bool(view.find_by_selector(
            Settings().get("java_source_selector")
        ))

    def is_java_file(self, file_path):
        """
        Returns whether specified file path is a Java file
        @param file_path: a file path to be validated
        """
        if file_path is None:
            return False
        _, ext = os.path.splitext(os.path.basename(file_path))
        return ext in Settings().get("java_extensions")

    def is_class_path(self, class_path, special=False):
        """
        Returns whether specified class path is a valid class path
        @param class_path: a class path to be validated
        @param special: a boolean indicated if the class path is a special
            case (contains inheritance selectors) or not
        """
        match = RE().match(
            "special_class_path_match" if special else "class_path_match",
            class_path
        )
        return match is not None

    def normalize_package_path(self, class_path):
        """
        Returns a dot-trimmed class path
        @param class_path: a class path to be trimmed
        """
        return RE().get("normalize_package_path", "^\\.*|\\.*$").sub(
            "", class_path
        )

    def to_package(self, path, relative=True):
        """
        Returns a Java package from specified path
        @param path: a path to be converted
        @param relative: a boolean indicated if the path should be converted
            to relative path or not
        """
        from ..utils import Utils
        if relative:
            convert = False
            for source_folder in StateProperty().get_source_folders():
                if Utils().contains_file(source_folder, path):
                    convert = True
                    path = os.path.relpath(path, source_folder)
                    break
            if not convert:
                path = os.path.relpath(
                    path, StateProperty().get_source_folder()
                )
        class_path = ".".join(Utils.split_path(path))
        return JavaPackage(
            self.normalize_package_path(class_path).split(".")
        )

    def create_package_path(self, path, silent=False):
        """
        Creates a directory for specified path and returns the status
        @param path: a path to be created
        @param silent: a boolean indicated if the operation should be silent
            or not
        """
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                # Fixed: narrowed from BaseException — makedirs failures raise
                # OSError, and BaseException also swallowed KeyboardInterrupt.
                sublime.error_message(
                    # Fixed message grammar ("Error while create a package").
                    "Error while creating package" +
                    " \"{package}\": {exception}".format_map({
                        "package": path,
                        "exception": e
                    })
                )
                return self.CREATE_ERROR
        else:
            if not silent:
                # Fixed message grammar ("Package is already exists").
                sublime.message_dialog("Package already exists")
            return self.CREATE_EXISTS
        return self.CREATE_SUCCESS
def JavaUtils():
    """Return the shared _JavaUtils singleton (module-level accessor)."""
    return _JavaUtils.instance()
| |
from __future__ import division,print_function
import math, os, json, sys, re
import cPickle as pickle
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
import PIL
from PIL import Image
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
import scipy
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
import bcolz
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
from IPython.lib.display import FileLink
import theano
from theano import shared, tensor as T
from theano.tensor.nnet import conv2d, nnet
from theano.tensor.signal import pool
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, activity_l2, l1, activity_l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils.layer_utils import layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import *
from vgg16bn import *
# Compact numpy printing for notebook output.
np.set_printoptions(precision=4, linewidth=100)
# RGB -> luminance channel weights (ITU-R BT.601); used by gray().
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img):
    """Convert an RGB image to grayscale via the `to_bw` channel weights."""
    if K.image_dim_ordering() == 'tf':
        # channels-last: rollaxis(img, 0, 1) is an identity move; the dot
        # with to_bw sums over the trailing channel axis
        return np.rollaxis(img, 0, 1).dot(to_bw)
    else:
        # channels-first ('th'): move the channel axis to the end first
        return np.rollaxis(img, 0, 3).dot(to_bw)
def to_plot(img):
    """Return `img` as a channels-last uint8 array suitable for imshow."""
    if K.image_dim_ordering() == 'tf':
        # channels-last already: rollaxis(img, 0, 1) is an identity move
        return np.rollaxis(img, 0, 1).astype(np.uint8)
    else:
        # channels-first ('th'): move channels to the last axis
        return np.rollaxis(img, 0, 3).astype(np.uint8)
def plot(img):
    """Display `img` with matplotlib after converting via to_plot()."""
    plt.imshow(to_plot(img))
def floor(x):
    """Return the largest integer <= x, always as an int."""
    floored = math.floor(x)
    return int(floored)
def ceil(x):
    """Return the smallest integer >= x, always as an int."""
    raised = math.ceil(x)
    return int(raised)
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
    """Show a grid of images side by side.

    @param ims: list or array of images
    @param figsize: matplotlib figure size
    @param rows: number of grid rows
    @param interp: use default interpolation when True, 'none' otherwise
    @param titles: optional per-image titles
    """
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        # channels-first (N, C, H, W) -> channels-last (N, H, W, C) for imshow
        if (ims.shape[-1] != 3):
            ims = ims.transpose((0,2,3,1))
    f = plt.figure(figsize=figsize)
    for i in range(len(ims)):
        sp = f.add_subplot(rows, len(ims)//rows, i+1)
        sp.axis('Off')
        if titles is not None:
            sp.set_title(titles[i], fontsize=16)
        plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
    """Clip each probability into [1 - mx, mx], then renormalize rows to sum to 1."""
    lower = 1 - mx
    clipped = np.clip(arr, lower, mx)
    row_totals = clipped.sum(axis=1)[:, np.newaxis]
    return clipped / row_totals
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
                target_size=(224,224)):
    """Return a Keras directory iterator over the images under `dirname`.

    NOTE(review): the `gen` default is evaluated once at import time, so all
    callers without an explicit generator share one ImageDataGenerator
    instance — confirm this sharing is intended.
    """
    return gen.flow_from_directory(dirname, target_size=target_size,
            class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def onehot(x):
    """One-hot encode an integer class vector using Keras' to_categorical."""
    return to_categorical(x)
def wrap_config(layer):
    """Serialize `layer` into Keras' {class_name, config} dict format."""
    layer_class_name = layer.__class__.__name__
    return {'class_name': layer_class_name, 'config': layer.get_config()}
def copy_layer(layer):
    """Return a fresh layer built from `layer`'s config (weights NOT copied)."""
    return layer_from_config(wrap_config(layer))
def copy_layers(layers):
    """Return fresh config-copies of every layer (weights NOT copied)."""
    return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
    """Copy weights pairwise from `from_layers` into `to_layers` (in place)."""
    for source, target in zip(from_layers, to_layers):
        weights = source.get_weights()
        target.set_weights(weights)
def copy_model(m):
    """Return a structural copy of Sequential model `m` with weights copied."""
    res = Sequential(copy_layers(m.layers))
    copy_weights(m.layers, res.layers)
    return res
def insert_layer(model, new_layer, index):
    """Return a new Sequential equal to `model` with `new_layer` inserted
    before position `index`; weights of the original layers are preserved.

    NOTE(review): if index >= len(model.layers), new_layer is silently
    never added — confirm callers always pass an in-range index.
    """
    res = Sequential()
    for i,layer in enumerate(model.layers):
        if i==index: res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
def adjust_dropout(weights, prev_p, new_p):
    """Rescale `weights` to compensate for changing dropout prob prev_p -> new_p."""
    scale = (1 - prev_p) / (1 - new_p)
    return [weight * scale for weight in weights]
def get_data(path, target_size=(224,224)):
    """Load all images under `path` (unshuffled, one per batch) into one array.

    NOTE(review): `batches.next()` / `batches.nb_sample` are the Python-2 /
    Keras-1 APIs; under Python 3 / Keras 2 these would be `next(batches)` and
    `batches.samples` — confirm the intended runtime.
    """
    batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
    return np.concatenate([batches.next() for i in range(batches.nb_sample)])
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    (This function is copied from the scikit docs.)
    """
    # Fixed: normalize BEFORE drawing. Previously the row-normalization ran
    # after plt.imshow, so the heatmap colours showed raw counts while the
    # cell annotations showed normalized values.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print(cm)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate each cell; flip the text colour on the bright half of the map.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def save_array(fname, arr):
    """Persist *arr* to disk at *fname* as a bcolz carray."""
    carr = bcolz.carray(arr, rootdir=fname, mode='w')
    carr.flush()
def load_array(fname):
    """Load an array previously written with save_array()."""
    return bcolz.open(fname)[:]
def mk_size(img, r2c):
    """Zero-pad *img* (rows, cols, 3) to reach row/col aspect ratio *r2c*.

    The original image is centered in the padded result.
    """
    r, c, _ = img.shape
    curr_r2c = r / c
    new_r, new_c = r, c
    if r2c > curr_r2c:
        # int() guards against floor() returning a float (np.floor or
        # Python 2 math.floor), which breaks np.zeros dims and slicing.
        new_r = int(floor(c * r2c))
    else:
        new_c = int(floor(r / r2c))
    arr = np.zeros((new_r, new_c, 3), dtype=np.float32)
    # // yields ints, so the offsets can index directly (no floor needed).
    r2 = (new_r - r) // 2
    c2 = (new_c - c) // 2
    arr[r2:r2 + r, c2:c2 + c] = img
    return arr
def mk_square(img):
    """Zero-pad *img* (rows, cols, 3) to a centered square of its larger dim."""
    x, y, _ = img.shape
    maxs = max(img.shape[:2])
    y2 = (maxs - y) // 2
    x2 = (maxs - x) // 2
    arr = np.zeros((maxs, maxs, 3), dtype=np.float32)
    # x2/y2 are already ints from //; the old floor() calls were redundant
    # and could yield floats (np.floor), which breaks slicing in numpy.
    arr[x2:x2 + x, y2:y2 + y] = img
    return arr
def vgg_ft(out_dim):
    """Build a Vgg16 finetuned to *out_dim* output classes; return its model."""
    vgg = Vgg16()
    vgg.ft(out_dim)
    return vgg.model
def vgg_ft_bn(out_dim):
    """Build a batchnorm Vgg16 finetuned to *out_dim* classes; return its model."""
    vgg = Vgg16BN()
    vgg.ft(out_dim)
    return vgg.model
def get_classes(path):
    """Return labels, one-hot labels and filenames for train/valid/test dirs."""
    trn_batches = get_batches(path + 'train', shuffle=False, batch_size=1)
    val_batches = get_batches(path + 'valid', shuffle=False, batch_size=1)
    test_batches = get_batches(path + 'test', shuffle=False, batch_size=1)
    return (val_batches.classes, trn_batches.classes,
            onehot(val_batches.classes), onehot(trn_batches.classes),
            val_batches.filenames, trn_batches.filenames,
            test_batches.filenames)
def split_at(model, layer_type):
    """Split model.layers just after the LAST layer of exactly *layer_type*.

    Returns (layers up to and including that layer, layers after it).
    Raises IndexError when no layer matches.
    """
    matches = [i for i, layer in enumerate(model.layers)
               if type(layer) is layer_type]
    cut = matches[-1]
    return model.layers[:cut + 1], model.layers[cut + 1:]
# NOTE: commented out until later
# # WNX - 2017-Jul-05 16:53
# def get_conv_feat(fname, conv_model, batches, batch_size, inc=4096):
# """
# Function generates an array of convolutional features, independent of
# system memory limits by running `model.predict` on data in batches.
# """
# # NOTE: could I just use predict_generator on gen.flow_from_directory ?
# idx = 0
# preds = []
#
# conv_feat = bcolz.open(fname)[:idx]
# preds = conv_model.predict(conv_feat, batch_size=batch_size, verbose=0)
#
# while idx < batches.n - inc:
# conv_feat = bcolz.open(fname)[idx:idx+inc]
# idx += inc
# next_preds = conv_model.predict(conv_feat, batch_size=batch_size, verbose=0)
#
# conv_feat = bcolz.open(fname)[idx:]
# next_preds = conv_model.predict(conv_feat, batch_size=batch_size, verbose=0)
# preds = np.concatenate([preds, next_preds])
#
# return preds
class MixIterator(object):
    """Combine several batch iterators, yielding concatenated (x, y) batches.

    *iters* is either a flat sequence of iterators, or a *list* of sequences
    of iterators (the "multi" form).  NOTE(review): only an actual ``list``
    triggers the multi path; a tuple of iterators takes the flat path.
    Each iterator must expose ``.N`` (total sample count) and yield
    ``(inputs, targets)`` pairs.
    """
    def __init__(self, iters):
        self.iters = iters
        self.multi = type(iters) is list
        if self.multi:
            # Multi form: count samples via the first iterator of each group.
            self.N = sum([it[0].N for it in self.iters])
        else:
            self.N = sum([it.N for it in self.iters])

    def reset(self):
        # NOTE(review): in the multi form the entries are sequences, which
        # have no reset(); this mirrors the original behavior — confirm.
        for it in self.iters: it.reset()

    def __iter__(self):
        return self

    def next(self, *args, **kwargs):
        """Pull one batch from every iterator and concatenate along axis 0."""
        if self.multi:
            nexts = [[next(it) for it in o] for o in self.iters]
            n0 = np.concatenate([n[0] for n in nexts])
            n1 = np.concatenate([n[1] for n in nexts])
            return (n0, n1)
        else:
            nexts = [next(it) for it in self.iters]
            n0 = np.concatenate([n[0] for n in nexts])
            n1 = np.concatenate([n[1] for n in nexts])
            return (n0, n1)

    # Python 3 iterator protocol: without this alias, `next(mi)` and
    # `for batch in mi` fail on Python 3 (only `next()` existed before).
    __next__ = next
| |
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from django.shortcuts import render, redirect
from django.template.context_processors import csrf
from django.forms import widgets
from flash.models import Collection, Deck, Card, Decks_Cards, Users_Collections
from flash.forms import DeckImportForm
from flash.decorators import check_role
from flash.lti_service import LTIService
from flash import services, queries, analytics
import logging
log = logging.getLogger(__name__)
def deck_view_helper(request, current_collection, deck_cards):
    """Assemble the shared context pieces for the deck views.

    Returns [cards, collection_list, is_quiz_mode, is_deck_admin, card_id].
    """
    role_bucket = services.get_or_update_role_bucket(request)
    course_collections = LTIService(request).getCourseCollections()
    collection_list = queries.getCollectionList(
        role_bucket, collection_ids=course_collections)
    is_quiz_mode = request.GET.get('mode') == 'quiz'
    # Admins/instructors of this collection get deck-admin privileges.
    admin_ids = set(role_bucket['ADMINISTRATOR']) | set(role_bucket['INSTRUCTOR'])
    is_deck_admin = current_collection.id in admin_ids
    card_id = request.GET.get('card_id', '')
    cards = []
    for dcard in deck_cards:
        card_fields = {'show': [], 'reveal': []}
        for cfield in dcard.card.cards_fields_set.all():
            bucket = 'show' if cfield.field.display else 'reveal'
            card_fields[bucket].append({
                'type': cfield.field.field_type,
                'label': cfield.field.label,
                'show_label': cfield.field.show_label,
                'value': cfield.value,
            })
        cards.append({
            'card_id': dcard.card.id,
            'color': dcard.card.color,
            'fields': card_fields,
        })
    return [cards, collection_list, is_quiz_mode, is_deck_admin, card_id]
def index(request, deck_id=None):
    """Displays the deck of cards for review/quiz."""
    deck = Deck.objects.get(id=deck_id)
    current_collection = deck.collection
    deck_cards = (Decks_Cards.objects.filter(deck=deck)
                  .order_by('sort_order')
                  .prefetch_related('card__cards_fields_set__field'))
    cards, collection_list, is_quiz_mode, is_deck_admin, card_id = \
        deck_view_helper(request, current_collection, deck_cards)
    context = {
        "collection": current_collection,
        "nav_collections": collection_list,
        "deck": deck,
        "cards": cards,
        "is_quiz_mode": is_quiz_mode,
        "is_deck_admin": is_deck_admin,
        "card_id": card_id,
    }
    analytics.track(
        actor=request.user,
        verb=analytics.VERBS.viewed,
        object=analytics.OBJECTS.deck,
        context={"deck_id": deck_id},
    )
    return render(request, "deck_view.html", context)
def all_cards(request, collection_id):
    """Render every card from every deck of a collection as one virtual deck."""
    collection_id = int(collection_id)
    decks = queries.getDecksByCollection(collection_ids=[collection_id])[collection_id]
    current_collection = Collection.objects.get(id=collection_id)
    deck_cards = []
    for deck in decks:
        deck_cards += (Decks_Cards.objects.filter(deck=deck)
                       .order_by('sort_order')
                       .prefetch_related('card__cards_fields_set__field'))
    cards, collection_list, is_quiz_mode, is_deck_admin, card_id = \
        deck_view_helper(request, current_collection, deck_cards)
    context = {
        "collection": current_collection,
        "nav_collections": collection_list,
        # Synthetic deck for the template; negative id marks it as virtual.
        "deck": {'id': -collection_id, 'title': 'All Cards'},
        "cards": cards,
        "is_quiz_mode": is_quiz_mode,
        "is_deck_admin": is_deck_admin,
        "card_id": card_id,
    }
    analytics.track(
        actor=request.user,
        verb=analytics.VERBS.viewed,
        object=analytics.OBJECTS.deck,
        context={"collection_id": collection_id, 'type': 'All Cards Deck'},
    )
    return render(request, "deck_view.html", context)
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'deck')
def delete(request, deck_id=None):
    """Deletes a deck."""
    extra = {'user': request.user}
    collection_id = queries.getDeckCollectionId(deck_id)
    if services.delete_deck(deck_id):
        log.info('Deck %(d)s deleted from collection %(c)s'
                 % {'d': deck_id, 'c': collection_id}, extra=extra)
    else:
        log.info('Deck %(d)s could not be deleted from collection %(c)s'
                 % {'d': deck_id, 'c': collection_id}, extra=extra)
    # Send the user back to the collection page in instructor-edit mode.
    response = redirect('collectionIndex', collection_id)
    response['Location'] += '?instructor=edit'
    analytics.track(
        actor=request.user,
        verb=analytics.VERBS.deleted,
        object=analytics.OBJECTS.deck,
        context={"deck_id": deck_id},
    )
    return response
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'deck')
def upload_deck(request, deck_id=None):
    '''
    Imports a deck of cards from an excel spreadsheet.

    GET renders the upload form; POST validates and imports the file,
    redirecting to the deck on success.  Any import error is surfaced
    back on the form via ``upload_error``.
    '''
    upload_error = ''
    deck = Deck.objects.get(id=deck_id)
    current_collection = deck.collection
    role_bucket = services.get_or_update_role_bucket(request)
    canvas_course_collections = LTIService(request).getCourseCollections()
    collection_list = queries.getCollectionList(role_bucket, collection_ids=canvas_course_collections)
    if request.method == 'POST':
        d = {'user': request.user}
        log.info('The user is uploading a new deck.', extra=d)
        deck_form = DeckImportForm(request.POST, request.FILES)
        if deck_form.is_valid():
            if 'file' in request.FILES:
                try:
                    services.handle_uploaded_deck_file(deck, request.FILES['file'])
                    log.info('New deck successfully added to the collection %(c)s.' %{'c': str(deck.collection.id)}, extra=d)
                    analytics.track(
                        actor=request.user,
                        verb=analytics.VERBS.uploaded,
                        object=analytics.OBJECTS.deck,
                        context={"deck_id": deck_id},
                    )
                    return redirect(deck)
                # 'except ... as e' (not the py2-only 'except ..., e') is
                # valid on both Python 2.6+ and Python 3.
                except Exception as e:
                    upload_error = str(e)
                    msg = 'The following error occurred when the user tried uploading a deck: '
                    log.error(msg + upload_error, extra=d)
            else:
                log.info('No file selected.', extra=d)
        else:
            log.error('Deck Form is not valid.', extra=d)
    else:
        deck_form = DeckImportForm()
    context = {
        "deck": deck,
        "deck_form": deck_form,
        "nav_collections": collection_list,
        "collection": current_collection,
        "upload_error": upload_error
    }
    return render(request, 'decks/upload.html', context)
def download_deck(request, deck_id=None):
    '''
    Downloads a ZIP containing the excel spreadsheet of the deck of cards
    along with any associated media files like images or audio.
    '''
    deck = Deck.objects.get(id=deck_id)
    zip_bytes = services.create_zip_deck_file(deck)
    log.info('Deck %(d)s from the collection %(c)s downloaded by the user.'
             % {'d': str(deck.id), 'c': str(deck.collection.id)},
             extra={'user': request.user})
    response = HttpResponse(zip_bytes, content_type='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename=deck.zip'
    analytics.track(
        actor=request.user,
        verb=analytics.VERBS.downloaded,
        object=analytics.OBJECTS.deck,
        context={"deck_id": deck_id}
    )
    return response
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'deck')
def create_edit_card(request, deck_id=None):
    """Create a new card or edit an existing one from the collection card template."""
    deck = Deck.objects.get(id=deck_id)
    current_collection = deck.collection
    card_color_select = widgets.Select(attrs=None, choices=Card.COLOR_CHOICES)
    role_bucket = services.get_or_update_role_bucket(request)
    canvas_course_collections = LTIService(request).getCourseCollections()
    collection_list = queries.getCollectionList(role_bucket, collection_ids=canvas_course_collections)
    # card_id is present only when editing an existing card.
    card_id = request.GET.get('card_id', '')
    if card_id:
        # Editing: load the card's color and its existing field values.
        card = Card.objects.get(id=card_id)
        card_color = card.color
        field_list = [{
            "id": cfield.field.id,
            "type": cfield.field.field_type,
            "label": cfield.field.label,
            "bucket": "show" if cfield.field.display else "reveal",
            "show_label": cfield.field.show_label,
            "value": cfield.value
        } for cfield in card.cards_fields_set.all()]
    else:
        # Creating: start from the collection's card template, empty values.
        card_color = Card.DEFAULT_COLOR
        field_list = [{
            "id": field.id,
            "type": field.field_type,
            "bucket": "show" if field.display else "reveal",
            "label": field.label,
            "show_label": field.show_label,
            "value": ""
        } for field in current_collection.card_template.fields.all()]
    card_fields = {'show': [], 'reveal': []}
    for field in field_list:
        card_fields[field['bucket']].append(field)
    is_all_cards = request.GET.get('is_all_cards', 0)
    context = {
        "is_all_cards": int(is_all_cards),
        "deck": deck,
        "card_id": card_id if card_id else '',
        "collection": current_collection,
        "nav_collections": collection_list,
        "card_fields": card_fields,
        "card_color_select": card_color_select.render("card_color", card_color)
    }
    return render(request, 'decks/edit_card.html', context)
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'collection')
def edit_card_collection(request, collection_id=None):
    """Redirect a collection-level card edit to the owning deck's edit view."""
    # NOTE(review): the original rebound collection_id to a Collection
    # instance and passed the object on; getDeckIdCard apparently expects
    # the object rather than the raw id — confirm.
    collection = Collection.objects.get(id=collection_id)
    card_id = request.GET.get('card_id', '')
    deck_id = queries.getDeckIdCard(card_id, collection)
    response = redirect('deckEditCard', deck_id)
    response['Location'] += '?card_id=%(c)s&deck_id=%(d)s&is_all_cards=%(a)s' % {'c': card_id, 'd': deck_id, 'a': 1}
    return response
def log_analytics_delete(success, entity_type, entity_id, card_id, user):
    """Log the outcome of a card deletion and emit an analytics event.

    *entity_type* names the container ('deck' or 'collection'); *success*
    selects info vs. error logging.
    """
    d = {'user': user}
    # Fixed format strings: the original '%(t) %(id)s' lacked the 's'
    # conversion after %(t), which makes %-formatting raise
    # "ValueError: unsupported format character" at runtime.
    if success:
        log.info('Card deleted from the %(t)s %(id)s' % {'t': entity_type, 'id': str(entity_id)}, extra=d)
    else:
        log.error('Card could not be deleted from the %(t)s %(id)s' % {'t': entity_type, 'id': str(entity_id)}, extra=d)
    analytics.track(
        actor=user,
        verb=analytics.VERBS.deleted,
        object=analytics.OBJECTS.card,
        context={entity_type + "_id": entity_id, "card_id": card_id}
    )
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'deck')
def delete_card(request, deck_id=None):
    """Deletes a card."""
    deck = Deck.objects.get(id=deck_id)
    card_id = request.GET.get('card_id', None)
    was_deleted = services.check_delete_card(card_id, [deck_id])
    log_analytics_delete(was_deleted, 'deck', deck_id, card_id, request.user)
    return redirect(deck)
@check_role([Users_Collections.ADMINISTRATOR, Users_Collections.INSTRUCTOR, Users_Collections.TEACHING_ASSISTANT, Users_Collections.CONTENT_DEVELOPER], 'collection')
def delete_card_collection(request, collection_id=None):
    """Deletes a card."""
    deck_ids = queries.getDeckIds(collection_id)
    card_id = request.GET.get('card_id', None)
    was_deleted = services.check_delete_card(card_id, deck_ids)
    log_analytics_delete(was_deleted, 'collection', collection_id, card_id, request.user)
    return redirect('allCards', collection_id=collection_id)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer main subcommand processing
"""
def get_main_options(defaults=None, constants=None):
"""Main subcommand-related options
"""
if defaults is None:
defaults = {}
if constants is None:
constants = {}
max_models = constants.get('MAX_MODELS')
plurality = constants.get('PLURALITY')
last = constants.get('LAST_PREDICTION')
options = {
# If a BigML model is provided, the script will use it to generate
# predictions.
'--model': {
'action': 'store',
'dest': 'model',
'default': defaults.get('model', None),
'help': "BigML model Id."},
# Use it to compute predictions remotely.
'--remote': {
'action': 'store_true',
'dest': 'remote',
'default': defaults.get('remote', False),
'help': "Compute predictions remotely."},
# The path to a file containing model ids.
'--models': {
'action': 'store',
'dest': 'models',
'default': defaults.get('models', None),
'help': ("Path to a file containing model/ids. One model"
" per line (e.g., model/50a206a8035d0706dc000376"
").")},
# If a BigML json file containing a model structure is provided,
# the script will use it.
'--model-file': {
'action': 'store',
'dest': 'model_file',
'default': defaults.get('model_file', None),
'help': "BigML model JSON structure file."},
# Sets pruning.
'--pruning': {
'action': 'store',
'default': defaults.get('pruning', "smart"),
'choices': ["smart", "statistical", "no-pruning"],
'help': ("Set pruning type: smart, statistical,"
" no-pruning.")},
# Number of models to create when using ensembles.
'--number-of-models': {
'action': 'store',
'dest': 'number_of_models',
'default': defaults.get('number_of_models', 1),
'type': int,
'help': ("Number of models to create when using"
" ensembles.")},
# Replacement to use when sampling.
'--replacement': {
'action': 'store_true',
'default': defaults.get('replacement', False),
'help': "Use replacement when sampling."},
# Max number of models to predict from in parallel.
'--max-batch-models': {
'action': 'store',
'dest': 'max_batch_models',
'default': defaults.get('max_batch_models', max_models),
'type': int,
'help': ("Max number of models to predict from"
" in parallel.")},
# Randomize feature selection at each split.
'--randomize': {
'action': 'store_true',
'dest': 'randomize',
'default': defaults.get('randomize', False),
'help': "Randomize feature selection at each split."},
# Make model a public black-box model.
'--black-box': {
'action': 'store_true',
'dest': 'black_box',
'default': defaults.get('black_box', False),
'help': "Make generated model black-box."},
# Make model a public white-box model.
'--white-box': {
'action': 'store_true',
'dest': 'white_box',
'default': defaults.get('white_box', False),
'help': "Make generated model white-box."},
# Set a price tag to your white-box model.
'--model-price': {
'action': 'store',
'dest': 'model_price',
'type': float,
'default': defaults.get('model_price', 0.0),
'help': ("The price other users must pay to clone your"
" model.")},
# Set credits per prediction to your white box or black box models.
'--cpp': {
'action': 'store',
'type': float,
'default': defaults.get('cpp', 0.0),
'help': ("The number of credits that other users will"
" consume to make a prediction with your"
" model.")},
# Does not create a model just a dataset.
'--no-model': {
'action': 'store_true',
'dest': 'no_model',
'default': defaults.get('no_model', False),
'help': "Do not create a model."},
# Prediction directories to be combined.
'--combine-votes': {
'action': 'store',
'dest': 'votes_dirs',
'default': defaults.get('combine_votes', None),
'help': ("Comma separated list of"
" directories that contain models' votes"
" for the same test set.")},
# Method to combine votes in multiple models predictions
'--method': {
'action': 'store',
'dest': 'method',
'default': defaults.get('method', plurality),
'choices': ["plurality", "confidence weighted",
"probability weighted", "threshold",
"combined"],
'help': ("Method to combine votes from ensemble"
" predictions. Allowed methods: plurality"
", \"confidence weighted\", "
" \"probability weighted\", threshold. Also"
" \"combined\" for datasets with subsets of"
" categories")},
# Evaluate a model
'--evaluate': {
'action': 'store_true',
'help': "Evaluate command."},
# Max number of models to create in parallel.
'--max-parallel-models': {
"action": 'store',
"dest": 'max_parallel_models',
"default": defaults.get('max_parallel_models', 1),
"type": int,
"help": "Max number of models to create in parallel."},
# Max number of evaluations to create in parallel.
'--max-parallel-evaluations': {
"action": 'store',
"dest": 'max_parallel_evaluations',
"default": defaults.get('max_parallel_evaluations', 1),
"type": int,
"help": ("Max number of evaluations to create in"
" parallel.")},
# The name of the field that represents the objective field (i.e.,
# class or label) or its column number.
'--objective': {
"action": 'store',
"dest": 'objective_field',
"default": defaults.get('objective', None),
"help": ("The column number of the Objective Field"
" or its name, if headers are given.")},
# The path to a file containing the mapping of fields' ids from
# the test dataset fields to the model fields.
'--fields-map': {
'action': 'store',
'dest': 'fields_map',
'default': defaults.get('fields_map', None),
'help': ("Path to a csv file describing fields mapping. "
"One definition per line (e.g., 00000,"
"00000a).")},
# Set the part of training data to be held out for cross-validation
'--cross-validation-rate': {
'action': 'store',
'dest': 'cross_validation_rate',
'type': float,
'default': defaults.get('cross_validation_rate', 0.0),
'help': ("Part of training data to be held out for "
"cross-validation.")},
# Number of evaluations used in cross-validation
'--number-of-evaluations': {
'action': 'store',
'dest': 'number_of_evaluations',
'type': int,
'default': defaults.get('number_of_evaluations', 0),
'help': ("Number of evaluations used for"
" cross-validation.")},
# If a BigML ensemble is provided, the script will use it to generate
# predictions.
'--ensemble': {
'action': 'store',
'dest': 'ensemble',
'default': defaults.get('ensemble', None),
'help': "BigML ensemble Id."},
# Prediction log format: `short` will only log predictions, `long` will
# log also confidence information
'--prediction-info': {
'action': 'store',
'dest': 'prediction_info',
'default': defaults.get('prediction_info', 'normal'),
'choices': ["brief", "normal", "full", "full data"],
'help': ("Prediction log format: 'brief' will only "
"log predictions, 'normal' will write confidence"
" too, 'full' will write in a row the"
" input data that generates the prediction"
" followed by the latter.")},
# Multi-label. The objective field has multiple labels.
'--multi-label': {
'action': 'store_true',
'dest': 'multi_label',
'default': defaults.get('multi_label', False),
'help': ("The objective field has multiple labels that"
" should be treated independently.")},
# Prediction header. If set, headers are added to the prediction file.
'--prediction-header': {
'action': 'store_true',
'dest': 'prediction_header',
'default': defaults.get('prediction_header', False),
'help': "Headers are added to the prediction file."},
# Prediction fields. A comma-separated list of the fields that should
# be included in the prediction file.
'--prediction-fields': {
'action': 'store',
'dest': 'prediction_fields',
'default': defaults.get('prediction_fields', None),
'help': "Fields added to the prediction file."},
# Probability: Includes the probability associated to the prediction
'--probability': {
'action': 'store_true',
'dest': 'probability',
'default': defaults.get('probability', False),
'help': ("Adding the probability to predictions.")},
# No_probability: Does not include the probability of the prediction
'--no-probability': {
'action': 'store_false',
'dest': 'probability',
'default': defaults.get('probability', False),
'help': ("Predictions don't include probability.")},
# Max number of ensembles to create in parallel.
'--max-parallel-ensembles': {
'action': 'store',
'dest': 'max_parallel_ensembles',
'default': defaults.get('max_parallel_ensembles', 1),
'type': int,
'help': "Max number of ensembles to create in parallel."},
# The path to a file containing ensemble ids.
'--ensembles': {
'action': 'store',
'dest': 'ensembles',
'default': defaults.get('ensembles', None),
'help': ("Path to a file containing ensemble/ids. One "
"ensemble per line (e.g., "
"ensemble/50a206a8035d0706dc000376).")},
# If a BigML json file containing a model structure is provided,
# the script will use it.
'--ensemble-file': {
'action': 'store',
'dest': 'ensemble_file',
'default': defaults.get('ensemble_file', None),
'help': "BigML ensemble JSON structure file."},
# Threshold. Minimum necessary number of votes to issue a prediction.
'--threshold': {
'action': 'store',
'dest': 'threshold',
'default': defaults.get('threshold', 1),
'type': int,
'help': ("Minimum number of votes to issue a prediction"
" for the threshold combiner.")},
# Class. Label for the category used in threshold voting predictions.
'--class': {
'action': 'store',
'dest': 'threshold_class',
'default': defaults.get('threshold_class', None),
'help': "Category used in threshold combiner method."},
# Max number of categories to be included in a model
'--max-categories': {
'action': 'store',
'dest': 'max_categories',
'default': defaults.get('max_categories', 0),
'type': int,
'help': ("Max number of categories to be included in"
" a model.")},
# No batch predictions. Remote predictions are created individually.
'--no-batch': {
'action': 'store_true',
'dest': 'no_batch',
'default': defaults.get('no_batch', False),
'help': "Create remote predictions individually."},
# Evaluations flag: excluding one dataset from the datasets list to
# test
'--dataset-off': {
'action': 'store_true',
'dest': 'dataset_off',
'default': defaults.get('dataset_off', False),
'help': ("Excluding one dataset at a time from the"
" datasets list to test.")},
# The path to a file containing model attributes.
'--model-attributes': {
'action': 'store',
'dest': 'model_attributes',
'default': defaults.get('model_attributes', None),
'help': ("Path to a json file describing model"
" attributes.")},
# Input fields to include in the model.
'--model-fields': {
"action": 'store',
"dest": 'model_fields',
"default": defaults.get('model_fields', None),
"help": ("Comma-separated list of input fields"
" (predictors) to create the model.")},
# Balance. Automatically balance all the classes evenly.
'--balance': {
"action": 'store_true',
"dest": 'balance',
"default": defaults.get('balance', False),
"help": ("Automatically balance all objective classes"
" evenly.")},
# Balance. Do not automatically balance all the classes evenly.
# (opposed to balance)
'--no-balance': {
"action": 'store_false',
"dest": 'balance',
"default": defaults.get('balance', False),
"help": ("Do not automatically balance all objective"
" classes evenly.")},
# Node threshold. Maximum number of nodes in the tree.
'--node-threshold': {
'action': 'store',
'dest': 'node_threshold',
'default': defaults.get('node_threshold', 0),
'type': int,
'help': "Maximum number of nodes in the model."},
# The path to a file containing ensemble attributes.
'--ensemble-attributes': {
'action': 'store',
'dest': 'ensemble_attributes',
'default': defaults.get('ensemble_attributes', None),
'help': ("Path to a json file describing ensemble"
" attributes.")},
# The path to a file containing evaluation attributes.
'--evaluation-attributes': {
'action': 'store',
'dest': 'evaluation_attributes',
'default': defaults.get('evaluation_attributes', None),
'help': ("Path to a json file describing evaluation"
" attributes.")},
# The path to a file containing batch prediction attributes.
'--batch-prediction-attributes': {
'action': 'store',
'dest': 'batch_prediction_attributes',
'default': defaults.get('batch_prediction_attributes', None),
'help': ("Path to a json file describing batch prediction"
" attributes.")},
# The path to a file containing prediction attributes.
'--prediction-attributes': {
'action': 'store',
'dest': 'prediction_attributes',
'default': defaults.get('prediction_attributes', None),
'help': ("Path to a json file describing prediction"
" attributes.")},
# Weight-field. Use the contents of the given field as weights.
'--weight-field': {
'action': 'store',
'dest': 'weight_field',
'default': defaults.get('weight_field', None),
'help': ("Sets the name (or column) of the field"
" that contains the weights for the instances.")},
# Objective-weights. Path a to a CSV file of class, weight pairs.
'--objective-weights': {
'action': 'store',
'dest': 'objective_weights',
'default': defaults.get('objective_weights', None),
'help': "Path to a CSV file of class, weight pairs."},
# Strategy used in predictions when a missing value is found for the
# field used to split the node.
'--missing-strategy': {
'action': 'store',
'dest': 'missing_strategy',
'default': defaults.get('missing_strategy', last),
'choices': ["last", "proportional"],
'help': ("Strategy used when the field used in the split"
" to next nodes is missing in the input data."
" Allowed values: last or proportional")},
# Default value to use for missings in numeric fields
'--default-numeric-value': {
'action': 'store',
'dest': 'default_numeric_value',
'default': defaults.get('default_numeric_value'),
'choices': ["mean", "median", "minimum", "maximum", "zero"],
'help': ("Value set by default when a numeric field is missing."
" Allowed values: mean, median, minimum, maximum or"
" zero.")},
# Report. Additional output report formats
'--reports': {
'action': 'store',
'dest': 'reports',
'nargs': '*',
'default': defaults.get('reports', []),
'choices': ["gazibit"],
'help': "Output report formats."},
# Set it to use the missing splits operators: including missing values
# in tree branches.
'--missing-splits': {
'action': 'store_true',
'dest': 'missing_splits',
'default': defaults.get('missing_splits', False),
'help': ("Accept missing values as valid in some branches of the"
"tree.")},
# Set it to use the fields and the first node will split in one
# branch per category (only for categorical fields)
'--split-field': {
'action': 'store',
'dest': 'split_field',
'default': defaults.get('split_field', False),
'help': ("Name of the field that should be used in the first"
" split of the model. One branch per category will"
" be created.")},
# Set it to use the fields and the first node will split in one
# branch per category using binary splits (only for categorical fields)
'--focus-field': {
'action': 'store',
'dest': 'focus_field',
'default': defaults.get('focus_field', False),
'help': ("Name of the field that should be used in the first"
" split of the model. One branch per category will"
" be created.")},
# Random candidates: Number of fields to be selected at random in
# ensembles construction
'--random-candidates': {
'action': 'store',
'dest': 'random_candidates',
'default': defaults.get('random_candidates', 0),
'type': int,
'help': ("Number of fields selected at random in ensembles'"
" construction.")},
# Ensemble seed. The value used in ensembles as seed
'--ensemble-sample-seed': {
'action': 'store',
'dest': 'ensemble_sample_seed',
'default': defaults.get('ensemble_sample_seed', None),
'help': "Value used as seed in ensembles."},
# Ensemble sampling to use when using bagging.
'--ensemble-sample-rate': {
'action': 'store',
'dest': 'ensemble_sample_rate',
'default': defaults.get('ensemble_sample_rate', 1.0),
'type': float,
'help': "Ensemble sampling rate for bagging."},
# Ensemble replacement to use when using bagging.
'--ensemble-sample-no-replacement': {
'action': 'store_false',
'dest': 'ensemble_sample_replacement',
'default': defaults.get('ensemble_sample_replacement', True),
'help': "Don't use replacement when bagging."},
# Create a boosting ensemble
'--boosting': {
'action': 'store_true',
'dest': 'boosting',
'default': defaults.get('boosting', False),
'help': "Create a boosted ensemble"},
# Maximum number of iterations used in boosted ensembles.
'--boosting-iterations': {
'action': 'store',
'dest': 'iterations',
'default': defaults.get('iterations', None),
'type': int,
'help': ("Maximum number of iterations used in boosted"
" ensembles.")},
# The portion of the dataset that will be held out for testing
# at the end of every iteration.
'--early-holdout': {
'action': 'store',
'dest': 'early_holdout',
'default': defaults.get('early_holdout', None),
'type': float,
'help': ("The portion of the dataset that will be held out for"
" testing at the end of every iteration in boosted"
" ensembles (between 0 and 1).")},
# Boosted ensemble: Causes the out of bag samples to be tested after
# every iteration.
'--no-early-out-of-bag': {
'action': 'store_false',
'dest': 'early_out_of_bag',
'default': defaults.get('early_out_of_bag', True),
'help': ("Causes the out of bag samples not to be tested after"
" every iteration in boosted ensembles.")},
# It controls how aggressively the boosting algorithm will fit the data
'--learning-rate': {
'action': 'store',
'dest': 'learning_rate',
'default': defaults.get('learning_rate', None),
'type': float,
'help': ("It controls how aggressively the boosting algorithm"
" will fit the data in boosted"
" ensembles (between 0 and 1).")},
# Boosted ensemble: the out_of_bag samples are tested after every
# iteration to choose the gradient step size.
'--no-step-out-of-bag': {
'action': 'store_false',
'dest': 'step_out_of_bag',
'default': defaults.get('step_out_of_bag', True),
'help': ("Causes the out of bag samples not to be tested after"
" every iteration to choose the gradient step size"
" in boosted ensembles.")},
# Disables reports upload.
'--no-upload': {
'action': 'store_false',
'dest': 'upload',
'default': defaults.get('upload', True),
'help': "Disables upload for reports"},
# Use it to compute predictions locally.
'--local': {
'action': 'store_false',
'dest': 'remote',
'default': defaults.get('remote', False),
'help': "Compute predictions locally"},
# Deactivate replacement to use when using sampling.
'--no-replacement': {
'action': 'store_false',
'dest': 'replacement',
'default': defaults.get('replacement', False),
'help': "Don't use replacement when sampling."},
# Doesn't randomize feature selection at each split.
'--no-randomize': {
'action': 'store_false',
'dest': 'randomize',
'default': defaults.get('randomize', False),
'help': ("Doesn't randomize feature selection at each"
" split.")},
# Doesn't make model a public black-box model.
'--no-black-box': {
'action': 'store_false',
'dest': 'black_box',
'default': defaults.get('black_box', False),
'help': "Doesn't make generated model black-box."},
# Doesn't make model a public white-box model.
'--no-white-box': {
'action': 'store_false',
'dest': 'white_box',
'default': defaults.get('white_box', False),
'help': "Doesn't make generated model white-box."},
# Create a model just a dataset.
'--no-no-model': {
'action': 'store_false',
'dest': 'no_model',
'default': defaults.get('no_model', False),
'help': "Create a model."},
# Don't clear global bigmler log files
'--no-clear-logs': {
'action': 'store_false',
'dest': 'clear_logs',
'default': defaults.get('clear_logs', False),
'help': "Don't clear global bigmler log files."},
# Don't store the retrieved resources in the output directory
'--no-store': {
'action': 'store_false',
'dest': 'store',
'default': defaults.get('store', False),
'help': ("Don't store the retrieved resources in the"
" output directory.")},
# Multi-label. The objective field hasn't multiple labels.
'--no-multi-label': {
'action': 'store_false',
'dest': 'multi_label',
'default': defaults.get('multi_label', False),
'help': "The objective field has not multiple labels."},
# Prediction-header.
'--no-prediction-header': {
'action': 'store_false',
'dest': 'prediction_header',
'default': defaults.get('prediction_header', False),
'help': "Headers are not added to the prediction file."},
# Batch predictions. Remote predictions are created in batch mode.
'--batch': {
'action': 'store_false',
'dest': 'no_batch',
'default': defaults.get('no_batch', False),
'help': "Create remote predictions in batch."},
# Multi-dataset. Generating a new dataset from a list of existing
# datasets.
'--no-multi-dataset': {
'action': 'store_false',
'dest': 'multi_dataset',
'default': defaults.get('multi_dataset', False),
'help': "Do not generate a new dataset."},
# Shared. Shares all shareable resources and uses its shared links in
# reports
'--unshared': {
'action': 'store_false',
'dest': 'shared',
'default': defaults.get('shared', False),
'help': ("Share resources and use its shared urls "
" in reports.")},
# Enables reports upload.
'--upload': {
'action': 'store_true',
'dest': 'upload',
'default': defaults.get('upload', True),
'help': "Enables upload for reports"},
# Dataset-off. Turning off the dataset-off flag.
'--no-dataset-off': {
'action': 'store_false',
'dest': 'dataset_off',
'default': defaults.get('dataset_off', False),
'help': "Turning off the dataset-off flag."},
# No missing_splits used: Don't include missing values in branches
# of the tree.
'--no-missing-splits': {
'action': 'store_false',
'dest': 'missing_splits',
'default': defaults.get('missing_splits', False),
'help': ("Turning off the --missing-splits flag: don't include"
" missing values in branches of the tree.")},
# Used in models combinations, ensembles predictions. Keeps prediction
# in memory to be combined and no partial results are stored in files.
'--fast': {
'action': 'store_true',
'dest': 'fast',
'default': defaults.get('fast', True),
'help': ("Enables fast ensemble's predictions with no partial"
" results files.")},
# Used in models combinations, ensembles predictions. Stores
# predictions for each model in files that can be used and combined
# later
'--no-fast': {
'action': 'store_false',
'dest': 'fast',
'default': defaults.get('fast', True),
'help': ("Enables fast ensemble's predictions with partial"
" results files.")},
# Does not create a csv as output of a batch prediction.
'--no-csv': {
'action': 'store_true',
'dest': 'no_csv',
'default': defaults.get('no_csv', False),
'help': ("Do not create a csv file as output of a batch"
" prediction.")},
# Create a csv as output (as opposed to --no-csv).
'--no-no-csv': {
'action': 'store_false',
'dest': 'no_csv',
'default': defaults.get('no_csv', False),
'help': ("Create a csv file as output of a batch"
" prediction (as opposed to --no-csv)")},
# Create a dataset as ouput of a batch prediction
'--to-dataset': {
'action': 'store_true',
'dest': 'to_dataset',
'default': defaults.get('to_dataset', False),
'help': ("Create a dataset as ouput of a batch"
" prediction.")},
# The path to a file containing the operating point description.
'--operating-point': {
'action': 'store',
'dest': 'operating_point',
'default': defaults.get('operating_point', None),
'help': ("Path to a json file containing the operating "
"point description.")},
# Use median as predicted value in local models predictions
'--median': {
'action': 'store_true',
'dest': 'median',
'default': defaults.get('median', False),
'help': ("Use medtan instead on mean as node"
" prediction.")},
# Use mean as predicted value in local models predictions
'--no-median': {
'action': 'store_false',
'dest': 'median',
'default': defaults.get('median', False),
'help': ("Use mean instead on median as node"
" prediction.")}}
return options
| |
# Copyright (c) 2016 Clinton Knight
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Performance metrics functions and cache for NetApp systems.
"""
import copy
from oslo_log import log as logging
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
# Module-level logger for the performance library.
LOG = logging.getLogger(__name__)
# Utilization percentage reported for a pool/node when there is not enough
# counter data to compute a real value.
DEFAULT_UTILIZATION = 50
class PerformanceLibrary(object):
    """Derive per-node utilization metrics from ONTAP performance counters.

    Counter samples are polled periodically and cached per node; a node's
    utilization is computed from the oldest and newest cached samples and
    then mapped onto the pools hosted by that node.
    """
    def __init__(self, zapi_client):
        # ZAPI client used for all counter queries against the backend.
        self.zapi_client = zapi_client
        # node name -> list of recent counter sets (only the last 10 kept).
        self.performance_counters = {}
        # pool name -> utilization percentage (0-100).
        self.pool_utilization = {}
        self._init_counter_info()
    def _init_counter_info(self):
        """Set a few counter names based on Data ONTAP version."""
        self.system_object_name = None
        self.avg_processor_busy_base_counter_name = None
        try:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                # Prefer per-constituent system metrics when supported.
                self.system_object_name = 'system:constituent'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system:constituent',
                                                'avg_processor_busy'))
            elif self.zapi_client.features.SYSTEM_METRICS:
                self.system_object_name = 'system'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system',
                                                'avg_processor_busy'))
        except netapp_api.NaApiError:
            # Base-counter lookup failed; fall back to well-known names so
            # later counter queries can still be attempted.
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
            else:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
            LOG.exception('Could not get performance base counter '
                          'name. Performance-based scheduler '
                          'functions may not be available.')
    def update_performance_cache(self, flexvol_pools, aggregate_pools):
        """Called periodically to update per-pool node utilization metrics."""
        # Nothing to do on older systems
        if not (self.zapi_client.features.SYSTEM_METRICS or
                self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS):
            return
        # Get aggregates and nodes for all known pools
        aggr_names = self._get_aggregates_for_pools(flexvol_pools,
                                                    aggregate_pools)
        node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names)
        # Update performance counter cache for each node
        node_utilization = {}
        for node_name in node_names:
            if node_name not in self.performance_counters:
                self.performance_counters[node_name] = []
            # Get new performance counters and save only the last 10
            counters = self._get_node_utilization_counters(node_name)
            if not counters:
                # Collection failed for this node; its pools fall back to
                # DEFAULT_UTILIZATION in the loop below.
                continue
            self.performance_counters[node_name].append(counters)
            self.performance_counters[node_name] = (
                self.performance_counters[node_name][-10:])
            # Update utilization for each node using newest & oldest sample
            counters = self.performance_counters[node_name]
            if len(counters) < 2:
                # Need at least two samples to compute a delta.
                node_utilization[node_name] = DEFAULT_UTILIZATION
            else:
                node_utilization[node_name] = self._get_node_utilization(
                    counters[0], counters[-1], node_name)
        # Update pool utilization map atomically
        pool_utilization = {}
        # deepcopy so the merged view does not mutate the caller's dict.
        all_pools = copy.deepcopy(flexvol_pools)
        all_pools.update(aggregate_pools)
        for pool_name, pool_info in all_pools.items():
            aggr_name = pool_info.get('netapp_aggregate', 'unknown')
            node_name = aggr_node_map.get(aggr_name)
            if node_name:
                pool_utilization[pool_name] = node_utilization.get(
                    node_name, DEFAULT_UTILIZATION)
            else:
                pool_utilization[pool_name] = DEFAULT_UTILIZATION
        # Single assignment so readers never observe a half-built map.
        self.pool_utilization = pool_utilization
    def get_node_utilization_for_pool(self, pool_name):
        """Get the node utilization for the specified pool, if available."""
        return self.pool_utilization.get(pool_name, DEFAULT_UTILIZATION)
    def update_for_failover(self, zapi_client, flexvol_pools, aggregate_pools):
        """Change API client after a whole-backend failover event."""
        self.zapi_client = zapi_client
        self.update_performance_cache(flexvol_pools, aggregate_pools)
    def _get_aggregates_for_pools(self, flexvol_pools, aggregate_pools):
        """Get the set of aggregates that contain the specified pools."""
        aggr_names = set()
        for pool_name, pool_info in aggregate_pools.items():
            # FlexGroup pools are skipped — presumably because they are not
            # tied to a single aggregate; confirm against driver docs.
            if pool_info.get('netapp_flexgroup', False):
                continue
            aggr_names.add(pool_info.get('netapp_aggregate'))
        for pool_name, pool_info in flexvol_pools.items():
            if pool_info.get('netapp_flexgroup', False):
                continue
            aggr_names.add(pool_info.get('netapp_aggregate'))
        return list(aggr_names)
    def _get_nodes_for_aggregates(self, aggr_names):
        """Get the cluster nodes that own the specified aggregates.

        Returns a (node name list, aggregate->node map) pair; aggregates
        whose node cannot be determined are omitted from the map.
        """
        node_names = set()
        aggr_node_map = {}
        for aggr_name in aggr_names:
            node_name = self.zapi_client.get_node_for_aggregate(aggr_name)
            if node_name:
                node_names.add(node_name)
                aggr_node_map[aggr_name] = node_name
        return list(node_names), aggr_node_map
    def _get_node_utilization(self, counters_t1, counters_t2, node_name):
        """Get node utilization from two sets of performance counters.

        counters_t1 is the older sample, counters_t2 the newer one.
        Returns a percentage clamped to [0, 100]; DEFAULT_UTILIZATION on
        any calculation failure.
        """
        try:
            # Time spent in the single-threaded Kahuna domain
            kahuna_percent = self._get_kahuna_utilization(counters_t1,
                                                          counters_t2)
            # If Kahuna is using >60% of the CPU, the controller is fully busy
            if kahuna_percent > 60:
                return 100.0
            # Average CPU busyness across all processors
            avg_cpu_percent = 100.0 * self._get_average_cpu_utilization(
                counters_t1, counters_t2)
            # Total Consistency Point (CP) time
            total_cp_time_msec = self._get_total_consistency_point_time(
                counters_t1, counters_t2)
            # Time spent in CP Phase 2 (buffer flush)
            p2_flush_time_msec = self._get_consistency_point_p2_flush_time(
                counters_t1, counters_t2)
            # Wall-clock time between the two counter sets, taken from the
            # timestamps attached to the total_cp_msecs counter.
            poll_time_msec = self._get_total_time(counters_t1,
                                                  counters_t2,
                                                  'total_cp_msecs')
            # If two polls happened in quick succession, use CPU utilization
            if total_cp_time_msec == 0 or poll_time_msec == 0:
                return max(min(100.0, avg_cpu_percent), 0)
            # Adjusted Consistency Point time
            adjusted_cp_time_msec = self._get_adjusted_consistency_point_time(
                total_cp_time_msec, p2_flush_time_msec)
            adjusted_cp_percent = (100.0 *
                                   adjusted_cp_time_msec / poll_time_msec)
            # Utilization is the greater of CPU busyness & CP time
            node_utilization = max(avg_cpu_percent, adjusted_cp_percent)
            return max(min(100.0, node_utilization), 0)
        except Exception:
            LOG.exception('Could not calculate node utilization for '
                          'node %s.', node_name)
            return DEFAULT_UTILIZATION
    def _get_kahuna_utilization(self, counters_t1, counters_t2):
        """Get time spent in the single-threaded Kahuna domain."""
        # Note(cknight): Because Kahuna is single-threaded, running only on
        # one CPU at a time, we can safely sum the Kahuna CPU usage
        # percentages across all processors in a node.
        return sum(self._get_performance_counter_average_multi_instance(
            counters_t1, counters_t2, 'domain_busy:kahuna',
            'processor_elapsed_time')) * 100.0
    def _get_average_cpu_utilization(self, counters_t1, counters_t2):
        """Get average CPU busyness across all processors.

        Returns a fraction (0-1); the caller scales it to a percentage.
        """
        return self._get_performance_counter_average(
            counters_t1, counters_t2, 'avg_processor_busy',
            self.avg_processor_busy_base_counter_name)
    def _get_total_consistency_point_time(self, counters_t1, counters_t2):
        """Get time spent in Consistency Points in msecs."""
        return float(self._get_performance_counter_delta(
            counters_t1, counters_t2, 'total_cp_msecs'))
    def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2):
        """Get time spent in CP Phase 2 (buffer flush) in msecs."""
        return float(self._get_performance_counter_delta(
            counters_t1, counters_t2, 'cp_phase_times:p2_flush'))
    def _get_total_time(self, counters_t1, counters_t2, counter_name):
        """Get wall clock time between two successive counters in msecs."""
        timestamp_t1 = float(self._find_performance_counter_timestamp(
            counters_t1, counter_name))
        timestamp_t2 = float(self._find_performance_counter_timestamp(
            counters_t2, counter_name))
        # Timestamps appear to be in seconds; convert the delta to msecs.
        return (timestamp_t2 - timestamp_t1) * 1000.0
    def _get_adjusted_consistency_point_time(self, total_cp_time,
                                             p2_flush_time):
        """Get adjusted CP time by limiting CP phase 2 flush time to 20%."""
        # NOTE(review): subtracts all of p2_flush_time and scales by 1.20;
        # the relation to the "20%" in the docstring is not obvious from the
        # code alone — confirm against the upstream driver rationale.
        return (total_cp_time - p2_flush_time) * 1.20
    def _get_performance_counter_delta(self, counters_t1, counters_t2,
                                       counter_name):
        """Calculate a delta value from two performance counters."""
        counter_t1 = int(
            self._find_performance_counter_value(counters_t1, counter_name))
        counter_t2 = int(
            self._find_performance_counter_value(counters_t2, counter_name))
        return counter_t2 - counter_t1
    def _get_performance_counter_average(self, counters_t1, counters_t2,
                                         counter_name, base_counter_name,
                                         instance_name=None):
        """Calculate an average value from two performance counters.

        The result is the counter delta normalized by the base counter
        delta, i.e. (Δcounter / Δbase).
        """
        counter_t1 = float(self._find_performance_counter_value(
            counters_t1, counter_name, instance_name))
        counter_t2 = float(self._find_performance_counter_value(
            counters_t2, counter_name, instance_name))
        base_counter_t1 = float(self._find_performance_counter_value(
            counters_t1, base_counter_name, instance_name))
        base_counter_t2 = float(self._find_performance_counter_value(
            counters_t2, base_counter_name, instance_name))
        return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1)
    def _get_performance_counter_average_multi_instance(self, counters_t1,
                                                        counters_t2,
                                                        counter_name,
                                                        base_counter_name):
        """Calculate an average value from multiple counter instances."""
        averages = []
        instance_names = []
        # Discover the instances present in the first (older) sample.
        for counter in counters_t1:
            if counter_name in counter:
                instance_names.append(counter['instance-name'])
        for instance_name in instance_names:
            average = self._get_performance_counter_average(
                counters_t1, counters_t2, counter_name, base_counter_name,
                instance_name)
            averages.append(average)
        return averages
    def _find_performance_counter_value(self, counters, counter_name,
                                        instance_name=None):
        """Given a counter set, return the value of a named instance.

        Raises NotFound if no matching counter entry exists.
        """
        for counter in counters:
            if counter_name in counter:
                if (instance_name is None
                        or counter['instance-name'] == instance_name):
                    return counter[counter_name]
        else:
            # for/else: reached only when the loop finds no match.
            raise exception.NotFound(_('Counter %s not found') % counter_name)
    def _find_performance_counter_timestamp(self, counters, counter_name,
                                            instance_name=None):
        """Given a counter set, return the timestamp of a named instance.

        Raises NotFound if no matching counter entry exists.
        """
        for counter in counters:
            if counter_name in counter:
                if (instance_name is None
                        or counter['instance-name'] == instance_name):
                    return counter['timestamp']
        else:
            # for/else: reached only when the loop finds no match.
            raise exception.NotFound(_('Counter %s not found') % counter_name)
    def _expand_performance_array(self, object_name, counter_name, counter):
        """Get array labels and expand counter data array.

        Splits a comma-separated array counter into individual entries
        keyed as '<counter_name>:<label>' (e.g. 'cp_phase_times:p2_flush').
        """
        # Get array labels for counter value
        counter_info = self.zapi_client.get_performance_counter_info(
            object_name, counter_name)
        array_labels = [counter_name + ':' + label.lower()
                        for label in counter_info['labels']]
        array_values = counter[counter_name].split(',')
        # Combine labels and values, and then mix into existing counter
        array_data = dict(zip(array_labels, array_values))
        counter.update(array_data)
    def _get_base_counter_name(self, object_name, counter_name):
        """Get the name of the base counter for the specified counter."""
        counter_info = self.zapi_client.get_performance_counter_info(
            object_name, counter_name)
        return counter_info['base-counter']
    def _get_node_utilization_counters(self, node_name):
        """Get all performance counters for calculating node utilization.

        Returns the combined system + WAFL + processor counter list, or
        None if any of the queries fails.
        """
        try:
            return (self._get_node_utilization_system_counters(node_name) +
                    self._get_node_utilization_wafl_counters(node_name) +
                    self._get_node_utilization_processor_counters(node_name))
        except netapp_api.NaApiError:
            LOG.exception('Could not get utilization counters from node '
                          '%s', node_name)
            return None
    def _get_node_utilization_system_counters(self, node_name):
        """Get the system counters for calculating node utilization."""
        system_instance_uuids = (
            self.zapi_client.get_performance_instance_uuids(
                self.system_object_name, node_name))
        system_counter_names = [
            'avg_processor_busy',
            self.avg_processor_busy_base_counter_name,
        ]
        # When the legacy base counter is in use, also fetch the modern one.
        if 'cpu_elapsed_time1' in system_counter_names:
            system_counter_names.append('cpu_elapsed_time')
        system_counters = self.zapi_client.get_performance_counters(
            self.system_object_name, system_instance_uuids,
            system_counter_names)
        return system_counters
    def _get_node_utilization_wafl_counters(self, node_name):
        """Get the WAFL counters for calculating node utilization."""
        wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids(
            'wafl', node_name)
        wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
        wafl_counters = self.zapi_client.get_performance_counters(
            'wafl', wafl_instance_uuids, wafl_counter_names)
        # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
        for counter in wafl_counters:
            if 'cp_phase_times' in counter:
                self._expand_performance_array(
                    'wafl', 'cp_phase_times', counter)
        return wafl_counters
    def _get_node_utilization_processor_counters(self, node_name):
        """Get the processor counters for calculating node utilization."""
        processor_instance_uuids = (
            self.zapi_client.get_performance_instance_uuids('processor',
                                                            node_name))
        processor_counter_names = ['domain_busy', 'processor_elapsed_time']
        processor_counters = self.zapi_client.get_performance_counters(
            'processor', processor_instance_uuids, processor_counter_names)
        # Expand array data so we can use processor:domain_busy[kahuna]
        for counter in processor_counters:
            if 'domain_busy' in counter:
                self._expand_performance_array(
                    'processor', 'domain_busy', counter)
        return processor_counters
| |
#!/usr/bin/env python3
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib2
import pytest
import sys
from typing import List, Optional
# Make the "drivers" directory (a sibling of this test file's directory)
# importable so the module under test can be found in a source checkout.
HERE = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(HERE)) + os.sep + "drivers")
import inspect_compute_results
def test_unknown_command_rejected(tmp_path: pathlib2.Path):
    """An unrecognized command name must raise ValueError."""
    argv = ['unknown', '1.json', '2.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert 'ValueError: Unknown command' in str(excinfo)
def test_show_rejects_multiple_args(tmp_path: pathlib2.Path):
    """'show' accepts exactly one input file, so two must be rejected."""
    argv = ['show', '1.json', '2.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Command "show" requires exactly 1 input; 2 provided'
            in str(excinfo))
def test_exactdiff_rejects_one_arg(tmp_path: pathlib2.Path):
    """'exactdiff' needs two input files; one must be rejected."""
    argv = ['exactdiff', '1.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Command "exactdiff" requires exactly 2 inputs; 1 provided'
            in str(excinfo))
def test_exactdiff_rejects_three_args(tmp_path: pathlib2.Path):
    """'exactdiff' needs two input files; three must be rejected."""
    argv = ['exactdiff', '1.json', '2.json', '3.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Command "exactdiff" requires exactly 2 inputs; 3 provided'
            in str(excinfo))
def test_fuzzydiff_rejects_one_arg(tmp_path: pathlib2.Path):
    """'fuzzydiff' needs two input files; one must be rejected."""
    argv = ['fuzzydiff', '1.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Command "fuzzydiff" requires exactly 2 inputs; 1 provided'
            in str(excinfo))
def test_fuzzydiff_rejects_three_args(tmp_path: pathlib2.Path):
    """'fuzzydiff' needs two input files; three must be rejected."""
    argv = ['fuzzydiff', '1.json', '2.json', '3.json']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Command "fuzzydiff" requires exactly 2 inputs; 3 provided'
            in str(excinfo))
def test_show_handles_file_not_found(tmp_path: pathlib2.Path):
    """'show' must raise FileNotFoundError for a nonexistent input."""
    with pytest.raises(FileNotFoundError) as excinfo:
        inspect_compute_results.main_helper(['show', 'nofile.json'])
    assert ('FileNotFoundError: Input file "nofile.json" not found'
            in str(excinfo))
def test_exactdiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
    """exactdiff must report a missing first input file."""
    existing = tmp_path / 'something.json'
    existing.touch(exist_ok=False)
    argv = ['exactdiff', 'nofile.json', str(existing)]
    with pytest.raises(FileNotFoundError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('FileNotFoundError: Input file "nofile.json" not found'
            in str(excinfo))
def test_exactdiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
    """exactdiff must report a missing second input file."""
    existing = tmp_path / 'something.json'
    existing.touch(exist_ok=False)
    argv = ['exactdiff', str(existing), 'nofile.json']
    with pytest.raises(FileNotFoundError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('FileNotFoundError: Input file "nofile.json" not found'
            in str(excinfo))
def test_fuzzydiff_handles_first_file_not_found(tmp_path: pathlib2.Path):
    """fuzzydiff must report a missing first input file."""
    existing = tmp_path / 'something.json'
    existing.touch(exist_ok=False)
    argv = ['fuzzydiff', 'nofile.json', str(existing)]
    with pytest.raises(FileNotFoundError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('FileNotFoundError: Input file "nofile.json" not found'
            in str(excinfo))
def test_fuzzydiff_handles_second_file_not_found(tmp_path: pathlib2.Path):
    """fuzzydiff must report a missing second input file."""
    existing = tmp_path / 'something.json'
    existing.touch(exist_ok=False)
    argv = ['fuzzydiff', str(existing), 'nofile.json']
    with pytest.raises(FileNotFoundError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('FileNotFoundError: Input file "nofile.json" not found'
            in str(excinfo))
def check_diff(tmp_path: pathlib2.Path, output1: str, output2: str, is_exact: bool,
               extra_args: Optional[List[str]]=None) -> int:
    """Write the two outputs to temp files and diff them via main_helper.

    Returns main_helper's exit code (0 means the diff passed).
    """
    first_path = tmp_path / '1.info.json'
    second_path = tmp_path / '2.info.json'
    for path, content in ((first_path, output1), (second_path, output2)):
        with path.open(mode='w') as handle:
            handle.write(content)
    command = 'exactdiff' if is_exact else 'fuzzydiff'
    args = [command, str(first_path), str(second_path)] + (extra_args or [])
    return inspect_compute_results.main_helper(args)
def check_exact_diff(tmp_path: pathlib2.Path, output1: str, output2: str) -> int:
    """Exact-diff the two outputs; return the main_helper exit code."""
    return check_diff(tmp_path, output1, output2, True)
def check_fuzzy_diff(tmp_path: pathlib2.Path, output1: str, output2: str,
                     extra_args: Optional[List[str]]=None) -> int:
    """Fuzzy-diff the two outputs; return the main_helper exit code."""
    return check_diff(tmp_path, output1, output2, False, extra_args=extra_args)
def test_exactdiff_pass1(tmp_path: pathlib2.Path):
    """Identical integer SSBO data passes exactdiff despite other fields."""
    first = ('{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
             '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}')
    second = ('{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
              '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}')
    assert check_exact_diff(tmp_path, first, second) == 0
def test_exactdiff_pass2(tmp_path: pathlib2.Path):
    """Whitespace differences in the SSBO JSON do not affect exactdiff."""
    first = ('{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
             '{"ssbo":[[2.0]]}}')
    second = ('{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
              '{"ssbo": [ [2.0] ] } }')
    assert check_exact_diff(tmp_path, first, second) == 0
def test_exactdiff_pass3(tmp_path: pathlib2.Path):
    """Identical multi-field SSBO data passes exactdiff."""
    first = ('{"status": "SUCCESS", "log": "#### Start compute shader", "outputs": '
             '{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}')
    second = ('{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
              '{"ssbo":[[88.0, 12.3],[28,12,14],[1]]}}')
    assert check_exact_diff(tmp_path, first, second) == 0
def test_exactdiff_fail_first_invalid(tmp_path: pathlib2.Path):
    """A first input that is not JSON must raise ValueError."""
    good = ('{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
            '{"ssbo": [ [2.0] ] } }')
    with pytest.raises(ValueError) as excinfo:
        check_exact_diff(tmp_path, 'not_json', good)
    assert ('ValueError: First input file did not contain valid SSBO data'
            in str(excinfo))
def test_exactdiff_fail_second_invalid(tmp_path: pathlib2.Path):
    """A second input that is not JSON must raise ValueError."""
    good = ('{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
            '{"ssbo": [ [2.0] ] } }')
    with pytest.raises(ValueError) as excinfo:
        check_exact_diff(tmp_path, good, 'not_json')
    assert ('ValueError: Second input file did not contain valid SSBO data'
            in str(excinfo))
def test_exactdiff_fail_mismatched_number_of_fields(tmp_path: pathlib2.Path):
    """SSBOs with different field counts must not compare equal."""
    first = ('{"status": "SUCCESS", "log": "...", "outputs": '
             '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}')
    second = ('{"status": "SUCCESS", "log": "...", "outputs": '
              '{"ssbo":[[88]]}}')
    assert check_exact_diff(tmp_path, first, second) != 0
def test_exactdiff_fail_mismatched_field_length(tmp_path: pathlib2.Path):
    """SSBO fields of different lengths must not compare equal."""
    first = ('{"status": "SUCCESS", "log": "...", "outputs": '
             '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}')
    second = ('{"status": "SUCCESS", "log": "...", "outputs": '
              '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}')
    assert check_exact_diff(tmp_path, first, second) != 0
def test_exactdiff_fail_mismatched_field_element(tmp_path: pathlib2.Path):
    """A single differing SSBO element must fail exactdiff."""
    first = ('{"status": "SUCCESS", "log": "...", "outputs": '
             '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}')
    second = ('{"status": "SUCCESS", "log": "...", "outputs": '
              '{"ssbo":[[88],[28,12,14,14,18,16,18,17,28,22,24,24,28]]}}')
    assert check_exact_diff(tmp_path, first, second) != 0
def test_fuzzydiff_pass1(tmp_path: pathlib2.Path):
    """Tiny perturbations fail exactdiff but pass fuzzydiff defaults."""
    exact_vals = (88.0, 1e+6, 1.3e-6, 0.0)
    fuzzed_vals = (exact_vals[0] + 0.00000001,
                   exact_vals[1] + 0.0001,
                   exact_vals[2] + 1.3e-15,
                   exact_vals[3] + 1e-20)
    # The perturbations must be real, i.e. representable differences.
    for exact, fuzzed in zip(exact_vals, fuzzed_vals):
        assert exact != fuzzed
    template = ('{"status": "SUCCESS", "log": "...", "outputs": '
                '{"ssbo":[[%s],[%s,%s,%s]]}}')
    output1 = template % tuple(str(value) for value in exact_vals)
    output2 = template % tuple(str(value) for value in fuzzed_vals)
    assert 0 != check_exact_diff(tmp_path, output1, output2)
    assert 0 == check_fuzzy_diff(tmp_path, output1, output2)
def test_fuzzydiff_pass2(tmp_path: pathlib2.Path):
    """Differences below an explicit --abs_tol pass fuzzydiff."""
    exact_vals = (88.0, 1e+6, 1.3e-6, 0.0)
    fuzzed_vals = tuple(value + 0.00009 for value in exact_vals)
    # The perturbations must be real, i.e. representable differences.
    for exact, fuzzed in zip(exact_vals, fuzzed_vals):
        assert exact != fuzzed
    template = ('{"status": "SUCCESS", "log": "...", "outputs": '
                '{"ssbo":[[%s],[%s,%s,%s]]}}')
    output1 = template % tuple(str(value) for value in exact_vals)
    output2 = template % tuple(str(value) for value in fuzzed_vals)
    assert 0 != check_exact_diff(tmp_path, output1, output2)
    assert 0 == check_fuzzy_diff(tmp_path, output1, output2, extra_args=['--abs_tol=0.0001'])
def test_fuzzydiff_pass3(tmp_path: pathlib2.Path):
    """Differences within both --rel_tol and --abs_tol pass fuzzydiff."""
    exact_vals = (88.0, 1e+6, 1.3e-6, 0.0)
    fuzzed_vals = (exact_vals[0] + 0.0000001,
                   exact_vals[1] + 1.0,
                   exact_vals[2] + 1e-12,
                   exact_vals[3] + 1e-6)
    # The perturbations must be real, i.e. representable differences.
    for exact, fuzzed in zip(exact_vals, fuzzed_vals):
        assert exact != fuzzed
    template = ('{"status": "SUCCESS", "log": "...", "outputs": '
                '{"ssbo":[[%s],[%s,%s,%s]]}}')
    output1 = template % tuple(str(value) for value in exact_vals)
    output2 = template % tuple(str(value) for value in fuzzed_vals)
    assert 0 != check_exact_diff(tmp_path, output1, output2)
    assert 0 == check_fuzzy_diff(tmp_path, output1, output2,
                                 extra_args=['--rel_tol=1e-06', '--abs_tol=1e-06'])
def test_fuzzydiff_fail_first_invalid(tmp_path: pathlib2.Path):
    """fuzzydiff must reject a first input that is not valid JSON.

    Fix: this test previously called check_exact_diff (copy-paste from the
    exactdiff tests), so the fuzzydiff code path was never exercised.
    """
    with pytest.raises(ValueError) as value_error:
        check_fuzzy_diff(tmp_path, (
            'not_json'), (
            '{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
            '{"ssbo": [ [2.0] ] } }'))
    assert 'ValueError: First input file did not contain valid SSBO data' in str(value_error)
def test_fuzzydiff_fail_second_invalid(tmp_path: pathlib2.Path):
    """fuzzydiff must reject a second input that is not valid JSON.

    Fix: this test previously called check_exact_diff (copy-paste from the
    exactdiff tests), so the fuzzydiff code path was never exercised.
    """
    with pytest.raises(ValueError) as value_error:
        check_fuzzy_diff(tmp_path, (
            '{"status": "IGNORED_DURING_DIFF", "log": "#### Different stuff", "outputs": '
            '{"ssbo": [ [2.0] ] } }'), (
            'not_json'))
    assert 'ValueError: Second input file did not contain valid SSBO data' in str(value_error)
def test_fuzzydiff_fail_mismatched_number_of_fields(tmp_path: pathlib2.Path):
    """fuzzydiff must fail when the SSBOs have different field counts.

    Fix: this test previously called check_exact_diff (copy-paste from the
    exactdiff tests), so the fuzzydiff code path was never exercised.
    """
    assert 0 != check_fuzzy_diff(tmp_path, (
        '{"status": "SUCCESS", "log": "...", "outputs": '
        '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
        '{"status": "SUCCESS", "log": "...", "outputs": '
        '{"ssbo":[[88]]}}'))
def test_fuzzydiff_fail_mismatched_field_length(tmp_path: pathlib2.Path):
    """fuzzydiff must fail when SSBO fields have different lengths.

    Fix: this test previously called check_exact_diff (copy-paste from the
    exactdiff tests), so the fuzzydiff code path was never exercised.
    """
    assert 0 != check_fuzzy_diff(tmp_path, (
        '{"status": "SUCCESS", "log": "...", "outputs": '
        '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28,26]]}}'), (
        '{"status": "SUCCESS", "log": "...", "outputs": '
        '{"ssbo":[[88],[28,12,14,14,18,16,18,18,28,22,24,24,28]]}}'))
def test_fuzzydiff_fail_mismatched_field_element(tmp_path: pathlib2.Path):
    """A difference larger than the tolerances must fail fuzzydiff."""
    exact_vals = (88.0, 1e+6, 1.3e-6, 0.0)
    fuzzed_vals = (exact_vals[0] + 0.0000001,
                   exact_vals[1] + 1.0,
                   exact_vals[2] + 1e-12,
                   exact_vals[3] + 1e-4)  # Too big a difference
    # The perturbations must be real, i.e. representable differences.
    for exact, fuzzed in zip(exact_vals, fuzzed_vals):
        assert exact != fuzzed
    template = ('{"status": "SUCCESS", "log": "...", "outputs": '
                '{"ssbo":[[%s],[%s,%s,%s]]}}')
    output1 = template % tuple(str(value) for value in exact_vals)
    output2 = template % tuple(str(value) for value in fuzzed_vals)
    assert 0 != check_exact_diff(tmp_path, output1, output2)
    assert 0 != check_fuzzy_diff(tmp_path, output1, output2,
                                 extra_args=['--rel_tol=1e-06', '--abs_tol=1e-06'])
def test_bad_rel_tol():
    """A non-numeric --rel_tol value must be rejected."""
    argv = ['fuzzydiff', '1.json', '2.json', '--rel_tol=notafloat']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Positive floating-point value required for --rel_tol argument'
            in str(excinfo))
def test_bad_rel_tol2():
    """A zero --rel_tol value must be rejected."""
    argv = ['fuzzydiff', '1.json', '2.json', '--rel_tol=0.0']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Positive floating-point value required for --rel_tol argument'
            in str(excinfo))
def test_bad_rel_tol3():
    """A negative --rel_tol value must be rejected."""
    argv = ['fuzzydiff', '1.json', '2.json', '--rel_tol=-0.1']
    with pytest.raises(ValueError) as excinfo:
        inspect_compute_results.main_helper(argv)
    assert ('ValueError: Positive floating-point value required for --rel_tol argument'
            in str(excinfo))
def test_bad_abs_tol():
    # A non-numeric --abs_tol value must be rejected with a ValueError.
    args = ['fuzzydiff', '1.json', '2.json', '--abs_tol=notafloat']
    with pytest.raises(ValueError) as value_error:
        inspect_compute_results.main_helper(args)
    expected = ('ValueError: Non-negative floating-point value required '
                'for --abs_tol argument')
    assert expected in str(value_error)
def test_bad_abs_tol2():
    # --abs_tol allows zero but not negative values.
    args = ['fuzzydiff', '1.json', '2.json', '--abs_tol=-0.1']
    with pytest.raises(ValueError) as value_error:
        inspect_compute_results.main_helper(args)
    expected = ('ValueError: Non-negative floating-point value required '
                'for --abs_tol argument')
    assert expected in str(value_error)
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import requests
import sendfile
import six
import six.moves.urllib.parse as urlparse
from ironic.common import exception
from ironic.common.i18n import _
LOG = logging.getLogger(__name__)
# Images are streamed to disk in fixed-size chunks (see
# HttpImageService.download) rather than read into memory whole.
IMAGE_CHUNK_SIZE = 1024 * 1024  # 1 MiB
CONF = cfg.CONF
# Import this opt early so that it is available when registering
# glance_opts below.
CONF.import_opt('my_ip', 'ironic.netconf')
# Options describing how ironic reaches the glance image service;
# registered under the [glance] section of the configuration file.
glance_opts = [
    cfg.StrOpt('glance_host',
               default='$my_ip',
               help='Default glance hostname or IP address.'),
    cfg.IntOpt('glance_port',
               default=9292,
               help='Default glance port.'),
    cfg.StrOpt('glance_protocol',
               default='http',
               help='Default protocol to use when connecting to glance. '
               'Set to https for SSL.'),
    cfg.ListOpt('glance_api_servers',
                help='A list of the glance api servers available to ironic. '
                'Prefix with https:// for SSL-based glance API servers. '
                'Format is [hostname|IP]:port.'),
    cfg.BoolOpt('glance_api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                'glance.'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number of retries when downloading an image from '
               'glance.'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Authentication strategy to use when connecting to '
               'glance. Only "keystone" and "noauth" are currently '
               'supported by ironic.'),
]
CONF.register_opts(glance_opts, group='glance')
def import_versioned_module(version, submodule=None):
    """Import a versioned glance_service module.

    :param version: glance API version number (e.g. 1 or 2).
    :param submodule: optional submodule name to append to the module path.
    :returns: the imported module, or None if it cannot be imported.
    """
    parts = ['ironic.common.glance_service.v%s' % version]
    if submodule:
        parts.append(submodule)
    return importutils.try_import('.'.join(parts))
def GlanceImageService(client=None, version=1, context=None):
    """Factory returning a GlanceImageService for the requested API version.

    :param client: glance client to use for downloads.
    :param version: glance API version to load.
    :param context: request context.
    :returns: instance of the version-specific GlanceImageService class.
    """
    versioned_module = import_versioned_module(version, 'image_service')
    service_class = getattr(versioned_module, 'GlanceImageService')
    return service_class(client, version, context)
@six.add_metaclass(abc.ABCMeta)
class BaseImageService(object):
    """Provides retrieval of disk images.

    Abstract interface implemented by the HTTP, local-file and glance
    image services below; see protocol_mapping for how an image href
    scheme selects the concrete class.
    """
    @abc.abstractmethod
    def validate_href(self, image_href):
        """Validate image reference.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed.
        :returns: Information needed to further operate with an image.
        """
    @abc.abstractmethod
    def download(self, image_href, image_file):
        """Downloads image to specified location.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed.
        :raises: exception.ImageDownloadFailed.
        """
    @abc.abstractmethod
    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed.
        :returns: dictionary of image properties.
        """
class HttpImageService(BaseImageService):
    """Provides retrieval of disk images using HTTP."""

    def validate_href(self, image_href):
        """Check that an HTTP(S) image reference answers a HEAD request.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if HEAD request failed
            or returned a response code other than 200.
        :returns: Response to HEAD request.
        """
        try:
            head_response = requests.head(image_href)
        except requests.RequestException as e:
            raise exception.ImageRefValidationFailed(image_href=image_href,
                                                     reason=e)
        if head_response.status_code != 200:
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=_("Got HTTP code %s instead of 200 in response to "
                         "HEAD request.") % head_response.status_code)
        return head_response

    def download(self, image_href, image_file):
        """Stream an HTTP(S) image into *image_file*.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed if GET request returned
            a response code other than 200.
        :raises: exception.ImageDownloadFailed if an IOError happened during
            the file write or the GET request failed.
        """
        try:
            get_response = requests.get(image_href, stream=True)
            if get_response.status_code != 200:
                # Not a RequestException, so this propagates unchanged.
                raise exception.ImageRefValidationFailed(
                    image_href=image_href,
                    reason=_("Got HTTP code %s instead of 200 in response to "
                             "GET request.") % get_response.status_code)
            with get_response.raw as source:
                # Copy in IMAGE_CHUNK_SIZE pieces to bound memory usage.
                shutil.copyfileobj(source, image_file, IMAGE_CHUNK_SIZE)
        except (requests.RequestException, IOError) as e:
            raise exception.ImageDownloadFailed(image_href=image_href,
                                                reason=e)

    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if the HEAD request
            failed, returned a code other than 200, or carried no
            Content-Length header.
        :returns: dictionary of image properties.
        """
        head_response = self.validate_href(image_href)
        content_length = head_response.headers.get('Content-Length')
        if content_length is None:
            raise exception.ImageRefValidationFailed(
                image_href=image_href,
                reason=_("Cannot determine image size as there is no "
                         "Content-Length header specified in response "
                         "to HEAD request."))
        return {
            'size': int(content_length),
            'properties': {}
        }
class FileImageService(BaseImageService):
    """Provides retrieval of disk images available locally on the conductor."""

    def validate_href(self, image_href):
        """Resolve a file:// reference to an existing local path.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if the source image file
            does not exist.
        :returns: Path to image file if it exists.
        """
        local_path = urlparse.urlparse(image_href).path
        if os.path.isfile(local_path):
            return local_path
        raise exception.ImageRefValidationFailed(
            image_href=image_href,
            reason=_("Specified image file not found."))

    def download(self, image_href, image_file):
        """Copy a local image into *image_file*, hard-linking if possible.

        :param image_href: Image reference.
        :param image_file: File object to write data to.
        :raises: exception.ImageRefValidationFailed if the source image file
            does not exist.
        :raises: exception.ImageDownloadFailed if exceptions were raised
            while writing to file or creating the hard link.
        """
        source_path = self.validate_href(image_href)
        destination_path = image_file.name
        destination_device = os.stat(destination_path).st_dev
        try:
            # Hard-linking needs the source on the same device plus read
            # and write access to the source file.
            same_device = destination_device == os.stat(source_path).st_dev
            if same_device and os.access(source_path, os.R_OK | os.W_OK):
                image_file.close()
                os.remove(destination_path)
                os.link(source_path, destination_path)
            else:
                # Fall back to a kernel-level copy via sendfile(2).
                source_size = os.path.getsize(source_path)
                with open(source_path, 'rb') as source:
                    sendfile.sendfile(image_file.fileno(), source.fileno(),
                                      0, source_size)
        except Exception as e:
            raise exception.ImageDownloadFailed(image_href=image_href,
                                                reason=e)

    def show(self, image_href):
        """Get dictionary of image properties.

        :param image_href: Image reference.
        :raises: exception.ImageRefValidationFailed if the image file
            specified does not exist.
        :returns: dictionary of image properties.
        """
        source_path = self.validate_href(image_href)
        return {
            'size': os.path.getsize(source_path),
            'properties': {}
        }
# Maps the URL scheme of an image href to the image service class able to
# handle it; hrefs without a scheme fall back to glance (see
# get_image_service).
protocol_mapping = {
    'http': HttpImageService,
    'https': HttpImageService,
    'file': FileImageService,
    'glance': GlanceImageService,
}
def get_image_service(image_href, client=None, version=1, context=None):
    """Get image service instance to download the image.

    :param image_href: String containing href to get image service for.
    :param client: Glance client to be used for download, used only if
        image_href is Glance href.
    :param version: Version of Glance API to use, used only if image_href
        is Glance href.
    :param context: request context, used only if image_href is Glance href.
    :raises: exception.ImageRefValidationFailed if no image service can
        handle specified href.
    :returns: Instance of an image service class that is able to download
        specified image.
    """
    scheme = urlparse.urlparse(image_href).scheme.lower()
    # Hrefs without a scheme are treated as plain glance image UUIDs.
    service_class = protocol_mapping.get(scheme or 'glance')
    if service_class is None:
        raise exception.ImageRefValidationFailed(
            image_href=image_href,
            reason=_('Image download protocol '
                     '%s is not supported.') % scheme
        )
    if service_class == GlanceImageService:
        # The glance factory needs the client/version/context triple.
        return service_class(client, version, context)
    return service_class()
| |
'''PipelineWindows - Tasks for window based read distribution analysis
======================================================================
Requirements:
* bedtools >= 2.21.0
* picardtools >= 1.106
* samtools >= 1.1
* MEDIPS >= 1.15.0
Reference
---------
'''
import os
import re
import collections
import pandas
import math
import numpy
import numpy.ma as ma
import itertools
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.BamTools as BamTools
import CGAT.IOTools as IOTools
import CGAT.Expression as Expression
import CGAT.Bed as Bed
def convertReadsToIntervals(bamfile,
                            bedfile,
                            filtering_quality=None,
                            filtering_dedup=None,
                            filtering_dedup_method='picard',
                            filtering_nonunique=False):
    '''convert reads in *bamfile* to *intervals*.

    This method converts read data into intervals for
    counting based methods.

    This method is not appropriate for RNA-Seq.

    Optional steps include:

    For paired end data, pairs are merged and optionally
    filtered by insert size.

    Arguments
    ---------
    bamfile : string
        Filename of input file in :term:`bam` format.
    bedfile : string
        Filename of output file in :term:`bed` format.
    filtering_quality : int
        If set, remove reads with a quality score below given threshold.
    filtering_dedup : bool
        If True, deduplicate data.
    filtering_dedup_method : string
        Deduplication method. Possible options are ``picard`` and
        ``samtools``.
    filtering_nonunique : bool
        If True, remove non-uniquely matching reads.
    '''
    # NOTE(review): P.run() appears to interpolate this function's locals
    # (and pipeline parameters such as filtering_min_insert_size) into the
    # shell statements -- do not rename locals without confirming.
    track = P.snip(bedfile, ".bed.gz")
    is_paired = BamTools.isPaired(bamfile)
    current_file = bamfile
    tmpdir = P.getTempFilename()
    os.unlink(tmpdir)
    statement = ["mkdir %(tmpdir)s"]
    nfiles = 0
    # BUG FIX: the default filtering_quality=None previously raised a
    # TypeError under Python 3 ("'>' not supported between NoneType and
    # int"); guard against None explicitly.
    if filtering_quality is not None and filtering_quality > 0:
        next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
        statement.append('''samtools view
        -q %(filtering_quality)i -b
        %(current_file)s
        2>> %%(bedfile)s.quality.log
        > %(next_file)s ''' % locals())
        nfiles += 1
        current_file = next_file
    if filtering_nonunique:
        next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
        statement.append('''cat %(current_file)s
        | cgat bam2bam
        --method=filter
        --filter-method=unique,mapped
        --log=%%(bedfile)s.nonunique.log
        > %(next_file)s ''' % locals())
        nfiles += 1
        current_file = next_file
    # BUG FIX: filtering_dedup is documented as a bool; previously an
    # explicit False still triggered deduplication ("is not None").
    if filtering_dedup:
        # Picard's MarkDuplicates requries an explicit bam file.
        next_file = "%(tmpdir)s/bam_%(nfiles)i.bam" % locals()
        if filtering_dedup_method == 'samtools':
            # NOTE(review): this branch pipes stdin/stdout and ignores
            # current_file/next_file -- looks suspicious, confirm.
            statement.append('''samtools rmdup - - ''')
        elif filtering_dedup_method == 'picard':
            statement.append('''picard MarkDuplicates
            INPUT=%(current_file)s
            OUTPUT=%(next_file)s
            ASSUME_SORTED=TRUE
            METRICS_FILE=%(bedfile)s.duplicate_metrics
            REMOVE_DUPLICATES=TRUE
            VALIDATION_STRINGENCY=SILENT
            2>> %%(bedfile)s.markdup.log ''' % locals())
        nfiles += 1
        current_file = next_file
    if is_paired:
        # Merge mates into fragments and filter by insert size; the
        # insert-size and genome placeholders are filled in by P.run().
        statement.append('''cat %(current_file)s
        | cgat bam2bed
        --merge-pairs
        --min-insert-size=%(filtering_min_insert_size)i
        --max-insert-size=%(filtering_max_insert_size)i
        --log=%(bedfile)s.bam2bed.log
        -
        | cgat bed2bed
        --method=sanitize-genome
        --genome-file=%(genome_dir)s/%(genome)s
        --log=%(bedfile)s.sanitize.log
        | cut -f 1,2,3,4
        | sort -k1,1 -k2,2n
        | bgzip > %(bedfile)s''')
    else:
        statement.append('''cat %(current_file)s
        | cgat bam2bed
        --log=%(bedfile)s.bam2bed.log
        -
        | cgat bed2bed
        --method=sanitize-genome
        --genome-file=%(genome_dir)s/%(genome)s
        --log=%(bedfile)s.sanitize.log
        | cut -f 1,2,3,4
        | sort -k1,1 -k2,2n
        | bgzip > %(bedfile)s''')
    statement.append("tabix -p bed %(bedfile)s")
    statement.append("rm -rf %(tmpdir)s")
    statement = " ; checkpoint; ".join(statement)
    P.run()
def countTags(infile, outfile):
    '''count number of tags in bed-file.

    `outfile` will contain the number of tags in `infile`
    counted per chromosome.

    Arguments
    =========
    infile : string
        Input filename in :term:`bed` format
    outfile : string
        Output filename in :term:`tsv` format.
    '''
    # NOTE(review): the %(infile)s / %(outfile)s placeholders are resolved
    # by P.run(), which appears to pick up this function's locals --
    # confirm before renaming any local here.
    statement = '''zcat %(infile)s
    | cgat bed2stats
    --per-contig
    --log=%(outfile)s.log
    >& %(outfile)s'''
    P.run()
def countTagsWithinWindows(tagfile,
                           windowfile,
                           outfile,
                           counting_method="midpoint",
                           job_memory="4G"):
    '''count tags within windows.

    Counting is done using bedtools.

    Arguments
    ---------
    tagfile : string
        Filename with tags to be counted in :term:`bed` format.
    windowfile : string
        Filename with windows in :term:`bed` format.
    outfile : outfile
        Output filename in :term:`bed` format.
    counting_method : string
        Counting method to use. Possible values are ``nucleotide``
        and ``midpoint``.
        midpoint counts the number of reads overlapping the midpoint of the
        window by at least one base
        nucleotide counts the number of reads overlapping the window by at
        least one base.
    job_memory : string
        Amount of memory to allocate.
    '''
    if counting_method == "midpoint":
        # Collapse each tag to a 1-bp interval at its midpoint before
        # computing coverage.
        f = '''| awk '{a = $2+($3-$2)/2;
        printf("%s\\t%i\\t%i\\n", $1, a, a+1)}' '''
    elif counting_method == "nucleotide":
        # Use the tags as-is.
        f = ""
    else:
        raise ValueError("unknown counting method: %s" % counting_method)
    # Note that in version 2.26, coverage changed from reporting
    # A on B to B on A.
    # NOTE(review): job_memory and the placeholders below appear to be
    # consumed by P.run() via local-variable interpolation -- confirm
    # before renaming locals.
    statement = '''
    zcat %(tagfile)s
    %(f)s
    | bedtools coverage -a %(windowfile)s -b stdin -split
    | sort -k1,1 -k2,2n -k3,3n -k4,4
    | gzip
    > %(outfile)s
    '''
    P.run()
def aggregateWindowsTagCounts(infiles,
                              outfile,
                              regex=r"(.*)\..*"):
    '''aggregate output from several ``bedtools coverage`` results.

    ``bedtools coverage`` outputs the following columns for a bed4
    file::

    1 Contig
    2 Start
    3 Stop
    4 Name
    5 The number of features in A that overlapped (by at least one
    base pair) the B interval.
    6 The number of bases in B that had non-zero coverage from features in A.
    7 The length of the entry in B.
    8 The fraction of bases in B that had non-zero coverage from
    features in A.

    This method autodetects the number of columns in the :term:`infiles`
    and selects:

    * bed4: use column 5
    * bed6: use column 7
    * bed12: use column 13

    Arguments
    ---------
    infiles : list
        Input filenames with the output from ``bedtools coverage``
    outfile : string
        Output filename in :term:`tsv` format.
    regex : string
        Regular expression used to extract the track name from the
        filename. The default removes any suffix.
    '''
    # FIX: the regex default is now a raw string -- "\." in a plain
    # string literal is an invalid escape sequence (SyntaxWarning and,
    # eventually, an error in newer Python); the runtime value is
    # unchanged.
    # get bed format
    bed_columns = Bed.getNumColumns(infiles[0])
    # +1 as awk is 1-based
    column = bed_columns - 4 + 1
    # Re-key every file as "contig:start-end<TAB>count" so the files can
    # be pasted side by side.
    src = " ".join(["""<( zcat %s |
    awk '{printf("%%s:%%i-%%i\\t%%i\\n", $1,$2,$3,$%s );}')""" %
                    (x, column) for x in infiles])
    tmpfile = P.getTempFilename(".")
    # NOTE(review): P.run() appears to interpolate locals (src, tmpfile)
    # into `statement` -- confirm before renaming locals.
    statement = '''paste %(src)s > %(tmpfile)s'''
    P.run()
    # build track names
    tracks = [re.search(regex, os.path.basename(x)).groups()[0]
              for x in infiles]
    outf = IOTools.openFile(outfile, "w")
    outf.write("interval_id\t%s\n" % "\t".join(tracks))
    # filter for uniqueness - keys with the same value as the
    # previous line will be ignored.
    last_gene = None
    c = E.Counter()
    for line in open(tmpfile, "r"):
        c.input += 1
        data = line[:-1].split("\t")
        # Even columns hold the interval ids, odd columns the counts;
        # all ids on one pasted line must agree.
        genes = list(set([data[x] for x in range(0, len(data), 2)]))
        values = [int(data[x]) for x in range(1, len(data), 2)]
        assert len(genes) == 1, \
            "paste command failed, wrong number of genes per line: '%s'" % line
        if genes[0] == last_gene:
            c.duplicates += 1
            continue
        c.output += 1
        outf.write("%s\t%s\n" % (genes[0], "\t".join(map(str, values))))
        last_gene = genes[0]
    outf.close()
    os.unlink(tmpfile)
    E.info("aggregateWindowsTagCounts: %s" % c)
def normalizeTagCounts(infile, outfile, method):
    '''normalize Tag counts

    Parameters
    ----------
    infile : string
        Input filename of file with counts.
    outfile : string
        Output filename with normalized counts.
    method : string
        Method to use for normalization.
        can be deseq-size factors, total-column, total-row, total-count
        deseq-size-factors - use normalisation implemented in DEseq
        total-column - divide counts by column total
        total-row - divide counts by the value in a row called 'total'
        total-count - normalised all values in column by the ratio of the
        per column sum of counts and the average column count
        across all rows.
    '''
    # Delegates the actual normalisation to the cgat counts2counts tool;
    # P.run() appears to fill the placeholders from this function's
    # locals -- confirm before renaming locals.
    statement = '''
    zcat %(infile)s
    | cgat counts2counts
    --method=normalize
    --normalization-method=%(method)s
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    '''
    P.run()
def buildDMRStats(infiles, outfile, method, fdr_threshold=None):
    '''build dmr summary statistics.

    This method works from output files created by Expression.py
    (method="deseq" or method="edger") or runMEDIPS (method="medips")

    This method counts the number of up/down, 2fold up/down, etc.
    genes in output from (:mod:`scripts/runExpression`).

    This method also creates diagnostic plots in the
    <exportdir>/<method> directory.

    Arguments
    ---------
    infiles ; list
        List of tabs with DMR output
    outfile : string
        Output filename. Tab separated file summarizing
    method : string
        Method name
    fdr_threshold : float
        FDR threshold to apply. Currently unused.
    '''
    # Per-(treatment, control) counters: `results` accumulates the
    # up/down/significance tallies, `status` the per-record status codes.
    results = collections.defaultdict(lambda: collections.defaultdict(int))
    status = collections.defaultdict(lambda: collections.defaultdict(int))
    # deseq/edger
    # Small predicates over one result record (a named row from
    # IOTools.iterate).
    def f_significant(x):
        return x.significant == "1"
    def f_up(x):
        return float(x.l2fold) > 0
    def f_down(x):
        return float(x.l2fold) < 0
    def f_fold2up(x):
        return float(x.l2fold) > 1
    def f_fold2down(x):
        return float(x.l2fold) < -1
    def f_key(x):
        return (x.treatment_name, x.control_name)
    def f_status(x):
        return x.status
    outf = IOTools.openFile(outfile, "w")
    is_first = True
    for infile in infiles:
        # NOTE(review): `xx` is never incremented, so the `xx > 10000`
        # early-exit below can never fire -- looks like leftover
        # debugging code.
        xx = 0
        for line in IOTools.iterate(IOTools.openFile(infile)):
            key = f_key(line)
            r, s = results[key], status[key]
            r["tested"] += 1
            ss = f_status(line)
            s[ss] += 1
            # Records whose status is not OK are counted but otherwise
            # excluded from the fold-change tallies.
            if ss != "OK":
                continue
            is_significant = f_significant(line)
            up = f_up(line)
            down = f_down(line)
            fold2up = f_fold2up(line)
            fold2down = f_fold2down(line)
            fold2 = fold2up or fold2down
            if up:
                r["up"] += 1
            if down:
                r["down"] += 1
            if fold2up:
                r["l2fold_up"] += 1
            if fold2down:
                r["l2fold_down"] += 1
            if is_significant:
                r["significant"] += 1
                if up:
                    r["significant_up"] += 1
                if down:
                    r["significant_down"] += 1
                if fold2:
                    r["fold2"] += 1
                    if fold2up:
                        r["significant_l2fold_up"] += 1
                    if fold2down:
                        r["significant_l2fold_down"] += 1
            if xx > 10000:
                break
        if is_first:
            # The header is emitted only after the first file has been
            # processed, when the full set of counter keys is known.
            is_first = False
            header1, header2 = set(), set()
            for r in list(results.values()):
                header1.update(list(r.keys()))
            for s in list(status.values()):
                header2.update(list(s.keys()))
            header = ["method", "treatment", "control"]
            header1 = list(sorted(header1))
            header2 = list(sorted(header2))
            outf.write("\t".join(header + header1 + header2) + "\n")
    # One summary row per (treatment, control) pair.
    for treatment, control in list(results.keys()):
        key = (treatment, control)
        r = results[key]
        s = status[key]
        outf.write("%s\t%s\t%s\t" % (method, treatment, control))
        outf.write("\t".join([str(r[x]) for x in header1]) + "\t")
        outf.write("\t".join([str(s[x]) for x in header2]) + "\n")
def buildFDRStats(infile, outfile, method):
    '''compute number of windows called at different FDR.

    .. note::
        This method is incomplete

    Arguments
    ---------
    infile : string
        Input filename in :term:`tsv` format. Typically the output
        from :mod:`scripts/runExpression`.
    outfile : string
        Output filename in :term:`tsv` format.
    method : string
        Method name.
    '''
    raise NotImplementedError("function is incomplete")
    # NOTE(review): everything below the raise is unreachable dead code,
    # kept as a sketch of the intended implementation.  Also note
    # print("fdr") prints the literal string, not the fdr value.
    data = pandas.read_csv(IOTools.openFile(infile), sep="\t", index_col=0)
    assert data['treatment_name'][0] == data['treatment_name'][-1]
    assert data['control_name'][0] == data['control_name'][-1]
    treatment_name, control_name = data[
        'treatment_name'][0], data['control_name'][0]
    key = (treatment_name, control_name)
    fdrs = (0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
    for fdr in fdrs:
        print("fdr")
        take = data['qvalue'] <= fdr
        significant = sum(take)
        print(significant)
def outputAllWindows(infile, outfile):
    '''write all windows to a bed file, scoring each with its l2fold
    change.

    Arguments
    ---------
    infile : string
        Input filename in :term:`tsv` format. Typically the output
        from :mod:`scripts/runExpression`.
    outfile : string
        Output filename in :term:`bed` format.
    '''
    outf = IOTools.openFile(outfile, "w")
    for row in IOTools.iterate(IOTools.openFile(infile)):
        fields = (row.contig, row.start, row.end,
                  "%6.4f" % float(row.l2fold))
        outf.write("\t".join(fields) + "\n")
    outf.close()
def outputRegionsOfInterest(design_file, counts_file, outfile,
                            max_per_sample=10, sum_per_group=40):
    '''output windows according to various filters.

    The output is a mock analysis similar to a differential expression
    result.

    Arguments
    ---------
    design_file : string
        Filename with experimental design
    counts_file : string
        :term:`tsv` formatted file with counts per windows
    outfile : string
        Output filename in :term:`tsv` format
    max_per_sample : int
        Remove samples with more than threshold counts
    sum_per_group : int
        Minimum counts per group.
    '''
    # NOTE(review): job_memory and the statement placeholders below appear
    # to be consumed by P.run() via local-variable interpolation --
    # confirm before renaming locals.
    job_memory = "64G"
    design = Expression.readDesignFile(design_file)
    # remove tracks not included in the design
    design = dict([(x, y) for x, y in list(design.items()) if y.include])
    # define the two groups
    groups = sorted(set([x.group for x in list(design.values())]))
    # build a filtering statement
    groupA, groupB = groups
    # Each helper renders a python expression (evaluated later by
    # cgat csv_select) over the count columns of one group.
    def _buildMax(g, threshold):
        selected = [x for x, y in list(design.items()) if y.group == g]
        if len(selected) > 1:
            return "max((%s)) < %f" % (
                ",".join(
                    ["int(r['%s'])" % x for x in selected]),
                threshold)
        elif len(selected) == 1:
            return "int(r['%s']) < %f" % (selected[0], threshold)
        else:
            # BUG FIX: the format string previously lacked a conversion
            # specifier ("'g'" % g raised TypeError instead of ValueError).
            raise ValueError("no groups found for '%s'" % g)
    def _buildSum(g, threshold):
        selected = [x for x, y in list(design.items()) if y.group == g]
        if len(selected) > 1:
            return "sum((%s)) > %f" % (
                ",".join(
                    ["int(r['%s'])" % x for x in selected]),
                threshold)
        elif len(selected) == 1:
            return "int(r['%s']) > %f" % (selected[0], threshold)
        else:
            # BUG FIX: same broken format string as in _buildMax.
            raise ValueError("no groups found for '%s'" % g)
    upper_levelA = _buildMax(groupA, max_per_sample)
    upper_levelB = _buildMax(groupB, max_per_sample)
    sum_levelA = _buildSum(groupA, sum_per_group)
    sum_levelB = _buildSum(groupB, sum_per_group)
    statement = '''
    zcat %(counts_file)s
    | cgat csv_select
    --log=%(outfile)s.log
    "(%(upper_levelA)s and %(sum_levelB)s) or
    (%(upper_levelB)s and %(sum_levelA)s)"
    | cgat runExpression
    --log=%(outfile)s.log
    --design-tsv-file=%(design_file)s
    --tags-tsv-file=-
    --method=mock
    --filter-min-counts-per-sample=0
    | gzip
    > %(outfile)s
    '''
    P.run()
def runDE(design_file,
          counts_file,
          outfile,
          outdir,
          method="deseq",
          spike_file=None):
    '''run DESeq, DESeq2 or EdgeR through :mod:`scripts/runExpression.py`

    The job is split into smaller sections. The order of the input
    data is randomized in order to avoid any biases due to chromosomes
    and break up local correlations.

    At the end, a q-value is computed from all results.

    Arguments
    ---------
    design_file : string
        Filename with experimental design
    counts_file : string
        :term:`tsv` formatted file with counts per windows
    outfile : string
        Output filename in :term:`tsv` format.
    outdir : string
        Directory for additional output files.
    method : string
        Method to use. See :mod:`scripts/runExpression.py`.
    spike_file : string
        Filename with spike-in data to add before processing.
    '''
    # NOTE(review): the %(...)s placeholders mix this function's locals
    # with pipeline parameters (deseq_fit_type, edger_fdr, cmd-farm, ...);
    # P.run() appears to resolve both -- confirm before renaming locals.
    if spike_file is None:
        statement = "zcat %(counts_file)s"
    else:
        # Concatenate the spike-in table with the real counts first.
        statement = '''cgat combine_tables
        --missing-value=0
        --cat=filename
        --log=%(outfile)s.log
        %(counts_file)s %(spike_file)s
        | cgat csv_cut
        --remove filename
        --log=%(outfile)s.log
        '''
    prefix = IOTools.snip(os.path.basename(outfile))
    E.info(prefix)
    # the post-processing strips away the warning,
    # renames the qvalue column to old_qvalue
    # and adds a new qvalue column after recomputing
    # over all windows.
    statement += '''
    | cgat randomize_lines --keep-header=1
    | %(cmd-farm)s
    --input-header
    --output-header
    --split-at-lines=200000
    --cluster-options="-l mem_free=16G"
    --log=%(outfile)s.log
    --output-filename-pattern=%(outdir)s/%%s
    --subdirs
    --output-regex-header="^test_id"
    "cgat runExpression
    --method=%(method)s
    --tags-tsv-file=-
    --design-tsv-file=%(design_file)s
    --output-filename-pattern=%%DIR%%%(prefix)s_
    --deseq-fit-type=%(deseq_fit_type)s
    --deseq-dispersion-method=%(deseq_dispersion_method)s
    --deseq-sharing-mode=%(deseq_sharing_mode)s
    --edger-dispersion=%(edger_dispersion)f
    --deseq2-design-formula=%(deseq2_model)s
    --deseq2-contrasts=%(deseq2_contrasts)s
    --filter-min-counts-per-row=%(tags_filter_min_counts_per_row)i
    --filter-min-counts-per-sample=%(tags_filter_min_counts_per_sample)i
    --filter-percentile-rowsums=%(tags_filter_percentile_rowsums)i
    --log=%(outfile)s.log
    --fdr=%(edger_fdr)f
    --deseq2-plot=0"
    | perl -p -e "s/qvalue/old_qvalue/"
    | cgat table2table
    --log=%(outfile)s.log
    --method=fdr
    --column=pvalue
    --fdr-method=BH
    --fdr-add-column=qvalue
    | gzip
    > %(outfile)s '''
    E.info(statement)
    P.run()
def normalizeBed(countsfile, outfile):
    '''normalize counts in a bed file to total library size.

    Use :func:`Pipeline.submit` to send to cluster.

    Arguments
    ---------
    countsfile : string
        Filename with count data in :term:`tsv` format
    outfile : string
        Output filename in :term:`bedGraph` format.
    '''
    counts = pandas.read_table(countsfile,
                               sep="\t",
                               compression="gzip",
                               header=0,
                               index_col=0)
    counts = counts.fillna(0.0)
    # Explicitly cast to int64: numpy.log cannot handle python longs and
    # would otherwise raise a (misleading) AttributeError.
    geom_mean = geoMean(numpy.array(counts.values, dtype=numpy.int64))
    # DESeq-style size factors: the per-column median of the ratio to the
    # geometric mean across all rows.
    ratios = counts.apply(lambda col: col / geom_mean, axis=0)
    size_factors = ratios.apply(numpy.median, axis=0)
    normalized = counts / size_factors
    # Division by a zero size factor leaves infs behind; replace them by
    # NaN first, then zero out all NaNs.
    normalized.replace([numpy.inf, -numpy.inf], numpy.nan, inplace=True)
    normalized = normalized.fillna(0.0)
    normalized.to_csv(outfile, sep="\t", index_label="interval")
def geoMean(array):
    '''
    Generate the geometric mean of a list or array,
    removing all zero-values but retaining total length

    :param array: list, numpy array or pandas DataFrame of numeric values.
    :returns: geometric mean of the non-zero entries (zeros are masked
        out rather than treated as ones, so they do not force the mean
        to zero).
    '''
    if isinstance(array, pandas.core.frame.DataFrame):
        # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and
        # removed in 1.0; .values is the long-standing equivalent.
        array = array.values
    non_zero = ma.masked_values(array,
                                0)
    log_a = ma.log(non_zero)
    geom_mean = ma.exp(log_a.mean())
    return geom_mean
def enrichmentVsInput(infile, outfile):
    '''
    Calculate the fold enrichment of the test data
    vs. the input data

    Parameters
    ----------
    infile: list
        list of filenames
    infile[0]: str
        filename of normalised :term:`bedGraph` file showing counts in
        the input
    infile[1]: str
        filename of normalised :term:`bedGraph` files showing
        counts in each experiment
    outfile: str
        filename of output :term:`bedGraph` file
    '''
    experiment = pandas.read_table(infile[1],
                                   sep="\t",
                                   compression="gzip",
                                   header=None,
                                   index_col=None)
    control = pandas.read_table(infile[0],
                                sep="\t",
                                compression="gzip",
                                header=None,
                                index_col=None)
    # Left-join on contig/start/end (columns 0,1,2); the two count
    # columns acquire the suffixes 3_x (experiment) and 3_y (input).
    merged = pandas.merge(experiment, control,
                          how='left',
                          left_on=[0, 1, 2],
                          right_on=[0, 1, 2])
    # log2 ratio of input over experiment, with a +1 pseudocount on both.
    merged[4] = merged.apply(
        lambda row: math.log((row['3_y'] + 1.0) / (row['3_x'] + 1.0), 2),
        axis=1)
    merged[[0, 1, 2, 4]].to_csv(outfile,
                                sep="\t",
                                header=None,
                                index=None)
def runMEDIPSQC(infile, outfile):
    '''run QC using the MEDIPS package.

    The QC data will be stored in the directory
    :file:`./medips.dir`

    Arguments
    ---------
    infile : string
        Filename of :term:`bam` formatted file
    outfile : string
        Output filename. Containts logging information.
    '''
    # note that the wrapper adds the filename
    # to the output filenames.
    # NOTE(review): job_memory and the medips_* placeholders appear to be
    # resolved by P.run() from locals/pipeline parameters -- confirm
    # before renaming locals.
    job_memory = "10G"
    statement = """cgat runMEDIPS
    --ucsc-genome=%(medips_genome)s
    --treatment=%(infile)s
    --toolset=saturation
    --toolset=coverage
    --toolset=enrichment
    --shift=%(medips_shift)s
    --extend=%(medips_extension)s
    --output-filename-pattern="medips.dir/%%s"
    --log=%(outfile)s.log
    | gzip
    > %(outfile)s
    """
    P.run()
def runMEDIPSDMR(design_file, outfile):
    '''run differential methylation analysis using MEDIPS package.

    One DMR comparison is run for every pair of groups in the design.

    Arguments
    ---------
    design_file : string
        Filename with the experimental design (see
        Expression.readDesignFile).
    outfile : string
        Output filename in :term:`tsv` format.
    '''
    job_memory = "30G"
    design = Expression.readDesignFile(design_file)
    # remove data tracks not needed
    design = [(x, y) for x, y in list(design.items()) if y.include]
    # build groups
    groups = set([y.group for x, y in design])
    # NOTE(review): P.run() appears to execute the accumulated
    # `statements` list using this function's locals -- confirm before
    # renaming locals.
    statements = []
    for pair1, pair2 in itertools.combinations(groups, 2):
        treatment = ["%s.bam" % x for x, y in design if y.group == pair1]
        control = ["%s.bam" % x for x, y in design if y.group == pair2]
        treatment = ",".join(treatment)
        control = ",".join(control)
        # outfile contains directory prefix
        statements.append(
            """cgat runMEDIPS
            --ucsc-genome=%(medips_genome)s
            --treatment=%(treatment)s
            --control=%(control)s
            --toolset=dmr
            --shift=%(medips_shift)s
            --extend=%(medips_extension)s
            --window-size=%(medips_window_size)i
            --output-filename-pattern="%(outfile)s_%(pair1)s_vs_%(pair2)s_%%s"
            --fdr-threshold=%(medips_fdr)f
            --log=%(outfile)s.log
            > %(outfile)s.log2;
            checkpoint;
            zcat %(outfile)s_%(pair1)s_vs_%(pair2)s_data.tsv.gz
            | cgat runMEDIPS
            --treatment=%(pair1)s
            --control=%(pair2)s
            --toolset=convert
            --fdr-threshold=%(medips_fdr)f
            --log=%(outfile)s.log
            | gzip
            > %(outfile)s
            """)
    P.run()
@P.cluster_runnable
def outputSpikeCounts(outfile, infile_name,
                      expression_nbins=None,
                      fold_nbins=None,
                      expression_bins=None,
                      fold_bins=None):
    """count significant results in bins of expression and fold change.

    This method groups the results of a DE analysis in a 2-dimensonal
    histogramy by tag counts/expression level and fold change.

    Either supply one of `nbins` or `bins` for the histograms.

    Arguments
    ---------
    outfile : string
        Output filename
    infile_name : string
        Input filename in :term:`tsv` format. Usually the output of
        :mod:`scripts/runExpression`.
    expression_nbins : int
        Number of bins to use for tag count histogram.
    fold_nbins : int
        Number of bins to use for fold-change histogram.
    expression_bins : list
        List of bins to use for tag count histogram.
    fold_bins : list
        List of bins to use for fold-change histogram.
    """
    df = pandas.read_csv(infile_name,
                         sep="\t",
                         index_col=0)
    E.debug("read %i rows and %i columns of data" % df.shape)
    if "edger" in outfile.lower():
        # edger: treatment_mean and control_mean do not exist
        # use supplied values directly.
        # NOTE(review): despite the name, l10average is a natural log
        # (numpy.log), not log10.
        l10average = numpy.log(df['treatment_mean'])
        l2fold = numpy.log2(df['fold'])
    else:
        # use pseudocounts to compute fold changes
        treatment_mean = df['treatment_mean'] + 1
        control_mean = df['control_mean'] + 1
        # build log2 average values
        l10average = numpy.log((treatment_mean + control_mean) / 2)
        l2fold = numpy.log2(treatment_mean / control_mean)
    # When nbins is given it overrides any explicit bin list.
    if expression_nbins is not None:
        mm = math.ceil(max(l10average))
        expression_bins = numpy.arange(0, mm, mm / expression_nbins)
    if fold_nbins is not None:
        mm = math.ceil(max(abs(min(l2fold)), abs(max(l2fold))))
        # ensure that range is centered on exact 0
        n = math.ceil(fold_nbins / 2.0)
        fold_bins = numpy.concatenate(
            (-numpy.arange(0, mm, mm / n)[:0:-1],
             numpy.arange(0, mm, mm / n)))
    # compute expression bins
    d2hist_counts, xedges, yedges = numpy.histogram2d(
        l10average, l2fold,
        bins=(expression_bins,
              fold_bins))
    dd = pandas.DataFrame(d2hist_counts)
    dd.index = list(xedges[:-1])
    dd.columns = list(yedges[:-1])
    dd.to_csv(IOTools.openFile(outfile, "w"),
              sep="\t")
    return df, d2hist_counts, xedges, yedges, l10average, l2fold
@P.cluster_runnable
def plotDETagStats(infile, composition_file, outfile):
    '''plot differential expression statistics.

    Delegates the actual plotting to :func:`Expression.plotDETagStats`,
    joining the DE results with per-window nucleotide composition data
    on genomic coordinates.

    Arguments
    ---------
    infile : string
        Filename with :term:`tsv` formatted list of differential
        methylation results output from :doc:`scripts/runExpression`.
    composition_file : string
        Filename with :term:`tsv` formatted data about nucleotide
        compositions of windows tested.
    outfile : string
        Output filename, used as sentinel only.
    '''
    # columns shared between the DE results and the composition table
    join_on = ("contig", "start", "end")
    # extra per-window annotations to include in the plots
    extra = ("CpG_density", "length")
    Expression.plotDETagStats(infile, outfile,
                              additional_file=composition_file,
                              join_columns=join_on,
                              additional_columns=extra)
    # create the sentinel file marking successful completion
    P.touch(outfile)
@P.cluster_runnable
def buildSpikeResults(infile, outfile):
    '''build matrices with results from spike-in and upload
    into database.

    The method will output several files:

    .spiked.gz: Number of intervals that have been spiked-in
       for each bin of expression and fold-change

    .power.gz: Global power analysis - aggregates over all
       ranges of fold-change and expression and outputs the
       power, the proportion of intervals overall that
       could be detected as differentially methylated.

       This is a table with the following columns:

       fdr - fdr threshold
       power - power level, number of intervals detectable
       intervals - number of intervals in observed data at given
                   level of fdr and power.
       intervals_percent - percentage of intervals in observed data
             at given level of fdr and power

    The method will also upload the results into the database.

    Arguments
    ---------
    infile : string
        Input filename in :term:`tsv` format. Usually the output of
        :mod:`scripts/runExpression`.
    outfile : string
        Output filename in :term:`tsv` format.
    '''
    expression_nbins = 10
    fold_nbins = 10

    spikefile = P.snip(infile, '.tsv.gz') + '.spike.gz'

    if not os.path.exists(spikefile):
        # no spike-in data available - leave only the sentinel file
        E.warn('no spike data: %s' % spikefile)
        P.touch(outfile)
        return

    ########################################
    # output and load spiked results
    tmpfile_name = P.getTempFilename(shared=True)

    # NOTE: P.run() interpolates %(...)s placeholders from the caller's
    # local variables - do not rename spikefile/tmpfile_name/infile.
    statement = '''zcat %(spikefile)s
    | grep -e "^spike" -e "^test_id"
    > %(tmpfile_name)s
    '''
    P.run()

    E.debug("outputting spiked counts")
    (spiked, spiked_d2hist_counts, xedges, yedges,
     spiked_l10average, spiked_l2fold) = \
        outputSpikeCounts(
            outfile=P.snip(outfile, ".power.gz") + ".spiked.gz",
            infile_name=tmpfile_name,
            expression_nbins=expression_nbins,
            fold_nbins=fold_nbins)

    ########################################
    # output and load unspiked results
    statement = '''zcat %(infile)s
    | grep -v -e "^spike"
    > %(tmpfile_name)s
    '''
    P.run()
    E.debug("outputting unspiked counts")

    # re-use the spiked bin edges so both histograms are comparable
    (unspiked, unspiked_d2hist_counts, unspiked_xedges,
     unspiked_yedges, unspiked_l10average, unspiked_l2fold) = \
        outputSpikeCounts(
            outfile=P.snip(outfile, ".power.gz") + ".unspiked.gz",
            infile_name=tmpfile_name,
            expression_bins=xedges,
            fold_bins=yedges)

    E.debug("computing power")

    # FIX: compare the bin edges element-wise. The previous form
    # ``xedges.all() == unspiked_xedges.all()`` only compared two scalar
    # booleans and would pass even for completely different edge arrays.
    assert (xedges == unspiked_xedges).all()

    tmpfile = IOTools.openFile(tmpfile_name, "w")
    tmpfile.write("\t".join(
        ("expression",
         "fold",
         "fdr",
         "counts",
         "percent")) + "\n")

    fdr_thresholds = [0.01, 0.05] + list(numpy.arange(0.1, 1.0, 0.1))
    power_thresholds = numpy.arange(0.1, 1.1, 0.1)

    spiked_total = float(spiked_d2hist_counts.sum().sum())
    unspiked_total = float(unspiked_d2hist_counts.sum().sum())

    outf = IOTools.openFile(outfile, "w")
    outf.write("fdr\tpower\tintervals\tintervals_percent\n")

    # significant results
    for fdr in fdr_thresholds:
        take = spiked['qvalue'] < fdr

        # compute 2D histogram in spiked data below fdr threshold
        spiked_d2hist_fdr, xedges, yedges = \
            numpy.histogram2d(spiked_l10average[take],
                              spiked_l2fold[take],
                              bins=(xedges, yedges))

        # convert to percentage of spike-ins per bin
        spiked_d2hist_fdr_normed = spiked_d2hist_fdr / spiked_d2hist_counts
        spiked_d2hist_fdr_normed = numpy.nan_to_num(spiked_d2hist_fdr_normed)

        # set values without data to -1
        spiked_d2hist_fdr_normed[spiked_d2hist_counts == 0] = -1.0

        # output to table for database upload
        for x, y in itertools.product(list(range(len(xedges) - 1)),
                                      list(range(len(yedges) - 1))):
            tmpfile.write("\t".join(map(
                str, (xedges[x], yedges[y],
                      fdr,
                      spiked_d2hist_fdr[x, y],
                      100.0 * spiked_d2hist_fdr_normed[x, y]))) + "\n")

        # take elements in spiked_hist_fdr above a certain threshold
        for power in power_thresholds:
            # select 2D bins at a given power level
            power_take = spiked_d2hist_fdr_normed >= power

            # select the counts in the unspiked data according
            # to this level
            power_counts = unspiked_d2hist_counts[power_take]

            outf.write("\t".join(map(
                str, (fdr, power,
                      power_counts.sum().sum(),
                      100.0 * power_counts.sum().sum() /
                      unspiked_total))) + "\n")

    tmpfile.close()
    outf.close()

    # upload into table
    method = P.snip(os.path.dirname(outfile), ".dir")
    tablename = P.toTable(
        P.snip(outfile, "power.gz") + method + ".spike.load")

    P.load(tmpfile_name,
           outfile + ".log",
           tablename=tablename,
           options="--add-index=fdr")

    os.unlink(tmpfile_name)
def summarizeTagsWithinContext(tagfile,
                               contextfile,
                               outfile,
                               min_overlap=0.5,
                               job_memory="4G"):
    '''count occurrences of tags in genomic context.

    Examines the genomic context to where tags align.

    A tag is assigned to the genomic context that it
    overlaps by at least 50%. Thus some reads mapping
    several contexts might be dropped.

    Arguments
    ---------
    tagfile : string
        Filename with tags. The file can be :term:`bam` or :term:`bed` format.
    contextfile : string
        Filename of :term:`bed` formatted files with named intervals (BED4).
    outfile : string
        Output in :term:`tsv` format.
    min_overlap : float
        Minimum overlap (fraction) to count features as overlapping.
    job_memory : string
        Memory to reserve.
    '''
    # NOTE(review): `statement` and `job_memory` look unused here, but
    # P.run() presumably interpolates the %(...)s placeholders and reads
    # job options from the caller's local variables -- confirm against the
    # pipeline framework before renaming any local in this function.
    statement = '''
    cgat bam_vs_bed
    --min-overlap=%(min_overlap)f
    --log=%(outfile)s.log
    %(tagfile)s %(contextfile)s
    | gzip
    > %(outfile)s
    '''

    P.run()
def mergeSummarizedContextStats(infiles, outfile, samples_in_columns=False):
    """combine output from :func:`summarizeTagsWithinContext`.

    Arguments
    ---------
    infiles : list
        List of filenames in :term:`tsv` format
    outfile : string
        Output filename in :term:`tsv` format.
    samples_in_columns :
        If True, put samples in columns. The default is to put them
        in rows.
    """
    # derive one track name per input file
    header = ",".join([P.snip(os.path.basename(x), ".contextstats.tsv.gz")
                       for x in infiles])
    filenames = " ".join(infiles)

    if not samples_in_columns:
        # transpose so that samples end up in rows (the default layout).
        # FIX: dropped a spurious '% P.getParams()' that was applied to this
        # placeholder-free snippet - it was a no-op, but would raise if a
        # literal '%' were ever added to the command.
        transpose_cmd = \
            """| cgat table2table
            --transpose"""
    else:
        transpose_cmd = ""

    # P.run() interpolates header/filenames/transpose_cmd/outfile from locals
    statement = """cgat combine_tables
    --header-names=%(header)s
    --missing-value=0
    --skip-titles
    %(filenames)s
    | perl -p -e "s/bin/track/; s/\?/Q/g"
    %(transpose_cmd)s
    | gzip
    > %(outfile)s
    """

    P.run()
def loadSummarizedContextStats(infiles,
                               outfile,
                               suffix=".contextstats.tsv.gz"):
    """merge output from :func:`summarizeTagsWithinContext` and load into database.

    Arguments
    ---------
    infiles : list
        List of filenames in :term:`tsv` format. The files should end
        in suffix.
    outfile : string
        Output filename, the table name is derived from `outfile`.
    suffix : string
        Suffix to remove from filename for track name.
    """
    # one track name per input file, derived by stripping the suffix
    header = ",".join([P.snip(os.path.basename(x), suffix)
                       for x in infiles])
    filenames = " ".join(infiles)

    load_statement = P.build_load_statement(
        P.toTable(outfile),
        options="--add-index=track")

    # NOTE(review): P.run() presumably interpolates the %(...)s placeholders
    # from this function's locals -- do not rename header/filenames/
    # load_statement/outfile.
    statement = """cgat combine_tables
    --header-names=%(header)s
    --missing-value=0
    --skip-titles
    %(filenames)s
    | perl -p -e "s/bin/track/; s/\?/Q/g"
    | cgat table2table --transpose
    | %(load_statement)s
    > %(outfile)s
    """
    P.run()
def testTagContextOverlap(tagfile,
                          contextfile,
                          workspace,
                          outfile,
                          job_threads=1,
                          samples=10000,
                          options=""):
    """use gat to test for overlap between tags and genomic context.

    Arguments
    ---------
    tagfile : string
        Filename with read tags :term:`bed` format. Tags can be
        overlapping.
    contextfile : string
        Filename with genomic context information in :term:`bed`
        format.
    workspace : string
        Genomic workspace for gat simulations in :term:`bed` format.
    outfile : string
        Output filename in :term:`tsv` format.
    job_threads : int
        Number of threads to use.
    samples : int
        Number of samples to compute.
    options : string
        Options to pass to the gat program.
    """
    # NOTE(review): the %(...)s placeholders below are presumably filled in
    # by P.run() from this function's locals; job_threads also doubles as a
    # job-scheduling option read by the pipeline framework.
    statement = """
    gat-run.py
    --annotations-label=reads
    --annotations=%(tagfile)s
    --segments=%(contextfile)s
    --workspace=%(workspace)s
    --overlapping-annotations
    --annotations-to-points=midpoint
    --counter=annotation-overlap
    --with-segment-tracks
    --num-samples=%(samples)i
    --num-threads=%(job_threads)i
    --log=%(outfile)s.log
    %(options)s
    | gzip
    > %(outfile)s
    """
    P.run()
| |
from __future__ import division
import numpy as np
import scipy.linalg
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.externals.joblib import Memory
from sklearn.externals.six import string_types, with_metaclass
from sklearn.externals.six.moves import xrange
from sklearn.utils import check_array
def get_memory(memory):
    """Return a joblib ``Memory`` object for *memory*.

    A string argument is interpreted as a cache directory and wrapped in a
    quiet ``Memory`` instance; anything else is passed through unchanged.
    """
    if not isinstance(memory, string_types):
        return memory
    return Memory(memory, verbose=0)
class _Meta(type):
def __getitem__(cls, indices):
return cls(indices)
class PairwisePicker(BaseEstimator, TransformerMixin, with_metaclass(_Meta)):
    '''
    Selects a sub-block of the matrix passed in. Handy when an earlier
    pipeline step (such as
    :class:`skl_groups.divergences.KNNDivergenceEstimator`) produces several
    stacked divergence matrices and only one of them is wanted.

    Instead of spelling out ``PairwisePicker((0, slice(2, 3)))`` you can use
    the subscript shorthand ``PairwisePicker[0, 2:3]``.

    Parameters
    ----------
    indices : tuple of integers / slice objects / etc
        The indices to subset the input with.
    '''
    _pairwise = True  # TODO: not really
    _pairwise_output = True

    def __init__(self, indices):
        self.indices = indices

    def fit(self, X=None, y=None):
        "No-op: this transformer keeps no state."
        return self

    def transform(self, X):
        "Return the configured subset of the given matrix."
        return X[self.indices]
class Symmetrize(BaseEstimator, TransformerMixin):
    '''
    Symmetrizes pairwise affinity/distance/whatever matrices by averaging
    each matrix with its own transpose.

    Parameters
    ----------
    copy : boolean, optional, default True
        If false, invalidates the passed-in matrix.

    Notes
    -----
    ``copy=False`` currently doesn't do anything.
    '''
    _pairwise = True
    _pairwise_output = True

    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        "Raises NotImplementedError."
        raise NotImplementedError("Symmetrize can only fit_transform")

    def transform(self, X):
        "Pass-through, so the transformer composes cleanly in pipelines."
        return X

    def fit_transform(self, X, y=None):
        '''
        Symmetrizes X.

        Parameters
        ----------
        X : array, shape [n, n]
            The pairwise inputs to symmetrize.

        Returns
        -------
        X : array, shape [n, n]
            The symmetrized pairwise outputs.
        '''
        # TODO: figure out a no-copy version of this...
        rows = X.shape[0]
        if X.shape != (rows, rows):
            raise TypeError("Input must be a square matrix.")
        # X + X.T allocates a fresh array, so the in-place halving below
        # never touches the caller's matrix.
        mean = X + X.T
        mean /= 2
        return mean
class RBFize(BaseEstimator, TransformerMixin):
    r'''
    Converts a matrix of pairwise distances into an RBF kernel:
    :math:`K(x, y) = \exp\left( - \gamma \lVert x - y \rVert^2 \right)`.

    For the output to be a valid kernel, the inputs should either be
    Euclidean distances, e.g. the output of
    :func:`sklearn.metrics.pairwise.euclidean_distances`,
    or be isometrically embeddable into a Euclidean space.
    If not, you should ensure that the result is positive semidefinite before
    using it as a kernel, for example via :class:`ProjectPSD`.

    Parameters
    ----------
    gamma : float, optional, default 1
        The :math:`\gamma` value to use in the kernel.
        Defaults to 1, but this is not a very good value to use;
        two reasonable heuristics are 1 / feature dimension
        or 1 / the median distance value.
        You probably want to cross-validate on a range of possible values;
        see also :attr:`scale_by_median`.

    scale_by_median : boolean, optional, default False
        If True, scale :attr:`gamma` by the squared median input distance.

    squared : boolean, optional, default False
        Whether the inputs are treated as distances or squared distances.

    copy : boolean, optional, default True
        If False, data passed to :meth:`transform` is overwritten.

    Attributes
    ----------
    `median_` : float
        If :attr:`scale_by_median`, the median distance. Otherwise, not set.

    See Also
    --------
    sklearn.metrics.pairwise.rbf_kernel : computes this from feature vectors
    '''
    _pairwise = True
    _pairwise_output = True

    def __init__(self, gamma=1, scale_by_median=False, squared=False,
                 copy=True):
        self.gamma = gamma
        self.scale_by_median = scale_by_median
        self.copy = copy
        self.squared = squared

    def fit(self, X, y=None):
        '''
        If scale_by_median, find :attr:`median_`; otherwise, do nothing.

        Parameters
        ----------
        X : array
            The raw pairwise distances.
        '''
        X = check_array(X)
        if self.scale_by_median:
            # median over the strict upper triangle only (self-distances on
            # the diagonal are excluded); overwrite_input lets np.median
            # work on the extracted values without another copy
            upper = X[np.triu_indices_from(X, k=1)]
            self.median_ = np.median(upper, overwrite_input=True)
        elif hasattr(self, 'median_'):
            # drop any stale value from a previous scale_by_median fit
            del self.median_
        return self

    def transform(self, X):
        '''
        Turns distances into RBF values.

        Parameters
        ----------
        X : array
            The raw pairwise distances.

        Returns
        -------
        X_rbf : array of same shape as X
            The distances in X passed through the RBF kernel.
        '''
        X = check_array(X)
        out = np.empty_like(X) if self.copy else X
        src = X
        if not self.squared:
            # square the distances, writing into the output buffer
            np.power(src, 2, out=out)
            src = out
        gamma = self.gamma
        if self.scale_by_median:
            # median_ is on the same scale as the inputs, so square it
            # when the inputs are plain (unsquared) distances
            gamma = gamma * (self.median_ if self.squared
                             else self.median_ ** 2)
        np.multiply(src, -gamma, out=out)
        np.exp(out, out=out)
        return out
class ProjectPSD(BaseEstimator, TransformerMixin):
    '''
    Projects a pairwise square symmetric affinity matrix to be positive
    semidefinite, by discarding any negative eigenvalues from its spectrum.

    `fit_transform()` does the actual projection. If you `transform` onto data
    different than the data originally `fit` on (not necessarily square), then
    the data will be processed in a way that attempts to treat test similarities
    consistently with training ones, using the method of [1].

    Parameters
    ----------
    min_eig : float, optional, default 0
        The minimum eigenvalue for the projected matrix. Because of
        floating-point inaccuracies, don't take this too literally.

    copy : boolean, optional, default True
        Operate on a copy of the passed-in matrix; otherwise, the original
        matrix will be invalidated.

    negatives_likely : boolean, optional, default True
        Optimize memory usage for the case where we expect there to be negative
        eigenvalues.

    memory : Instance of joblib.Memory or string (optional)
        Used to cache the eigendecomposition.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    Attributes
    ----------
    `clip_` : array of shape (n, n)
        The linear transformation corresponding to
        the clip operation on the training points.

    References
    ----------
    [1] Y. Chen, E. K. Garcia, M. R. Gupta, A. Rahimi, & L. Cazzanti (2009).
        Similarity-based classification: Concepts and algorithms.
        Journal of Machine Learning Research, 10, 747-776.
    '''
    _pairwise = True
    _pairwise_output = True

    # NOTE(review): the shared Memory default instance is evaluated once at
    # class definition time; harmless while Memory(cachedir=None) is
    # stateless, but worth confirming if this default ever changes.
    def __init__(self, min_eig=0, copy=True, negatives_likely=True,
                 memory=Memory(cachedir=None, verbose=0)):
        self.min_eig = min_eig
        self.copy = copy
        self.negatives_likely = negatives_likely
        self.memory = memory

    def fit(self, X, y=None):
        '''
        Learn the linear transformation to clipped eigenvalues.

        Note that if min_eig isn't zero and any of the original eigenvalues
        were exactly zero, this will leave those eigenvalues as zero.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        # TODO: only get negative eigs somehow?
        memory = get_memory(self.memory)
        # eigh may destroy its input when overwrite_a is set; `ignore` keeps
        # that flag out of the cache key so cached results are reused either way
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=not self.copy)
        # column vector so it broadcasts against vecs.T below
        vals = vals.reshape(-1, 1)

        if self.min_eig == 0:
            # plain clip: keep only the strictly positive eigenvalues
            inner = vals > self.min_eig
        else:
            # scale eigenvalues below min_eig up to min_eig, leaving exact
            # zeros at zero; divide warnings suppressed because the
            # vals == 0 branch discards those quotients anyway
            with np.errstate(divide='ignore'):
                inner = np.where(vals >= self.min_eig, 1,
                                 np.where(vals == 0, 0, self.min_eig / vals))

        # clip_ maps similarity vectors through the clipped spectrum
        self.clip_ = np.dot(vecs, inner * vecs.T)
        return self

    def transform(self, X):
        '''
        Transforms X according to the linear transformation corresponding to
        clipping the input eigenvalues.

        Parameters
        ----------
        X : array, shape [n_test, n]
            The test similarities to training points.

        Returns
        -------
        Xt : array, shape [n_test, n]
            The transformed test similarites to training points.
        '''
        n = self.clip_.shape[0]
        if X.ndim != 2 or X.shape[1] != n:
            msg = "X should have {} columns, the number of samples at fit time"
            raise TypeError(msg.format(self.clip_.shape[0]))
        # apply the transformation learned from the training similarities
        return np.dot(X, self.clip_)

    def fit_transform(self, X, y=None):
        '''
        Clips the negative eigenvalues of X.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.

        Returns
        -------
        Xt : array, shape [n, n]
            The transformed training similarities; smallest eigenvalue will be
            at least `self.min_eig`.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        memory = get_memory(self.memory)
        # if the caller allows clobbering X and we expect to rebuild it
        # anyway (negatives likely), let eigh work in place to save a copy
        discard_X = not self.copy and self.negatives_likely
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=discard_X)
        vals = vals[:, None]

        # NOTE(review): unlike fit(), this uses a plain (vals > min_eig)
        # mask for clip_ even when min_eig != 0 -- confirm whether the
        # scaled branch from fit() was intended here as well.
        self.clip_ = np.dot(vecs, (vals > self.min_eig) * vecs.T)

        # eigenvalues from eigh come back in ascending order, so vals[0, 0]
        # is the smallest; rebuild X only when clipping actually changes it
        if discard_X or vals[0, 0] < self.min_eig:
            del X
            np.maximum(vals, self.min_eig, out=vals)
            X = np.dot(vecs, vals * vecs.T)
            del vals, vecs

            # should be symmetric, but make sure because floats
            X = Symmetrize(copy=False).fit_transform(X)
        return X
class FlipPSD(BaseEstimator, TransformerMixin):
    '''
    Makes a pairwise symmetric square affinity matrix into a valid positive
    semidefinite kernel, by flipping the sign of any negative eigenvalues in
    its spectrum.

    `fit_transform()` does the actual projection. If you `transform` onto data
    different than the data originally `fit` on (not necessarily square), then
    the data will be processed in a way that attempts to treat test similarities
    consistently with training ones, using the method of [1].

    Parameters
    ----------
    copy : boolean, optional, default True
        Operate on a copy of the passed-in matrix; otherwise, the original
        matrix will be invalidated (for both ``fit()`` and ``transform()``).

    negatives_likely : boolean, optional, default True
        Optimize memory usage for the case where we expect there to be negative
        eigenvalues.

    memory : Instance of joblib.Memory or string (optional)
        Used to cache the eigendecomposition.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    Attributes
    ----------
    `flip_` : array of shape (n, n)
        The linear transformation corresponding
        to the flip operation on the training points.

    References
    ----------
    [1] Y. Chen, E. K. Garcia, M. R. Gupta, A. Rahimi, & L. Cazzanti (2009).
        Similarity-based classification: Concepts and algorithms.
        Journal of Machine Learning Research, 10, 747-776.
    '''
    _pairwise = True
    _pairwise_output = True

    def __init__(self, copy=True, negatives_likely=True,
                 memory=Memory(cachedir=None, verbose=0)):
        self.copy = copy
        self.negatives_likely = negatives_likely
        self.memory = memory

    def fit(self, X, y=None):
        '''
        Learn the linear transformation to flipped eigenvalues.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        # TODO: only get negative eigs somehow?
        memory = get_memory(self.memory)
        # eigh may destroy its input when overwrite_a is set; `ignore` keeps
        # that flag out of the cache key
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=not self.copy)
        vals = vals[:, None]

        # sign(vals) flips negative eigenvalues when applied through vecs
        self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
        return self

    def transform(self, X):
        '''
        Transforms X according to the linear transformation corresponding to
        flipping the input eigenvalues.

        Parameters
        ----------
        X : array, shape [n_test, n]
            The test similarities to training points.

        Returns
        -------
        Xt : array, shape [n_test, n]
            The transformed test similarites to training points.
        '''
        n = self.flip_.shape[0]
        if X.ndim != 2 or X.shape[1] != n:
            msg = "X should have {} columns, the number of samples at fit time"
            raise TypeError(msg.format(self.flip_.shape[0]))
        return np.dot(X, self.flip_)

    def fit_transform(self, X, y=None):
        '''
        Flips the negative eigenvalues of X.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.

        Returns
        -------
        Xt : array, shape [n, n]
            The transformed training similarities.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        memory = get_memory(self.memory)
        # if the caller allows clobbering X and we expect to rebuild it
        # anyway, let eigh work in place to save a copy
        discard_X = not self.copy and self.negatives_likely
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=discard_X)
        vals = vals[:, None]

        # FIX: this previously assigned to self.clip_ (copy-paste from
        # ProjectPSD), leaving flip_ unset so transform() failed after a
        # fit_transform(). fit()/transform() use flip_.
        self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)

        # eigh returns eigenvalues in ascending order, so vals[0, 0] is the
        # smallest; rebuild X only when there is a negative eigenvalue
        if discard_X or vals[0, 0] < 0:
            del X
            np.abs(vals, out=vals)
            X = np.dot(vecs, vals * vecs.T)
            del vals, vecs

            # should be symmetric, but make sure because floats
            X = Symmetrize(copy=False).fit_transform(X)
        return X
class ShiftPSD(BaseEstimator, TransformerMixin):
    '''
    Makes a pairwise square affinity matrix into a valid positive semidefinite
    kernel by adding a constant to its diagonal, i.e. shifting the whole
    eigenvalue spectrum up until the smallest eigenvalue is at least
    ``min_eig``. (The docstring previously said "flipping the sign" -- a
    copy-paste from FlipPSD; this class shifts.)

    `fit_transform()` does the actual projection. If you `transform` onto data
    different than the data originally `fit` on (not necessarily square), then
    nothing will be changed, because the shift operation only affects self-
    similarities.

    Parameters
    ----------
    min_eig : float, optional, default 0
        The minimum eigenvalue for the projected matrix. Because of
        floating-point inaccuracies, don't take this too literally.

    copy : boolean, optional, default True
        Operate on a copy of the passed-in matrix; otherwise, the original
        matrix will be invalidated (for both ``fit()`` and ``transform()``).

    memory : Instance of joblib.Memory or string (optional)
        Used to cache the eigendecomposition.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    Attributes
    ----------
    `train_` : array of shape [n, n]
        The training similarities.
        Stored so that `transform` can check if
        it's transforming the test data and act appropriately.

    `shift_` : float
        The amount to shift all the eigenvalues up by.
    '''
    _pairwise = True
    _pairwise_output = True

    def __init__(self, min_eig=0, copy=True,
                 memory=Memory(cachedir=None, verbose=0)):
        self.min_eig = min_eig
        self.copy = copy
        self.memory = memory

    def fit(self, X, y=None):
        '''
        Learn the transformation to shifted eigenvalues. Only depends
        on the input dimension.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        self.train_ = X
        memory = get_memory(self.memory)
        # eigvals=(0, 0) requests only the smallest eigenvalue
        lo, = memory.cache(scipy.linalg.eigvalsh)(X, eigvals=(0, 0))
        # never shift down: only shift if the spectrum dips below min_eig
        self.shift_ = max(self.min_eig - lo, 0)
        return self

    def transform(self, X):
        '''
        Transforms X according to the linear transformation corresponding to
        shifting the input eigenvalues to all be at least ``self.min_eig``.

        Parameters
        ----------
        X : array, shape [n_test, n]
            The test similarities to training points.

        Returns
        -------
        Xt : array, shape [n_test, n]
            The transformed test similarites to training points. Only different
            from X if X is the training data.
        '''
        n = self.train_.shape[0]
        if X.ndim != 2 or X.shape[1] != n:
            msg = "X should have {} columns, the number of samples at fit time"
            raise TypeError(msg.format(n))

        if self.copy:
            X = X.copy()

        # FIX: parenthesized the condition. The original
        # ``shift_ != 0 and X is train_ or (... allclose ...)`` parsed as
        # ``(A and B) or C``, so with shift_ == 0 it still paid for the
        # allclose() comparison and performed a pointless += 0. Shift only
        # when shift_ is nonzero AND X is (or numerically equals) the
        # training matrix -- after copy(), the identity test fails and the
        # allclose fallback covers the training-data case.
        if self.shift_ != 0 and (
                X is self.train_ or
                (X.shape == self.train_.shape and
                 np.allclose(X, self.train_))):
            X[xrange(n), xrange(n)] += self.shift_
        return X
class SquarePSD(BaseEstimator, TransformerMixin):
    '''
    Turns a pairwise symmetric square affinity matrix into a valid positive
    semidefinite kernel by squaring its eigenvalues (via S -> S S^T).
    Equivalent to using the similarities to training points as features in a
    linear classifier.

    Parameters
    ----------
    copy : boolean, optional, default True
        Operate on a copy of the passed-in matrix; otherwise, the original
        matrix will be invalidated.
    '''
    _pairwise = True
    _pairwise_output = True

    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        "Not supported; use ``fit_transform``."
        raise NotImplementedError("SquarePSD can only fit_transform().")

    def transform(self, X):
        "Not supported; use ``fit_transform``."
        raise NotImplementedError("SquarePSD can only fit_transform().")

    def fit_transform(self, X, y=None):
        "Square the eigenvalues of the square matrix X by forming S S^T."
        rows = X.shape[0]
        if X.shape != (rows, rows):
            raise TypeError("Input must be a square matrix.")
        return np.dot(X, X.T)
| |
# Legacy Python 2 command-line wrapper around a Hobbitcoin daemon's JSON-RPC
# interface (derived from the stock bitcoin-rpc helper script).
from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# connect to the local daemon's JSON-RPC port (9755), with credentials
# embedded in the URL when a password was configured above
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9755")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9755")

# the RPC command to run is the first command-line argument
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Hobbitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Hobbitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1RBDVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Swagger model for a Ceph RBD (Rados Block Device) volume source.
    All attributes default to ``None`` and are exposed as plain get/set
    properties backed by a ``_``-prefixed storage slot.
    """

    def __init__(self):
        """
        Initialise the model's metadata tables and attribute storage.

        ``swagger_types`` maps each attribute name to its declared swagger
        type; ``attribute_map`` maps each attribute name to the JSON key
        used in the swagger definition.
        """
        self.swagger_types = {
            'monitors': 'list[str]',
            'image': 'str',
            'fs_type': 'str',
            'pool': 'str',
            'user': 'str',
            'keyring': 'str',
            'secret_ref': 'V1LocalObjectReference',
            'read_only': 'bool'
        }
        self.attribute_map = {
            'monitors': 'monitors',
            'image': 'image',
            'fs_type': 'fsType',
            'pool': 'pool',
            'user': 'user',
            'keyring': 'keyring',
            'secret_ref': 'secretRef',
            'read_only': 'readOnly'
        }
        # Backing storage for the generated properties below.
        self._monitors = None
        self._image = None
        self._fs_type = None
        self._pool = None
        self._user = None
        self._keyring = None
        self._secret_ref = None
        self._read_only = None

    def _plain_property(storage_name, description):
        """Class-body helper: build a trivial get/set property over
        *storage_name* carrying *description* as its docstring."""
        def _get(self):
            return getattr(self, storage_name)

        def _set(self, value):
            setattr(self, storage_name, value)

        return property(_get, _set, doc=description)

    monitors = _plain_property(
        '_monitors',
        "a collection of Ceph monitors; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(list[str])")
    image = _plain_property(
        '_image',
        "rados image name; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(str)")
    fs_type = _plain_property(
        '_fs_type',
        "file system type to mount, such as ext4, xfs, ntfs; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(str)")
    pool = _plain_property(
        '_pool',
        "rados pool name; default is rbd; optional; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(str)")
    user = _plain_property(
        '_user',
        "rados user name; default is admin; optional; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(str)")
    keyring = _plain_property(
        '_keyring',
        "keyring is the path to key ring for rados user; default is "
        "/etc/ceph/keyring; optional; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(str)")
    secret_ref = _plain_property(
        '_secret_ref',
        "name of a secret to authenticate the RBD user; if provided "
        "overrides keyring; optional; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(V1LocalObjectReference)")
    read_only = _plain_property(
        '_read_only',
        "rbd volume to be mounted with read-only permissions; see "
        "http://releases.k8s.io/v1.0.4/examples/rbd/README.md#how-to-use-it "
        "(bool)")

    # The helper is only needed while the class body executes.
    del _plain_property

    @staticmethod
    def _dictify(value):
        """Convert *value* for inclusion in :meth:`to_dict`: sub-models are
        expanded via their own ``to_dict``, lists element-wise, everything
        else passes through unchanged."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        return value

    def to_dict(self):
        """
        Return model properties dict
        """
        return dict((attr, self._dictify(getattr(self, attr)))
                    for attr in self.swagger_types)

    def to_str(self):
        """
        Return model properties str
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
| |
"""Test for serializer's patch_relationship."""
from sqlalchemy_jsonapi import errors
from sqlalchemy_jsonapi.unittests.utils import testcases
from sqlalchemy_jsonapi.unittests import models
from sqlalchemy_jsonapi import __version__
class GPatchRelationship(testcases.SqlalchemyJsonapiTestCase):
    """Tests for serializer.patch_relationship.

    Each test builds a small User/Post/Comment fixture in ``self.session``,
    issues a JSON-API relationship PATCH via
    ``models.serializer.patch_relationship(session, payload, api_type, id,
    relationship)``, and then checks either the serialized response
    (``data``/``status_code``) or the resulting ORM state.  The payload shape
    rules exercised below: a to-one relationship takes a hash or ``None``; a
    to-many relationship takes an array (possibly empty).
    """

    def test_patch_relationship_on_to_one_set_to_resource_response(self):
        """Patch single relationship and set resource returns 200."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        new_user = models.User(
            first='Bob', last='Joe',
            password='password', username='BobJoe2')
        self.session.add(new_user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        self.session.commit()
        # Re-point the post's to-one 'author' relationship at new_user.
        payload = {
            'data': {
                'type': 'users',
                'id': new_user.id
            }
        }
        response = models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'author')
        expected = {
            'data': {
                'id': new_user.id,
                'type': 'users'
            },
            'jsonapi': {
                'version': '1.0'
            },
            'meta': {
                'sqlalchemy_jsonapi_version': __version__
            }
        }
        actual = response.data
        self.assertEqual(expected, actual)
        self.assertEqual(200, response.status_code)

    def test_patch_relationship_on_to_one_set_to_resource_successful(self):
        """Patch single relationship successfully updates resource."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        new_user = models.User(
            first='Bob', last='Joe',
            password='password', username='BobJoe2')
        self.session.add(new_user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        self.session.commit()
        payload = {
            'data': {
                'type': 'users',
                'id': new_user.id
            }
        }
        models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'author')
        # The ORM relationship itself must now reference new_user.
        self.assertEqual(blog_post.author.id, new_user.id)
        self.assertEqual(blog_post.author, new_user)

    def test_patch_relationship_on_to_one_set_resource_to_null_response(self):
        """Patch relationship of a single resource and set to null returns 200."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        self.session.commit()
        # None is the JSON-API way to clear a to-one relationship.
        payload = {
            'data': None
        }
        response = models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'author')
        expected = {
            'data': None,
            'jsonapi': {
                'version': '1.0'
            },
            'meta': {
                'sqlalchemy_jsonapi_version': __version__
            }
        }
        actual = response.data
        self.assertEqual(expected, actual)
        self.assertEqual(200, response.status_code)

    def test_patch_relationship_on_to_one_set_resource_to_null_successful(self):
        """Patch relationship of single resource and set to null is successful."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        self.session.commit()
        payload = {
            'data': None
        }
        models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'author')
        self.assertEqual(blog_post.author, None)

    def test_patch_relationship_on_to_many_set_resources_response(self):
        """Patch relationships on many and set resources returns 200."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        new_comment = models.Comment(
            content='This is a new comment 2', author_id=user.id,
            author=user)
        self.session.add(new_comment)
        self.session.commit()
        # Replace the post's comments collection with just new_comment.
        payload = {
            'data': [{
                'type': 'comments',
                'id': new_comment.id
            }]
        }
        response = models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'comments')
        expected = {
            'data': [{
                'type': 'comments',
                # NOTE(review): hard-coded 2 assumes the autoincrement
                # sequence gave new_comment this id (comment was inserted
                # first) -- confirm against the test DB backend.
                'id': 2
            }],
            'jsonapi': {
                'version': '1.0'
            },
            'meta': {
                'sqlalchemy_jsonapi_version': __version__
            }
        }
        actual = response.data
        self.assertEqual(expected, actual)
        self.assertEqual(200, response.status_code)

    def test_patch_relationship_on_to_many_set_resources_successful(self):
        """Patch relationships on many and set resources is successful."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        new_comment = models.Comment(
            content='This is a new comment 2', author_id=user.id,
            author=user)
        self.session.add(new_comment)
        self.session.commit()
        payload = {
            'data': [{
                'type': 'comments',
                'id': new_comment.id
            }]
        }
        models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'comments')
        # new_comment is now attached to the post via the reverse side.
        self.assertEqual(new_comment.post.id, blog_post.id)
        self.assertEqual(new_comment.post, blog_post)

    def test_patch_relationship_on_to_many_set_to_empty_response(self):
        """Patch relationships on many and set to empty returns 200."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        # An empty array clears a to-many relationship.
        payload = {
            'data': []
        }
        response = models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'comments')
        expected = {
            'data': [],
            'jsonapi': {
                'version': '1.0'
            },
            'meta': {
                'sqlalchemy_jsonapi_version': __version__
            }
        }
        actual = response.data
        self.assertEqual(expected, actual)
        self.assertEqual(200, response.status_code)

    def test_patch_relationship_on_to_many_set_to_empty_successful(self):
        """Patch relationships on many and set to empty is successful."""
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        payload = {
            'data': []
        }
        models.serializer.patch_relationship(
            self.session, payload, 'posts', blog_post.id, 'comments')
        # Clearing the collection detaches the existing comment.
        self.assertEqual(comment.post, None)

    def test_patch_relationship_on_to_one_with_empty_list(self):
        """Patch relationship on to one with empty list returns 409.

        A ValidationError is raised.
        """
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        new_user = models.User(
            first='Bob', last='Joe',
            password='password', username='BobJoe2')
        self.session.add(new_user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        self.session.commit()
        # Arrays are only valid for to-many relationships; 'author' is to-one.
        payload = {
            'data': []
        }
        with self.assertRaises(errors.ValidationError) as error:
            models.serializer.patch_relationship(
                self.session, payload, 'posts', blog_post.id, 'author')
        expected_detail = 'Provided data must be a hash.'
        self.assertEqual(error.exception.detail, expected_detail)
        self.assertEqual(error.exception.status_code, 409)

    def test_patch_relationship_on_to_many_with_null(self):
        """Patch relationship on to many with null returns 409.

        A ValidationError is raised.
        """
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        # None is only valid for to-one relationships; 'comments' is to-many.
        payload = {
            'data': None
        }
        with self.assertRaises(errors.ValidationError) as error:
            models.serializer.patch_relationship(
                self.session, payload, 'posts', blog_post.id, 'comments')
        expected_detail = 'Provided data must be an array.'
        self.assertEqual(error.exception.detail, expected_detail)
        self.assertEqual(error.exception.status_code, 409)

    def test_patch_relationship_with_unknown_relationship(self):
        """Patch relationship with unknown relationship returns 404.

        A RelationshipNotFoundError is raised.
        """
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        payload = {
            'data': {}
        }
        with self.assertRaises(errors.RelationshipNotFoundError) as error:
            models.serializer.patch_relationship(
                self.session, payload, 'posts',
                blog_post.id, 'unknown-relationship')
        self.assertEqual(error.exception.status_code, 404)

    def test_patch_relationship_on_to_one_with_incompatible_model(self):
        """Patch relationship on to one with incompatible model returns 409.

        A ValidationError is raised.
        """
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        # 'author' expects a users resource, not a comments resource.
        payload = {
            'data': {
                'type': 'comments',
                'id': comment.id
            }
        }
        with self.assertRaises(errors.ValidationError) as error:
            models.serializer.patch_relationship(
                self.session, payload, 'posts', blog_post.id, 'author')
        expected_detail = 'Incompatible Type'
        self.assertEqual(error.exception.detail, expected_detail)
        self.assertEqual(error.exception.status_code, 409)

    def test_patch_relationship_on_to_many_with_incompatible_model(self):
        """Patch relationship on to many with incompatible model returns 409.

        A ValidationError is raised.
        """
        user = models.User(
            first='Sally', last='Smith',
            password='password', username='SallySmith1')
        self.session.add(user)
        blog_post = models.Post(
            title='This Is A Title', content='This is the content',
            author_id=user.id, author=user)
        self.session.add(blog_post)
        comment = models.Comment(
            content='This is comment 1', author_id=user.id,
            post_id=blog_post.id, author=user, post=blog_post)
        self.session.add(comment)
        self.session.commit()
        # 'comments' expects comments resources, not users resources.
        payload = {
            'data': [{
                'type': 'users',
                'id': user.id
            }]
        }
        with self.assertRaises(errors.ValidationError) as error:
            models.serializer.patch_relationship(
                self.session, payload, 'posts', blog_post.id, 'comments')
        expected_detail = 'Incompatible Type'
        self.assertEqual(error.exception.detail, expected_detail)
        self.assertEqual(error.exception.status_code, 409)
| |
from core.himesis import Himesis, HimesisPostConditionPattern
import cPickle as pickle
class HPassRuleRHS(HimesisPostConditionPattern):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HPassRuleRHS.

        The graph is pre-compiled: the node/edge layout is fixed below and
        every attribute value is a payload pickled by the generating tool.
        Inside the pickled action/attribute code, '@n' is a placeholder that
        is replaced with a real newline after unpickling.
        """
        # Create the himesis graph
        EDGE_LIST = [(3, 0), (0, 4), (2, 1), (1, 4)]
        super(HPassRuleRHS, self).__init__(name='HPassRuleRHS', num_nodes=5, edges=EDGE_LIST)
        self.is_compiled = True # now this instance has been compiled
        # Set the graph attributes
        # MT_action__ holds the post-action source executed by action().
        self["MT_action__"] = pickle.loads("""S"#===============================================================================@n# This code is executed after the rule has been applied.@n# You can access a node labelled n matched by this rule by: PostNode('n').@n# To access attribute x of node n, use: PostNode('n')['x'].@n#===============================================================================@n@npass@n"
p1
.""").replace("@n", "\n")
        self["name"] = pickle.loads("""S'HPassRuleRHS'
p1
.""")
        self["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L125574966889889872500382374054681047273L
sb.""")
        # Set the node attributes
        # vs[0]: MT_post__next (label 3), vs[1]: MT_post__toke (label 6),
        # vs[2]: MT_post__Resource (label 4), vs[3] and vs[4]:
        # MT_post__Process (labels 1 and 2).  Each node also carries a
        # pickled UUID in GUID__.
        self.vs[0]["mm__"] = pickle.loads("""S'MT_post__next'
p1
.""")
        self.vs[0]["MT_label__"] = pickle.loads("""S'3'
.""")
        self.vs[0]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L196686460342300917440785140690447607417L
sb.""")
        self.vs[1]["mm__"] = pickle.loads("""S'MT_post__toke'
p1
.""")
        self.vs[1]["MT_label__"] = pickle.loads("""S'6'
.""")
        self.vs[1]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L13717354398703184989716447421721648986L
sb.""")
        self.vs[2]["mm__"] = pickle.loads("""S'MT_post__Resource'
p1
.""")
        self.vs[2]["MT_label__"] = pickle.loads("""S'4'
.""")
        # MT_post__name holds per-node attribute-rewrite code; the default
        # body simply returns the attribute value unchanged.
        self.vs[2]["MT_post__name"] = pickle.loads("""S"@n#===============================================================================@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node labelled n by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# Note that the attribute values are those before the match is rewritten.@n# The order in which this code is executed depends on the label value of the encapsulating node.@n# The given action must return the new value of the attribute.@n#===============================================================================@n@nreturn attr_value@n"
p1
.""").replace("@n", "\n")
        self.vs[2]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L33827841268236302024623171411680384218L
sb.""")
        self.vs[3]["mm__"] = pickle.loads("""S'MT_post__Process'
p1
.""")
        self.vs[3]["MT_label__"] = pickle.loads("""S'1'
.""")
        self.vs[3]["MT_post__name"] = pickle.loads("""S"@n#===============================================================================@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node labelled n by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# Note that the attribute values are those before the match is rewritten.@n# The order in which this code is executed depends on the label value of the encapsulating node.@n# The given action must return the new value of the attribute.@n#===============================================================================@n@nreturn attr_value@n"
p1
.""").replace("@n", "\n")
        self.vs[3]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L238350784918438927757042458975403903149L
sb.""")
        self.vs[4]["mm__"] = pickle.loads("""S'MT_post__Process'
p1
.""")
        self.vs[4]["MT_label__"] = pickle.loads("""S'2'
.""")
        self.vs[4]["MT_post__name"] = pickle.loads("""S"@n#===============================================================================@n# You can access the value of the current node's attribute value by: attr_value.@n# You can access a matched node labelled n by: PreNode('n').@n# To access attribute x of node n, use: PreNode('n')['x'].@n# Note that the attribute values are those before the match is rewritten.@n# The order in which this code is executed depends on the label value of the encapsulating node.@n# The given action must return the new value of the attribute.@n#===============================================================================@n@nreturn attr_value@n"
p1
.""").replace("@n", "\n")
        self.vs[4]["GUID__"] = pickle.loads("""ccopy_reg
_reconstructor
p1
(cuuid
UUID
p2
c__builtin__
object
p3
NtRp4
(dp5
S'int'
p6
L44996985395786454121306452830736141613L
sb.""")
        # Local import — presumably deferred to avoid a circular import
        # between the LHS and RHS rule modules; TODO confirm.
        from HPassRuleLHS import HPassRuleLHS
        self.pre = HPassRuleLHS()

    def action(self, PostNode, graph):
        """
        Executable constraint code.

        This is the post-action of the rule; the generated body is a no-op.
        @param PostNode: Function taking an integer as parameter
        and returns the node corresponding to that label.
        """
        #===============================================================================
        # This code is executed after the rule has been applied.
        # You can access a node labelled n matched by this rule by: PostNode('n').
        # To access attribute x of node n, use: PostNode('n')['x'].
        #===============================================================================
        pass

    def execute(self, packet, match):
        """
        Transforms the current match of the packet according to the rule
        HPassRuleRHS: creates a 'toke' node linking Resource (label 4) to
        Process (label 2), deletes the matched 'toke' (label 5), then runs
        the post-action.  Pivots are also assigned, if any.
        @param packet: The input packet.
        @param match: The match to rewrite.
        """
        graph = packet.graph
        # Build a dictionary {label: node index} mapping each label of the pattern to a node in the graph to rewrite.
        # Because of the uniqueness property of labels in a rule, we can store all LHS labels
        # and subsequently add the labels corresponding to the nodes to be created.
        labels = match.copy()
        #===============================================================================
        # Update attribute values
        #===============================================================================
        #===============================================================================
        # Create new nodes
        #===============================================================================
        # MT_post__toke6
        new_node = graph.add_node()
        labels[6] = new_node
        graph.vs[new_node][Himesis.Constants.META_MODEL] = 'toke'
        #===============================================================================
        # Create new edges
        #===============================================================================
        # MT_post__Resource4 -> MT_post__toke6
        graph.add_edges((labels[4], labels[6]))
        # MT_post__toke6 -> MT_post__Process2
        graph.add_edges((labels[6], labels[2]))
        #===============================================================================
        # Delete nodes (this will automatically delete the adjacent edges)
        #===============================================================================
        # MT_pre__toke5
        graph.delete_nodes([labels[5]])
        #===============================================================================
        # Set the output pivots
        #===============================================================================
        #===============================================================================
        # Finally, perform the post-action
        #===============================================================================
        try:
            self.action(lambda i: graph.vs[labels[i]], graph)
        # Python 2 'except E, e' syntax — any post-action failure is wrapped
        # with context and re-raised.
        except Exception, e:
            raise Exception('An error has occurred while applying the post-action', e)
| |
from copy import deepcopy
from csv import writer
from datetime import datetime
from io import BytesIO, StringIO
from mimetypes import guess_type
from os.path import join
from django.contrib import admin
from django.contrib.messages import info
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import re_path
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from mezzanine.conf import settings
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.core.forms import DynamicInlineAdminForm
from mezzanine.forms.forms import EntriesForm
from mezzanine.forms.models import Field, FieldEntry, Form, FormEntry
from mezzanine.pages.admin import PageAdmin
from mezzanine.utils.static import static_lazy as static
from mezzanine.utils.urls import admin_url, slugify
# Storage backend used for files uploaded through the forms app
# (served back via FormAdmin.file_view).
fs = FileSystemStorage(location=settings.FORMS_UPLOAD_ROOT)
# Copy the fieldsets for PageAdmin and add the extra fields for FormAdmin.
form_fieldsets = deepcopy(PageAdmin.fieldsets)
# Empty-slice assignment inserts the three form-specific fields at index 3
# of the first fieldset's field list without removing any existing entries.
form_fieldsets[0][1]["fields"][3:0] = ["content", "button_text", "response"]
form_fieldsets = list(form_fieldsets)
# Add a dedicated "Email" fieldset right after the main one.
form_fieldsets.insert(
    1,
    (
        _("Email"),
        {
            "fields": (
                "send_email",
                "email_from",
                "email_copies",
                "email_subject",
                "email_message",
            )
        },
    ),
)
# Hide HTML5-only field options from the inline admin when HTML5 form
# features are disabled in settings.
inline_field_excludes = []
if not settings.FORMS_USE_HTML5:
    inline_field_excludes += ["placeholder_text"]
class FieldAdminInlineForm(DynamicInlineAdminForm):
    """Inline admin form for ``Field`` that renders the label and help_text
    fields as single-line text inputs instead of textareas."""

    def __init__(self, *args, **kwargs):
        """
        Swap the widgets for label/help_text form fields to
        ``AdminTextInputWidget`` while preserving any CSS classes already
        attached to the original widget.
        """
        super().__init__(*args, **kwargs)
        swap_prefixes = ("label", "help_text")
        for field_name, form_field in self.fields.items():
            # We just want to swap some textareas for inputs here, but
            # there are some extra considerations for modeltranslation:
            # 1) Form field names are suffixed with language,
            #    eg help_text_en, so we match on the name as a prefix.
            # 2) At this point, modeltranslation has also monkey-patched
            #    on necessary CSS classes to the widget, so retain those.
            if not field_name.startswith(swap_prefixes):
                continue
            preserved_css = form_field.widget.attrs.get("class", None)
            form_field.widget = admin.widgets.AdminTextInputWidget()
            if preserved_css:
                form_field.widget.attrs["class"] = preserved_css

    class Meta:
        model = Field
        exclude = inline_field_excludes
class FieldAdmin(TabularDynamicInlineAdmin):
    """
    Inline admin for the form's ``Field`` rows.  The dynamic "Add another"
    link and drag/drop ordering come from ``TabularDynamicInlineAdmin``;
    the custom form swaps label/help_text textareas for text inputs.
    """

    form = FieldAdminInlineForm
    model = Field
class FormAdmin(PageAdmin):
    """
    Admin class for the Form model. Includes the urls & views for exporting
    form entries as CSV and downloading files uploaded via the forms app.
    """

    class Media:
        # Extra styling for the form admin change page.
        css = {"all": (static("mezzanine/css/admin/form.css"),)}

    inlines = (FieldAdmin,)
    list_display = (
        "title",
        "status",
        "email_copies",
    )
    list_display_links = ("title",)
    list_editable = ("status", "email_copies")
    list_filter = ("status",)
    search_fields = ("title", "content", "response", "email_from", "email_copies")
    # Fieldsets built at module level: PageAdmin's plus form/email fields.
    fieldsets = form_fieldsets

    def get_urls(self):
        """
        Add the entries view to urls.

        Registers two admin-only routes: ``form_entries`` (entry listing /
        CSV export for a form) and ``form_file`` (download of a single
        uploaded file).  Custom routes are placed before the default ones
        so they take precedence.
        """
        urls = super().get_urls()
        extra_urls = [
            re_path(
                r"^(?P<form_id>\d+)/entries/$",
                self.admin_site.admin_view(self.entries_view),
                name="form_entries",
            ),
            re_path(
                r"^file/(?P<field_entry_id>\d+)/$",
                self.admin_site.admin_view(self.file_view),
                name="form_file",
            ),
        ]
        return extra_urls + urls

    def entries_view(self, request, form_id):
        """
        Displays the form entries in a HTML table with option to
        export as CSV file.

        POST actions handled: "back" redirects to the form change page,
        "export" streams a CSV of the (possibly filtered) entries, and
        "delete" removes the selected entries when the user has the
        delete permission.
        """
        if request.POST.get("back"):
            change_url = admin_url(Form, "change", form_id)
            return HttpResponseRedirect(change_url)
        form = get_object_or_404(Form, id=form_id)
        entries_form = EntriesForm(form, request, request.POST or None)
        delete_entries_perm = "%s.delete_formentry" % FormEntry._meta.app_label
        can_delete_entries = request.user.has_perm(delete_entries_perm)
        submitted = entries_form.is_valid()
        if submitted:
            if request.POST.get("export"):
                response = HttpResponse(content_type="text/csv")
                timestamp = slugify(datetime.now().ctime())
                fname = f"{form.slug}-{timestamp}.csv"
                header = "attachment; filename=%s" % fname
                response["Content-Disposition"] = header
                queue = StringIO()
                delimiter = settings.FORMS_CSV_DELIMITER
                try:
                    csv = writer(queue, delimiter=delimiter)
                    writerow = csv.writerow
                except TypeError:
                    # Fallback path for csv writers that reject a text
                    # delimiter — presumably legacy Python 2; rows and the
                    # delimiter are encoded to utf-8 bytes instead.
                    queue = BytesIO()
                    delimiter = bytes(delimiter, encoding="utf-8")
                    csv = writer(queue, delimiter=delimiter)
                    writerow = lambda row: csv.writerow(
                        [c.encode("utf-8") if hasattr(c, "encode") else c for c in row]
                    )
                writerow(entries_form.columns())
                for row in entries_form.rows(csv=True):
                    writerow(row)
                data = queue.getvalue()
                response.write(data)
                return response
            elif request.POST.get("delete") and can_delete_entries:
                selected = request.POST.getlist("selected")
                if selected:
                    entries = FormEntry.objects.filter(id__in=selected)
                    count = entries.count()
                    if count > 0:
                        entries.delete()
                        message = ngettext(
                            "1 entry deleted", "%(count)s entries deleted", count
                        )
                        info(request, message % {"count": count})
        template = "admin/forms/entries.html"
        context = {
            "title": _("View Entries"),
            "entries_form": entries_form,
            "opts": self.model._meta,
            "original": form,
            "can_delete_entries": can_delete_entries,
            "submitted": submitted,
        }
        return render(request, template, context)

    def file_view(self, request, field_entry_id):
        """
        Output the file for the requested field entry.

        The entry's value is treated as a path relative to the forms
        upload storage; the file is returned as an attachment with a
        guessed content type.
        """
        field_entry = get_object_or_404(FieldEntry, id=field_entry_id)
        path = join(fs.location, field_entry.value)
        response = HttpResponse(content_type=guess_type(path)[0])
        # NOTE(review): "r+b" opens read/write although the file is only
        # read ("rb" would suffice), and f.name is the full filesystem
        # path, so the attachment filename exposes it — confirm intended.
        with open(path, "r+b") as f:
            response["Content-Disposition"] = "attachment; filename=%s" % f.name
            response.write(f.read())
        return response
# Expose the forms app in the Django admin.
admin.site.register(Form, FormAdmin)
| |
import json
from django.db import models, transaction
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.fields import GenericRelation
from mezzanine.pages.page_processors import processor_for
from dominate.tags import legend, table, tbody, tr, td, th, h4, div, strong, form, button, _input
from rdflib import RDF, BNode, Literal
from rdflib.namespace import DCTERMS
from hs_core.hs_rdf import HSTERMS, rdf_terms
from hs_core.models import BaseResource, ResourceManager
from hs_core.models import resource_processor, CoreMetaData, AbstractMetaDataElement
from hs_core.hydroshare.utils import get_resource_file_name_and_extension, \
get_resource_files_by_extension
# Define original spatial coverage metadata info
@rdf_terms(HSTERMS.spatialReference)
class OriginalCoverage(AbstractMetaDataElement):
    """Spatial coverage of the data in its original (native) coordinate system.

    The bounding box is stored as a JSON string in ``_value``; the optional
    projection string and datum describe the native spatial reference.
    This element is not repeatable per resource (see ``Meta``).
    """

    PRO_STR_TYPES = (
        ('', '---------'),
        ('WKT String', 'WKT String'),
        ('Proj4 String', 'Proj4 String')
    )
    term = 'OriginalCoverage'
    """
    _value field stores a json string. The content of the json as box coverage info
    _value = "{'northlimit':northenmost coordinate value,
               'eastlimit':easternmost coordinate value,
               'southlimit':southernmost coordinate value,
               'westlimit':westernmost coordinate value,
               'units:units applying to 4 limits (north, east, south & east),
               'projection': name of the projection (optional)}"
    """
    # Serialized JSON dict holding the bounding box info described above.
    _value = models.CharField(max_length=1024, null=True)
    projection_string_type = models.CharField(max_length=20, choices=PRO_STR_TYPES, null=True)
    projection_string_text = models.TextField(null=True, blank=True)
    datum = models.CharField(max_length=300, blank=True)

    class Meta:
        # OriginalCoverage element is not repeatable
        unique_together = ("content_type", "object_id")

    @property
    def value(self):
        """Return the bounding box metadata as a dict (deserialized _value)."""
        return json.loads(self._value)

    @classmethod
    def ingest_rdf(cls, graph, subject, content_object):
        """Create an OriginalCoverage element from RDF graph metadata.

        Parses the "key=value; key=value; ..." string stored as the RDF value
        of the spatialReference node (inverse of :meth:`rdf_triples`).
        """
        for _, _, cov in graph.triples((subject, cls.get_class_term(), None)):
            value = graph.value(subject=cov, predicate=RDF.value)
            value_dict = {}
            datum = ''
            projection_string_text = ''
            # BUGFIX: initialize here so an RDF value string without a
            # 'projection_string_type' entry does not raise UnboundLocalError
            # below (and a value from a previous loop iteration is not reused).
            projection_string_type = ''
            for key_value in value.split(";"):
                key_value = key_value.strip()
                k, v = key_value.split("=")
                if k == 'datum':
                    datum = v
                elif k == 'projection_string':
                    projection_string_text = v
                elif k == 'projection_name':
                    # serialized as 'projection_name'; stored as 'projection'
                    value_dict['projection'] = v
                elif k == 'projection_string_type':
                    projection_string_type = v
                else:
                    value_dict[k] = v
            OriginalCoverage.create(projection_string_type=projection_string_type,
                                    projection_string_text=projection_string_text,
                                    _value=json.dumps(value_dict),
                                    datum=datum, content_object=content_object)

    def rdf_triples(self, subject, graph):
        """Serialize this element into RDF triples on the given graph."""
        coverage = BNode()
        graph.add((subject, self.get_class_term(), coverage))
        graph.add((coverage, RDF.type, DCTERMS.box))
        value_dict = {}
        for k, v in self.value.items():
            if k == 'projection':
                # stored as 'projection'; serialized as 'projection_name'
                value_dict['projection_name'] = v
            else:
                value_dict[k] = v
        value_dict['datum'] = self.datum
        value_dict['projection_string'] = self.projection_string_text
        value_dict['projection_string_type'] = self.projection_string_type
        value_string = "; ".join(["=".join([key, str(val)]) for key, val in value_dict.items()])
        graph.add((coverage, RDF.value, Literal(value_string)))

    @classmethod
    def create(cls, **kwargs):
        """
        The '_value' subelement needs special processing. (Check if the 'value' includes the
        required information and convert 'value' dict as Json string to be the '_value'
        subelement value.) The base class create() can't do it.

        :param kwargs: the 'value' in kwargs should be a dictionary
                       the '_value' in kwargs is a serialized json string
        :raises ValidationError: if the coverage value or a required limit is missing
        """
        value_arg_dict = None
        if 'value' in kwargs:
            value_arg_dict = kwargs['value']
        elif '_value' in kwargs:
            value_arg_dict = json.loads(kwargs['_value'])
        if value_arg_dict:
            # check that all the required sub-elements exist and create new original coverage meta
            for value_item in ['units', 'northlimit', 'eastlimit', 'southlimit', 'westlimit']:
                if value_item not in value_arg_dict:
                    raise ValidationError("For original coverage meta, one or more bounding "
                                          "box limits or 'units' is missing.")
            # keep only the recognized keys; anything else is dropped silently
            value_dict = {k: v for k, v in list(value_arg_dict.items())
                          if k in ('units', 'northlimit', 'eastlimit', 'southlimit',
                                   'westlimit', 'projection')}
            cls._validate_bounding_box(value_dict)
            value_json = json.dumps(value_dict)
            if 'value' in kwargs:
                del kwargs['value']
            kwargs['_value'] = value_json
            return super(OriginalCoverage, cls).create(**kwargs)
        else:
            raise ValidationError('Coverage value is missing.')

    @classmethod
    def update(cls, element_id, **kwargs):
        """
        The '_value' subelement needs special processing. (Convert 'value' dict as Json string
        to be the '_value' subelement value) and the base class update() can't do it.

        :param kwargs: the 'value' in kwargs should be a dictionary
        """
        ori_cov = OriginalCoverage.objects.get(id=element_id)
        if 'value' in kwargs:
            # merge incoming limit/projection values over the stored ones
            value_dict = ori_cov.value
            for item_name in ('units', 'northlimit', 'eastlimit', 'southlimit',
                              'westlimit', 'projection'):
                if item_name in kwargs['value']:
                    value_dict[item_name] = kwargs['value'][item_name]
            cls._validate_bounding_box(value_dict)
            value_json = json.dumps(value_dict)
            del kwargs['value']
            kwargs['_value'] = value_json
        super(OriginalCoverage, cls).update(element_id, **kwargs)

    @classmethod
    def _validate_bounding_box(cls, box_dict):
        """Coerce the four limits to float, raising ValidationError on bad data."""
        for limit in ('northlimit', 'eastlimit', 'southlimit', 'westlimit'):
            try:
                box_dict[limit] = float(box_dict[limit])
            # BUGFIX: float(None) (or any non-string/number) raises TypeError,
            # which previously escaped as an unhandled exception.
            except (TypeError, ValueError):
                raise ValidationError("Bounding box data is not numeric")

    @classmethod
    def get_html_form(cls, resource, element=None, allow_edit=True, file_type=False):
        """Generates html form code for this metadata element so that this element can be edited"""
        from .forms import OriginalCoverageForm

        ori_coverage_data_dict = dict()
        if element is not None:
            ori_coverage_data_dict['projection'] = element.value.get('projection', None)
            ori_coverage_data_dict['datum'] = element.datum
            ori_coverage_data_dict['projection_string_type'] = element.projection_string_type
            ori_coverage_data_dict['projection_string_text'] = element.projection_string_text
            ori_coverage_data_dict['units'] = element.value['units']
            ori_coverage_data_dict['northlimit'] = element.value['northlimit']
            ori_coverage_data_dict['eastlimit'] = element.value['eastlimit']
            ori_coverage_data_dict['southlimit'] = element.value['southlimit']
            ori_coverage_data_dict['westlimit'] = element.value['westlimit']

        originalcov_form = OriginalCoverageForm(
            initial=ori_coverage_data_dict, allow_edit=allow_edit,
            res_short_id=resource.short_id if resource else None,
            element_id=element.id if element else None, file_type=file_type)
        return originalcov_form

    def get_html(self, pretty=True):
        """Generates html code for displaying data for this metadata element"""
        root_div = div(cls='content-block')

        def get_th(heading_name):
            return th(heading_name, cls="text-muted")

        with root_div:
            legend('Spatial Reference')
            if self.value.get('projection', ''):
                div('Coordinate Reference System', cls='text-muted')
                div(self.value.get('projection', ''))
            if self.datum:
                div('Datum', cls='text-muted space-top')
                div(self.datum)
            if self.projection_string_type:
                div('Coordinate String Type', cls='text-muted space-top')
                div(self.projection_string_type)
            if self.projection_string_text:
                div('Coordinate String Text', cls='text-muted space-top')
                div(self.projection_string_text)
            h4('Extent', cls='space-top')
            with table(cls='custom-table'):
                with tbody():
                    with tr():
                        get_th('North')
                        td(self.value['northlimit'])
                    with tr():
                        get_th('West')
                        td(self.value['westlimit'])
                    with tr():
                        get_th('South')
                        td(self.value['southlimit'])
                    with tr():
                        get_th('East')
                        td(self.value['eastlimit'])
                    with tr():
                        get_th('Unit')
                        td(self.value['units'])
        return root_div.render(pretty=pretty)
# Define netCDF variable metadata
class Variable(AbstractMetaDataElement):
    """Metadata element describing a single variable of a NetCDF file."""

    # variable types are defined in OGC enhanced_data_model_extension_standard
    # left is the given value stored in database right is the value for the drop down list
    VARIABLE_TYPES = (
        ('Char', 'Char'),  # 8-bit byte that contains uninterpreted character data
        ('Byte', 'Byte'),  # integer(8bit)
        ('Short', 'Short'),  # signed integer (16bit)
        ('Int', 'Int'),  # signed integer (32bit)
        ('Float', 'Float'),  # floating point (32bit)
        ('Double', 'Double'),  # floating point(64bit)
        ('Int64', 'Int64'),  # integer(64bit)
        ('Unsigned Byte', 'Unsigned Byte'),
        ('Unsigned Short', 'Unsigned Short'),
        ('Unsigned Int', 'Unsigned Int'),
        ('Unsigned Int64', 'Unsigned Int64'),
        ('String', 'String'),  # variable length character string
        ('User Defined Type', 'User Defined Type'),  # compound, vlen, opaque, enum
        ('Unknown', 'Unknown')
    )
    term = 'Variable'
    # required variable attributes
    name = models.CharField(max_length=1000)
    unit = models.CharField(max_length=1000)
    type = models.CharField(max_length=1000, choices=VARIABLE_TYPES)
    shape = models.CharField(max_length=1000)
    # optional variable attributes
    descriptive_name = models.CharField(max_length=1000, null=True, blank=True,
                                        verbose_name='long name')
    method = models.TextField(null=True, blank=True, verbose_name='comment')
    missing_value = models.CharField(max_length=1000, null=True, blank=True)

    def __str__(self):
        # BUGFIX: on Python 3 Django uses __str__ for display; defining only
        # __unicode__ left instances rendering with the default object repr.
        return self.name

    # Keep the Python 2 name as an alias for any legacy callers.
    __unicode__ = __str__

    @classmethod
    def remove(cls, element_id):
        # Variables mirror the contents of the uploaded .nc file and must not
        # be deleted independently of it.
        raise ValidationError("The variable of the resource can't be deleted.")

    def get_html(self, pretty=True):
        """Generates html code for displaying data for this metadata element"""
        root_div = div(cls="content-block")

        def get_th(heading_name):
            return th(heading_name, cls="text-muted")

        with root_div:
            with div(cls="custom-well"):
                strong(self.name)
                with table(cls='custom-table'):
                    with tbody():
                        with tr():
                            get_th('Unit')
                            td(self.unit)
                        with tr():
                            get_th('Type')
                            td(self.type)
                        with tr():
                            get_th('Shape')
                            td(self.shape)
                        # optional attributes only get a row when present
                        if self.descriptive_name:
                            with tr():
                                get_th('Long Name')
                                td(self.descriptive_name)
                        if self.missing_value:
                            with tr():
                                get_th('Missing Value')
                                td(self.missing_value)
                        if self.method:
                            with tr():
                                get_th('Comment')
                                td(self.method)
        return root_div.render(pretty=pretty)
# TODO Deprecated
class NetcdfResource(BaseResource):
    """Proxy resource type for multidimensional (NetCDF) content."""

    # manager scoped to this resource type
    objects = ResourceManager("NetcdfResource")

    @classmethod
    def get_metadata_class(cls):
        # metadata model class used for this resource type
        return NetcdfMetaData

    @classmethod
    def get_supported_upload_file_types(cls):
        # only file with extension .nc is supported for uploading
        return (".nc",)

    @classmethod
    def allow_multiple_file_upload(cls):
        # can upload only 1 file
        return False

    @classmethod
    def can_have_multiple_files(cls):
        # can have only 1 file
        return False

    # add resource-specific HS terms
    def get_hs_term_dict(self):
        # get existing hs_term_dict from base class
        hs_term_dict = super(NetcdfResource, self).get_hs_term_dict()
        # add new terms for NetCDF res
        hs_term_dict["HS_FILE_NAME"] = ""
        # use the first .nc file found as the representative file name
        for res_file in self.files.all():
            _, f_fullname, f_ext = get_resource_file_name_and_extension(res_file)
            if f_ext.lower() == '.nc':
                hs_term_dict["HS_FILE_NAME"] = f_fullname
                break
        return hs_term_dict

    def update_netcdf_file(self, user):
        """Regenerate the .nc/.txt files when metadata has unsynced changes."""
        # refresh to pick up is_dirty changes made in other transactions
        self.metadata.refresh_from_db()
        if not self.metadata.is_dirty:
            return
        nc_res_file = get_resource_files_by_extension(self, ".nc")
        txt_res_file = get_resource_files_by_extension(self, ".txt")
        from hs_file_types.models.netcdf import netcdf_file_update  # avoid recursive import
        # only rewrite when both the data file and the header text file exist
        if nc_res_file and txt_res_file:
            netcdf_file_update(self, nc_res_file[0], txt_res_file[0], user)

    discovery_content_type = 'Multidimensional (NetCDF)'  # used during discovery

    class Meta:
        verbose_name = 'Multidimensional (NetCDF)'
        proxy = True
# Register the generic resource page processor for NetcdfResource pages.
processor_for(NetcdfResource)(resource_processor)
class NetCDFMetaDataMixin(models.Model):
    """This class must be the first class in the multi-inheritance list of classes"""
    variables = GenericRelation(Variable)
    ori_coverage = GenericRelation(OriginalCoverage)

    class Meta:
        abstract = True

    @property
    def originalCoverage(self):
        # At most one OriginalCoverage exists per object (unique_together).
        return self.ori_coverage.all().first()

    def has_all_required_elements(self):
        """Return True only when every required metadata element exists."""
        if not super(NetCDFMetaDataMixin, self).has_all_required_elements():
            return False
        if not self.variables.all():
            return False
        spatial_coverage = (self.coverages.all().filter(type='box').first() or
                            self.coverages.all().filter(type='point').first())
        if not spatial_coverage:
            return False
        if not self.originalCoverage:
            return False
        return True

    def get_required_missing_elements(self):
        """Return the display names of required metadata elements still missing."""
        missing = super(NetCDFMetaDataMixin, self).get_required_missing_elements()
        spatial_coverage = (self.coverages.all().filter(type='box').first() or
                            self.coverages.all().filter(type='point').first())
        if not spatial_coverage:
            missing.append('Spatial Coverage')
        if not self.variables.all().first():
            missing.append('Variable')
        if not self.originalCoverage:
            missing.append('Spatial Reference')
        return missing

    def delete_all_elements(self):
        """Delete the base elements plus the NetCDF-specific ones."""
        super(NetCDFMetaDataMixin, self).delete_all_elements()
        self.ori_coverage.all().delete()
        self.variables.all().delete()

    @classmethod
    def get_supported_element_names(cls):
        """Class names of all metadata elements supported by this metadata model."""
        names = super(NetCDFMetaDataMixin, cls).get_supported_element_names()
        # NetCDF-specific elements on top of the core set
        names.extend(['Variable', 'OriginalCoverage'])
        return names
# define the netcdf metadata
class NetcdfMetaData(NetCDFMetaDataMixin, CoreMetaData):
    """Metadata model for NetCDF resources, with dirty-tracking for file sync."""

    # True when metadata changed since the .nc file was last regenerated
    is_dirty = models.BooleanField(default=False)

    @property
    def resource(self):
        # reverse lookup of the resource owning this metadata instance
        return NetcdfResource.objects.filter(object_id=self.id).first()

    @property
    def serializer(self):
        """Return an instance of rest_framework Serializer for self """
        from .serializers import NetCDFMetaDataSerializer
        return NetCDFMetaDataSerializer(self)

    @classmethod
    def parse_for_bulk_update(cls, metadata, parsed_metadata):
        """Overriding the base class method"""
        CoreMetaData.parse_for_bulk_update(metadata, parsed_metadata)
        keys_to_update = list(metadata.keys())
        if 'originalcoverage' in keys_to_update:
            parsed_metadata.append({"originalcoverage": metadata.pop('originalcoverage')})
        if 'variables' in keys_to_update:
            # 'variables' is repeatable: one parsed entry per variable dict
            for variable in metadata.pop('variables'):
                parsed_metadata.append({"variable": variable})

    def set_dirty(self, flag):
        """
        Overriding the base class method
        """
        # dirty tracking is meaningful only when there are content files to sync
        if self.resource.files.all():
            self.is_dirty = flag
            self.save()

    def update(self, metadata, user):
        # overriding the base class update method for bulk update of metadata
        from .forms import VariableValidationForm, OriginalCoverageValidationForm

        super(NetcdfMetaData, self).update(metadata, user)
        missing_file_msg = "Resource specific metadata can't be updated when there is no " \
                           "content files"

        with transaction.atomic():
            # update/create non-repeatable element (originalcoverage)
            for dict_item in metadata:
                if 'originalcoverage' in dict_item:
                    if not self.resource.files.all():
                        raise ValidationError(missing_file_msg)
                    coverage_data = dict_item['originalcoverage']
                    # these fields come from the .nc file and are not user-editable
                    for key in ('datum', 'projection_string_type', 'projection_string_text'):
                        coverage_data.pop(key, None)
                    if 'value' not in coverage_data:
                        raise ValidationError("Coverage value data is missing")
                    # projection is also read-only; strip it before validation
                    if 'projection' in coverage_data['value']:
                        coverage_data['value'].pop('projection')
                    # pop/re-add so the validation form sees only the value dict
                    coverage_value_dict = coverage_data.pop('value')
                    validation_form = OriginalCoverageValidationForm(coverage_value_dict)
                    coverage_data['value'] = coverage_value_dict
                    if not validation_form.is_valid():
                        err_string = self.get_form_errors_as_string(validation_form)
                        raise ValidationError(err_string)
                    if self.originalCoverage:
                        self.update_element('originalcoverage', self.originalCoverage.id,
                                            **coverage_data)
                    else:
                        self.create_element('originalcoverage', **coverage_data)
                    # non-repeatable: only the first originalcoverage entry is used
                    break

            # update repeatable element (variable)
            for dict_item in metadata:
                if 'variable' in dict_item:
                    if not self.resource.files.all():
                        raise ValidationError(missing_file_msg)
                    variable_data = dict_item['variable']
                    if 'name' not in variable_data:
                        raise ValidationError("Invalid variable data")
                    # find the matching (lookup by name) variable element to update
                    var_element = self.variables.filter(name=variable_data['name']).first()
                    if var_element is None:
                        raise ValidationError("No matching variable element was found")
                    # name/type/shape mirror the .nc file: force the stored values
                    for key in ('name', 'type', 'shape'):
                        variable_data.pop(key, None)
                    variable_data['name'] = var_element.name
                    variable_data['type'] = var_element.type
                    variable_data['shape'] = var_element.shape
                    if 'unit' not in variable_data:
                        variable_data['unit'] = var_element.unit
                    validation_form = VariableValidationForm(variable_data)
                    if not validation_form.is_valid():
                        err_string = self.get_form_errors_as_string(validation_form)
                        raise ValidationError(err_string)
                    self.update_element('variable', var_element.id, **variable_data)

            # write updated metadata to netcdf file
            self.resource.update_netcdf_file(user)

    def update_element(self, element_model_name, element_id, **kwargs):
        """Update an element; mark metadata dirty when the change affects the .nc file."""
        super(NetcdfMetaData, self).update_element(element_model_name, element_id, **kwargs)
        if self.resource.files.all() and element_model_name in ['variable', 'title', 'description',
                                                                'rights', 'coverage',
                                                                'relation', 'creator',
                                                                'contributor']:
            # relations only dirty the file for 'references'/'source' types
            if element_model_name != 'relation':
                self.is_dirty = True
            elif kwargs.get('type', None) in ('references', 'source'):
                self.is_dirty = True
            self.save()

    def create_element(self, element_model_name, **kwargs):
        """Create an element; mark metadata dirty when the change affects the .nc file."""
        element = super(NetcdfMetaData, self).create_element(element_model_name, **kwargs)
        if self.resource.files.all() and element_model_name in ['description', 'subject',
                                                                'coverage', 'relation', 'creator',
                                                                'contributor']:
            # relations only dirty the file for 'references'/'source' types
            if element_model_name != 'relation':
                self.is_dirty = True
            elif kwargs.get('type', None) in ('references', 'source'):
                self.is_dirty = True
            self.save()
        return element

    def delete_element(self, element_model_name, element_id):
        """Delete an element; mark metadata dirty when the change affects the .nc file."""
        super(NetcdfMetaData, self).delete_element(element_model_name, element_id)
        if self.resource.files.all() and element_model_name in ['source', 'contributor', 'creator',
                                                                'relation']:
            self.is_dirty = True
            self.save()

    def get_update_netcdf_file_html_form(self):
        """Build the (hidden-by-default) 'Update NetCDF File' banner and form.

        The rendered html contains Django template tags ({% csrf_token %},
        {{ cm.metadata.is_dirty }}) and is expected to be rendered as a
        template fragment, not served directly.
        """
        form_action = "/hsapi/_internal/netcdf_update/{}/".\
            format(self.resource.short_id)
        style = "display:none;"
        root_div = div(id="netcdf-file-update", cls="space-bottom", style=style)

        with root_div:
            with div(cls="col-sm-12"):
                with div(cls="alert alert-warning alert-dismissible", role="alert"):
                    div("NetCDF file needs to be synced with metadata changes.", cls='space-bottom')
                    _input(id="metadata-dirty", type="hidden", value="{{ cm.metadata.is_dirty }}")
                    with form(action=form_action, method="post", id="update-netcdf-file",):
                        div('{% csrf_token %}')
                        button("Update NetCDF File", type="submit", cls="btn btn-primary",
                               id="id-update-netcdf-file",
                               )
        return root_div
| |
#!/usr/bin/env python
# Copyright 2021 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assuming pre-requisites are in place (from running
`build_scripts/ios/install_prereqs.sh`), this builds the firebase cpp sdk for
ios and tvos.
It does the following,
- Build ios and tvos libraries via cmake (after downloading corresponding
cocoapods).
- Create a "universal" framework if all platforms and architectures were built.
- Create "xcframeworks" combining both ios and tvos frameworks.
Usage examples:
# Build all supported targets and architectures on all platforms.
python3 scripts/gha/build_ios_tvos.py
# Build specific targets
python3 scripts/gha/build_ios_tvos.py -t firebase_auth firebase_database
# Build for specific architectures
python3 scripts/gha/build_ios_tvos.py -a arm64 -t firebase_remote_config
"""
import argparse
from collections import defaultdict
import logging
import multiprocessing
import os
import shutil
import subprocess
import utils
# Configuration for supported os's, and platforms.
# Maps each apple OS name to its buildable cmake targets plus, per platform
# variant ('device'/'simulator'), the architectures to build and the cmake
# toolchain file (and, for tvos, the -DPLATFORM value) to use.
CONFIG = {
  'ios': {
    'supported_targets' : ('firebase_admob', 'firebase_analytics',
                           'firebase_auth', 'firebase_database',
                           'firebase_dynamic_links', 'firebase_firestore',
                           'firebase_functions', 'firebase_installations',
                           'firebase_messaging', 'firebase_remote_config',
                           'firebase_storage'),
    'device': {
      'architectures' : ('arm64', 'armv7'),
      'toolchain' : 'cmake/toolchains/ios.cmake',
    },
    'simulator': {
      'architectures' : ('arm64', 'x86_64', 'i386'),
      'toolchain': 'cmake/toolchains/ios_simulator.cmake',
    }
  },
  'tvos': {
    'supported_targets' : ('firebase_auth', 'firebase_analytics',
                           'firebase_database', 'firebase_firestore',
                           'firebase_functions', 'firebase_installations',
                           'firebase_messaging', 'firebase_remote_config',
                           'firebase_storage'),
    'device': {
      'architectures' : ('arm64',),
      'toolchain' : 'cmake/toolchains/apple.toolchain.cmake',
      # passed to cmake as -DPLATFORM= for the tvos toolchain
      'toolchain_platform': 'TVOS',
    },
    'simulator': {
      'architectures' : ('x86_64',),
      'toolchain' : 'cmake/toolchains/apple.toolchain.cmake',
      'toolchain_platform': 'SIMULATOR_TVOS'
    }
  },
}
def arrange_frameworks(archive_output_path):
  """Rename frameworks and remove unnecessary files.

  Normalizes the cmake archive output: renames firebase_app.framework to
  firebase.framework (if needed), deletes every entry that is not a
  *.framework bundle, and strips all Info.plist files.

  Args:
    archive_output_path (str): Output path containing frameworks.
                               Subdirectories should be the various target frameworks.
  """
  listing = os.listdir(archive_output_path)
  if 'firebase.framework' not in listing:
    # Rename firebase_app path to firebase path
    old_dir = os.path.join(archive_output_path, 'firebase_app.framework')
    new_dir = os.path.join(archive_output_path, 'firebase.framework')
    logging.info('Renaming {0} to {1}'.format(old_dir, new_dir))
    os.rename(old_dir, new_dir)
    # Rename firebase_app library to firebase library
    logging.info('Renaming firebase_app library to firebase')
    os.rename(os.path.join(new_dir, 'firebase_app'),
              os.path.join(new_dir, 'firebase'),)

  # Delete all non framework directories
  for entry in listing:
    if entry.endswith('.framework'):
      continue
    entry_path = os.path.join(archive_output_path, entry)
    if os.path.isdir(entry_path):
      logging.info('Deleting unnecessary path ' + entry_path)
      shutil.rmtree(entry_path)
    else:
      logging.info('Deleting unnecessary file ' + entry_path)
      os.remove(entry_path)

  # Delete all useless Info.plist
  for root, _, files in os.walk(archive_output_path):
    for filename in files:
      if filename == 'Info.plist':
        logging.info('Deleting unnecessary Info.plist file ' +
                     os.path.join(root, filename))
        os.remove(os.path.join(root, filename))
def build_universal_framework(frameworks_path, targets):
  """Create universal frameworks if possible.

  If all architectures (eg: arm64, armv7 etc) and platforms (device, simulator)
  were built, combine all of the libraries into a single universal framework.

  Args:
    frameworks_path (str): Root path containing subdirectories for each
                           operating system and its frameworks.
    targets iterable(str): List of firebase libraries to process.
                           (eg: [firebase_auth, firebase_remote_config])

  Eg: <build_dir>/frameworks  <------------- <frameworks_path>
          - ios
            - device-arm64
              - firebase.framework
              - firebase_admob.framework
              ...
            - simulator-i386
              ...
          - tvos
            - device-arm64
              - firebase.framework
              - firebase_admob.framework
              ...
            - simulator-x86_64
              ...

  Output: <build_dir>/frameworks
          - ios
            - device-arm64
              ...
            - simulator-i386
              ...
            - universal     <-------------- Newly created
              - firebase.framework
              - firebase_admob.framework
              ...
          - tvos
            - device-arm64
              ...
            - simulator-x86_64
              ...
            - universal     <-------------- Newly created
              - firebase.framework
              - firebase_admob.framework
              ...
  """
  for apple_os in os.listdir(frameworks_path):
    logging.info('Building universal framework for {0}'.format(apple_os))
    framework_os_path = os.path.join(frameworks_path, apple_os)
    # Extract list of all built platform-architecture combinations into a map.
    # Map looks like this,
    # {'device': ['arm64', 'armv7'], 'simulator': ['x86_64']}
    platform_variant_architecture_dirs = os.listdir(framework_os_path)
    platform_variant_arch_map = defaultdict(list)
    for variant_architecture in platform_variant_architecture_dirs:
      logging.debug('Inspecting ' + variant_architecture)
      platform_variant, architecture = variant_architecture.split('-')
      platform_variant_arch_map[platform_variant].append(architecture)

    # A universal framework only makes sense when every supported architecture
    # of every platform variant was built.
    build_universal = True
    for platform in platform_variant_arch_map:
      logging.debug('Found architectures for platform '
                    '{0}: {1}'.format(platform,
                                      ' '.join(platform_variant_arch_map[platform])))
      missing_architectures = set(CONFIG[apple_os][platform]['architectures']) \
                              - set(platform_variant_arch_map[platform])
      if missing_architectures:
        # BUGFIX: report the platform currently being checked ('platform'),
        # not the stale 'platform_variant' left over from the previous loop;
        # also add the missing space before the placeholder.
        logging.error('Following architectures are missing for platform variant'
                      ' {0}: {1}'.format(platform, ' '.join(missing_architectures)))
        build_universal = False
        break

    if not build_universal:
      logging.error('Missing some supported architectures. Skipping universal '
                    'framework creation')
      return

    # Pick any of the platform-arch directories as a reference candidate mainly
    # for obtaining a list of contained targets.
    reference_dir_path = os.path.join(framework_os_path,
                                      platform_variant_architecture_dirs[0])
    logging.debug('Using {0} as reference path for scanning '
                  'targets'.format(reference_dir_path))
    # Filter only .framework directories and make sure the framework is
    # in list of supported targets.
    target_frameworks = [x for x in os.listdir(reference_dir_path)
                         if x.endswith('.framework') and
                         x.split('.')[0] in targets]
    logging.debug('Targets found: {0}'.format(' '.join(target_frameworks)))

    # Collect a list of libraries from various platform-arch combinations for
    # each target and build a universal framework using lipo.
    for target_framework in target_frameworks:
      target_libraries = []
      # Eg: split firebase_auth.framework -> firebase_auth, .framework
      target, _ = os.path.splitext(target_framework)
      for variant_architecture_dir in platform_variant_architecture_dirs:
        # Since we have arm64 for both device and simulator, lipo cannot combine
        # them in the same fat file. We ignore simulator-arm64.
        if variant_architecture_dir == 'simulator-arm64':
          continue
        # <build_dir>/<apple_os>/frameworks/<platform-arch>/
        #                                              <target>.framework/target
        library_path = os.path.join(framework_os_path,
                                    variant_architecture_dir,
                                    target_framework, target)
        target_libraries.append(library_path)

      # <build_dir>/<apple_os>/frameworks/universal/<target>.framework
      universal_target_path = os.path.join(framework_os_path, 'universal',
                                           target_framework)
      logging.debug('Ensuring all directories exist: ' + universal_target_path)
      os.makedirs(universal_target_path)
      # <build_dir>/<apple_os>/frameworks/universal/<target>.framework/<target>
      universal_target_library_path = os.path.join(universal_target_path,
                                                   target)
      # lipo -create <lib1> <lib2> <lib3> .. -output <universal_lib>
      cmd = ['lipo', '-create']
      cmd.extend(target_libraries)
      cmd.append('-output')
      cmd.append(universal_target_library_path)
      logging.info('Creating universal framework at' +
                   universal_target_library_path)
      utils.run_command(cmd)

    # Copy headers from platform specific firebase.framework to newly created
    # universal firebase.framework.
    firebase_framework_headers_path = os.path.join(reference_dir_path,
                                                   'firebase.framework',
                                                   'Headers')
    universal_firebase_framework_headers_path = os.path.join(
        framework_os_path,
        'universal',
        'firebase.framework',
        'Headers')
    shutil.copytree(firebase_framework_headers_path,
                    universal_firebase_framework_headers_path)
def build_xcframeworks(frameworks_path, xcframeworks_path, template_info_plist,
                       targets):
  """Build xcframeworks combining libraries for different operating systems.

  Combine frameworks for different operating systems (ios, tvos), architectures
  (arm64, armv7, x86_64 etc) per platform variant (device, simulator).
  This makes it super convenient for developers to use a single deliverable in
  XCode and develop for multiple platforms/operating systems in one project.

  Args:
    frameworks_path (str): Absolute path to path containing frameworks.
    xcframeworks_path (str): Absolute path to create xcframeworks in.
    template_info_plist (str): Absolute path to a template Info.plist that
        will be copied over to each xcframework and provides metadata to XCode.
    targets iterable(str): List of firebase target libraries.
                           (eg: [firebase_auth, firebase_remote_config])

  Eg: <build_dir>/frameworks  <------------- <frameworks_path>
          - ios               <---------- <frameworks_os_path>
            - device-arm64
              - firebase.framework
              - firebase_admob.framework
              ...
            - simulator-i386
              ...
          - tvos
            - device-arm64
              - firebase.framework
              - firebase_admob.framework
              ...
            - simulator-x86_64
              ...

  Output: <build_dir>/xcframeworks  <----------- <xcframeworks_path>
          - firebase.xcframework
            - Info.plist      <----------- <Info.plist file>
            - ios-arm64_armv7 <-- <all_libraries_for_ios_device>
              - firebase.framework
                - firebase    <---- <library>
                - Headers     <---- <all_include_headers>
            - ios-arm64_i386_x86_64-simulator <--- <for_ios_simulator>
              - firebase.framework
                - firebase
                - Headers
            - tvos-arm64      <- <all_libraries_for_tvos_device>
              - firebase.framework
                - firebase
                - Headers
            ...
          - firebase_auth.xcframework <-- <firebase_auth target>
            - Info.plist
            - ios-arm64_armv7
              - firebase_auth.framework
                - firebase_auth
            - ios-arm64_i386_x86_64-simulator
              - firebase_auth.framework
                - firebase_auth
            - tvos-arm64
              - firebase.framework
                - firebase
                - Headers
            ...
  """
  for apple_os in os.listdir(frameworks_path):
    framework_os_path = os.path.join(frameworks_path, apple_os)
    platform_variant_architecture_dirs = os.listdir(framework_os_path)
    # Extract list of all built platform-architecture combinations into a map.
    # Map looks like this,
    # {'device': ['arm64', 'armv7'], 'simulator': ['x86_64']}
    platform_variant_arch_map = defaultdict(list)
    for variant_architecture in platform_variant_architecture_dirs:
      # Skip directories not of the format platform-arch (eg: universal)
      if '-' not in variant_architecture:
        continue
      platform_variant, architecture = variant_architecture.split('-')
      platform_variant_arch_map[platform_variant].append(architecture)

    # NOTE(review): entry [0] may be the 'universal' directory when one was
    # built earlier; it contains the same target frameworks, so scanning it
    # still works — confirm if the listing order ever matters.
    reference_dir_path = os.path.join(framework_os_path,
                                      platform_variant_architecture_dirs[0])
    logging.debug('Using {0} as reference path for scanning '
                  'targets'.format(reference_dir_path))
    # Filter only .framework directories and make sure the framework is
    # in list of supported targets.
    target_frameworks = [x for x in os.listdir(reference_dir_path)
                         if x.endswith('.framework') and
                         x.split('.')[0] in targets]
    logging.debug('Targets found: {0}'.format(' '.join(target_frameworks)))

    # For each target, we collect all libraries for a specific platform variants
    # (device or simulator) and os (ios or tvos).
    for target_framework in target_frameworks:
      # Eg: split firebase_auth.framework -> firebase_auth, .framework
      target, _ = os.path.splitext(target_framework)
      for platform_variant in platform_variant_arch_map:
        architectures = platform_variant_arch_map[platform_variant]
        xcframework_libraries = []
        for architecture in architectures:
          # <build_dir>/<apple_os>/frameworks/<platform-arch>/
          #                                              <target>.framework/target
          library_path = os.path.join(framework_os_path,
                                      '{0}-{1}'.format(platform_variant,
                                                       architecture),
                                      target_framework, target)
          xcframework_libraries.append(library_path)

        xcframework_key_parts = [apple_os]
        xcframework_key_parts.append('_'.join(sorted(architectures)))
        if platform_variant != 'device':
          # device is treated as default platform variant and we do not add any
          # suffix at the end. For all other variants, add them as suffix.
          xcframework_key_parts.append(platform_variant)
        # Eg: ios-arm64_armv7, tvos-x86_64-simulator
        xcframework_key = '-'.join(xcframework_key_parts)

        # <build_dir>/xcframeworks/<target>.xcframework/<os>-<list_of_archs>/
        #                                                    <target>.framework
        library_output_dir = os.path.join(xcframeworks_path,
                                          '{0}.xcframework'.format(target),
                                          xcframework_key,
                                          '{0}.framework'.format(target))
        logging.debug('Ensuring all directories exist: ' + library_output_dir)
        os.makedirs(library_output_dir)
        # Fuse all per-architecture libraries of this variant into one fat file.
        cmd = ['lipo', '-create']
        cmd.extend(xcframework_libraries)
        cmd.append('-output')
        cmd.append(os.path.join(library_output_dir, target))
        logging.info('Creating xcframework at' +
                     os.path.join(library_output_dir, target))
        utils.run_command(cmd)

      # <build_dir>/xcframeworks/<target>.xcframework
      target_xcframeworks_path = os.path.join(xcframeworks_path,
                                              '{0}.xcframework'.format(target))
      # Create Info.plist for xcframework
      dest_path = os.path.join(target_xcframeworks_path, 'Info.plist')
      logging.info('Copying template {0}'.format(template_info_plist))
      shutil.copy(template_info_plist, dest_path)
      contents = None
      # Replace token LIBRARY_PATH with current target framework.
      with open(dest_path, 'r') as info_plist_file:
        contents = info_plist_file.read()
      if contents:
        logging.debug('Updating LIBRARY_PATH with '
                      '{0}.framework'.format(target))
        contents = contents.replace('LIBRARY_PATH',
                                    '{0}.framework'.format(target))
        with open(dest_path, 'w') as info_plist_file:
          info_plist_file.write(contents)

    # Copy Headers for firebase.xcframework from firebase.framework.
    # Using a random platform specific firebase.framework.
    firebase_framework_headers_path = os.path.join(reference_dir_path,
                                                   'firebase.framework',
                                                   'Headers')
    firebase_xcframework_path = os.path.join(xcframeworks_path,
                                             'firebase.xcframework')
    for xcframework_key in os.listdir(firebase_xcframework_path):
      # Skip plain files (eg: the Info.plist created above).
      if os.path.isfile(os.path.join(firebase_xcframework_path,
                                     xcframework_key)):
        continue
      dest_headers_path = os.path.join(firebase_xcframework_path,
                                       xcframework_key,
                                       'firebase.framework', 'Headers')
      # Headers may already have been copied for this key by a previous os pass.
      if os.path.exists(dest_headers_path):
        continue
      logging.info('Copying {0} to {1}'.format(firebase_framework_headers_path,
                                               dest_headers_path))
      shutil.copytree(firebase_framework_headers_path,
                      dest_headers_path)
def cmake_configure(source_path, build_path, toolchain, archive_output_path,
                    architecture=None, toolchain_platform=None):
  """CMake configure which sets up the build project.

  Args:
    source_path (str): Source directory containing top level CMakeLists.txt.
    build_path (str): CMake build path (where project is built).
    toolchain (str): Path to CMake toolchain file. Differs based on os and/or
      platform.
    archive_output_path (str): Path to build and save libraries/frameworks to.
    architecture (str, optional): Architecture passed onto the cmake build
      system. Used when building for ios only. (eg:'arm64', 'x86_64')
    toolchain_platform (str, optional): Platform cmake option passed for tvos
      builds only. Accepts all platforms supported by the tvos toolchain.
      (eg: 'TVOS', 'SIMULATOR_TVOS' etc)
  """
  # NOTE: the previous docstring documented a 'targets' parameter that this
  # function never had; targets are passed to cmake_build() instead.
  cmd = ['cmake', '-S', source_path, '-B', build_path]
  cmd.append('-DCMAKE_TOOLCHAIN_FILE={0}'.format(toolchain))
  cmd.append('-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY={0}'.format(archive_output_path))
  if architecture:
    # iOS builds pin the architecture directly via the OSX architectures flag.
    cmd.append('-DCMAKE_OSX_ARCHITECTURES={0}'.format(architecture))
  elif toolchain_platform:
    # tvOS builds instead select a platform understood by the tvos toolchain.
    cmd.append('-DPLATFORM={0}'.format(toolchain_platform))
  utils.run_command(cmd)
def cmake_build(build_path, targets):
  """CMake build which builds all libraries.

  Args:
    build_path (str): CMake build path (where project is built).
    targets (list(str)): CMake build targets. (eg: firebase_auth, etc)
  """
  # Build every requested target for this os-platform-architecture combo.
  build_cmd = ['cmake', '--build', build_path, '--target'] + list(targets)
  utils.run_command(build_cmd)
def main():
  """Entry point: configure and build per-arch frameworks in parallel, then
  package universal frameworks and xcframeworks from the results."""
  args = parse_cmdline_args()

  # Build projects are easier to work with when all paths are absolute.
  if not os.path.isabs(args.build_dir):
    args.build_dir = os.path.abspath(args.build_dir)
  if not os.path.isabs(args.source_dir):
    args.source_dir = os.path.abspath(args.source_dir)

  frameworks_path = os.path.join(args.build_dir, 'frameworks')

  # List of cmake build process that we will be launched in parallel.
  processes = []
  for apple_os in args.os:
    logging.info("Building for {0}".format(apple_os))
    os_config = CONFIG.get(apple_os)
    if not os_config:
      raise ValueError('OS {0} not supported for building.'.format(apple_os))
    # Only build targets this OS actually supports, per CONFIG.
    targets_from_config = set(os_config['supported_targets'])
    supported_targets = targets_from_config.intersection(args.target)
    if not supported_targets:
      raise ValueError('No supported targets found for {0}'.format(apple_os))
    frameworks_os_path = os.path.join(frameworks_path, apple_os)
    for platform_variant in args.platform_variant:
      os_platform_variant_config = os_config.get(platform_variant)
      if not os_platform_variant_config:
        raise ValueError('Could not find configuration for platform '
                         '{0} for os {1}'.format(platform_variant, apple_os))
      # Likewise intersect requested architectures with what the
      # platform variant supports.
      archs_from_config = set(os_platform_variant_config['architectures'])
      supported_archs = archs_from_config.intersection(args.architecture)
      if not supported_archs:
        raise ValueError('Could not find valid architectures for platform '
                         '{0} for os {1}'.format(platform_variant, apple_os))
      for architecture in supported_archs:
        platform_architecture_token = '{0}-{1}'.format(platform_variant,
                                                       architecture)
        archive_output_path = os.path.join(frameworks_os_path,
                                           platform_architecture_token)
        # Eg: <build_dir>/tvos_cmake_build/device-arm64
        build_path = os.path.join(args.build_dir,
                                  '{0}_cmake_build'.format(apple_os),
                                  platform_architecture_token)
        # For ios builds, we specify architecture to cmake configure.
        architecture = architecture if apple_os == 'ios' else None
        # For tvos builds, we pass a special cmake option PLATFORM to toolchain.
        toolchain_platform = os_platform_variant_config['toolchain_platform'] \
            if apple_os == 'tvos' else None
        # CMake configure was having all sorts of issues when run in parallel.
        # It might be the Cocoapods that are downloaded in parallel into a
        # single cache directory.
        cmake_configure(args.source_dir, build_path,
                        os_platform_variant_config['toolchain'],
                        archive_output_path, architecture,
                        toolchain_platform)
        process = multiprocessing.Process(target=cmake_build,
                                          args=(build_path, supported_targets))
        processes.append((process, archive_output_path))

  # Launch all cmake build processes in parallel.
  for process, _ in processes:
    process.start()
  for process, archive_output_path in processes:
    process.join()
    # Reorganize frameworks (renaming, copying over headers etc)
    arrange_frameworks(archive_output_path)

  # Since we renamed firebase_app.framework to firebase.framework we add that
  # to our list of targets.
  targets = set(args.target)
  targets.add('firebase')

  # if we built for all architectures build universal framework as well.
  build_universal_framework(frameworks_path, targets)

  # Build xcframeworks
  xcframeworks_path = os.path.join(args.build_dir, 'xcframeworks')
  template_info_plist_path = os.path.join(args.source_dir, 'build_scripts',
                                          'tvos', 'Info_ios_and_tvos.plist')
  build_xcframeworks(frameworks_path, xcframeworks_path,
                     template_info_plist_path, targets)
def parse_cmdline_args():
  """Parse command line arguments and configure logging.

  Returns:
    argparse.Namespace: Parsed command line arguments.

  Raises:
    ValueError: If --log_level is not a recognized logging level name.
  """
  parser = argparse.ArgumentParser(description='Build for iOS and tvOS.')
  parser.add_argument('-b', '--build_dir',
                      default='ios_tvos_build', help='Name of build directory.')
  parser.add_argument('-s', '--source_dir',
                      default=os.getcwd(),
                      help='Directory containing source code (top level CMakeLists.txt)')
  parser.add_argument('-v', '--platform_variant', nargs='+',
                      default=('device', 'simulator'),
                      help='List of platforms to build for.')
  parser.add_argument('-a', '--architecture', nargs='+',
                      default=('arm64', 'armv7', 'x86_64', 'i386'),
                      help='List of architectures to build for.')
  parser.add_argument('-t', '--target', nargs='+',
                      default=('firebase_admob', 'firebase_analytics',
                               'firebase_auth', 'firebase_database',
                               'firebase_dynamic_links', 'firebase_firestore',
                               'firebase_functions', 'firebase_installations',
                               'firebase_messaging', 'firebase_remote_config',
                               'firebase_storage'),
                      help='List of CMake build targets')
  parser.add_argument('-o', '--os', nargs='+', default=('ios', 'tvos'),
                      help='List of operating systems to build for.')
  parser.add_argument('--log_level', default='info',
                      help="Logging level (debug, warning, info)")
  args = parser.parse_args()

  # Special handling for log level argument
  log_levels = {
      'critical': logging.CRITICAL,
      'error': logging.ERROR,
      'warning': logging.WARNING,
      'info': logging.INFO,
      'debug': logging.DEBUG
  }

  level = log_levels.get(args.log_level.lower())
  if level is None:
    # BUG FIX: the original implicit string concatenation was missing a
    # space, producing "...aslog levels..." in the error message.
    raise ValueError('Please use one of the following as '
                     'log levels:\n{0}'.format(','.join(log_levels.keys())))
  logging.basicConfig(level=level)
  # Removed a discarded no-op logging.getLogger(__name__) call.
  return args
# Entry point: only run the build when executed directly as a script.
if __name__ == '__main__':
  main()
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from eventlet import greenthread
from oslo.config import cfg
from oslo.db import exception as os_db_exception
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import importutils
from oslo_concurrency import lockutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3agentscheduler
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)

# Maximum number of times _bind_port_if_needed() retries committing a port
# binding before giving up and logging an error.
MAX_BIND_TRIES = 10

# REVISIT(rkukura): Move this and other network_type constants to
# providernet.py?
TYPE_MULTI_SEGMENT = 'multi-segment'
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt"]
    @property
    def supported_extension_aliases(self):
        # Computed lazily and cached on the instance so the extension-manager
        # query and security-group filtering run only once.
        if not hasattr(self, '_aliases'):
            aliases = self._supported_extension_aliases[:]
            aliases += self.extension_manager.extension_aliases()
            sg_rpc.disable_security_group_extension_by_config(aliases)
            self._aliases = aliases
        return self._aliases
    def __init__(self):
        # First load drivers, then initialize DB, then initialize drivers
        self.type_manager = managers.TypeManager()
        self.extension_manager = managers.ExtensionManager()
        self.mechanism_manager = managers.MechanismManager()
        super(Ml2Plugin, self).__init__()
        self.type_manager.initialize()
        self.extension_manager.initialize()
        self.mechanism_manager.initialize()
        self._setup_rpc()

        # REVISIT(rkukura): Use stevedore for these?
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )

        LOG.info(_LI("Modular L2 Plugin initialization complete"))
    def _setup_rpc(self):
        """Create the agent notifier and register the DHCP agent notifier."""
        self.notifier = rpc.AgentNotifierApi(topics.AGENT)
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
    def start_rpc_listeners(self):
        """Wire up RPC endpoints and start consuming; returns worker threads."""
        self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager),
                          securitygroups_rpc.SecurityGroupServerRpcCallback(),
                          dvr_rpc.DVRServerRpcCallback(),
                          dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback(),
                          metadata_rpc.MetadataRpcCallback()]
        self.topic = topics.PLUGIN
        self.conn = n_rpc.create_connection(new=True)
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        return self.conn.consume_in_threads()
    def _filter_nets_provider(self, context, nets, filters):
        """Filter networks on provider attributes (currently a no-op)."""
        # TODO(rkukura): Implement filtering.
        return nets
def _notify_l3_agent_new_port(self, context, port):
if not port:
return
# Whenever a DVR serviceable port comes up on a
# node, it has to be communicated to the L3 Plugin
# and agent for creating the respective namespaces.
if (utils.is_dvr_serviced(port['device_owner'])):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if (utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)):
l3plugin.dvr_update_router_addvm(context, port)
def _get_host_port_if_changed(self, mech_context, attrs):
binding = mech_context._binding
host = attrs and attrs.get(portbindings.HOST_ID)
if (attributes.is_attr_set(host) and binding.host != host):
return mech_context.current
    def _process_port_binding(self, mech_context, attrs):
        """Apply requested binding attributes to the port's binding record.

        Returns True when any binding input (host, vnic_type, profile)
        changed; in that case the binding is also reset to unbound so it
        will be re-bound with the new inputs.
        """
        binding = mech_context._binding
        port = mech_context.current
        changes = False

        host = attrs and attrs.get(portbindings.HOST_ID)
        if (attributes.is_attr_set(host) and
            binding.host != host):
            binding.host = host
            changes = True

        vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
        if (attributes.is_attr_set(vnic_type) and
            binding.vnic_type != vnic_type):
            binding.vnic_type = vnic_type
            changes = True

        # treat None as clear of profile.
        profile = None
        if attrs and portbindings.PROFILE in attrs:
            profile = attrs.get(portbindings.PROFILE) or {}

        if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
                           self._get_profile(binding)):
            binding.profile = jsonutils.dumps(profile)
            # Serialized profile must fit the DB column.
            if len(binding.profile) > models.BINDING_PROFILE_LEN:
                msg = _("binding:profile value too large")
                raise exc.InvalidInput(error_message=msg)
            changes = True

        # Unbind the port if needed.
        if changes:
            binding.vif_type = portbindings.VIF_TYPE_UNBOUND
            binding.vif_details = ''
            binding.driver = None
            binding.segment = None

        # DVR interface ports are always bound 'distributed' with no host.
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
            binding.vif_details = ''
            binding.driver = None
            binding.segment = None
            binding.host = ''

        self._update_port_dict_binding(port, binding)
        return changes
    def _bind_port_if_needed(self, context, allow_notify=False,
                             need_notify=False):
        """Attempt to bind an unbound port, retrying on concurrent changes.

        :param context: PortContext for the port to bind
        :param allow_notify: whether this method may send the port-updated
            notification itself
        :param need_notify: whether a notification is already pending
        :returns: the PortContext reflecting the final port state
        """
        plugin_context = context._plugin_context
        port_id = context._port['id']

        # Since the mechanism driver bind_port() calls must be made
        # outside a DB transaction locking the port state, it is
        # possible (but unlikely) that the port's state could change
        # concurrently while these calls are being made. If another
        # thread or process succeeds in binding the port before this
        # thread commits its results, the already committed results are
        # used. If attributes such as binding:host_id,
        # binding:profile, or binding:vnic_type are updated
        # concurrently, this loop retries binding using the new
        # values.
        count = 0
        while True:
            # First, determine whether it is necessary and possible to
            # bind the port.
            binding = context._binding
            if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
                or not binding.host):
                # We either don't need to bind the port, or can't, so
                # notify if needed and return.
                if allow_notify and need_notify:
                    self._notify_port_updated(context)
                return context

            # Limit binding attempts to avoid any possibility of
            # infinite looping and to ensure an error is logged
            # instead. This does not need to be tunable because no
            # more than a couple attempts should ever be required in
            # normal operation. Log at info level if not 1st attempt.
            count += 1
            if count > MAX_BIND_TRIES:
                LOG.error(_LE("Failed to commit binding results for %(port)s "
                              "after %(max)s tries"),
                          {'port': port_id, 'max': MAX_BIND_TRIES})
                return context
            if count > 1:
                greenthread.sleep(0)  # yield
                LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
                         {'count': count, 'port': port_id})

            # The port isn't already bound and the necessary
            # information is available, so attempt to bind the port.
            bind_context = self._bind_port(context)

            # Now try to commit result of attempting to bind the port.
            new_context, did_commit = self._commit_port_binding(
                plugin_context, port_id, binding, bind_context)
            if not new_context:
                # The port has been deleted concurrently, so just
                # return the unbound result from the initial
                # transaction that completed before the deletion.
                LOG.debug("Port %s has been deleted concurrently",
                          port_id)
                return context
            # Need to notify if we succeed and our results were
            # committed.
            if did_commit and (new_context._binding.vif_type !=
                               portbindings.VIF_TYPE_BINDING_FAILED):
                need_notify = True
            context = new_context
    def _bind_port(self, orig_context):
        """Run mechanism drivers to bind a port, without touching the DB.

        Builds a fresh PortContext with a transient unbound PortBinding so
        the attempt cannot mutate state from the previous transaction.
        """
        # Construct a new PortContext from the one from the previous
        # transaction.
        port = orig_context._port
        orig_binding = orig_context._binding
        new_binding = models.PortBinding(
            host=orig_binding.host,
            vnic_type=orig_binding.vnic_type,
            profile=orig_binding.profile,
            vif_type=portbindings.VIF_TYPE_UNBOUND,
            vif_details=''
        )
        self._update_port_dict_binding(port, new_binding)
        new_context = driver_context.PortContext(
            self, orig_context._plugin_context, port,
            orig_context._network_context._network, new_binding)

        # Attempt to bind the port and return the context with the
        # result.
        self.mechanism_manager.bind_port(new_context)
        return new_context
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
new_context):
session = plugin_context.session
new_binding = new_context._binding
# After we've attempted to bind the port, we begin a
# transaction, get the current port state, and decide whether
# to commit the binding results.
#
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(session,
port_id)
if not port_db:
# The port has been deleted concurrently.
return (None, None)
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding,
original_port=oport)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
orig_binding.host == cur_binding.host and
orig_binding.vnic_type == cur_binding.vnic_type and
orig_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
cur_binding.driver = new_binding.driver
cur_binding.segment = new_binding.segment
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if new_binding.segment and new_context._new_port_status:
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
# Call the mechanism driver precommit methods, commit
# the results, and call the postcommit methods.
self.mechanism_manager.update_port_precommit(cur_context)
if commit:
self.mechanism_manager.update_port_postcommit(cur_context)
# Continue, using the port state as of the transaction that
# just finished, whether that transaction committed new
# results or discovered concurrent port state changes.
return (cur_context, commit)
def _update_port_dict_binding(self, port, binding):
port[portbindings.HOST_ID] = binding.host
port[portbindings.VNIC_TYPE] = binding.vnic_type
port[portbindings.PROFILE] = self._get_profile(binding)
port[portbindings.VIF_TYPE] = binding.vif_type
port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
def _get_vif_details(self, binding):
if binding.vif_details:
try:
return jsonutils.loads(binding.vif_details)
except Exception:
LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
"for port %(port)s is invalid"),
{'value': binding.vif_details,
'port': binding.port_id})
return {}
def _get_profile(self, binding):
if binding.profile:
try:
return jsonutils.loads(binding.profile)
except Exception:
LOG.error(_LE("Serialized profile DB value '%(value)s' for "
"port %(port)s is invalid"),
{'value': binding.profile,
'port': binding.port_id})
return {}
    def _ml2_extend_port_dict_binding(self, port_res, port_db):
        """Dict-extend hook: add binding attributes to port API results."""
        # None when called during unit tests for other plugins.
        if port_db.port_binding:
            self._update_port_dict_binding(port_res, port_db.port_binding)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_extend_port_dict_binding'])
# Register extend dict methods for network and port resources.
# Each mechanism driver that supports extend attribute for the resources
# can add those attribute to the result.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_md_extend_port_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
    def _ml2_md_extend_network_dict(self, result, netdb):
        """Let extension drivers add attributes to a network dict."""
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_network_dict(session, result)
    def _ml2_md_extend_port_dict(self, result, portdb):
        """Let extension drivers add attributes to a port dict."""
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_port_dict(session, result)
    def _ml2_md_extend_subnet_dict(self, result, subnetdb):
        """Let extension drivers add attributes to a subnet dict."""
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            self.extension_manager.extend_subnet_dict(session, result)
# Note - The following hook methods have "ml2" in their names so
# that they are not called twice during unit tests due to global
# registration of hooks in portbindings_db.py used by other
# plugins.
    def _ml2_port_model_hook(self, context, original_model, query):
        """Query hook: outer-join ports to their ML2 binding records."""
        query = query.outerjoin(models.PortBinding,
                                (original_model.id ==
                                 models.PortBinding.port_id))
        return query
def _ml2_port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
return query.filter(models.PortBinding.host.in_(values))
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"ml2_port_bindings",
'_ml2_port_model_hook',
None,
'_ml2_port_result_filter_hook')
    def _notify_port_updated(self, mech_context):
        """Notify the L2 agent that a bound port was updated."""
        port = mech_context._port
        segment = mech_context.bottom_bound_segment
        if not segment:
            # REVISIT(rkukura): This should notify agent to unplug port
            network = mech_context.network.current
            LOG.warning(_LW("In _notify_port_updated(), no bound segment for "
                            "port %(port_id)s on network %(network_id)s"),
                        {'port_id': port['id'],
                         'network_id': network['id']})
            return
        self.notifier.port_update(mech_context._plugin_context, port,
                                  segment[api.NETWORK_TYPE],
                                  segment[api.SEGMENTATION_ID],
                                  segment[api.PHYSICAL_NETWORK])
    def _delete_objects(self, context, resource, objects):
        """Best-effort rollback: delete each resource created during a bulk
        operation, logging (not raising) individual failures."""
        delete_op = getattr(self, 'delete_%s' % resource)
        for obj in objects:
            try:
                delete_op(context, obj['result']['id'])
            except KeyError:
                LOG.exception(_LE("Could not find %s to delete."),
                              resource)
            except Exception:
                LOG.exception(_LE("Could not delete %(res)s %(id)s."),
                              {'res': resource,
                               'id': obj['result']['id']})
    def _create_bulk_ml2(self, resource, context, request_items):
        """Create several resources in one DB transaction, then run the
        mechanism drivers' create_*_postcommit hooks for each.

        On postcommit failure all created resources are deleted again and
        the exception is re-raised.
        """
        objects = []
        collection = "%ss" % resource
        items = request_items[collection]
        try:
            with context.session.begin(subtransactions=True):
                obj_creator = getattr(self, '_create_%s_db' % resource)
                for item in items:
                    attrs = item[resource]
                    result, mech_context = obj_creator(context, item)
                    objects.append({'mech_context': mech_context,
                                    'result': result,
                                    'attributes': attrs})
        except Exception:
            with excutils.save_and_reraise_exception():
                # 'item' is the element that raised inside the loop.
                LOG.exception(_LE("An exception occurred while creating "
                                  "the %(resource)s:%(item)s"),
                              {'resource': resource, 'item': item})

        try:
            postcommit_op = getattr(self.mechanism_manager,
                                    'create_%s_postcommit' % resource)
            for obj in objects:
                postcommit_op(obj['mech_context'])
            return objects
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                resource_ids = [res['result']['id'] for res in objects]
                # NOTE(review): 'obj' is the loop variable left over from the
                # postcommit loop, i.e. the item whose postcommit failed.
                LOG.exception(_LE("mechanism_manager.create_%(res)s"
                                  "_postcommit failed for %(res)s: "
                                  "'%(failed_id)s'. Deleting "
                                  "%(res)ss %(resource_ids)s"),
                              {'res': resource,
                               'failed_id': obj['result']['id'],
                               'resource_ids': ', '.join(resource_ids)})
                self._delete_objects(context, resource, objects)
    def _create_network_db(self, context, network):
        """Create the network in the DB and run driver precommit hooks.

        Returns (result dict, NetworkContext) for later postcommit use.
        """
        net_data = network[attributes.NETWORK]
        tenant_id = self._get_tenant_id_for_create(context, net_data)
        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group(context, tenant_id)
            result = super(Ml2Plugin, self).create_network(context, network)
            self.extension_manager.process_create_network(session, net_data,
                                                          result)
            self._process_l3_create(context, result, net_data)
            net_data['id'] = result['id']
            self.type_manager.create_network_segments(context, net_data,
                                                      tenant_id)
            self.type_manager._extend_network_dict_provider(context, result)
            mech_context = driver_context.NetworkContext(self, context,
                                                         result)
            self.mechanism_manager.create_network_precommit(mech_context)
        return result, mech_context
    def create_network(self, context, network):
        """Create a network; roll back (delete it) if postcommit fails."""
        result, mech_context = self._create_network_db(context, network)
        try:
            self.mechanism_manager.create_network_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("mechanism_manager.create_network_postcommit "
                              "failed, deleting network '%s'"), result['id'])
                self.delete_network(context, result['id'])
        return result
def create_network_bulk(self, context, networks):
objects = self._create_bulk_ml2(attributes.NETWORK, context, networks)
return [obj['result'] for obj in objects]
    def update_network(self, context, id, network):
        """Update a network, running extension and driver hooks."""
        # Provider attributes are immutable once a network exists.
        provider._raise_if_updates_provider_attributes(network['network'])

        session = context.session
        with session.begin(subtransactions=True):
            original_network = super(Ml2Plugin, self).get_network(context, id)
            updated_network = super(Ml2Plugin, self).update_network(context,
                                                                    id,
                                                                    network)
            self.extension_manager.process_update_network(session, network,
                                                          original_network)
            self._process_l3_update(context, updated_network,
                                    network['network'])
            self.type_manager._extend_network_dict_provider(context,
                                                            updated_network)
            mech_context = driver_context.NetworkContext(
                self, context, updated_network,
                original_network=original_network)
            self.mechanism_manager.update_network_precommit(mech_context)

        # TODO(apech) - handle errors raised by update_network, potentially
        # by re-calling update_network with the previous attributes. For
        # now the error is propogated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        self.mechanism_manager.update_network_postcommit(mech_context)
        return updated_network
    def get_network(self, context, id, fields=None):
        """Fetch one network, extended with provider attributes."""
        session = context.session
        with session.begin(subtransactions=True):
            result = super(Ml2Plugin, self).get_network(context, id, None)
            self.type_manager._extend_network_dict_provider(context, result)
            return self._fields(result, fields)
    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
        """List networks with provider attributes and l3 filtering applied."""
        session = context.session
        with session.begin(subtransactions=True):
            nets = super(Ml2Plugin,
                         self).get_networks(context, filters, None, sorts,
                                            limit, marker, page_reverse)
            for net in nets:
                self.type_manager._extend_network_dict_provider(context, net)

            nets = self._filter_nets_provider(context, nets, filters)
            nets = self._filter_nets_l3(context, nets, filters)

            return [self._fields(net, fields) for net in nets]
    def _delete_ports(self, context, ports):
        """Auto-delete ports, tolerating concurrent deletion of any of them."""
        for port in ports:
            try:
                self.delete_port(context, port.id)
            except exc.PortNotFound:
                # concurrent port deletion can be performed by
                # release_dhcp_port caused by concurrent subnet_delete
                LOG.info(_LI("Port %s was deleted concurrently"), port.id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Exception auto-deleting port %s"),
                                  port.id)
    def _delete_subnets(self, context, subnets):
        """Auto-delete subnets, tolerating concurrent deletion."""
        for subnet in subnets:
            try:
                self.delete_subnet(context, subnet.id)
            except exc.SubnetNotFound:
                LOG.info(_LI("Subnet %s was deleted concurrently"),
                         subnet.id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Exception auto-deleting subnet %s"),
                                  subnet.id)
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug("Deleting network %s", id)
session = context.session
while True:
try:
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
#
# Process L3 first, since, depending on the L3 plugin, it may
# involve locking the db-access semaphore, sending RPC
# notifications, and/or calling delete_port on this plugin.
# Additionally, a rollback may not be enough to undo the
# deletion of a floating IP with certain L3 backends.
self._process_l3_delete(context, id)
# Using query().with_lockmode isn't necessary. Foreign-key
# constraints prevent deletion if concurrent creation happens.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Ports to auto-delete: %s", ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug("Tenant-owned ports exist")
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Subnets to auto-delete: %s", subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
self.type_manager.release_network_segments(session, id)
record = self._get_network(context, id)
LOG.debug("Deleting network record %s", record)
session.delete(record)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug("Committing transaction")
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
LOG.warning(_LW("A concurrent port creation has "
"occurred"))
continue
self._delete_ports(context, ports)
self._delete_subnets(context, subnets)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_network_postcommit"
" failed"))
self.notifier.network_delete(context, id)
    def _create_subnet_db(self, context, subnet):
        """Create the subnet DB record and run driver precommit hooks.

        Everything happens inside one subtransaction so that a failing
        mechanism driver precommit rolls back the subnet record as well.
        Returns (subnet dict, SubnetContext) for the postcommit phase.
        """
        session = context.session
        with session.begin(subtransactions=True):
            result = super(Ml2Plugin, self).create_subnet(context, subnet)
            self.extension_manager.process_create_subnet(session, subnet,
                                                         result)
            # Built inside the transaction so drivers see the committed-to
            # state; handed back to create_subnet() for postcommit.
            mech_context = driver_context.SubnetContext(self, context, result)
            self.mechanism_manager.create_subnet_precommit(mech_context)
        return result, mech_context
    def create_subnet(self, context, subnet):
        """Create a subnet, invoking mechanism driver postcommit hooks.

        If any driver's postcommit fails, the just-created subnet is
        deleted again and the MechanismDriverError is re-raised.
        """
        result, mech_context = self._create_subnet_db(context, subnet)
        try:
            self.mechanism_manager.create_subnet_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # Roll back the DB side before propagating the driver failure.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
                              "failed, deleting subnet '%s'"), result['id'])
                self.delete_subnet(context, result['id'])
        return result
def create_subnet_bulk(self, context, subnets):
objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
return [obj['result'] for obj in objects]
    def update_subnet(self, context, id, subnet):
        """Update a subnet, running driver pre- and postcommit hooks.

        The DB update and precommit run in one subtransaction; postcommit
        runs after it completes and may raise to the caller.
        """
        session = context.session
        with session.begin(subtransactions=True):
            # Snapshot the pre-update state so drivers can diff old vs new.
            original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
            updated_subnet = super(Ml2Plugin, self).update_subnet(
                context, id, subnet)
            self.extension_manager.process_update_subnet(session, subnet,
                                                         original_subnet)
            mech_context = driver_context.SubnetContext(
                self, context, updated_subnet, original_subnet=original_subnet)
            self.mechanism_manager.update_subnet_precommit(mech_context)
        # TODO(apech) - handle errors raised by update_subnet, potentially
        # by re-calling update_subnet with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        self.mechanism_manager.update_subnet_postcommit(mech_context)
        return updated_subnet
    def delete_subnet(self, context, id):
        """Delete a subnet, releasing auto-deletable IP allocations first.

        Loops: each pass deletes allocations that may be auto-removed
        (network-owned ports, or all allocations for SLAAC subnets), then
        releases the remaining fixed IPs via update_port() so mechanism
        drivers are invoked, and only deletes the subnet record once no
        allocations remain.  Raises SubnetInUse while tenant-owned ports
        still hold addresses on the subnet.
        """
        # REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
        # function is not used because it deallocates the subnet's addresses
        # from ports in the DB without invoking the derived class's
        # update_port(), preventing mechanism drivers from being called.
        # This approach should be revisited when the API layer is reworked
        # during icehouse.
        LOG.debug("Deleting subnet %s", id)
        session = context.session
        while True:
            # REVISIT: Serialize this operation with a semaphore to
            # prevent deadlock waiting to acquire a DB lock held by
            # another thread in the same process, leading to 'lock
            # wait timeout' errors.
            with contextlib.nested(lockutils.lock('db-access'),
                                   session.begin(subtransactions=True)):
                record = self._get_subnet(context, id)
                subnet = self._make_subnet_dict(record, None)
                qry_allocated = (session.query(models_v2.IPAllocation).
                                 filter_by(subnet_id=id).
                                 join(models_v2.Port))
                is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
                # Remove network owned ports, and delete IP allocations
                # for IPv6 addresses which were automatically generated
                # via SLAAC
                if not is_auto_addr_subnet:
                    qry_allocated = (
                        qry_allocated.filter(models_v2.Port.device_owner.
                        in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)))
                allocated = qry_allocated.all()
                # Delete all the IPAllocation that can be auto-deleted
                if allocated:
                    # NOTE(review): map() is used for its side effect here,
                    # which only works on Python 2 (consistent with
                    # contextlib.nested above); on Python 3 a lazy map would
                    # never execute the deletes -- confirm before porting.
                    map(session.delete, allocated)
                LOG.debug("Ports to auto-deallocate: %s", allocated)
                # Check if there are tenant owned ports
                tenant_port = (session.query(models_v2.IPAllocation).
                               filter_by(subnet_id=id).
                               join(models_v2.Port).
                               first())
                if tenant_port:
                    LOG.debug("Tenant-owned ports exist")
                    raise exc.SubnetInUse(subnet_id=id)
                # If allocated is empty, then all the IPAllocation were
                # correctly deleted during the previous pass.
                if not allocated:
                    mech_context = driver_context.SubnetContext(self, context,
                                                                subnet)
                    self.mechanism_manager.delete_subnet_precommit(
                        mech_context)
                    LOG.debug("Deleting subnet record")
                    session.delete(record)
                    LOG.debug("Committing transaction")
                    break
            # Outside the transaction: strip the subnet's IPs from each
            # port through update_port() so drivers see the change.
            for a in allocated:
                if a.port_id:
                    # calling update_port() for each allocation to remove the
                    # IP from the port and call the MechanismDrivers
                    data = {'port':
                            {'fixed_ips': [{'subnet_id': ip.subnet_id,
                                            'ip_address': ip.ip_address}
                                           for ip in a.ports.fixed_ips
                                           if ip.subnet_id != id]}}
                    try:
                        self.update_port(context, a.port_id, data)
                    except Exception:
                        with excutils.save_and_reraise_exception():
                            LOG.exception(_LE("Exception deleting fixed_ip "
                                              "from port %s"), a.port_id)
        try:
            self.mechanism_manager.delete_subnet_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the subnet. Ideally we'd notify the caller of
            # the fact that an error occurred.
            LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed"))
    def _create_port_db(self, context, port):
        """Create the port DB record, binding and run precommit hooks.

        All DB work (port row, security groups, binding record, allowed
        address pairs, extra DHCP options) happens in one subtransaction
        so a failing driver precommit rolls everything back.  Returns
        (port dict, PortContext) for the postcommit/bind phase.
        """
        attrs = port[attributes.PORT]
        # New ports always start DOWN; agents report them up later.
        attrs['status'] = const.PORT_STATUS_DOWN
        session = context.session
        with session.begin(subtransactions=True):
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
            result = super(Ml2Plugin, self).create_port(context, port)
            self.extension_manager.process_create_port(session, attrs, result)
            self._process_port_create_security_group(context, result, sgids)
            network = self.get_network(context, result['network_id'])
            binding = db.add_port_binding(session, result['id'])
            mech_context = driver_context.PortContext(self, context, result,
                                                      network, binding)
            self._process_port_binding(mech_context, attrs)
            result[addr_pair.ADDRESS_PAIRS] = (
                self._process_create_allowed_address_pairs(
                    context, result,
                    attrs.get(addr_pair.ADDRESS_PAIRS)))
            self._process_port_create_extra_dhcp_opts(context, result,
                                                      dhcp_opts)
            self.mechanism_manager.create_port_precommit(mech_context)
        return result, mech_context
    def create_port(self, context, port):
        """Create a port: DB record, driver postcommit, then binding.

        If a driver postcommit or the binding attempt fails, the new port
        is deleted again and the MechanismDriverError is re-raised.
        Returns the (possibly re-bound) port dict.
        """
        attrs = port['port']
        result, mech_context = self._create_port_db(context, port)
        new_host_port = self._get_host_port_if_changed(mech_context, attrs)
        self._notify_l3_agent_new_port(context, new_host_port)
        try:
            self.mechanism_manager.create_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # Clean up the DB record before re-raising the driver failure.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("mechanism_manager.create_port_postcommit "
                              "failed, deleting port '%s'"), result['id'])
                self.delete_port(context, result['id'])
        # REVISIT(rkukura): Is there any point in calling this before
        # a binding has been successfully established?
        self.notify_security_groups_member_updated(context, result)
        try:
            bound_context = self._bind_port_if_needed(mech_context)
        except ml2_exc.MechanismDriverError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("_bind_port_if_needed "
                              "failed, deleting port '%s'"), result['id'])
                self.delete_port(context, result['id'])
        return bound_context._port
    def create_port_bulk(self, context, ports):
        """Create multiple ports, then bind each one.

        On any binding failure, every port created by this call is
        deleted and the MechanismDriverError is re-raised.
        """
        objects = self._create_bulk_ml2(attributes.PORT, context, ports)
        for obj in objects:
            # REVISIT(rkukura): Is there any point in calling this before
            # a binding has been successfully established?
            # TODO(banix): Use a single notification for all objects
            self.notify_security_groups_member_updated(context,
                                                       obj['result'])
            attrs = obj['attributes']
            if attrs and attrs.get(portbindings.HOST_ID):
                new_host_port = self._get_host_port_if_changed(
                    obj['mech_context'], attrs)
                self._notify_l3_agent_new_port(context, new_host_port)
        try:
            for obj in objects:
                obj['bound_context'] = self._bind_port_if_needed(
                    obj['mech_context'])
            return [obj['bound_context']._port for obj in objects]
        except ml2_exc.MechanismDriverError:
            # All-or-nothing: undo the whole bulk request on bind failure.
            with excutils.save_and_reraise_exception():
                resource_ids = [res['result']['id'] for res in objects]
                LOG.error(_LE("_bind_port_if_needed failed. "
                              "Deleting all ports from create bulk '%s'"),
                          resource_ids)
                self._delete_objects(context, 'port', objects)
    def update_port(self, context, id, port):
        """Update a port under the process-wide DB lock.

        Performs the DB update plus driver precommit inside one locked
        transaction, then runs postcommit, sends agent notifications as
        needed, and (re)binds the port.  Returns the bound port dict.
        Raises PortNotFound if the port disappeared concurrently.
        """
        attrs = port['port']
        # Tracks whether agents must be notified about this update.
        need_port_update_notify = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = db.get_locked_port_and_binding(session, id)
            if not port_db:
                raise exc.PortNotFound(port_id=id)
            original_port = self._make_port_dict(port_db)
            updated_port = super(Ml2Plugin, self).update_port(context, id,
                                                              port)
            self.extension_manager.process_update_port(session, attrs,
                                                       original_port)
            if addr_pair.ADDRESS_PAIRS in port['port']:
                need_port_update_notify |= (
                    self.update_address_pairs_on_port(context, id, port,
                                                      original_port,
                                                      updated_port))
            need_port_update_notify |= self.update_security_group_on_port(
                context, id, port, original_port, updated_port)
            network = self.get_network(context, original_port['network_id'])
            need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
                context, id, port, updated_port)
            mech_context = driver_context.PortContext(
                self, context, updated_port, network, binding,
                original_port=original_port)
            new_host_port = self._get_host_port_if_changed(mech_context, attrs)
            need_port_update_notify |= self._process_port_binding(
                mech_context, attrs)
            self.mechanism_manager.update_port_precommit(mech_context)
        # Notification must be sent after the above transaction is complete
        self._notify_l3_agent_new_port(context, new_host_port)
        # TODO(apech) - handle errors raised by update_port, potentially
        # by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
        # either undo/retry the operation or delete the resource.
        self.mechanism_manager.update_port_postcommit(mech_context)
        need_port_update_notify |= self.is_security_group_member_updated(
            context, original_port, updated_port)
        if original_port['admin_state_up'] != updated_port['admin_state_up']:
            need_port_update_notify = True
        bound_port = self._bind_port_if_needed(
            mech_context,
            allow_notify=True,
            need_notify=need_port_update_notify)
        return bound_port._port
    def _process_dvr_port_binding(self, mech_context, context, attrs):
        """Reset a DVR port binding and record the requested host/router.

        If the binding was previously bound, it is cleared back to UNBOUND
        first so a fresh bind attempt can run.
        """
        binding = mech_context._binding
        port = mech_context.current
        if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
            binding.vif_details = ''
            binding.vif_type = portbindings.VIF_TYPE_UNBOUND
            binding.driver = None
            binding.segment = None
            binding.host = ''
        # NOTE: the port dict is refreshed from the *cleared* binding before
        # host/router_id are reassigned below -- order is deliberate here.
        self._update_port_dict_binding(port, binding)
        # attrs may be None/falsy, in which case host/router_id get that
        # falsy value rather than a lookup result.
        binding.host = attrs and attrs.get(portbindings.HOST_ID)
        binding.router_id = attrs and attrs.get('device_id')
    def update_dvr_port_binding(self, context, id, port):
        """Create or refresh the DVR binding of port `id` on a given host.

        A (re)bind is attempted only when there is no binding for the host
        yet, the previous attempt failed, or the owning router changed.
        Logs and returns early when no host is supplied or the port was
        deleted concurrently.
        """
        attrs = port['port']
        host = attrs and attrs.get(portbindings.HOST_ID)
        host_set = attributes.is_attr_set(host)
        if not host_set:
            LOG.error(_LE("No Host supplied to bind DVR Port %s"), id)
            return
        session = context.session
        binding = db.get_dvr_port_binding_by_host(session, id, host)
        device_id = attrs and attrs.get('device_id')
        router_id = binding and binding.get('router_id')
        update_required = (not binding or
            binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
            router_id != device_id)
        if update_required:
            with session.begin(subtransactions=True):
                try:
                    orig_port = super(Ml2Plugin, self).get_port(context, id)
                except exc.PortNotFound:
                    LOG.debug("DVR Port %s has been deleted concurrently", id)
                    return
                if not binding:
                    binding = db.ensure_dvr_port_binding(
                        session, id, host, router_id=device_id)
                network = self.get_network(context, orig_port['network_id'])
                mech_context = driver_context.DvrPortContext(self,
                    context, orig_port, network,
                    binding, original_port=orig_port)
                self._process_dvr_port_binding(mech_context, context, attrs)
            # bind_port runs outside the transaction; results are committed
            # separately so concurrent binds can be detected.
            self.mechanism_manager.bind_port(mech_context)
            # Now try to commit result of attempting to bind the port.
            self._commit_dvr_port_binding(mech_context._plugin_context,
                                          orig_port['id'],
                                          host,
                                          mech_context)
    def _commit_dvr_port_binding(self, plugin_context,
                                 port_id, host,
                                 mech_context):
        """Persist a DVR bind attempt's results if still applicable.

        Re-reads the binding under the process lock and commits the new
        vif_type/details/driver/segment only when no concurrent thread
        already bound the port and the binding inputs are unchanged.
        """
        session = plugin_context.session
        new_binding = mech_context._binding
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            # Get the current port state and build a new PortContext
            # reflecting this state as original state for subsequent
            # mechanism driver update_port_*commit() calls.
            cur_binding = db.get_dvr_port_binding_by_host(session,
                                                          port_id,
                                                          host)
            if not cur_binding:
                LOG.info(_LI("Binding info for port %s was not found, "
                             "it might have been deleted already."),
                         port_id)
                return
            # Commit our binding results only if port has not been
            # successfully bound concurrently by another thread or
            # process and no binding inputs have been changed.
            commit = ((cur_binding.vif_type in
                       [portbindings.VIF_TYPE_UNBOUND,
                        portbindings.VIF_TYPE_BINDING_FAILED]) and
                      new_binding.host == cur_binding.host and
                      new_binding.vnic_type == cur_binding.vnic_type and
                      new_binding.profile == cur_binding.profile)
            if commit:
                # Update the port's binding state with our binding
                # results.
                cur_binding.vif_type = new_binding.vif_type
                cur_binding.vif_details = new_binding.vif_details
                cur_binding.driver = new_binding.driver
                cur_binding.segment = new_binding.segment
    def delete_port(self, context, id, l3_port_check=True):
        """Delete a port, its bindings, and related L3 state.

        Precommit hooks and the DB delete run under the process lock; L3
        notifications and driver postcommit hooks run after the
        transaction.  DVR interface ports may carry one binding per host
        and therefore produce multiple mech_contexts.
        """
        LOG.debug("Deleting port %s", id)
        removed_routers = []
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        # NOTE(review): called even when l3plugin is None -- presumably
        # is_extension_supported tolerates that; confirm.
        is_dvr_enabled = utils.is_extension_supported(
            l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
        if l3plugin and l3_port_check:
            l3plugin.prevent_l3_port_deletion(context, id)
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port_db, binding = db.get_locked_port_and_binding(session, id)
            if not port_db:
                # the port existed when l3plugin.prevent_l3_port_deletion
                # was called but now is already gone
                LOG.debug("The port '%s' was deleted", id)
                return
            port = self._make_port_dict(port_db)
            network = self.get_network(context, port['network_id'])
            bound_mech_contexts = []
            device_owner = port['device_owner']
            if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
                # One precommit per host-specific binding.
                bindings = db.get_dvr_port_bindings(context.session, id)
                for bind in bindings:
                    mech_context = driver_context.DvrPortContext(
                        self, context, port, network, bind)
                    self.mechanism_manager.delete_port_precommit(mech_context)
                    bound_mech_contexts.append(mech_context)
            else:
                mech_context = driver_context.PortContext(self, context, port,
                                                          network, binding)
                if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
                    removed_routers = l3plugin.dvr_deletens_if_no_port(
                        context, id)
                self.mechanism_manager.delete_port_precommit(mech_context)
                bound_mech_contexts.append(mech_context)
            if l3plugin:
                router_ids = l3plugin.disassociate_floatingips(
                    context, id, do_notify=False)
            LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s",
                      {"port_id": id, "owner": device_owner})
            super(Ml2Plugin, self).delete_port(context, id)
        # now that we've left db transaction, we are safe to notify
        if l3plugin:
            if is_dvr_enabled:
                l3plugin.dvr_vmarp_table_update(context, port, "del")
            l3plugin.notify_routers_updated(context, router_ids)
            for router in removed_routers:
                try:
                    l3plugin.remove_router_from_l3_agent(
                        context, router['agent_id'], router['router_id'])
                except l3agentscheduler.RouterNotHostedByL3Agent:
                    # router may have been removed by another process
                    LOG.debug("Router %(id)s not hosted by L3 agent %(agent)s",
                              {'id': router['router_id'],
                               'agent': router['agent_id']})
        try:
            # Note that DVR Interface ports will have bindings on
            # multiple hosts, and so will have multiple mech_contexts,
            # while other ports typically have just one.
            for mech_context in bound_mech_contexts:
                self.mechanism_manager.delete_port_postcommit(mech_context)
        except ml2_exc.MechanismDriverError:
            # TODO(apech) - One or more mechanism driver failed to
            # delete the port. Ideally we'd notify the caller of the
            # fact that an error occurred.
            LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
                          " port %s"), id)
        self.notify_security_groups_member_updated(context, port)
    def get_bound_port_context(self, plugin_context, port_id, host=None):
        """Return a bound PortContext for a (possibly truncated) port id.

        Looks the port up by id prefix, picks the DVR binding for `host`
        when the port is a DVR interface, and returns the result of
        _bind_port_if_needed().  Returns None when the port, a unique
        match, or the binding cannot be found.
        """
        session = plugin_context.session
        with session.begin(subtransactions=True):
            try:
                # Prefix match: agents may report truncated port ids.
                port_db = (session.query(models_v2.Port).
                           enable_eagerloads(False).
                           filter(models_v2.Port.id.startswith(port_id)).
                           one())
            except sa_exc.NoResultFound:
                LOG.debug("No ports have port_id starting with %s",
                          port_id)
                return
            except exc.MultipleResultsFound:
                LOG.error(_LE("Multiple ports have port_id starting with %s"),
                          port_id)
                return
            port = self._make_port_dict(port_db)
            network = self.get_network(plugin_context, port['network_id'])
            if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                binding = db.get_dvr_port_binding_by_host(
                    session, port['id'], host)
                if not binding:
                    LOG.error(_LE("Binding info for DVR port %s not found"),
                              port_id)
                    return None
                port_context = driver_context.DvrPortContext(
                    self, plugin_context, port, network, binding)
            else:
                # since eager loads are disabled in port_db query
                # related attribute port_binding could disappear in
                # concurrent port deletion.
                # It's not an error condition.
                binding = port_db.port_binding
                if not binding:
                    LOG.info(_LI("Binding info for port %s was not found, "
                                 "it might have been deleted already."),
                             port_id)
                    return
                port_context = driver_context.PortContext(
                    self, plugin_context, port, network, binding)
        return self._bind_port_if_needed(port_context)
    def update_port_status(self, context, port_id, status, host=None):
        """Update a port's operational status as reported by an agent.

        For regular ports the status change and driver precommit happen
        in one locked transaction.  For DVR interface ports the per-host
        binding status is updated first, then the port's aggregate status
        is recomputed in a second transaction.

        Returns the port's full (non-truncated) uuid if the port exists,
        otherwise None.
        """
        updated = False
        session = context.session
        # REVISIT: Serialize this operation with a semaphore to
        # prevent deadlock waiting to acquire a DB lock held by
        # another thread in the same process, leading to 'lock wait
        # timeout' errors.
        with contextlib.nested(lockutils.lock('db-access'),
                               session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_LW("Port %(port)s updated up by agent not found"),
                            {'port': port_id})
                return None
            if (port.status != status and
                port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
                original_port = self._make_port_dict(port)
                port.status = status
                updated_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                mech_context = driver_context.PortContext(
                    self, context, updated_port, network, port.port_binding,
                    original_port=original_port)
                self.mechanism_manager.update_port_precommit(mech_context)
                updated = True
            elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
                # NOTE: `binding` assigned here is also relied on by the
                # DVR-only code paths below this transaction.
                binding = db.get_dvr_port_binding_by_host(
                    session, port['id'], host)
                if not binding:
                    return
                binding['status'] = status
                binding.update(binding)
                updated = True
        if (updated and
            port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
            # Second pass for DVR: recompute the aggregate port status
            # from all of its per-host bindings.
            with contextlib.nested(lockutils.lock('db-access'),
                                   session.begin(subtransactions=True)):
                port = db.get_port(session, port_id)
                if not port:
                    LOG.warning(_LW("Port %s not found during update"),
                                port_id)
                    return
                original_port = self._make_port_dict(port)
                network = self.get_network(context,
                                           original_port['network_id'])
                port.status = db.generate_dvr_port_status(session, port['id'])
                updated_port = self._make_port_dict(port)
                mech_context = (driver_context.DvrPortContext(
                    self, context, updated_port, network,
                    binding, original_port=original_port))
                self.mechanism_manager.update_port_precommit(mech_context)
        if updated:
            self.mechanism_manager.update_port_postcommit(mech_context)
        if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
            db.delete_dvr_port_binding_if_stale(session, binding)
        return port['id']
def port_bound_to_host(self, context, port_id, host):
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(port_id)
return (port_host == host)
def get_ports_from_devices(self, devices):
port_ids_to_devices = dict((self._device_to_port_id(device), device)
for device in devices)
port_ids = port_ids_to_devices.keys()
ports = db.get_ports_and_sgs(port_ids)
for port in ports:
# map back to original requested id
port_id = next((port_id for port_id in port_ids
if port['id'].startswith(port_id)), None)
port['device'] = port_ids_to_devices.get(port_id)
return ports
def _device_to_port_id(self, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
if device.startswith(const.TAP_DEVICE_PREFIX):
return device[len(const.TAP_DEVICE_PREFIX):]
else:
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC, then remove the 'else' clause
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(device)
if port:
return port.id
return device
| |
#!/usr/bin/env python
"""Unittest for GRRafana HTTP server."""
import copy
from unittest import mock
from absl import app
from absl.testing import absltest
from werkzeug import test as werkzeug_test
from google.protobuf import timestamp_pb2
from grr_response_server import data_store
from grr_response_server import fleetspeak_connector
from grr_response_server.bin import grrafana
from grr_response_server.fleet_utils import FleetStats
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2
from fleetspeak.src.server.proto.fleetspeak_server import resource_pb2
# Fake client ids in the canonical GRR "C.<16 hex digits>" form.
_TEST_CLIENT_ID_1 = "C.0000000000000001"
_TEST_CLIENT_ID_2 = "C.0000000000000002"
# Grafana-style ISO-8601 UTC range boundaries reused by the query fixtures.
_START_RANGE_TIMESTAMP = "2020-08-13T14:20:17.158Z"
_END_RANGE_TIMESTAMP = "2020-08-18T17:15:58.761Z"
_TEST_CLIENT_RESOURCE_USAGE_RECORD_1 = {
"scope": "system",
"pid": 2714460,
"process_start_time": {
"seconds": 1597327815,
"nanos": 817468715
},
"client_timestamp": {
"seconds": 1597328416,
"nanos": 821525280
},
"server_timestamp": {
"seconds": 1597328417,
"nanos": 823124057
},
"mean_user_cpu_rate": 0.31883034110069275,
"max_user_cpu_rate": 4.999776840209961,
"mean_system_cpu_rate": 0.31883034110069275,
"max_system_cpu_rate": 4.999776840209961,
"mean_resident_memory_mib": 20,
"max_resident_memory_mib": 20
}
_TEST_CLIENT_RESOURCE_USAGE_RECORD_2 = {
"scope": "GRR",
"pid": 2714474,
"process_start_time": {
"seconds": 1597327815,
"nanos": 818657389
},
"client_timestamp": {
"seconds": 1597328418,
"nanos": 402023428
},
"server_timestamp": {
"seconds": 1597328419,
"nanos": 403123025
},
"mean_user_cpu_rate": 0.492735356092453,
"max_user_cpu_rate": 4.999615669250488,
"mean_system_cpu_rate": 0.07246342301368713,
"max_system_cpu_rate": 0.3333326578140259,
"mean_resident_memory_mib": 59,
"max_resident_memory_mib": 59
}
_TEST_CLIENT_BREAKDOWN_STATS = FleetStats(
day_buckets=grrafana._FLEET_BREAKDOWN_DAY_BUCKETS,
label_counts={
1: {
"foo-label": {
"bar-os": 3,
"baz-os": 4
},
"bar-label": {
"bar-os": 5,
"foo-os": 1
}
},
7: {
"foo-label": {
"bar-os": 6,
"baz-os": 5
},
"bar-label": {
"bar-os": 5,
"foo-os": 2
}
},
14: {
"foo-label": {
"bar-os": 6,
"baz-os": 5
},
"bar-label": {
"bar-os": 5,
"foo-os": 2
},
"baz-label": {
"bar-os": 1
}
},
30: {
"foo-label": {
"bar-os": 6,
"baz-os": 5
},
"bar-label": {
"bar-os": 5,
"foo-os": 2
},
"baz-label": {
"bar-os": 3,
"foo-os": 1
}
}
},
total_counts={
1: {
"bar-os": 8,
"baz-os": 4,
"foo-os": 1
},
7: {
"bar-os": 11,
"baz-os": 5,
"foo-os": 2
},
14: {
"bar-os": 12,
"baz-os": 5,
"foo-os": 2
},
30: {
"bar-os": 14,
"baz-os": 5,
"foo-os": 3
}
})
_TEST_VALID_RUD_QUERY = {
"app": "dashboard",
"requestId": "Q119",
"timezone": "browser",
"panelId": 2,
"dashboardId": 77,
"range": {
"from": _START_RANGE_TIMESTAMP,
"to": _END_RANGE_TIMESTAMP,
"raw": {
"from": _START_RANGE_TIMESTAMP,
"to": _END_RANGE_TIMESTAMP
}
},
"timeInfo": "",
"interval": "10m",
"intervalMs": 600000,
"targets": [{
"data": None,
"target": "Max User CPU Rate",
"refId": "A",
"hide": False,
"type": "timeseries"
}, {
"data": None,
"target": "Mean System CPU Rate",
"refId": "A",
"hide": False,
"type": "timeseries"
}],
"maxDataPoints": 800,
"scopedVars": {
"ClientID": {
"text": _TEST_CLIENT_ID_1,
"value": _TEST_CLIENT_ID_1
},
"__interval": {
"text": "10m",
"value": "10m"
},
"__interval_ms": {
"text": "600000",
"value": 600000
}
},
"startTime": 1598782453496,
"rangeRaw": {
"from": _START_RANGE_TIMESTAMP,
"to": _END_RANGE_TIMESTAMP
},
"adhocFilters": []
}
_TEST_VALID_CLIENT_STATS_QUERY = {
"app": "dashboard",
"requestId": "Q1",
"timezone": "browser",
"panelId": 12345,
"dashboardId": 1,
"range": {
"from": "2020-10-21T04:29:36.806Z",
"to": "2020-10-21T10:29:36.806Z",
"raw": {
"from": "now-6h",
"to": "now"
}
},
"timeInfo": "",
"interval": "15s",
"intervalMs": 15000,
"targets": [{
"data": "",
"refId": "A",
"target": "OS Platform Breakdown - 7 Day Active",
"type": "timeseries",
"datasource": "JSON"
}],
"maxDataPoints": 1700,
"scopedVars": {
"__interval": {
"text": "15s",
"value": "15s"
},
"__interval_ms": {
"text": "15000",
"value": 15000
}
},
"startTime": 1603276176806,
"rangeRaw": {
"from": "now-6h",
"to": "now"
},
"adhocFilters": [],
"endTime": 1603276176858
}
# Copy of the valid query whose first target requests a metric GRRafana
# does not export; used to exercise the error path.
_TEST_INVALID_TARGET_QUERY = copy.deepcopy(_TEST_VALID_RUD_QUERY)
_TEST_INVALID_TARGET_QUERY["targets"][0]["target"] = "unavailable_metric"
def _MockConnReturningRecords(client_ruds):
  """Builds a mock Fleetspeak admin connection serving usage records.

  Args:
    client_ruds: dicts holding the fields of a ClientResourceUsageRecord.

  Returns:
    A MagicMock whose FetchClientResourceUsageRecords call returns the
    given records wrapped in the admin proto response.
  """
  records = [
      resource_pb2.ClientResourceUsageRecord(
          scope=rud["scope"],
          pid=rud["pid"],
          process_start_time=rud["process_start_time"],
          client_timestamp=rud["client_timestamp"],
          server_timestamp=rud["server_timestamp"],
          mean_user_cpu_rate=rud["mean_user_cpu_rate"],
          max_user_cpu_rate=rud["max_user_cpu_rate"],
          mean_system_cpu_rate=rud["mean_system_cpu_rate"],
          max_system_cpu_rate=rud["max_system_cpu_rate"],
          mean_resident_memory_mib=rud["mean_resident_memory_mib"],
          max_resident_memory_mib=rud["max_resident_memory_mib"])
      for rud in client_ruds
  ]
  conn = mock.MagicMock()
  conn.outgoing.FetchClientResourceUsageRecords.return_value = (
      admin_pb2.FetchClientResourceUsageRecordsResponse(records=records))
  return conn
def _MockDatastoreReturningPlatformFleetStats(client_fleet_stats):
client_fleet_stats.Validate()
rel_db = mock.MagicMock()
rel_db.CountClientPlatformsByLabel.return_value = client_fleet_stats
return rel_db
class GrrafanaTest(absltest.TestCase):
  """Test the GRRafana HTTP server."""
  def setUp(self):
    """Creates a werkzeug test client around a fresh Grrafana app."""
    super().setUp()
    self.client = werkzeug_test.Client(
        application=grrafana.Grrafana(), response_wrapper=grrafana.JSONResponse)
  def testRoot(self):
    """The health-check root endpoint responds with 200."""
    response = self.client.get("/")
    self.assertEqual(200, response.status_code)
  def testSearchMetrics(self):
    """/search lists all exported metric names, in order."""
    response = self.client.post(
        "/search", json={
            "type": "timeseries",
            "target": ""
        })
    self.assertEqual(200, response.status_code)
    # Fixed per-client resource metrics come first...
    expected_res = [
        "Mean User CPU Rate", "Max User CPU Rate", "Mean System CPU Rate",
        "Max System CPU Rate", "Mean Resident Memory MB",
        "Max Resident Memory MB"
    ]
    # ...followed by one fleet-breakdown metric per n-day activity bucket.
    expected_res.extend([
        f"OS Platform Breakdown - {n_days} Day Active"
        for n_days in grrafana._FLEET_BREAKDOWN_DAY_BUCKETS
    ])
    expected_res.extend([
        f"OS Release Version Breakdown - {n_days} Day Active"
        for n_days in grrafana._FLEET_BREAKDOWN_DAY_BUCKETS
    ])
    expected_res.extend([
        f"Client Version Strings - {n_days} Day Active"
        for n_days in grrafana._FLEET_BREAKDOWN_DAY_BUCKETS
    ])
    self.assertListEqual(response.json, expected_res)
  def testClientResourceUsageMetricQuery(self):
    """/query returns one datapoint series per requested RUD target."""
    conn = _MockConnReturningRecords([
        _TEST_CLIENT_RESOURCE_USAGE_RECORD_1,
        _TEST_CLIENT_RESOURCE_USAGE_RECORD_2
    ])
    with mock.patch.object(fleetspeak_connector, "CONN", conn):
      valid_response = self.client.post("/query", json=_TEST_VALID_RUD_QUERY)
      self.assertEqual(200, valid_response.status_code)
      # Timestamps are the records' server_timestamps in milliseconds.
      self.assertEqual(valid_response.json, [{
          "target":
              "Max User CPU Rate",
          "datapoints": [[4.999776840209961, 1597328417823],
                         [4.999615669250488, 1597328419403]]
      }, {
          "target":
              "Mean System CPU Rate",
          "datapoints": [[0.31883034110069275, 1597328417823],
                         [0.07246342301368713, 1597328419403]]
      }])
  def testQueryInvalidRequest(self):
    """Querying an unknown metric raises KeyError from the handler."""
    conn = _MockConnReturningRecords([
        _TEST_CLIENT_RESOURCE_USAGE_RECORD_1,
        _TEST_CLIENT_RESOURCE_USAGE_RECORD_2
    ])
    with mock.patch.object(fleetspeak_connector, "CONN", conn):
      with self.assertRaises(KeyError):
        self.client.post("/query", json=_TEST_INVALID_TARGET_QUERY)
  def testClientsStatisticsMetric(self):
    """Fleet-breakdown metrics are returned as a Grafana table."""
    rel_db = _MockDatastoreReturningPlatformFleetStats(
        _TEST_CLIENT_BREAKDOWN_STATS)
    with mock.patch.object(data_store, "REL_DB", rel_db):
      valid_response = self.client.post(
          "/query", json=_TEST_VALID_CLIENT_STATS_QUERY)
      self.assertEqual(200, valid_response.status_code)
      # Totals for the 7-day bucket of _TEST_CLIENT_BREAKDOWN_STATS.
      expected_res = [{
          "columns": [{
              "text": "Label",
              "type": "string"
          }, {
              "text": "Value",
              "type": "number"
          }],
          "rows": [["bar-os", 11], ["baz-os", 5], ["foo-os", 2]],
          "type": "table"
      }]
      self.assertEqual(valid_response.json, expected_res)
class TimeToProtoTimestampTest(absltest.TestCase):
  """Tests the conversion between Grafana and proto timestamps."""
  def testTimeToProtoTimestamp(self):
    """ISO-8601 strings convert to epoch seconds plus millisecond nanos."""
    self.assertEqual(
        grrafana.TimeToProtoTimestamp(_START_RANGE_TIMESTAMP),
        timestamp_pb2.Timestamp(seconds=1597328417, nanos=(158 * 1000000)))
    self.assertEqual(
        grrafana.TimeToProtoTimestamp(_END_RANGE_TIMESTAMP),
        timestamp_pb2.Timestamp(seconds=1597770958, nanos=(761 * 1000000)))
def main(argv):
  """Entry point: delegates to absltest so absl flags are handled."""
  absltest.main(argv)
if __name__ == "__main__":
app.run(main)
| |
from tierpsy.gui.TrackerViewerAux import TrackerViewerAuxGUI
from tierpsy.helper.misc import remove_ext
from collections import OrderedDict
import tables
import pandas as pd
from PyQt5.QtWidgets import QDialog, QApplication, QPushButton, QComboBox, QVBoxLayout, QHBoxLayout, QFileDialog
from PyQt5.QtCore import Qt
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
#plt.style.use(['default', 'fast'])
#plt.style.use(['ggplot', 'fast'])
#plt.style.use(['fivethirtyeight', 'fast'])
plt.style.use(['seaborn', 'fast'])
class PlotFeatures(QDialog):
    def __init__(self,
                 features_file = '',
                 timeseries_data = None,
                 traj_worm_index_grouped = None,
                 time_units = None,
                 xy_units = None,
                 fps = None,
                 parent = None):
        """Dialog for plotting per-worm feature time series and histograms.

        Parameters
        ----------
        features_file : str
            Path of the features file; its basename seeds output filenames.
        timeseries_data : pandas.DataFrame
            Per-frame feature values, indexed like traj_worm_index_grouped.
        traj_worm_index_grouped : pandas groupby object
            Trajectory rows grouped by worm index -- TODO confirm grouping key.
        time_units, xy_units : str
            Unit labels used on the plot axes.
        fps : float
            Frames per second; used to convert timestamps to time.
        """
        super().__init__(parent)
        # Maps combobox labels to the bound plotting method for each mode.
        self.plot_funcs = OrderedDict ([
            ('Single Trajectory, Time Series', self._plot_single_timeseries),
            ('All Trajectories, Time Series', self._plot_all_timeseries),
            ('Single Trajectory, Histogram', self._plot_single_histogram),
            ('All Trajectories, Histogram', self._plot_all_histogram)
            ])
        # Last plotted worm/feature; plot() falls back to these.
        self.worm_index = None
        self.feature = None
        # Bin width used by the histogram plots.
        self.ts_bin = 5
        self.timeseries_data = timeseries_data
        self.traj_worm_index_grouped = traj_worm_index_grouped
        self.time_units = time_units
        self.xy_units = xy_units
        self.fps = fps
        self.root_file = remove_ext(features_file)
        # Data behind the current plot; written out by save_csv().
        self.df2save = pd.DataFrame([])
        self.save_postfix = ''
        self.button_save_csv = QPushButton('Write to csv')
        self.button_save_fig = QPushButton('Save Figure')
        self.combobox_plot_types = QComboBox()
        self.combobox_plot_types.addItems(self.plot_funcs.keys())
        self.combobox_plot_types.currentIndexChanged.connect(lambda x : self.plot())
        self.button_save_csv.clicked.connect(self.save_csv)
        self.button_save_fig.clicked.connect(self.save_fig)
        # a figure instance to plot on
        self.figure = Figure(figsize=(6, 3))
        # this is the Canvas Widget that displays the `figure`
        # it takes the `figure` instance as a parameter to __init__
        self.canvas = FigureCanvas(self.figure)
        self._ax = self.canvas.figure.subplots()
        # this is the Navigation widget
        # it takes the Canvas widget and a parent
        self.toolbar = NavigationToolbar(self.canvas, self)
        # set the layout
        layout = QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        layout.addWidget(self.combobox_plot_types)
        layout_menu = QHBoxLayout()
        layout_menu.addWidget(self.button_save_csv)
        layout_menu.addWidget(self.button_save_fig)
        layout.addLayout(layout_menu)
        self.setLayout(layout)
def _get_save_name(self, ext):
fullname = '{}_{}{}'.format(self.root_file, self.save_postfix ,ext)
dialog = QFileDialog()
dialog.selectFile(fullname)
dialog.setOptions(QFileDialog.DontUseNativeDialog)
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setAcceptMode(QFileDialog.AcceptSave)
dialog.setNameFilters(['*' + ext])
ret = dialog.exec();
if (ret == QDialog.Accepted):
fullname = dialog.selectedFiles()[0]
return fullname
def save_csv(self):
fullname = self._get_save_name('.csv')
self.df2save.to_csv(fullname, index=False)
def save_fig(self):
fullname = self._get_save_name('.pdf')
self.figure.savefig(fullname)
def plot(self, worm_index = None, feature = None):
if worm_index is None:
worm_index = self.worm_index
else:
self.worm_index = worm_index
if feature is None:
feature = self.feature
else:
self.feature = feature
key = self.combobox_plot_types.currentText()
func = self.plot_funcs[key]
if 'All' in key:
func(feature)
else:
func(worm_index, feature)
def feature_label(self, feature):
lab = feature.replace('_' , ' ').title()
return lab
def _plot_single_timeseries(self, worm_index, feature):
worm_data = self.traj_worm_index_grouped.get_group(worm_index)
feat_val = self.timeseries_data.loc[worm_data.index]
self._ax.clear()
self._ax.set_xlabel('Time [{}]'.format(self.time_units))
self._ax.set_ylabel(self.feature_label(feature))
self._ax.set_title('W: {}'.format(worm_index))
tt = feat_val['timestamp']/self.fps
self._ax.plot(tt, feat_val[feature])
self.figure.tight_layout()
self._ax.figure.canvas.draw()
self.df2save = pd.DataFrame({'time':tt, feature:feat_val[feature]})
self.save_postfix = 'TS_W{}_{}'.format(worm_index, feature)
def _plot_all_timeseries(self, feature):
self._ax.clear()
self._ax.set_xlabel('Time [{}]'.format(self.time_units))
self._ax.set_ylabel(self.feature_label(feature))
self._ax.set_title('All Trajectories')
self.timeseries_data['timestamp_s'] = self.timeseries_data['timestamp']/self.fps
#self._ax.plot(feat_val['timestamp'], feat_val[feature])
for _, worm_data in self.traj_worm_index_grouped:
feat_val = self.timeseries_data.loc[worm_data.index]
self._ax.plot(feat_val['timestamp_s'], feat_val[feature], alpha=0.4)
self.timeseries_data['timestamp_binned'] = round(self.timeseries_data['timestamp_s']/self.ts_bin)
agg_data = self.timeseries_data[['timestamp_binned', feature]].groupby('timestamp_binned').agg('median')[feature]
xx = agg_data.index*self.ts_bin
yy = agg_data.values
self._ax.plot(xx, yy, '-', lw=2, color='black')
self.figure.tight_layout()
self._ax.figure.canvas.draw()
self.df2save = pd.DataFrame({'time_bin':xx, 'median_' + feature : yy })
self.save_postfix = 'TS_ALL_{}'.format(feature)
def _plot_single_histogram(self, worm_index, feature):
worm_data = self.traj_worm_index_grouped.get_group(worm_index)
feat_val = self.timeseries_data.loc[worm_data.index]
self._ax.clear()
self._ax.set_xlabel(self.feature_label(feature))
self._ax.set_ylabel('Counts')
self._ax.set_title('W: {}'.format(worm_index))
counts, edges, _ = self._ax.hist(feat_val[feature].dropna())
bins = edges[:-1] + (edges[1] - edges[0])/2
self.figure.tight_layout()
self._ax.figure.canvas.draw()
self.df2save = pd.DataFrame({feature + '_bin': bins, 'counts': counts })
self.save_postfix = 'HIST_W{}_{}'.format(worm_index, feature)
def _plot_all_histogram(self, feature):
self._ax.clear()
self._ax.set_xlabel(self.feature_label(feature))
self._ax.set_ylabel('Counts')
self._ax.set_title('All Trajectories')
counts, edges, _ = self._ax.hist(self.timeseries_data[feature].dropna(), 100)
bins = edges[:-1] + (edges[1] - edges[0])/2
self.figure.tight_layout()
self._ax.figure.canvas.draw()
self.df2save = pd.DataFrame({feature + '_bin': bins, 'counts': counts })
self.save_postfix = 'HIST_ALL_{}'.format(feature)
# Manual test entry point: loads an example masked video and opens the
# feature-plotting dialog showing worm 1's 'length' time series.
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # FeatureReaderBase is defined elsewhere in this module — presumably it
    # exposes skeletons_file/timeseries_data etc. after updateVideoFile; the
    # hard-coded path below is a developer-local fixture.
    main = FeatureReaderBase(ui='')
    #skel_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709_featuresN.hdf5'
    mask_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709.hdf5'
    main.updateVideoFile(mask_file)
    plotter = PlotFeatures(main.skeletons_file,
                          main.timeseries_data,
                          main.traj_worm_index_grouped,
                          main.time_units,
                          main.xy_units,
                          main.fps)
    plotter.show()
    plotter.plot(1, 'length')
    # Run the dialog's event loop and propagate its exit code.
    sys.exit(plotter.exec_())
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import socket
import sys
import netaddr
from oslo.config import cfg
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils as commonutils
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)
# Configuration options registered by the DHCP agent.
OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings with this file')),
    cfg.ListOpt('dnsmasq_dns_servers',
                help=_('Comma-separated list of the DNS servers which will be '
                       'used as forwarders.'),
                deprecated_name='dnsmasq_dns_server'),
    cfg.BoolOpt('dhcp_delete_namespaces', default=False,
                help=_("Delete namespace after removing a dhcp server.")),
    cfg.IntOpt(
        'dnsmasq_lease_max',
        default=(2 ** 24),
        help=_('Limit number of leases to prevent a denial-of-service.')),
]
# IP protocol version numbers.
IPV4 = 4
IPV6 = 6
# Transport protocol names used in the Dnsmasq.PORTS map.
UDP = 'udp'
TCP = 'tcp'
# Well-known ports the DHCP/DNS server listens on.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local metadata service address (169.254.169.254/16).
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code used for static routes on Windows 2003 clients.
WIN2k3_STATIC_DNS = 249
# Prefix for the per-network dhcp namespace names.
NS_PREFIX = 'qdhcp-'
class DictModel(dict):
    """Convert dict into an object that provides attribute access to values.

    Nested dicts (including dicts inside lists/tuples) are recursively
    converted to DictModel instances so that `model.a.b` works.
    """

    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values."""
        super(DictModel, self).__init__(*args, **kwargs)

        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)

        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item

        # BUGFIX/compat: dict.items() replaces the Python 2-only
        # dict.iteritems() so the class also works on Python 3. Only existing
        # keys are reassigned, so iterating the items view is safe.
        for key, value in self.items():
            if isinstance(value, (list, tuple)):
                # Keep the same type but convert dicts to DictModels
                self[key] = type(value)(
                    upgrade(item) for item in value
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)

    def __getattr__(self, name):
        # Expose dict keys as attributes; missing keys become AttributeError
        # so that getattr(..., default) and hasattr() behave correctly.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
class NetModel(DictModel):
    """Network model that also carries its dhcp namespace name."""

    def __init__(self, use_namespaces, d):
        super(NetModel, self).__init__(d)
        if use_namespaces:
            self._ns_name = "%s%s" % (NS_PREFIX, self.id)
        else:
            self._ns_name = None

    @property
    def namespace(self):
        """Namespace the DHCP server runs in, or None when namespaces are off."""
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Abstract driver interface for a per-network DHCP server."""
    def __init__(self, conf, network, root_helper='sudo',
                 version=None, plugin=None):
        # conf: agent configuration; network: NetModel of the served network;
        # root_helper: command prefix for privileged operations;
        # version: detected DHCP server version; plugin: RPC proxy to neutron.
        self.conf = conf
        self.network = network
        self.root_helper = root_helper
        self.device_manager = DeviceManager(self.conf,
                                            self.root_helper, plugin)
        self.version = version
    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""
    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""
    def restart(self):
        """Restart the dhcp service for the network."""
        # Keep the port so the re-enabled server reuses the same device/IP.
        self.disable(retain_port=True)
        self.enable()
    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""
    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""
    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""
        raise NotImplementedError()
    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""
        raise NotImplementedError()
    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated"""
        raise NotImplementedError()
    @classmethod
    def should_enable_metadata(cls, conf, network):
        """True if the metadata-proxy should be enabled for the network."""
        raise NotImplementedError()
class DhcpLocalProcess(DhcpBase):
    """Base class for DHCP servers that run as a local process.

    Manages the process life cycle (enable/disable), its network device, and
    the per-network state files stored under conf.dhcp_confs.
    """
    PORTS = []

    def _enable_dhcp(self):
        """Check if there is a subnet within the network with dhcp enabled."""
        return any(subnet.enable_dhcp for subnet in self.network.subnets)

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            interface_name = self.device_manager.setup(self.network)
            self.interface_name = interface_name
            self.spawn_process()

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        pid = self.pid
        if pid:
            if self.active:
                cmd = ['kill', '-9', pid]
                utils.execute(cmd, self.root_helper)
            else:
                LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d '
                            'does not exist, performing cleanup'),
                          {'net_id': self.network.id, 'pid': pid})
            if not retain_port:
                self.device_manager.destroy(self.network,
                                            self.interface_name)
        else:
            LOG.debug(_('No DHCP started for %s'), self.network.id)
        self._remove_config_files()
        if not retain_port:
            if self.conf.dhcp_delete_namespaces and self.network.namespace:
                # Delete the namespace only when explicitly configured to;
                # it may still be in use by other processes.
                ns_ip = ip_lib.IPWrapper(self.root_helper,
                                         self.network.namespace)
                try:
                    ns_ip.netns.delete(self.network.namespace)
                except RuntimeError:
                    msg = _('Failed trying to delete namespace: %s')
                    LOG.exception(msg, self.network.namespace)

    def _remove_config_files(self):
        # Remove the whole per-network config directory; a missing dir is ok.
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        shutil.rmtree(conf_dir, ignore_errors=True)

    def get_conf_file_name(self, kind, ensure_conf_dir=False):
        """Returns the file name for a given kind of config file."""
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        if ensure_conf_dir:
            if not os.path.isdir(conf_dir):
                os.makedirs(conf_dir, 0o755)
        return os.path.join(conf_dir, kind)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    # BUGFIX: the old expression
                    #   converter and converter(f.read()) or f.read()
                    # re-read the already-exhausted file and returned ''
                    # whenever the converted value was falsy (e.g. int 0).
                    if converter:
                        return converter(f.read())
                    return f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')
        LOG.debug(msg % file_name)
        return None

    @property
    def pid(self):
        """Last known pid for the DHCP process spawned for this network."""
        return self._get_value_from_conf_file('pid', int)

    @property
    def active(self):
        """True if the recorded pid belongs to a process for this network."""
        pid = self.pid
        if pid is None:
            return False
        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                # Guard against pid reuse: only consider the process ours if
                # its command line mentions this network's id.
                return self.network.id in f.readline()
        except IOError:
            return False

    @property
    def interface_name(self):
        """Device the DHCP server listens on (read from the state file)."""
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface',
                                                      ensure_conf_dir=True)
        utils.replace_file(interface_file_path, value)

    @abc.abstractmethod
    def spawn_process(self):
        """Spawn the DHCP server process for this network."""
class Dnsmasq(DhcpLocalProcess):
    """DhcpLocalProcess implementation that drives a dnsmasq instance."""

    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }
    _TAG_PREFIX = 'tag%d'
    NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
    NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
    MINIMUM_VERSION = 2.63
    MINIMUM_IPV6_VERSION = 2.67

    @classmethod
    def check_version(cls):
        """Check the installed dnsmasq version against the minimums.

        Exits if dnsmasq is missing or too old; only warns when the version
        is too old for IPv6 stateful subnets. Returns the version as a float.
        """
        ver = 0
        try:
            cmd = ['dnsmasq', '--version']
            out = utils.execute(cmd)
            # BUGFIX: the pattern was "\d+.\d+" where the unescaped '.'
            # matched any character; it must be a literal dot, and the
            # pattern should be a raw string to avoid invalid escapes.
            ver = re.findall(r"\d+\.\d+", out)[0]
            is_valid_version = float(ver) >= cls.MINIMUM_VERSION
            if not is_valid_version:
                LOG.error(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
                            'DHCP AGENT MAY NOT RUN CORRECTLY! '
                            'Please ensure that its version is %s '
                            'or above!'), cls.MINIMUM_VERSION)
                raise SystemExit(1)
            is_valid_version = float(ver) >= cls.MINIMUM_IPV6_VERSION
            if not is_valid_version:
                LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
                              'DHCP AGENT MAY NOT RUN CORRECTLY WHEN '
                              'SERVING IPV6 STATEFUL SUBNETS! '
                              'Please ensure that its version is %s '
                              'or above!'), cls.MINIMUM_IPV6_VERSION)
        except (OSError, RuntimeError, IndexError, ValueError):
            LOG.error(_('Unable to determine dnsmasq version. '
                        'Please ensure that its version is %s '
                        'or above!'), cls.MINIMUM_VERSION)
            raise SystemExit(1)
        return float(ver)

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
        return [
            c for c in os.listdir(confs_dir)
            if uuidutils.is_uuid_like(c)
        ]

    def spawn_process(self):
        """Spawns a Dnsmasq process for the network."""
        env = {
            self.NEUTRON_NETWORK_ID_KEY: self.network.id,
        }
        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % self.get_conf_file_name(
                'pid', ensure_conf_dir=True),
            '--dhcp-hostsfile=%s' % self._output_hosts_file(),
            '--addn-hosts=%s' % self._output_addn_hosts_file(),
            '--dhcp-optsfile=%s' % self._output_opts_file(),
            '--leasefile-ro',
        ]
        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [constants.DHCPV6_STATEFUL,
                                  constants.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'
            cidr = netaddr.IPNetwork(subnet.cidr)
            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration
            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size
        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))
        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)
        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)
        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd, addl_env=env)

    def _release_lease(self, mac_address, ip):
        """Release a DHCP lease."""
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd)

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug(_('Killing dhcpmasq for network since all subnets have '
                        'turned off DHCP: %s'), self.network.id)
            return
        self._release_unused_leases()
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()
        if self.active:
            # SIGHUP makes dnsmasq re-read its hosts/opts files.
            cmd = ['kill', '-HUP', self.pid]
            utils.execute(cmd, self.root_helper)
        else:
            LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
        LOG.debug(_('Reloading allocations for network: %s'), self.network.id)
        self.device_manager.update(self.network, self.interface_name)

    def _iter_hosts(self):
        """Iterate over hosts.
        For each host on the network we yield a tuple containing:
        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
            host_name,  # Host name.
            name,  # Canonical hostname in the format 'hostname[.domain]'.
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                # Note(scollins) Only create entries that are
                # associated with the subnet being managed by this
                # dhcp agent
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    if addr_mode != constants.DHCPV6_STATEFUL:
                        continue
                hostname = 'host-%s' % alloc.ip_address.replace(
                    '.', '-').replace(':', '-')
                fqdn = hostname
                if self.conf.dhcp_domain:
                    fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn)

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.
        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::
            'mac_address,FQDN,ip_address'
        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')
        LOG.debug(_('Building host file: %s'), filename)
        for (port, alloc, hostname, name) in self._iter_hosts():
            # (dzyu) Check if it is legal ipv6 address, if so, need wrap
            # it with '[]' to let dnsmasq to distinguish MAC address from
            # IPv6 address.
            ip_address = alloc.ip_address
            if netaddr.valid_ipv6(ip_address):
                ip_address = '[%s]' % ip_address
            LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'),
                      {"mac": port.mac_address, "name": name,
                       "ip": ip_address})
            if getattr(port, 'extra_dhcp_opts', False):
                buf.write('%s,%s,%s,%s%s\n' %
                          (port.mac_address, name, ip_address,
                           'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))
        utils.replace_file(filename, buf.getvalue())
        LOG.debug(_('Done building host file %s'), filename)
        return filename

    def _read_hosts_file_leases(self, filename):
        # Parse a previously written hosts file into a set of
        # (ip_address, mac_address) tuples.
        leases = set()
        if os.path.exists(filename):
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    leases.add((host[2], host[0]))
        return leases

    def _release_unused_leases(self):
        # Release leases for (ip, mac) pairs that are no longer allocated.
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)
        new_leases = set()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address))
        for ip, mac in old_leases - new_leases:
            self._release_lease(mac, ip)

    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.
        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmasq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for (port, alloc, hostname, fqdn) in self._iter_hosts():
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        options = []
        isolated_subnets = self.get_isolated_subnets(self.network)
        dhcp_ips = collections.defaultdict(list)
        subnet_idx_map = {}
        for i, subnet in enumerate(self.network.subnets):
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 getattr(subnet, 'ipv6_address_mode', None)
                 in [None, constants.IPV6_SLAAC])):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_idx_map[subnet.id] = i
            if self.conf.dhcp_domain and subnet.ip_version == 6:
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))
            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == "0.0.0.0/0":
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
            # Add host routes for isolated network segments
            if (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )
            if subnet.ip_version == 4:
                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % ("0.0.0.0/0", gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))
                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        for port in self.network.ports:
            if getattr(port, 'extra_dhcp_opts', False):
                for ip_version in (4, 6):
                    if any(
                        netaddr.IPAddress(ip.ip_address).version == ip_version
                            for ip in port.fixed_ips):
                        options.extend(
                            # TODO(xuhanp):Instead of applying extra_dhcp_opts
                            # to both DHCPv4 and DHCPv6, we need to find a new
                            # way to specify options for v4 and v6
                            # respectively. We also need to validate the option
                            # before applying it.
                            self._format_option(ip_version, port.id,
                                                opt.opt_name, opt.opt_value)
                            for opt in port.extra_dhcp_opts)
            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_idx_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)
        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if vx_ips:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _make_subnet_interface_ip_map(self):
        # Map subnet_id -> the ip of this agent's DHCP interface in
        # that subnet, by matching the device's configured cidrs.
        ip_dev = ip_lib.IPDevice(
            self.interface_name,
            self.root_helper,
            self.network.namespace
        )
        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )
        retval = {}
        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])
            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
        return retval

    def _format_option(self, ip_version, tag, option, *args):
        """Format DHCP option by option name or code."""
        option = str(option)
        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag
        if not option.isdigit():
            if ip_version == 4:
                option = 'option:%s' % option
            else:
                option = 'option6:%s' % option
        return ','.join(('tag:' + tag, '%s' % option) + args)

    @staticmethod
    def _convert_to_literal_addrs(ip_version, ips):
        # IPv6 literals must be bracketed in dnsmasq option values.
        if ip_version == 4:
            return ips
        return ['[' + ip + ']' for ip in ips]

    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated
        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)
        for port in network.ports:
            if port.device_owner not in (constants.DEVICE_OWNER_ROUTER_INTF,
                                         constants.DEVICE_OWNER_DVR_INTERFACE):
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False
        return isolated_subnets

    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network
        This method returns True for truly isolated networks (ie: not attached
        to a router), when the enable_isolated_metadata flag is True.
        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
        """
        if conf.enable_metadata_network and conf.enable_isolated_metadata:
            # check if the network has a metadata subnet
            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
                   for s in network.subnets):
                return True
        if not conf.use_namespaces or not conf.enable_isolated_metadata:
            return False
        isolated_subnets = cls.get_isolated_subnets(network)
        return any(isolated_subnets[subnet.id] for subnet in network.subnets)

    @classmethod
    def lease_update(cls):
        # Entry point invoked by dnsmasq's --dhcp-script hook; forwards the
        # lease event to the agent through a unix domain socket.
        network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
        dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)
        action = sys.argv[1]
        if action not in ('add', 'del', 'old'):
            sys.exit()
        mac_address = sys.argv[2]
        ip_address = sys.argv[3]
        if action == 'del':
            lease_remaining = 0
        else:
            lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))
        data = dict(network_id=network_id, mac_address=mac_address,
                    ip_address=ip_address, lease_remaining=lease_remaining)
        if os.path.exists(dhcp_relay_socket):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(dhcp_relay_socket)
            sock.send(jsonutils.dumps(data))
            sock.close()
class DeviceManager(object):
def __init__(self, conf, root_helper, plugin):
self.conf = conf
self.root_helper = root_helper
self.plugin = plugin
if not conf.interface_driver:
msg = _('An interface driver must be specified')
LOG.error(msg)
raise SystemExit(1)
try:
self.driver = importutils.import_object(
conf.interface_driver, conf)
except Exception as e:
msg = (_("Error importing interface driver '%(driver)s': "
"%(inner)s") % {'driver': conf.interface_driver,
'inner': e})
LOG.error(msg)
raise SystemExit(1)
def get_interface_name(self, network, port):
"""Return interface(device) name for use by the DHCP process."""
return self.driver.get_device_name(port)
def get_device_id(self, network):
"""Return a unique DHCP device ID for this host on the network."""
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)
def _set_default_route(self, network, device_name):
"""Sets the default gateway for this dhcp namespace.
This method is idempotent and will only adjust the route if adjusting
it would change it from what it already is. This makes it safe to call
and avoids unnecessary perturbation of the system.
"""
device = ip_lib.IPDevice(device_name,
self.root_helper,
network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway['gateway']
for subnet in network.subnets:
skip_subnet = (
subnet.ip_version != 4
or not subnet.enable_dhcp
or subnet.gateway_ip is None)
if skip_subnet:
continue
if gateway != subnet.gateway_ip:
m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
device.route.add_gateway(subnet.gateway_ip)
return
# No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here.
if gateway is not None:
msg = _('Removing gateway for dhcp netns on net %s')
LOG.debug(msg, network.id)
device.route.delete_gateway(gateway)
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
device_id = self.get_device_id(network)
subnets = {}
dhcp_enabled_subnet_ids = []
for subnet in network.subnets:
if subnet.enable_dhcp:
dhcp_enabled_subnet_ids.append(subnet.id)
subnets[subnet.id] = subnet
dhcp_port = None
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
port_fixed_ips = []
for fixed_ip in port.fixed_ips:
port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
# If there are dhcp_enabled_subnet_ids here that means that
# we need to add those to the port and call update.
if dhcp_enabled_subnet_ids:
port_fixed_ips.extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'fixed_ips': port_fixed_ips}})
if not dhcp_port:
raise exceptions.Conflict()
else:
dhcp_port = port
# break since we found port that matches device_id
break
# check for a reserved DHCP port
if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.'),
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
dhcp_port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if dhcp_port:
break
# DHCP port has not yet been created.
if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist.'), {'device_id': device_id,
'network_id': network.id})
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
if not dhcp_port:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item
for item in fixed_ips]
dhcp_port.fixed_ips = ips
return dhcp_port
    def setup(self, network):
        """Create and initialize a device for network's DHCP on this host.

        Ensures the DHCP port exists, plugs (or reuses) the tap device,
        assigns every fixed-IP CIDR to it, and wires up routing inside
        the network namespace.  Returns the interface name.
        """
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)
        # Reuse the device if it is already plugged and up in the namespace;
        # otherwise plug a fresh one via the interface driver.
        if ip_lib.ensure_device_is_ready(interface_name,
                                         self.root_helper,
                                         network.namespace):
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=network.namespace)
        # Build the CIDR list from the port's fixed IPs; each fixed_ip
        # carries its subnet (see setup_dhcp_port which attaches them).
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        # Also bind the metadata address when serving isolated-network
        # metadata from inside a namespace.
        if (self.conf.enable_isolated_metadata and
            self.conf.use_namespaces):
            ip_cidrs.append(METADATA_DEFAULT_CIDR)
        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)
        # ensure that the dhcp interface is first in the list
        if network.namespace is None:
            device = ip_lib.IPDevice(interface_name,
                                     self.root_helper)
            device.route.pullup_route(interface_name)
        if self.conf.use_namespaces:
            self._set_default_route(network, interface_name)
        return interface_name
def update(self, network, device_name):
"""Update device settings for the network's DHCP on this host."""
if self.conf.use_namespaces:
self._set_default_route(network, device_name)
def destroy(self, network, device_name):
"""Destroy the device used for the network's DHCP on this host."""
self.driver.unplug(device_name, namespace=network.namespace)
self.plugin.release_dhcp_port(network.id,
self.get_device_id(network))
| |
"""
Script to convert SPN learned by Poon, Domingos method [1] (using their Java implementation)
to format readable by the implementation of Zhao et al, [2] (C++)
[1] Sum-Product Networks: A New Deep Architecture , UAI 2011
http://spn.cs.washington.edu/spn/
[2] A Unified Approach for Learning the Parameters of Sum-Product Networks, NIPS 2016
http://www.cs.cmu.edu/%7Ehzhao1/papers/ICML2016/spn_release.zip
"""
import os
import pandas as pd
import numpy as np
import argparse
import re
from xml.dom import minidom
# Side lengths of the (square) input images; 28x28 matches MNIST digits.
INPUT_DIM_1 = 28
INPUT_DIM_2 = 28
# Weight given to edges pointing at otherwise-unused nodes.
UNUSED_NODE_WEIGHT = 0.01
# Fixed variance used for every Gaussian (NORMALNODE) leaf.
VAR_VARIANCE = 1.0
# Delimiter that marks product-node decompositions after XML conversion
# (see convert_and_parse_xml, which rewrites "<a b c d>" to "@ a b c d @").
DCMP_DELIM = '@'
# When True, the exported SPN is wrapped with per-class sub-roots.
CLASSIFIER_MODE = False
# Base ids for the synthetic nodes added in classifier mode; chosen large
# so they cannot collide with ids assigned by the main conversion loop.
BASE_CLASS_NODE_ID = 1000000
LABEL_NODE_BASE_ID = 1000100
PROD_NODE_BASE_ID = 1001000
# Number of classes.
K = 10
ROOT_NODE_ID = (BASE_CLASS_NODE_ID + K + 1) if CLASSIFIER_MODE else 0
# Matches raw product-node tuples of the form "<a b c d>" in the Java dump.
pn_re = re.compile(r'(<)(\d+) (\d+) (\d+) (\d+)(>)')
def get_edges(id1, target_ids, weights=None):
    """Build a DataFrame of edge rows from node *id1* to each target id.

    Args:
        id1: source node id.
        target_ids: iterable of destination node ids.
        weights: optional list of edge weights, parallel to *target_ids*.
            When omitted or empty, the edges are emitted unweighted.

    Returns:
        pandas.DataFrame with one row per edge (see get_edge_row).
    """
    # BUG FIX: the original default was a mutable list (weights=[]),
    # which is shared across calls; None is the safe equivalent and is
    # equally falsy, so callers see identical behavior.
    edges = []
    if weights:
        # A weighted edge list must cover every target exactly once.
        assert len(weights) == len(target_ids)
        for t_id, w in zip(target_ids, weights):
            edges.append(get_edge_row(id1, t_id, weight=w))
    else:
        for t_id in target_ids:
            edges.append(get_edge_row(id1, t_id))
    return pd.DataFrame(edges)
def get_edge_row(id1, id2, weight=-1):
    """Return a pandas Series describing one edge of the SPN graph.

    A weight of -1 (the default) marks an unweighted edge.  The
    'str_rep' field is the line later written to the output file.
    """
    row = pd.Series({'id1': id1, 'id2': id2})
    if weight == -1:
        row['str_rep'] = '%d,%d' % (row['id1'], row['id2'])
    else:
        row['weight'] = weight
        row['str_rep'] = '%d,%d,%f' % (row['id1'], row['id2'], row['weight'])
    return row
def get_node_row(n_id, n_type, prnts):
    """Return a Series for one node: id, type, parent count, text form."""
    row = pd.Series({'id': n_id, 'type': n_type, 'prnts': prnts})
    row['str_rep'] = '%d,%s' % (n_id, n_type)
    return row
def create_classifier_nodes(K, root_node_row):
    """Build the extra nodes needed to turn the SPN into a K-class classifier.

    Clones the original root once per class, pairs each clone with a
    product node and two binary label-indicator nodes, and places a
    single new root (id ROOT_NODE_ID) on top.

    Args:
        K: number of classes.
        root_node_row: the row of the original root node (nodes_df.loc[0]).

    Returns:
        pandas.DataFrame with all newly created rows.
    """
    classifier_nodes = []
    label_nodes = []
    prod_nodes = []
    for k in range(K):
        # Per-class copy of the original root sum node.
        copy_row = root_node_row.copy()
        copy_row['id'] = BASE_CLASS_NODE_ID + (k)
        copy_row['prnts'] = 1
        copy_row['label'] = k
        copy_row['str_rep'] = '%d,%s' % (copy_row['id'],copy_row['type'])
        classifier_nodes.append(copy_row)
        # Product node that will join the class sub-root with its labels.
        pn = get_node_row(PROD_NODE_BASE_ID+k,'PRD',1)
        pn['label'] = k
        prod_nodes.append(pn)
    prod_nodes_df = pd.DataFrame(prod_nodes)
    classifier_nodes_df = pd.DataFrame(classifier_nodes)
    # New root that replaces the original root in classifier mode.
    new_root_row = root_node_row.copy()
    new_root_row['id'] = ROOT_NODE_ID
    new_root_row['prnts'] = 0
    new_root_row['str_rep'] = '%d,%s' % (new_root_row['id'],new_root_row['type'])
    new_root_df = pd.DataFrame([new_root_row])
    for k in range(K):
        # Indicator that fires (T=1) when the label variable equals k;
        # the label variables live just past the pixel variables.
        label_node_t = { 'id': LABEL_NODE_BASE_ID + k, 'type': 'BINNODE', 'prnts': 1,
                         'var_id': INPUT_DIM_1*INPUT_DIM_2 + (k),'T': 1.0, 'F':0.0 , 'label': k}
        label_node_t['str_rep'] = '%d,%s,%d,%f,%f' % (label_node_t['id'],label_node_t['type'],label_node_t['var_id'],
                                                      label_node_t['F'],label_node_t['T'])
        # Complement indicator (F=1) for "label differs from k".
        # NOTE(review): the "+ 10" offset assumes K <= 10, otherwise the
        # T and F id ranges would overlap — confirm before raising K.
        label_node_f = { 'id': LABEL_NODE_BASE_ID + k + 10, 'type': 'BINNODE',
                         'prnts': 1, 'var_id': INPUT_DIM_1*INPUT_DIM_2 + (k),'T': 0.0, 'F':1.0, 'label': k }
        label_node_f['str_rep'] = '%d,%s,%d,%f,%f' % (label_node_f['id'],label_node_f['type'],label_node_f['var_id'],
                                                      label_node_f['F'],label_node_f['T'])
        label_nodes.append(pd.Series(label_node_t))
        label_nodes.append(pd.Series(label_node_f))
    label_nodes_df = pd.DataFrame(label_nodes)
    all_nodes_df = pd.concat((new_root_df,prod_nodes_df,classifier_nodes_df,label_nodes_df))
    return all_nodes_df
def create_classifier_edges(cn_df, orig_root_edges_df):
    """Wire up the classifier nodes produced by create_classifier_nodes.

    For each class k: the per-class sum node inherits the original
    root's outgoing edges (and weights), and a product node connects
    that sum node with the matching label indicators.  The new root
    mixes the K product nodes uniformly.

    Args:
        cn_df: DataFrame returned by create_classifier_nodes.
        orig_root_edges_df: edges whose source was the original root.

    Returns:
        pandas.DataFrame with all new edges.
    """
    orig_root_w = list(orig_root_edges_df['weight'])
    cls_edges = pd.DataFrame()
    # create root edges
    for k in range(K):
        # Per-class sum node id (clone of the original root for class k).
        target_id = cn_df[(cn_df['type'] == 'SUM') & (cn_df['label'] == k)]['id'].max()
        # "Label != k" complements plus the "label == k" indicator.
        f_labels = list(cn_df[(cn_df['type'] == 'BINNODE') & (cn_df['label'] != k)
                        & (cn_df['F'] == 1)]['id'])
        t_label = list(cn_df[(cn_df['type'] == 'BINNODE') & (cn_df['label'] == k)
                       & (cn_df['T'] == 1)]['id'])
        pr_edges = get_edges(cn_df[(cn_df['type'] == 'PRD') & (cn_df['label'] == k)]['id'].max(),
                             target_ids= [target_id] + f_labels + t_label )
        # The class sum node re-uses the original root's children/weights.
        cls_edges = pd.concat((pr_edges,cls_edges,get_edges(target_id,list(orig_root_edges_df['id2']),weights=orig_root_w)))
    # Uniform mixture over the K per-class product nodes.
    new_root_edges = get_edges(ROOT_NODE_ID,list(cn_df[(cn_df['type'] == 'PRD') & (cn_df['label'] < K)]['id']), [(1.0/K)]*K)
    return pd.concat((new_root_edges,cls_edges))
def get_abs_id(regions_dict, reg_idx, idx):
    """Resolve a (region id, local index) pair to a global node id.

    Coarse regions index into their sum-node list; fine regions keep a
    flat list of leaf-node ids under 'abs_id'.
    """
    region = regions_dict[reg_idx]
    if region['type'] != 'coarse':
        return region['abs_id'][idx]
    return region['sum_nodes'][idx]['abs_id']
def parse_prod_node(prod_node_str):
    """Parse a converted product-node string "@ r1 r2 i1 i2 @".

    Returns a dict with type 'PRD' and 'chds' holding two
    (region_id, local_sum_index) pairs: (r1, i1) and (r2, i2).
    """
    regs = [int(tok) for tok in prod_node_str.strip('@').strip().split(' ')]
    return {
        'type': 'PRD',
        'chds': [(regs[0], regs[2]), (regs[1], regs[3])],
    }
def parse_sum_node(sum_node_str):
    """Parse one sum-node line into a dict with normalized child weights.

    The expected format is "cnt:prod1:w1:prod2:w2:..." where each prodN
    is a converted product-node string.  Lines that do not contain the
    decomposition delimiter are not sum nodes; an empty dict (falsy) is
    returned for those so callers can skip them.
    """
    sum_node = {}
    if DCMP_DELIM not in sum_node_str:
        return sum_node
    parts = sum_node_str.split(':')
    sum_node['type'] = 'SUM'
    sum_node['cnt'] = float(parts[0])
    prod_nodes = []
    un_weights = []
    # Children arrive as (product-node, weight) pairs after the count.
    for i in range(1, len(parts), 2):
        prod_nodes.append(parse_prod_node(parts[i]))
        un_weights.append(float(parts[i + 1]))
    sum_node['chds'] = prod_nodes
    # Originally weights are un-normalized, so we normalize them here
    sum_node['weights'] = np.array(un_weights) / np.sum(un_weights)
    return sum_node
def region_to_id(a1, a2, b1, b2):
    """Map region corner coordinates to the unique region id of the dump."""
    row_part = a1 * INPUT_DIM_1 + a2 - 1
    return (row_part * INPUT_DIM_2 + b1) * INPUT_DIM_2 + b2 - 1
def load_region(region_xml):
    """Parse one <REGION> XML element into a region dict.

    Regions with a MEAN child are 'fine' (leaf-level, Gaussian means);
    all others are 'coarse' and carry a list of sum-node descriptions.
    The returned dict has keys: type, id, coords, sum_nodes, and (for
    fine regions) means.
    """
    region = {}
    if 'MEAN' in [node.nodeName for node in region_xml.childNodes]:
        region['type'] = 'fine'
        # NOTE(review): assumes childNodes[3] is the MEAN element and that
        # the first two whitespace tokens of its text are headers — this
        # depends on the exact layout emitted by convert_and_parse_xml.
        region['means'] = [float(x) for x in region_xml.childNodes[3].childNodes[0].data.strip().split(' ')[2:]]
        region['sum_nodes'] = []
    else:
        region['type'] = 'coarse'
        sum_nodes_str = region_xml.childNodes[1].childNodes[0].data.strip().split('\n')
        # First line is the region header, not a sum node, hence [1:].
        region['sum_nodes'] = [parse_sum_node(x) for x in sum_nodes_str[1:]]
    # The leading text node holds the corner coordinates "a1 a2 b1 b2".
    coords = [int(x) for x in region_xml.childNodes[0].data.strip().split(' ')]
    a1,a2,b1,b2 = coords
    region['id'] = region_to_id(a1,a2,b1,b2)
    region['coords'] = coords
    return region
def convert_and_parse_xml(src_model_fname):
    """Rewrite the raw Java model dump into well-formed XML and parse it.

    Wraps the file in a <MODEL> root, closes the unterminated <CNT> and
    <MEAN> tags, and rewrites product-node tuples "<a b c d>" (which
    would break the XML parser) as "@ a b c d @".  The temporary file is
    removed after parsing; the minidom document is returned.

    NOTE(review): this is Python 2 code — writing str objects to a file
    opened in 'wb' mode only works there.
    """
    dst_model_fname = os.path.basename(src_model_fname).split('.')[0] + '.xml.mdl'
    with open(dst_model_fname, 'wb') as wfile:
        wfile.write('<MODEL>\n')
        with open(src_model_fname, 'rb') as rfile:
            for line in rfile.readlines():
                newline = line
                if '<CNT>' in line:
                    newline = line.strip() + '</CNT>'
                elif '<MEAN>' in line:
                    newline = line.strip() + '</MEAN>'
                elif pn_re.findall(line):
                    # "<a b c d>" -> "@ a b c d @" so minidom doesn't choke.
                    newline = pn_re.sub(r'@ \2 \3 \4 \5 @',line)
                wfile.write(newline.strip() + os.linesep)
        wfile.write('</MODEL>\n')
    xmldoc = minidom.parse(dst_model_fname)
    os.remove(dst_model_fname)
    return xmldoc
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "model_path",
        help="Path to model definitions generated using Java code.",
        type=str
    )
    parser.add_argument(
        "outname",
        help="Filename to save output to.",
        type=str
    )
    args = parser.parse_args()
    # Accumulators for the converted SPN.
    variables = []
    nodes = []
    edges = []
    region_dict_list = []
    abs_id_map = {}
    regions_dict = {}
    cnt = 0
    src_model_fname = args.model_path
    dst_model_fname = args.outname
    # NOTE(review): Python 2 script (print statements below).
    print "Parsing raw model file..."
    xmldoc = convert_and_parse_xml(src_model_fname)
    region_list = xmldoc.getElementsByTagName('REGION')
    # Pass 1: load every region and index it by id.
    for region_node in region_list:
        region = load_region(region_node)
        regions_dict[region['id']] = region
        region_dict_list.append(region)
        if region['type'] == 'fine':
            cnt += 1
    nc = 0 # global node counter
    # Process regions in reverse document order — presumably so leaf
    # (fine) regions are numbered before the regions that reference them;
    # TODO confirm against the Java dump's ordering.
    region_dict_list.reverse()
    print "Collecting variable and sum nodes..."
    # Pass 2: assign global ids to sum nodes (coarse) and leaves (fine).
    for region in region_dict_list:
        if region['type'] == 'coarse':
            for j,sn in enumerate(region['sum_nodes']):
                if sn:
                    node_row = {'id': nc,'address': (region['id'],j),'type':'SUM'}
                    node_row['str_rep'] = '%d,%s' % (nc,node_row['type'])
                    nodes.append(node_row)
                    sn['abs_id'] = nc
                    nc += 1
        else: # fine.
            region['abs_id'] = []
            # Variable index of this region inside the flattened image.
            var_idx = region['coords'][0]*INPUT_DIM_2 + region['coords'][2]
            for i,m in enumerate(region['means']):
                node_row = { 'id': nc, 'var_id': var_idx, 'mean': m, 'type':'NORMALNODE' }
                node_row['str_rep'] = '%d,%s,%d,%f,%f' % (nc,node_row['type'],node_row['var_id'],
                                                          node_row['mean'],VAR_VARIANCE)
                nodes.append(node_row)
                region['abs_id'].append(nc)
                nc += 1
    print "Collecting product nodes..."
    # Pass 3: assign global ids to product nodes, resolving their
    # (region, local index) children to the ids assigned in pass 2.
    for region in region_dict_list:
        if region['type'] == 'coarse':
            for j,sn in enumerate(region['sum_nodes']):
                if sn:
                    for pn in sn['chds']:
                        pn_chds = [get_abs_id(regions_dict,reg_idx,idx) for (reg_idx,idx) in pn['chds']]
                        node_row = { 'id':nc, 'chds': pn_chds, 'type':'PRD'}
                        node_row['str_rep'] = '%d,%s' % (nc,node_row['type'])
                        nodes.append(node_row)
                        pn['abs_id'] = nc
                        nc += 1
    nodes_df = pd.DataFrame(nodes)
    # 'prnts' marks whether a node is referenced by at least one edge.
    nodes_df['prnts'] = 0
    print "Collecting edges..."
    edges = []
    sum_nodes_df = nodes_df[nodes_df['type'] == 'SUM']
    # Weighted edges: sum node -> its product-node children.
    for i,sn_row in sum_nodes_df.iterrows():
        reg_id, idx = sn_row['address']
        sn = regions_dict[reg_id]['sum_nodes'][idx]
        for i,chd in enumerate(sn['chds']):
            sum_edge_row = {'id1': sn['abs_id'], 'id2': chd['abs_id'], 'weight': sn['weights'][i] }
            # NOTE(review): DataFrame.set_value is deprecated in modern
            # pandas (use .at); kept as-is for the pinned environment.
            nodes_df.set_value(chd['abs_id'],'prnts',1)
            sum_edge_row['str_rep'] = '%d,%d,%f' % (sum_edge_row['id1'],sum_edge_row['id2'],sum_edge_row['weight'])
            edges.append(sum_edge_row)
    prd_nodes_df = nodes_df[nodes_df['type'] == 'PRD']
    # Unweighted edges: product node -> its children.
    for i,pn_row in prd_nodes_df.iterrows():
        for j,chd in enumerate(pn_row['chds']):
            prd_edge_row = {'id1': pn_row['id'], 'id2': chd }
            nodes_df.set_value(chd,'prnts',1)
            prd_edge_row['str_rep'] = '%d,%d' % (prd_edge_row['id1'],prd_edge_row['id2'])
            edges.append(prd_edge_row)
    edges_df = pd.DataFrame(edges)
    if CLASSIFIER_MODE:
        # Add nodes and edges used in classification mode
        classifier_nodes_df = create_classifier_nodes(K,nodes_df.loc[0])
        root_edges = edges_df[edges_df['id1']== 0 ]
        cls_edges = create_classifier_edges(classifier_nodes_df,root_edges)
        edges_df = pd.concat((cls_edges,edges_df))
        nodes_df = pd.concat((classifier_nodes_df,nodes_df))
    # Remove nodes with no parents unless it's the root node
    valid_nodes_df = nodes_df[(nodes_df['prnts'] > 0) | (nodes_df['id'] == ROOT_NODE_ID)]
    if CLASSIFIER_MODE:
        # remove old root and connected edges
        valid_nodes_df = valid_nodes_df[(valid_nodes_df['id'] != 0)]
        edges_df = edges_df[edges_df['id1'] != 0 ]
    print "Exporting SPN in spn_opt format..."
    with open(dst_model_fname,'wb') as f_out:
        f_out.write('#NODES\n')
        f_out.writelines([x+os.linesep for x in list(valid_nodes_df['str_rep'])])
        f_out.write('#EDGES\n')
        f_out.writelines([x+os.linesep for x in list(edges_df['str_rep'])])
    print "Done!"
| |
#!/usr/bin/Python3.5
# --coding:utf-8--
from .bing_search_api import BingSearchAPI
import math
import os
import random
import re
from .pos_tagger import POS_tag_cleaner
from .collocations_method_5_V3 import POS_Check
import string
def cache_abs_path(cache_rel_path):
    """Return *cache_rel_path* resolved relative to this module's directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, cache_rel_path)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def extract_int_number_results(num_results_for_query):
    """Parse the search-engine "number of results" string into an int.

    Expects the count as the second-to-last whitespace-separated token
    (e.g. "of 12,345 results" -> 12345), with thousands separators
    allowed.  Returns 0 when the input is the literal '0' or when
    parsing fails.
    """
    if num_results_for_query == '0':
        # Explicit 0 instead of the original implicit None: both are
        # falsy, but 0 matches the failure branch below.
        return 0
    try:
        return int(num_results_for_query.replace(',', '').split(' ')[-2])
    except Exception as e:
        # BUG FIX: this branch referenced the undefined name `serp`,
        # raising NameError and masking the real parse error.
        print("Error extracting number of results, message: {error:s}".format(error=str(e)))
        print(num_results_for_query)
        return 0
def min(a, b):
    """Return the smaller of *a* and *b* (ties return *b*).

    NOTE(review): this shadows the builtin min(); renaming would change
    the module's interface for existing callers, so it is kept as-is.
    """
    if a < b:
        return a
    return b
# Statistical technique
def Collocations_Method_3(_bing_api_key, _n_grams_from_input_text_file, _input_file_path, _collocation_corpora, _apply_POS_restrictions, _verbose):
#print(_n_grams_from_input_text_file)
if _verbose:
# A file to save the verbose output of the program
_output_file_verbose = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'verbose.txt')
_output_file_verbose = open(_output_file_verbose, 'a')
print("\n----------------------------------------------------------------------------------------", file = _output_file_verbose)
print("\tMethod-3: Statistical technique - Extracting collocations:", file = _output_file_verbose)
print("----------------------------------------------------------------------------------------\n\n", file = _output_file_verbose)
print("\tMethod-3: Statistical technique - Extracting collocations ...")
# Obtain the path of this script to go to the TAGGED-FULL folder
script_path = os.path.dirname(os.path.realpath(__file__))
# Extracting words from the files in TAGGED-FULL folder
# WH-adverbs are stored to the list 'WRB'
WRB = script_path+'/TAGGED_Full/WRB1'
f = open(WRB, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
WRB = [] # creates an empty list
for i in t:
i = i.strip('\n')
WRB.append(i)
# WH-pronouns, possessive are stored to the list 'WHP'
WHP = script_path+'/TAGGED_Full/WP$1'
f = open(WHP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
WHP = [] # creates an empty list
for i in t:
i = i.strip('\n')
WHP.append(i)
# WH-pronouns are stored to the list 'WP'
WP = script_path+'/TAGGED_Full/WP1'
f = open(WP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
WP = [] # creates an empty list
for i in t:
i = i.strip('\n')
WP.append(i)
# WDT: WH-determiner
# that what whatever which whichever
# stored in the list 'WDT'
WDT = script_path+'/TAGGED_Full/WDT1'
f = open(WDT, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
WDT = [] # creates an empty list
for i in t:
i = i.strip('\n')
WDT.append(i)
# VBZ: verb, present tense, 3rd person singular
# bases reconstructs marks mixes displeases seals carps weaves snatches
# slumps stretches authorizes smolders pictures emerges stockpiles
# seduces fizzes uses bolsters slaps speaks pleads ...
# Stored in the list 'VBZ'
VBZ = script_path+'/TAGGED_Full/VBZ1'
f = open(VBZ, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VBZ = [] # creates an empty list
for i in t:
i = i.strip('\n')
VBZ.append(i)
# VBP: verb, present tense, not 3rd person singular
# predominate wrap resort sue twist spill cure lengthen brush terminate
# appear tend stray glisten obtain comprise detest tease attract
# emphasize mold postpone sever return wag ...
# Stored in the list 'VBP'
VBP = script_path+'/TAGGED_Full/VBP1'
f = open(VBP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VBP = [] # creates an empty list
for i in t:
i = i.strip('\n')
VBP.append(i)
# VBN: verb, past participle
# multihulled dilapidated aerosolized chaired languished panelized used
# experimented flourished imitated reunifed factored condensed sheared
# unsettled primed dubbed desired ...
# Stored in the list 'VBN'
VBN = script_path+'/TAGGED_Full/VBN1'
f = open(VBN, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VBN = [] # creates an empty list
for i in t:
i = i.strip('\n')
VBN.append(i)
# VBG: verb, present participle or gerund
# telegraphing stirring focusing angering judging stalling lactating
# hankerin' alleging veering capping approaching traveling besieging
# encrypting interrupting erasing wincing ...
# Stored in the list 'VBG'
VBG = script_path+'/TAGGED_Full/VBG1'
f = open(VBG, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VBG = [] # creates an empty list
for i in t:
i = i.strip('\n')
VBG.append(i)
# VBD: verb, past tense
# dipped pleaded swiped regummed soaked tidied convened halted registered
# cushioned exacted snubbed strode aimed adopted belied figgered
# speculated wore appreciated contemplated ...
# Stored in the list 'VBD'
VBD = script_path+'/TAGGED_Full/VBD1'
f = open(VBD, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VBD = [] # creates an empty list
for i in t:
i = i.strip('\n')
VBD.append(i)
# VB: verb, base form
# ask assemble assess assign assume atone attention avoid bake balkanize
# bank begin behold believe bend benefit bevel beware bless boil bomb
# boost brace break bring broil brush build ...
# Stored in the list 'VB'
VB = script_path+'/TAGGED_Full/VB1'
f = open(VB, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
VB = [] # creates an empty list
for i in t:
i = i.strip('\n')
VB.append(i)
# TO: "to" as preposition or infinitive marker
# to
# Stored in the list 'TO'
TO = script_path+'/TAGGED_Full/TO1'
f = open(TO, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
TO = [] # creates an empty list
for i in t:
i = i.strip('\n')
TO.append(i)
# RP: particle
# aboard about across along apart around aside at away back before behind
# by crop down ever fast for forth from go high i.e. in into just later
# low more off on open out over per pie raising start teeth that through
# under unto up up-pp upon whole with you
# Stored in the list 'RP'
RP = script_path+'/TAGGED_Full/RP1'
f = open(RP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
RP = [] # creates an empty list
for i in t:
i = i.strip('\n')
RP.append(i)
# RBS: adverb, superlative
# best biggest bluntest earliest farthest first furthest hardest
# heartiest highest largest least less most nearest second tightest worst
# Stored in the list 'RBS'
RBS = script_path+'/TAGGED_Full/RBS1'
f = open(RBS, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
RBS = [] # creates an empty list
for i in t:
i = i.strip('\n')
RBS.append(i)
# RBR: adverb, comparative
# further gloomier grander graver greater grimmer harder harsher
# healthier heavier higher however larger later leaner lengthier less-
# perfectly lesser lonelier longer louder lower more ...
# Stored in the list 'RBR'
RBR = script_path+'/TAGGED_Full/RBR1'
f = open(RBR, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
RBR = [] # creates an empty list
for i in t:
i = i.strip('\n')
RBR.append(i)
# RB: adverb
# occasionally unabatingly maddeningly adventurously professedly
# stirringly prominently technologically magisterially predominately
# swiftly fiscally pitilessly ...
# Stored in the list 'RB'
RB = script_path+'/TAGGED_Full/RB1'
f = open(RB, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
RB = [] # creates an empty list
for i in t:
i = i.strip('\n')
RB.append(i)
# PRP$: pronoun, possessive
# her his mine my our ours their thy your
# Stored in the list 'PRP'
PRP = script_path+'/TAGGED_Full/PRP$1'
f = open(PRP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
PRP = [] # creates an empty list
for i in t:
i = i.strip('\n')
PRP.append(i)
# PRP: pronoun, personal
# hers herself him himself hisself it itself me myself one oneself ours
# ourselves ownself self she thee theirs them themselves they thou thy us
# Stored in the list 'PRPP'
PRPP = script_path+'/TAGGED_Full/PRP1'
f = open(PRPP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
PRPP = [] # creates an empty list
for i in t:
i = i.strip('\n')
PRPP.append(i)
# PDT: pre-determiner
# all both half many quite such sure this
# Stored in the list 'PDT'
PDT = script_path+'/TAGGED_Full/PDT1'
f = open(PDT, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
PDT = [] # creates an empty list
for i in t:
i = i.strip('\n')
PDT.append(i)
# NNS: noun, common, plural
# undergraduates scotches bric-a-brac products bodyguards facets coasts
# divestitures storehouses designs clubs fragrances averages
# subjectivists apprehensions muses factory-jobs ...
# Stored in the list 'NNS'
NNS = script_path+'/TAGGED_Full/NNS1'
f = open(NNS, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
NNS = [] # creates an empty list
for i in t:
i = i.strip('\n')
NNS.append(i)
# NNPS: noun, proper, plural
# Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists
# Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques
# Apache Apaches Apocrypha ...
# Stored in the list 'NNPS'
NNPS = script_path+'/TAGGED_Full/NNPS1'
f = open(NNPS, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
NNPS = [] # creates an empty list
for i in t:
i = i.strip('\n')
NNPS.append(i)
# NNP: noun, proper, singular
# Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos
# Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA
# Shannon A.K.C. Meltex Liverpool ...
# Stored in the list 'NNP'
NNP = script_path+'/TAGGED_Full/NNP1'
f = open(NNP, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
NNP = [] # creates an empty list
for i in t:
i = i.strip('\n')
NNP.append(i)
# NN: noun, common, singular or mass
# common-carrier cabbage knuckle-duster Casino afghan shed thermostat
# investment slide humour falloff slick wind hyena override subhumanity
# machinist ...
# Stored in the list 'NN'
NN = script_path+'/TAGGED_Full/NN1'
f = open(NN, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
NN = [] # creates an empty list
for i in t:
i = i.strip('\n')
NN.append(i)
# MD: modal auxiliary
# can cannot could couldn't dare may might must need ought shall should
# shouldn't will would
# Stored in the list 'MD'
MD = script_path+'/TAGGED_Full/MD1'
f = open(MD, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
MD = [] # creates an empty list
for i in t:
i = i.strip('\n')
MD.append(i)
# JJS: adjective, superlative
# calmest cheapest choicest classiest cleanest clearest closest commonest
# corniest costliest crassest creepiest crudest cutest darkest deadliest
# dearest deepest densest dinkiest ...
# Stored in the list 'JJS'
JJS = script_path+'/TAGGED_Full/JJS1'
f = open(JJS, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
JJS = [] # creates an empty list
for i in t:
i = i.strip('\n')
JJS.append(i)
# JJR: adjective, comparative
# bleaker braver breezier briefer brighter brisker broader bumper busier
# calmer cheaper choosier cleaner clearer closer colder commoner costlier
# cozier creamier crunchier cuter ...
# Stored in the list 'JJR'
JJR = script_path+'/TAGGED_Full/JJR1'
f = open(JJR, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
JJR = [] # creates an empty list
for i in t:
i = i.strip('\n')
JJR.append(i)
# JJ: adjective or numeral, ordinal
# third ill-mannered pre-war regrettable oiled calamitous first separable
# ectoplasmic battery-powered participatory fourth still-to-be-named
# multilingual multi-disciplinary ...
# Stored in the list 'JJ'
JJ = script_path+'/TAGGED_Full/JJ1'
f = open(JJ, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
JJ = [] # creates an empty list
for i in t:
i = i.strip('\n')
JJ.append(i)
# IN: preposition or conjunction, subordinating
# astride among uppon whether out inside pro despite on by throughout
# below within for towards near behind atop around if like until below
# next into if beside ...
# Stored in the list 'IN'
IN = script_path+'/TAGGED_Full/IN1'
f = open(IN, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
IN = [] # creates an empty list
for i in t:
i = i.strip('\n')
IN.append(i)
# EX: existential there
# there
# Stored in the list 'EX'
EX = script_path+'/TAGGED_Full/EX1'
f = open(EX, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
EX = [] # creates an empty list
for i in t:
i = i.strip('\n')
EX.append(i)
# DT: determiner
# all an another any both del each either every half la many much nary
# neither no some such that the them these this those
# Stored in the list 'DT'
DT = script_path+'/TAGGED_Full/DT1'
f = open(DT, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
DT = [] # creates an empty list
for i in t:
i = i.strip('\n')
DT.append(i)
# CD: numeral, cardinal
# mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-
# seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025
# fifteen 271,124 dozen quintillion DM2,000 ...
# Stored in the list 'CD'
CD = script_path+'/TAGGED_Full/CD1'
f = open(CD, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
CD = [] # creates an empty list
for i in t:
i = i.strip('\n')
CD.append(i)
# CC: conjunction, coordinating
# & 'n and both but either et for less minus neither nor or plus so
# therefore times v. versus vs. whether yet
# Stored in the list 'CC'
CC = script_path+'/TAGGED_Full/CC1'
f = open(CC, 'r')
t = f.readlines()
f.close()
t = set(t) # Removes duplicates from the list
CC = [] # creates an empty list
for i in t:
i = i.strip('\n')
CC.append(i)
# Creating output files
_stat_05_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'collocations_Stat>05.txt')
if os.path.isfile(_stat_05_path):
os.remove(_stat_05_path)
_stat_06_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'collocations_Stat>06.txt')
if os.path.isfile(_stat_06_path):
os.remove(_stat_06_path)
_stat_07_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'collocations_Stat>07.txt')
if os.path.isfile(_stat_07_path):
os.remove(_stat_07_path)
_stat_08_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'collocations_Stat>08.txt')
if os.path.isfile(_stat_08_path):
os.remove(_stat_08_path)
_stat_09_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'collocations_Stat>09.txt')
if os.path.isfile(_stat_09_path):
os.remove(_stat_09_path)
_stat_mean_sd_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'N_gram_Stat_Mean_SD_range.txt')
if os.path.isfile(_stat_mean_sd_path):
os.remove(_stat_mean_sd_path)
range_zero = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_0txt')
if os.path.isfile(range_zero):
os.remove(range_zero)
range_1l = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_1_left.txt')
if os.path.isfile(range_1l):
os.remove(range_1l)
range_2l = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_2_left.txt')
if os.path.isfile(range_2l):
os.remove(range_2l)
range_3l = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_3_left.txt')
if os.path.isfile(range_3l):
os.remove(range_3l)
range_4l = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_4_left.txt')
if os.path.isfile(range_4l):
os.remove(range_4l)
range_1r = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_1_right.txt')
if os.path.isfile(range_1r):
os.remove(range_1r)
range_2r = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_2_right.txt')
if os.path.isfile(range_2r):
os.remove(range_2r)
range_3r = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_3_right.txt')
if os.path.isfile(range_3r):
os.remove(range_3r)
range_4r = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Stat_Mean_SD_range_4_right.txt')
if os.path.isfile(range_4r):
os.remove(range_4r)
range_statistic = str(_input_file_path).replace(_input_file_path.split('/')[-1], 'Collocations_Stat_Mean_SD.txt')
if os.path.isfile(range_statistic):
os.remove(range_statistic)
auxiliary_verb_list = []
auxiliary_verb_list.extend(("be", "am", "are", "arent", "is", "was", "were", "being", "can", "cant", "could", "do","dont",\
"did", "does", "doing", "have", "havent", "hadnt", "had", "has", "hasnt", "having", "may", "might", "mightnt", "maynt",\
"mustnt", "must", "shall", "should", "will", "neednt", "wont", "would", "wouldnt", "ought", "been", "cannot", "couldnt",\
"dare", "darent", "need", "shouldnt", "isnt", "wasnt", "werent", "oughtnot", "didnt", "doesnt", "dont"))
if _apply_POS_restrictions:
_n_grams_from_input_text_file = list(filter(POS_Check, _n_grams_from_input_text_file))
# A list to store n-gram phrases that are collocations
statistical_collocations = []
# A list to store n-gram phrases that are not collocations
n_grams_not_collocations = []
# A dictionary to store search queries as keys and its search results total as value
search_queries = {}
# A dictionary to save the Phrase search totals
_phrase_search_total_dictionary = {}
# A dictionary for n_gram and its replacement phrases
_n_gram_replacement_phrases = {}
#if we use the input text files
_n_grams_from_input_text_file_not_have_replacements = []
#choose_input_replacement_phrases = input("Do you want to read the replacement phrases from the files?")
choose_input_replacement_phrases = "Y"
_replacement_queries_from_files = {}
if choose_input_replacement_phrases.upper() == "Y":
try:
with open("method3_replacement_phrases.cache", 'r') as input_file:
for line in input_file:
_temp_phrases = line.split('|')
_temp_phrases = list(map(lambda x: x.rstrip(' ').lstrip(' ').strip('\n'), _temp_phrases))
if _temp_phrases[0] not in _replacement_queries_from_files:
_replacement_queries_from_files[_temp_phrases[0]] = []
for i in range(1, len(_temp_phrases)):
_replacement_queries_from_files[_temp_phrases[0]].append(_temp_phrases[i])
except FileNotFoundError as e:
print("Error: %s" % (str(e)))
for _phrase in _n_grams_from_input_text_file:
_phrase = _phrase.rstrip(' ').lstrip(' ').strip('\n')
clean_phrase = POS_tag_cleaner(_phrase)
if clean_phrase in _replacement_queries_from_files:
_n_gram_replacement_phrases[clean_phrase] = _replacement_queries_from_files[clean_phrase]
search_queries[clean_phrase] = 0
for _temp_phrase in _n_gram_replacement_phrases[clean_phrase]:
search_queries[_temp_phrase] = 0
else:
_n_grams_from_input_text_file_not_have_replacements.append(_phrase)
with open("method3_replacement_phrases.cache", 'a') as output_replacements_cache:
for _n_gram in _n_grams_from_input_text_file_not_have_replacements:
_n_gram = _n_gram.rstrip(' ').lstrip(' ').strip('\n') # To remove any right most trailing space. This can be an issue when n-grams are directly passed
if _verbose:
print("\n%s:" %(_n_gram), file = _output_file_verbose)
if _n_gram in statistical_collocations or _n_gram in n_grams_not_collocations:
# If a particular n-gram phrase is checked if it is a collocation before,
# it will be present in one of the lists, statistical_collocations OR n_grams_not_collocations
# Hence, we move on to the next n-gram
continue
else:
# Before checking if the n-gram is a collocation we check if atlease one
# POS tag is from the valid POS tag list: {Noun, Verb, Adverb, Adjective} if
# _apply_POS_restrictions is set to True
#Consider the replacement words
# Removing pos tags from the phrase
_search_phrase = POS_tag_cleaner(_n_gram)
#_search_phrase = re.sub(r'_.*? ', ' ', _n_gram + ' ').rstrip(' ')
if _verbose:
print("\tSearch phrase: %s" %(_search_phrase), file = _output_file_verbose)
# Add the n-gram itself to search queries
# Total search results returned for the phrase
# Using Bing search API
# _verbose is set to 'False' as we don't want to print the search result twice
if 'B' in _collocation_corpora:
#_phrase_search_totals, _bing_api_key = bing_search_total(False, _search_phrase, _bing_api_key)
#_phrase_search_total_dictionary[_search_phrase] = _phrase_search_totals
search_queries[_search_phrase] = 0
_n_gram_replacement_phrases[_search_phrase] = []
#if _verbose:
# print("\tSearch result total of the phrase: %d" %(_phrase_search_totals), file = _output_file_verbose)
# list to save the search result totals of the phrases with words replaced
_list_of_search_result_totals = []
# Splitting n-gram at spaces - a list of all words in the n-gram along with their POS tags
_pos_tagged_word_in_ngram = _n_gram.split(' ')
# Replace each word of the list and get search result totals
for _pos_tagged_word in _pos_tagged_word_in_ngram:
_word, _pos_tag = _pos_tagged_word.split('_') # As word and it's POS are linked by an underscore
# Each word is to be replaced with 5 random words of the same POS
# If any POS word list has < 5 words, it will be replaced with the words available
# A list to store the _replacement_words
_replacement_words = []
if(_pos_tag == 'CC'):
if(len(CC) > 4):
_replacement_words = random.sample(CC, 5)
else:
_replacement_words = CC
elif(_pos_tag == 'CD'):
if(len(CD) > 4):
_replacement_words = random.sample(CD, 5)
else:
_replacement_words = CD
elif(_pos_tag == 'DT'):
if(len(DT) > 4):
_replacement_words = random.sample(DT, 5)
else:
_replacement_words = DT
elif(_pos_tag == 'EX'):
if(len(EX) > 4):
_replacement_words = random.sample(EX, 5)
else:
_replacement_words = EX
elif(_pos_tag == 'IN'):
if(len(IN) > 4):
_replacement_words = random.sample(IN, 5)
else:
_replacement_words = IN
if(_pos_tag == 'VB'):
if(len(VB) > 4):
_replacement_words = random.sample(VB, 5)
else:
_replacement_words = VB
elif(_pos_tag == 'VBP'):
if(len(VBP) > 4):
_replacement_words = random.sample(VBP, 5)
else:
_replacement_words = VBP
elif(_pos_tag == 'WP$'):
if(len(WHP) > 4):
_replacement_words = random.sample(WHP, 5)
else:
_replacement_words = WHP
elif(_pos_tag == 'JJ'):
if(len(JJ) > 4):
_replacement_words = random.sample(JJ, 5)
else:
_replacement_words = JJ
elif(_pos_tag == 'JJR'):
if(len(JJR) > 4):
_replacement_words = random.sample(JJR, 5)
else:
_replacement_words = JJR
elif(_pos_tag == 'JJS'):
if(len(JJS) > 4):
_replacement_words = random.sample(JJS, 5)
else:
_replacement_words = JJS
elif(_pos_tag == 'MD'):
if(len(MD) > 4):
_replacement_words = random.sample(MD, 5)
else:
_replacement_words = MD
elif(_pos_tag == 'NN'):
if(len(NN) > 4):
_replacement_words = random.sample(NN, 5)
else:
_replacement_words = NN
elif(_pos_tag == 'VBD'):
if(len(VBD) > 4):
_replacement_words = random.sample(VBD, 5)
else:
_replacement_words = VBD
elif(_pos_tag == 'VBZ'):
if(len(VBZ) > 4):
_replacement_words = random.sample(VBZ, 5)
else:
_replacement_words = VBZ
elif(_pos_tag == 'WRB'):
if(len(WRB) > 4):
_replacement_words = random.sample(WRB, 5)
else:
_replacement_words = WRB
elif(_pos_tag == 'NNP'):
if(len(NNP) > 4):
_replacement_words = random.sample(NNP, 5)
else:
_replacement_words = NNP
elif(_pos_tag == 'NNPS'):
if(len(NNPS) > 4):
_replacement_words = random.sample(NNPS, 5)
else:
_replacement_words = NNPS
elif(_pos_tag == 'NNS'):
if(len(NNS) > 4):
_replacement_words = random.sample(NNS, 5)
else:
_replacement_words = NNS
elif(_pos_tag == 'PDT'):
if(len(PDT) > 4):
_replacement_words = random.sample(PDT, 5)
else:
_replacement_words = PDT
elif(_pos_tag == 'PRP'):
if(len(PRPP) > 4):
_replacement_words = random.sample(PRPP, 5)
else:
_replacement_words = PRPP
elif(_pos_tag == 'VBG'):
if(len(VBG) > 4):
_replacement_words = random.sample(VBG, 5)
else:
_replacement_words = VBG
elif(_pos_tag == 'WDT'):
if(len(WDT) > 4):
_replacement_words = random.sample(WDT, 5)
else:
_replacement_words = WDT
elif(_pos_tag == 'PRP$'):
if(len(PRP) > 4):
_replacement_words = random.sample(PRP, 5)
else:
_replacement_words = PRP
elif(_pos_tag == 'RB'):
if(len(RB) > 4):
_replacement_words = random.sample(RB, 5)
else:
_replacement_words = RB
elif(_pos_tag == 'RBR'):
if(len(RB) > 4):
_replacement_words = random.sample(RBR, 5)
else:
_replacement_words = RBR
elif(_pos_tag == 'RBS'):
if(len(RBS) > 4):
_replacement_words = random.sample(RBS, 5)
else:
_replacement_words = RBS
elif(_pos_tag == 'RP'):
if(len(RP) > 4):
_replacement_words = random.sample(RP, 5)
else:
_replacement_words = RP
elif(_pos_tag == 'TO'):
if(len(TO) > 4):
_replacement_words = random.sample(TO, 5)
else:
_replacement_words = TO
elif(_pos_tag == 'VBN'):
if(len(VBN) > 4):
_replacement_words = random.sample(VBN, 5)
else:
_replacement_words = VBN
if _verbose:
print("\tReplacement words chosen for the word %s: %s" %(_word, _replacement_words), file = _output_file_verbose)
# word in the phrase is replaced with the replacement words and obtain the search totals
# A maximum of 5 queries can be created, lets name them _search_query_1,
# _search_query_2, _search_query_3, _search_query_4, _search_query_5
_search_query_1 = ""
_search_query_2 = ""
_search_query_3 = ""
_search_query_4 = ""
_search_query_5 = ""
# Depending on the number of replacement words present in the list of _replacement_words, we'll have up to 5 queries
# If there is atleast one word for replacement
if len(_replacement_words) > 0:
_search_query_1 = _search_phrase.replace(_word, _replacement_words[0].lstrip(' ').rstrip(' ').strip('\n'))
print(_search_phrase, end = "", file = output_replacements_cache)
print('|' + _search_query_1, end = "", file = output_replacements_cache)
if 'B' in _collocation_corpora:
#_search_total, _bing_api_key = bing_search_total(False, _search_query_1, _bing_api_key)
search_queries[_search_query_1] = 0
_n_gram_replacement_phrases[_search_phrase].append(_search_query_1)
# Append the search total to the list iff it is non-zero (>= 0 always)
#if not _search_total == 0:
# _list_of_search_result_totals.append(_search_total)
#if _verbose:
# print("\t\tSearch query 1: \t%s\n\t\tSearch total: \t\t%d" %(_search_query_1, _search_total), file = _output_file_verbose)
# If there are at least two words for replacement
if len(_replacement_words) > 1:
_search_query_2 = _search_phrase.replace(_word, _replacement_words[1].lstrip(' ').rstrip(' ').strip('\n'))
print('|' + _search_query_2, end = "", file = output_replacements_cache)
if 'B' in _collocation_corpora:
#_search_total, _bing_api_key = bing_search_total(False, _search_query_2, _bing_api_key)
search_queries[_search_query_2] = 0
_n_gram_replacement_phrases[_search_phrase].append(_search_query_2)
#if not _search_total == 0:
# _list_of_search_result_totals.append(_search_total)
#if _verbose:
# print("\t\tSearch query 2: \t%s\n\t\tSearch total: \t\t%d" %(_search_query_2, _search_total), file = _output_file_verbose)
# If there are at least 3 words for replacement
if len(_replacement_words) > 2:
_search_query_3 = _search_phrase.replace(_word, _replacement_words[2].lstrip(' ').rstrip(' ').strip('\n'))
print('|' + _search_query_3, end = "", file = output_replacements_cache)
if 'B' in _collocation_corpora:
#_search_total, _bing_api_key = bing_search_total(False, _search_query_3, _bing_api_key)
search_queries[_search_query_3] = 0
_n_gram_replacement_phrases[_search_phrase].append(_search_query_3)
#if not _search_total == 0:
# _list_of_search_result_totals.append(_search_total)
#if _verbose:
# print("\t\tSearch query 3: \t%s\n\t\tSearch total: \t\t%d" %(_search_query_3, _search_total), file = _output_file_verbose)
# If there are at least 4 words for replacement
if len(_replacement_words) > 3:
_search_query_4 = _search_phrase.replace(_word, _replacement_words[3].lstrip(' ').rstrip(' ').strip('\n'))
print('|' + _search_query_4, end = "", file = output_replacements_cache)
if 'B' in _collocation_corpora:
#_search_total, _bing_api_key = bing_search_total(False, _search_query_4, _bing_api_key)
search_queries[_search_query_4] = 0
_n_gram_replacement_phrases[_search_phrase].append(_search_query_4)
#if not _search_total == 0:
# _list_of_search_result_totals.append(_search_total)
#if _verbose:
# print("\t\tSearch query 4: \t%s\n\t\tSearch total: \t\t%d" %(_search_query_4, _search_total), file = _output_file_verbose)
# If there are at least 5 words for replacement
if len(_replacement_words) > 4:
_search_query_5 = _search_phrase.replace(_word, _replacement_words[4].lstrip(' ').rstrip(' ').strip('\n'))
print('|' + _search_query_5, end = "", file = output_replacements_cache)
if 'B' in _collocation_corpora:
#_search_total, _bing_api_key = bing_search_total(False, _search_query_5, _bing_api_key)
search_queries[_search_query_5] = 0
_n_gram_replacement_phrases[_search_phrase].append(_search_query_5)
#if not _search_total == 0:
# _list_of_search_result_totals.append(_search_total)
#if _verbose:
# print("\t\tSearch query 5: \t%s\n\t\tSearch total: \t\t%d" %(_search_query_5, _search_total), file = _output_file_verbose)
print("", file = output_replacements_cache)
# A variable to point to the sum of search totals of the replacement phrases
#_sum_of_replacement_search_totals = 0
#try:
# for _search_total in _list_of_search_result_totals:
# _sum_of_replacement_search_totals += _search_total
#except Exception as e:
# if _verbose:
# print("\tERROR while calculating the sum of replacement search totals\n\t\t%s" %(str(e)), file = _output_file_verbose)
# print("\tERROR while calculating the sum of replacement search totals\n\t\t%s" %(str(e)))
# Average of all non-zero replacement search totals
# _sum_of_replacement_search_totals / total number of replacement search totals
if not len(_list_of_search_result_totals) == 0:
_average_of_replacement_search_totals = float(_sum_of_replacement_search_totals) / len(_list_of_search_result_totals)
if _verbose:
print("\tList of non-zero search totals: %s" %(_list_of_search_result_totals), file = _output_file_verbose)
print("\tAverage of all non-zero search totals: %f" %(_average_of_replacement_search_totals), file = _output_file_verbose)
else:
n_grams_not_collocations.append(_n_gram)
if _verbose:
print("\tThere are no non-zero search results.\n\tMoving on to the next n-gram ...", file = _output_file_verbose)
continue
# Calculating statistical value
# If average is equals to zero OR search result of the phrase, we consider statistical_value of that phrase as zero,
# to avoid 'division by zero' scenario
if (_average_of_replacement_search_totals == 0) or (_phrase_search_totals == 0):
_stat = 0
else:
_stat = 1 - (_average_of_replacement_search_totals / float(_phrase_search_totals))
if _verbose:
print("\tStatistical value: %f" %(_stat), file = _output_file_verbose)
# Removing duplicates from the lists , statistical_collocations and n_grams_not_collocations
#statistical_collocations = list(set(statistical_collocations))
#n_grams_not_collocations = list(set(n_grams_not_collocations))
#proxy_file_name = input("Name of the file containing proxies to use: ?")
# Configure the settings for GoogleScraper
if len(search_queries) > 0:
#queries_with_quotes = [('"' + x + '"') for x in search_queries_to_be_physicall_requested]
_bing_search = BingSearchAPI(_bing_api_key)
for bing_query in search_queries:
#bing_query_with_quotes = '"' + bing_query + '"'
search_queries[bing_query], _bing_api_key = _bing_search.search_total(False, bing_query)
doCalculation = True
while doCalculation:
#Set up the possible values of c
good_input = False
while not good_input:
try:
#start_c_value, end_c_value = map(float, input("Please enter the constant 'c' values' range start and end: ").split(" "))
start_c_value=13.1
end_c_value=13.1
#_c_increment = float(input("Please enter the amount of increase on constant 'c' value: "))
_c_increment = 0.1
#_c_decimal = eval(input("Please enter the number of decimals on constant 'c': "))
_c_decimal = 1
good_input = True
except Exception as e:
print("ERROR Input, message: {0}".format(str(e)))
string_values_of_c = []
_c_temp = start_c_value
while _c_temp <= end_c_value:
string_values_of_c.append("{number:.{decimal}f}".format(number = _c_temp, decimal = _c_decimal))
_c_temp += _c_increment
if _verbose:
print("Values of 'c' constant: " + str(string_values_of_c))
for _string_c_value in string_values_of_c:
_c_value = float(_string_c_value)
if _verbose:
print("\nNew standard c value for method 3: " + _string_c_value, file = _output_file_verbose)
n_grams_not_collocations = []
statistical_collocations = []
for _n_gram, replacements in _n_gram_replacement_phrases.items():
_list_of_search_result_totals = []
_sum_of_replacement_search_totals = 0
# Calculate the sum of the replacement phrases
for replacement in replacements:
number_result = search_queries[replacement]
if number_result > 0:
_list_of_search_result_totals.append(number_result)
_sum_of_replacement_search_totals += number_result
if not len(_list_of_search_result_totals) == 0:
_average_of_replacement_search_totals = float(_sum_of_replacement_search_totals) / len(_list_of_search_result_totals)
_phrase_search_total_dictionary[_n_gram] = search_queries[_n_gram]
if search_queries[_n_gram] > _c_value * _average_of_replacement_search_totals:
statistical_collocations.append(_n_gram)
if _verbose:
print("\Satisfied standard c value specified. Phrase added.\t" + _n_gram, file = _output_file_verbose)
else:
n_grams_not_collocations.append(_n_gram)
if _verbose:
print("\tList of non-zero search totals: %s" %(_list_of_search_result_totals), file = _output_file_verbose)
print("\tAverage of all non-zero search totals: %f" %(_average_of_replacement_search_totals), file = _output_file_verbose)
else:
n_grams_not_collocations.append(_n_gram)
if _verbose:
print("\tThere are no non-zero search results.\n\tMoving on to the next n-gram ...", file = _output_file_verbose)
continue
# Output text file to save collocations
_output_folder_path = str(_input_file_path).replace(_input_file_path.split('/')[-1], '') + 'c' + _string_c_value + '/'
#Check if the path exists, and creates if neccessary
if not os.path.exists(_output_folder_path):
os.makedirs(_output_folder_path)
_output_file_path_statistical_collocations = _output_folder_path + 'collocations_statistic.txt'
_output_file_statistical_collocations = open(_output_file_path_statistical_collocations, 'w')
for _collocation in statistical_collocations:
_output_file_statistical_collocations.write(_collocation + '\n')
_output_file_statistical_collocations.close()
if _verbose:
print("\n\tMethod-3: Statistical - Collocations are written to the file:\n\t%s" %(_output_file_path_statistical_collocations), file = _output_file_verbose)
# Output text file to save n-grams that are not collocations
_output_file_path_statistical_not_collocations = _output_folder_path + 'not_collocations_statistical.txt'
_output_file_statistical_not_collocations = open(_output_file_path_statistical_not_collocations, 'w')
for _n_gram in n_grams_not_collocations:
_output_file_statistical_not_collocations.write(_n_gram + '\n')
_output_file_statistical_not_collocations.close()
if _verbose:
print("\n\tMethod-3: Statistical - N-grams that are not collocations are written to the file:\n\t%s" %(_output_file_path_statistical_not_collocations), file = _output_file_verbose)
if _verbose:
print("\n----------------------------------------------------------------------------------------", file = _output_file_verbose)
print("\tMethod-3: Statistical technique - Extracting collocations:", file = _output_file_verbose)
print("----------------------------------------------------------------------------------------\n\n", file = _output_file_verbose)
print("\t\tMethod-3: Collocation extraction - successful")
#redo = input("Do you want to redo method 3 substitution with other c values? (Enter NOWAY to exit): ")
redo = "NOWAY"
if redo.upper() == "NOWAY":
doCalculation = False
if _verbose:
_output_file_verbose.close()
return statistical_collocations, n_grams_not_collocations, _phrase_search_total_dictionary
| |
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import tempfile
import jinja2.exceptions
import pytest
import salt.modules.debian_ip as debian_ip
import salt.utils.files
import salt.utils.platform
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
try:
from salt.utils.odict import OrderedDict as odict
except ImportError:
from collections import OrderedDict as odict
# Big pile of interface data for unit tests
# To skip, search for 'DebianIpTestCase'
# fmt: off
test_interfaces = [
# Structure
#{'iface_name': 'ethX', 'iface_type': 'eth', 'enabled': True,
# 'skip_test': bool(), # True to disable this test
# 'build_interface': dict(), # data read from sls
# 'get_interface(): OrderedDict(), # data read from interfaces file
# 'return': list()}, # jinja-rendered data
# IPv4-only interface; single address
{'iface_name': 'eth1', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('eth1', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
])),
]))]))]),
'return': [
'auto eth1\n',
'iface eth1 inet static\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
'\n']},
# IPv6-only; single address
{'iface_name': 'eth2', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:beef::1',
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('eth2', odict([('enabled', True), ('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:beef::3'),
('netmask', 64),
('gateway', '2001:db8:dead:beef::1'),
])),
]))]))]),
'return': [
'auto eth2\n',
'iface eth2 inet6 static\n',
' address 2001:db8:dead:beef::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:beef::1\n',
'\n']},
# IPv6-only; multiple addrs; no gw; first addr from ipv6addr
{'iface_name': 'eth3', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::5/64',
'ipv6ipaddrs': [
'2001:db8:dead:beef::7/64',
'2001:db8:dead:beef::8/64',
'2001:db8:dead:beef::9/64'],
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('eth3', odict([('enabled', True), ('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:beef::5/64'),
('addresses', [
'2001:db8:dead:beef::7/64',
'2001:db8:dead:beef::8/64',
'2001:db8:dead:beef::9/64',
]),
])),
]))]))]),
'return': [
'auto eth3\n',
'iface eth3 inet6 static\n',
' address 2001:db8:dead:beef::5/64\n',
' address 2001:db8:dead:beef::7/64\n',
' address 2001:db8:dead:beef::8/64\n',
' address 2001:db8:dead:beef::9/64\n',
'\n']},
# IPv6-only; multiple addresses
{'iface_name': 'eth4', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'ipv6proto': 'static',
'ipv6ipaddrs': [
'2001:db8:dead:beef::5/64',
'2001:db8:dead:beef::7/64',
'2001:db8:dead:beef::8/64',
'2001:db8:dead:beef::9/64'],
'ipv6gateway': '2001:db8:dead:beef::1',
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('eth4', odict([('enabled', True), ('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:beef::5/64'),
('addresses', [
'2001:db8:dead:beef::7/64',
'2001:db8:dead:beef::8/64',
'2001:db8:dead:beef::9/64',
]),
('gateway', '2001:db8:dead:beef::1'),
])),
]))]))]),
'return': [
'auto eth4\n',
'iface eth4 inet6 static\n',
' address 2001:db8:dead:beef::5/64\n',
' address 2001:db8:dead:beef::7/64\n',
' address 2001:db8:dead:beef::8/64\n',
' address 2001:db8:dead:beef::9/64\n',
' gateway 2001:db8:dead:beef::1\n',
'\n']},
# IPv4 and IPv6 settings with v4 disabled
{'iface_name': 'eth5', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:beef::1',
'enable_ipv4': False,
'noifupdown': True,
},
'get_interface': odict([('eth5', odict([('enabled', True), ('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:beef::3'),
('netmask', 64),
('gateway', '2001:db8:dead:beef::1'),
])),
]))]))]),
'return': [
'auto eth5\n',
'iface eth5 inet6 static\n',
' address 2001:db8:dead:beef::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:beef::1\n',
'\n']},
# IPv4 and IPv6 settings with v6 disabled
{'iface_name': 'eth6', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:beef::1',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('eth6', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
])),
]))]))]),
'return': [
'auto eth6\n',
'iface eth6 inet static\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
'\n']},
# IPv4 and IPv6; shared/overridden settings
{'iface_name': 'eth7', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:beef::1',
'ttl': '18', # shared
'ipv6ttl': '15', # overridden for v6
'mtu': '1480', # shared
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('eth7', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
('ttl', 18),
('mtu', 1480),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:beef::3'),
('netmask', 64),
('gateway', '2001:db8:dead:beef::1'),
('ttl', 15),
('mtu', 1480),
])),
]))]))]),
'return': [
'auto eth7\n',
'iface eth7 inet static\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
' ttl 18\n',
' mtu 1480\n',
'iface eth7 inet6 static\n',
' address 2001:db8:dead:beef::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:beef::1\n',
' ttl 15\n',
' mtu 1480\n',
'\n']},
# Slave iface
{'iface_name': 'eth8', 'iface_type': 'slave', 'enabled': True,
'build_interface': {
'master': 'bond0',
'noifupdown': True,
},
'get_interface': odict([('eth8', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'manual'),
('filename', None),
('bonding', odict([
('master', 'bond0'),
])),
('bonding_keys', ['master']),
])),
]))]))]),
'return': [
'auto eth8\n',
'iface eth8 inet manual\n',
' bond-master bond0\n',
'\n']},
# Bond; with address IPv4 and IPv6 address; slaves as string
{'iface_name': 'bond9', 'iface_type': 'bond', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '10.1.0.14',
'netmask': '255.255.255.0',
'gateway': '10.1.0.1',
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:c0::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:c0::1',
'mode': '802.3ad',
'slaves': 'eth4 eth5',
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('bond9', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '10.1.0.14'),
('netmask', '255.255.255.0'),
('gateway', '10.1.0.1'),
('bonding', odict([
('ad_select', '0'),
('downdelay', '200'),
('lacp_rate', '0'),
('miimon', '100'),
('mode', '4'),
('slaves', 'eth4 eth5'),
('updelay', '0'),
('use_carrier', 'on'),
])),
('bonding_keys', [
'ad_select',
'downdelay',
'lacp_rate',
'miimon',
'mode',
'slaves',
'updelay',
'use_carrier',
]),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:c0::3'),
('netmask', 64),
('gateway', '2001:db8:dead:c0::1'),
('bonding', odict([
('ad_select', '0'),
('downdelay', '200'),
('lacp_rate', '0'),
('miimon', '100'),
('mode', '4'),
('slaves', 'eth4 eth5'),
('updelay', '0'),
('use_carrier', 'on'),
])),
('bonding_keys', [
'ad_select',
'downdelay',
'lacp_rate',
'miimon',
'mode',
'slaves',
'updelay',
'use_carrier',
]),
])),
]))]))]),
'return': [
'auto bond9\n',
'iface bond9 inet static\n',
' address 10.1.0.14\n',
' netmask 255.255.255.0\n',
' gateway 10.1.0.1\n',
' bond-ad_select 0\n',
' bond-downdelay 200\n',
' bond-lacp_rate 0\n',
' bond-miimon 100\n',
' bond-mode 4\n',
' bond-slaves eth4 eth5\n',
' bond-updelay 0\n',
' bond-use_carrier on\n',
'iface bond9 inet6 static\n',
' address 2001:db8:dead:c0::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:c0::1\n',
' bond-ad_select 0\n',
' bond-downdelay 200\n',
' bond-lacp_rate 0\n',
' bond-miimon 100\n',
' bond-mode 4\n',
' bond-slaves eth4 eth5\n',
' bond-updelay 0\n',
' bond-use_carrier on\n',
'\n']},
# Bond; with address IPv4 and IPv6 address; slaves as list
{'iface_name': 'bond10', 'iface_type': 'bond', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '10.1.0.14',
'netmask': '255.255.255.0',
'gateway': '10.1.0.1',
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:c0::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:c0::1',
'mode': '802.3ad',
'slaves': ['eth4', 'eth5'],
'enable_ipv6': True,
'noifupdown': True,
},
'get_interface': odict([('bond10', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '10.1.0.14'),
('netmask', '255.255.255.0'),
('gateway', '10.1.0.1'),
('bonding', odict([
('ad_select', '0'),
('downdelay', '200'),
('lacp_rate', '0'),
('miimon', '100'),
('mode', '4'),
('slaves', 'eth4 eth5'),
('updelay', '0'),
('use_carrier', 'on'),
])),
('bonding_keys', [
'ad_select',
'downdelay',
'lacp_rate',
'miimon',
'mode',
'slaves',
'updelay',
'use_carrier',
]),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:c0::3'),
('netmask', 64),
('gateway', '2001:db8:dead:c0::1'),
('bonding', odict([
('ad_select', '0'),
('downdelay', '200'),
('lacp_rate', '0'),
('miimon', '100'),
('mode', '4'),
('slaves', 'eth4 eth5'),
('updelay', '0'),
('use_carrier', 'on'),
])),
('bonding_keys', [
'ad_select',
'downdelay',
'lacp_rate',
'miimon',
'mode',
'slaves',
'updelay',
'use_carrier',
]),
])),
]))]))]),
'return': [
'auto bond10\n',
'iface bond10 inet static\n',
' address 10.1.0.14\n',
' netmask 255.255.255.0\n',
' gateway 10.1.0.1\n',
' bond-ad_select 0\n',
' bond-downdelay 200\n',
' bond-lacp_rate 0\n',
' bond-miimon 100\n',
' bond-mode 4\n',
' bond-slaves eth4 eth5\n',
' bond-updelay 0\n',
' bond-use_carrier on\n',
'iface bond10 inet6 static\n',
' address 2001:db8:dead:c0::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:c0::1\n',
' bond-ad_select 0\n',
' bond-downdelay 200\n',
' bond-lacp_rate 0\n',
' bond-miimon 100\n',
' bond-mode 4\n',
' bond-slaves eth4 eth5\n',
' bond-updelay 0\n',
' bond-use_carrier on\n',
'\n']},
# Bond VLAN; with IPv4 address
{'iface_name': 'bond0.11', 'iface_type': 'vlan', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '10.7.0.8',
'netmask': '255.255.255.0',
'gateway': '10.7.0.1',
'slaves': 'eth6 eth7',
'mode': '802.3ad',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('bond0.11', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('vlan_raw_device', 'bond1'),
('address', '10.7.0.8'),
('netmask', '255.255.255.0'),
('gateway', '10.7.0.1'),
('mode', '802.3ad'),
])),
]))]))]),
'return': [
'auto bond0.11\n',
'iface bond0.11 inet static\n',
' vlan-raw-device bond1\n',
' address 10.7.0.8\n',
' netmask 255.255.255.0\n',
' gateway 10.7.0.1\n',
' mode 802.3ad\n',
'\n']},
# Bond; without address
{'iface_name': 'bond0.12', 'iface_type': 'vlan', 'enabled': True,
'build_interface': {
'proto': 'static',
'slaves': 'eth6 eth7',
'mode': '802.3ad',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('bond0.12', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('vlan_raw_device', 'bond1'),
('mode', '802.3ad'),
])),
]))]))]),
'return': [
'auto bond0.12\n',
'iface bond0.12 inet static\n',
' vlan-raw-device bond1\n',
' mode 802.3ad\n',
'\n']},
# Bridged interface
{'iface_name': 'br0', 'iface_type': 'bridge', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.10',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'bridge_ports': 'eth1',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('br0', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.10'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
('bridging', odict([
('ports', 'eth1'),
])),
('bridging_keys', ['ports']),
])),
]))]))]),
'return': [
'auto br0\n',
'iface br0 inet static\n',
' address 192.168.4.10\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
' bridge_ports eth1\n',
'\n']},
# DNS NS as list
{'iface_name': 'eth13', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'enable_ipv6': False,
'noifupdown': True,
'dns': ['8.8.8.8', '8.8.4.4'],
},
'get_interface': odict([('eth13', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
('dns_nameservers', ['8.8.8.8', '8.8.4.4']),
])),
]))]))]),
'return': [
'auto eth13\n',
'iface eth13 inet static\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
' dns-nameservers 8.8.8.8 8.8.4.4\n',
'\n']},
# DNS NS as string
{'iface_name': 'eth14', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'static',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'enable_ipv6': False,
'noifupdown': True,
'dns': '8.8.8.8 8.8.4.4',
},
'get_interface': odict([('eth14', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'static'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
('dns_nameservers', ['8.8.8.8', '8.8.4.4']),
])),
]))]))]),
'return': [
'auto eth14\n',
'iface eth14 inet static\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
' dns-nameservers 8.8.8.8 8.8.4.4\n',
'\n']},
# Loopback; with IPv4 and IPv6 address
{'iface_name': 'lo15', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'loopback',
'ipaddr': '192.168.4.9',
'netmask': '255.255.255.0',
'gateway': '192.168.4.1',
'enable_ipv6': True,
'ipv6proto': 'loopback',
'ipv6ipaddr': 'fc00::1',
'ipv6netmask': '128',
'ipv6_autoconf': False,
'noifupdown': True,
},
'get_interface': odict([('lo15', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'loopback'),
('filename', None),
('address', '192.168.4.9'),
('netmask', '255.255.255.0'),
('gateway', '192.168.4.1'),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'loopback'),
('filename', None),
('address', 'fc00::1'),
('netmask', 128),
])),
]))]))]),
'return': [
'auto lo15\n',
'iface lo15 inet loopback\n',
' address 192.168.4.9\n',
' netmask 255.255.255.0\n',
' gateway 192.168.4.1\n',
'iface lo15 inet6 loopback\n',
' address fc00::1\n',
' netmask 128\n',
'\n']},
# Loopback; with only IPv6 address; enabled=False
{'iface_name': 'lo16', 'iface_type': 'eth', 'enabled': False,
'build_interface': {
'enable_ipv6': True,
'ipv6proto': 'loopback',
'ipv6ipaddr': 'fc00::1',
'ipv6netmask': '128',
'ipv6_autoconf': False,
'noifupdown': True,
},
'get_interface': odict([('lo16', odict([('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'loopback'),
('filename', None),
('address', 'fc00::1'),
('netmask', 128),
])),
]))]))]),
'return': [
'iface lo16 inet6 loopback\n',
' address fc00::1\n',
' netmask 128\n',
'\n']},
# Loopback; without address
{'iface_name': 'lo17', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'loopback',
'enable_ipv6': False,
'noifupdown': True,
},
'get_interface': odict([('lo17', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'loopback'),
('filename', None),
])),
]))]))]),
'return': [
'auto lo17\n',
'iface lo17 inet loopback\n',
'\n']},
# IPv4=DHCP; IPv6=Static; with IPv6 netmask
{'iface_name': 'eth18', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'dhcp',
'enable_ipv6': True,
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:c0::3',
'ipv6netmask': '64',
'ipv6gateway': '2001:db8:dead:c0::1',
'noifupdown': True,
},
'get_interface': odict([('eth18', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'dhcp'),
('filename', None),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:c0::3'),
('netmask', 64),
('gateway', '2001:db8:dead:c0::1'),
])),
]))]))]),
'return': [
'auto eth18\n',
'iface eth18 inet dhcp\n',
'iface eth18 inet6 static\n',
' address 2001:db8:dead:c0::3\n',
' netmask 64\n',
' gateway 2001:db8:dead:c0::1\n',
'\n']},
# IPv4=DHCP; IPv6=Static; without IPv6 netmask
{'iface_name': 'eth19', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'proto': 'dhcp',
'enable_ipv6': True,
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:c0::3/64',
'ipv6gateway': '2001:db8:dead:c0::1',
'noifupdown': True,
},
'get_interface': odict([('eth19', odict([('enabled', True), ('data', odict([
('inet', odict([
('addrfam', 'inet'),
('proto', 'dhcp'),
('filename', None),
])),
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('address', '2001:db8:dead:c0::3/64'),
('gateway', '2001:db8:dead:c0::1'),
])),
]))]))]),
'return': [
'auto eth19\n',
'iface eth19 inet dhcp\n',
'iface eth19 inet6 static\n',
' address 2001:db8:dead:c0::3/64\n',
' gateway 2001:db8:dead:c0::1\n',
'\n']},
# IPv6-only; static with autoconf and accept_ra forced
{'iface_name': 'eth20', 'iface_type': 'eth', 'enabled': True,
'build_interface': {
'ipv6proto': 'static',
'ipv6ipaddr': '2001:db8:dead:beef::3/64',
'ipv6gateway': '2001:db8:dead:beef::1',
'enable_ipv6': True,
'autoconf': 1,
'accept_ra': 2,
'noifupdown': True,
},
'get_interface': odict([('eth20', odict([('enabled', True), ('data', odict([
('inet6', odict([
('addrfam', 'inet6'),
('proto', 'static'),
('filename', None),
('autoconf', 1),
('address', '2001:db8:dead:beef::3/64'),
('gateway', '2001:db8:dead:beef::1'),
('accept_ra', 2),
])),
]))]))]),
'return': [
'auto eth20\n',
'iface eth20 inet6 static\n',
' autoconf 1\n',
' address 2001:db8:dead:beef::3/64\n',
' gateway 2001:db8:dead:beef::1\n',
' accept_ra 2\n',
'\n']},
]
# fmt: on
@skipIf(salt.utils.platform.is_windows(), "Do not run these tests on Windows")
class DebianIpTestCase(TestCase, LoaderModuleMockMixin):
    """
    Test cases for salt.modules.debian_ip

    All interaction with the OS (kmod, pkg, cmd.run, file writes) is mocked,
    so these tests only exercise the module's own logic.
    """

    def setup_loader_modules(self):
        return {debian_ip: {}}

    # 'build_bond' function tests: 3

    def test_build_bond(self):
        """
        Test if it create a bond script in /etc/modprobe.d with the passed
        settings and load the bonding kernel module.
        """
        with patch(
            "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={})
        ), patch("salt.modules.debian_ip._write_file", MagicMock(return_value=True)):
            mock = MagicMock(return_value=1)
            with patch.dict(debian_ip.__grains__, {"osrelease": mock}):
                mock = MagicMock(return_value=True)
                with patch.dict(
                    debian_ip.__salt__, {"kmod.load": mock, "pkg.install": mock}
                ):
                    self.assertEqual(debian_ip.build_bond("bond0"), "")

    def test_error_message_iface_should_process_non_str_expected(self):
        values = [1, True, False, "no-kaboom"]
        iface = "ethtest"
        option = "test"
        msg = debian_ip._error_msg_iface(iface, option, values)
        self.assertTrue(msg.endswith("[1|True|False|no-kaboom]"), msg)

    def test_error_message_network_should_process_non_str_expected(self):
        values = [1, True, False, "no-kaboom"]
        msg = debian_ip._error_msg_network("fnord", values)
        self.assertTrue(msg.endswith("[1|True|False|no-kaboom]"), msg)

    def test_build_bond_exception(self):
        """
        Test if it create a bond script in /etc/modprobe.d with the passed
        settings and load the bonding kernel module.
        """
        with patch(
            "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={})
        ):
            mock = MagicMock(return_value=1)
            with patch.dict(debian_ip.__grains__, {"osrelease": mock}):
                mock = MagicMock(
                    side_effect=jinja2.exceptions.TemplateNotFound("error")
                )
                with patch.object(jinja2.Environment, "get_template", mock):
                    self.assertEqual(debian_ip.build_bond("bond0"), "")

    def test_build_bond_data(self):
        """
        Test if it create a bond script in /etc/modprobe.d with the passed
        settings and load the bonding kernel module.
        """
        with patch(
            "salt.modules.debian_ip._parse_settings_bond", MagicMock(return_value={})
        ), patch("salt.modules.debian_ip._read_temp", MagicMock(return_value=True)):
            mock = MagicMock(return_value=1)
            with patch.dict(debian_ip.__grains__, {"osrelease": mock}):
                self.assertTrue(debian_ip.build_bond("bond0", test="True"))

    # 'build_routes' function tests: 2

    def test_build_routes(self):
        """
        Test if it add route scripts for a network interface using up commands.
        """
        with patch(
            "salt.modules.debian_ip._parse_routes",
            MagicMock(return_value={"routes": []}),
        ), patch(
            "salt.modules.debian_ip._write_file_routes", MagicMock(return_value=True)
        ), patch(
            "salt.modules.debian_ip._read_file", MagicMock(return_value="salt")
        ):
            # _read_file is consulted for both the v4 and v6 route files,
            # hence the doubled "saltsalt".
            self.assertEqual(debian_ip.build_routes("eth0"), "saltsalt")

    def test_build_routes_exception(self):
        """
        Test if it add route scripts for a network interface using up commands.
        """
        with patch(
            "salt.modules.debian_ip._parse_routes",
            MagicMock(return_value={"routes": []}),
        ):
            self.assertTrue(debian_ip.build_routes("eth0", test="True"))
            mock = MagicMock(side_effect=jinja2.exceptions.TemplateNotFound("err"))
            with patch.object(jinja2.Environment, "get_template", mock):
                self.assertEqual(debian_ip.build_routes("eth0"), "")

    # 'down' function tests: 1

    def test_down(self):
        """
        Test if it shutdown a network interface
        """
        # 'slave' interfaces are not taken down individually -> None.
        self.assertEqual(debian_ip.down("eth0", "slave"), None)
        mock = MagicMock(return_value="Salt")
        with patch.dict(debian_ip.__salt__, {"cmd.run": mock}):
            self.assertEqual(debian_ip.down("eth0", "eth"), "Salt")

    # 'get_bond' function tests: 1

    def test_get_bond(self):
        """
        Test if it return the content of a bond script
        """
        self.assertEqual(debian_ip.get_bond("bond0"), "")

    # '_parse_interfaces' function tests: 1

    def test_parse_interfaces(self):
        """
        Test if it returns the correct data for parsed configuration file
        """
        with tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile:
            for iface in test_interfaces:
                iname = iface["iface_name"]
                if iface.get("skip_test", False):
                    continue
                with salt.utils.files.fopen(str(tfile.name), "w") as fh:
                    fh.writelines(iface["return"])
                # The parser records which file each stanza came from, so
                # patch the expected data with the temp file's name.
                for inet in ["inet", "inet6"]:
                    if inet in iface["get_interface"][iname]["data"]:
                        iface["get_interface"][iname]["data"][inet]["filename"] = str(
                            tfile.name
                        )
                self.assertDictEqual(
                    debian_ip._parse_interfaces([str(tfile.name)]),
                    iface["get_interface"],
                )

    # 'get_interface' function tests: 1

    def test_get_interface(self):
        """
        Test if it return the contents of an interface script
        """
        for iface in test_interfaces:
            if iface.get("skip_test", False):
                continue
            with patch.object(
                debian_ip,
                "_parse_interfaces",
                MagicMock(return_value=iface["get_interface"]),
            ):
                self.assertListEqual(
                    debian_ip.get_interface(iface["iface_name"]), iface["return"]
                )

    # 'build_interface' function tests: 1

    def test_build_interface(self):
        """
        Test if it builds an interface script for a network interface.
        """
        with patch(
            "salt.modules.debian_ip._write_file_ifaces", MagicMock(return_value="salt")
        ):
            self.assertEqual(
                debian_ip.build_interface("eth0", "eth", "enabled"),
                ["s\n", "a\n", "l\n", "t\n"],
            )
            self.assertTrue(
                debian_ip.build_interface("eth0", "eth", "enabled", test="True")
            )
            with patch.object(
                debian_ip, "_parse_settings_eth", MagicMock(return_value={"routes": []})
            ):
                for eth_t in ["bridge", "slave", "bond"]:
                    self.assertRaises(
                        AttributeError,
                        debian_ip.build_interface,
                        "eth0",
                        eth_t,
                        "enabled",
                    )
            self.assertTrue(
                debian_ip.build_interface("eth0", "eth", "enabled", test="True")
            )
        with tempfile.NamedTemporaryFile(mode="r", delete=True) as tfile:
            with patch("salt.modules.debian_ip._DEB_NETWORK_FILE", str(tfile.name)):
                for iface in test_interfaces:
                    if iface.get("skip_test", False):
                        continue
                    # Skip tests that require __salt__['pkg.install']()
                    if iface["iface_type"] in ["bridge", "pppoe", "vlan"]:
                        continue
                    self.assertListEqual(
                        debian_ip.build_interface(
                            iface=iface["iface_name"],
                            iface_type=iface["iface_type"],
                            enabled=iface["enabled"],
                            interface_file=tfile.name,
                            **iface["build_interface"]
                        ),
                        iface["return"],
                    )

    # 'up' function tests: 1

    def test_up(self):
        """
        Test if it start up a network interface
        """
        # BUGFIX: this previously called debian_ip.down() by copy/paste
        # mistake, so the 'slave' branch of up() was never exercised.
        self.assertEqual(debian_ip.up("eth0", "slave"), None)
        mock = MagicMock(return_value="Salt")
        with patch.dict(debian_ip.__salt__, {"cmd.run": mock}):
            self.assertEqual(debian_ip.up("eth0", "eth"), "Salt")

    # 'get_network_settings' function tests: 1

    def test_get_network_settings(self):
        """
        Test if it return the contents of the global network script.
        """
        with patch.dict(
            debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"}
        ), patch(
            "salt.modules.debian_ip._parse_hostname",
            MagicMock(return_value="SaltStack"),
        ), patch(
            "salt.modules.debian_ip._parse_domainname",
            MagicMock(return_value="saltstack.com"),
        ):
            mock_avai = MagicMock(return_value=True)
            with patch.dict(
                debian_ip.__salt__,
                {"service.available": mock_avai, "service.status": mock_avai},
            ):
                self.assertEqual(
                    debian_ip.get_network_settings(),
                    [
                        "NETWORKING=yes\n",
                        "HOSTNAME=SaltStack\n",
                        "DOMAIN=saltstack.com\n",
                    ],
                )
                mock = MagicMock(
                    side_effect=jinja2.exceptions.TemplateNotFound("error")
                )
                with patch.object(jinja2.Environment, "get_template", mock):
                    self.assertEqual(debian_ip.get_network_settings(), "")

    # 'get_routes' function tests: 1

    def test_get_routes(self):
        """
        Test if it return the routes for the interface
        """
        with patch("salt.modules.debian_ip._read_file", MagicMock(return_value="salt")):
            self.assertEqual(debian_ip.get_routes("eth0"), "saltsalt")

    # 'apply_network_settings' function tests: 1

    @pytest.mark.slow_test
    def test_apply_network_settings(self):
        """
        Test if it apply global network configuration.
        """
        mock = MagicMock(return_value=True)
        with patch.dict(
            debian_ip.__salt__,
            {"network.mod_hostname": mock, "service.stop": mock, "service.start": mock},
        ):
            self.assertEqual(debian_ip.apply_network_settings(), True)

    # 'build_network_settings' function tests: 1

    def test_build_network_settings(self):
        """
        Test if it build the global network script.
        """
        with patch(
            "salt.modules.debian_ip._parse_network_settings",
            MagicMock(
                return_value={
                    "networking": "yes",
                    "hostname": "Salt.saltstack.com",
                    "domainname": "saltstack.com",
                    "search": "test.saltstack.com",
                }
            ),
        ), patch(
            "salt.modules.debian_ip._write_file_network", MagicMock(return_value=True)
        ):
            with patch.dict(
                debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "14"}
            ):
                mock = MagicMock(return_value=True)
                with patch.dict(
                    debian_ip.__salt__,
                    {
                        "service.available": mock,
                        "service.disable": mock,
                        "service.enable": mock,
                    },
                ):
                    self.assertEqual(
                        debian_ip.build_network_settings(),
                        [
                            "NETWORKING=yes\n",
                            "HOSTNAME=Salt\n",
                            "DOMAIN=saltstack.com\n",
                            "SEARCH=test.saltstack.com\n",
                        ],
                    )
                    mock = MagicMock(
                        side_effect=jinja2.exceptions.TemplateNotFound("error")
                    )
                    with patch.object(jinja2.Environment, "get_template", mock):
                        self.assertEqual(debian_ip.build_network_settings(), "")
            # Older release ("10") takes the legacy code path.
            with patch.dict(
                debian_ip.__grains__, {"osfullname": "Ubuntu", "osrelease": "10"}
            ):
                mock = MagicMock(return_value=True)
                with patch.dict(
                    debian_ip.__salt__,
                    {
                        "service.available": mock,
                        "service.disable": mock,
                        "service.enable": mock,
                    },
                ):
                    mock = MagicMock(
                        side_effect=jinja2.exceptions.TemplateNotFound("error")
                    )
                    with patch.object(jinja2.Environment, "get_template", mock):
                        self.assertEqual(debian_ip.build_network_settings(), "")
                        with patch.object(
                            debian_ip, "_read_temp", MagicMock(return_value=True)
                        ):
                            self.assertTrue(debian_ip.build_network_settings(test="True"))
| |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib2
from xml.dom import minidom
import xml.etree.ElementTree as ET
from cinder.volume.drivers.violin.vxg.core.error import *
class XGRequest(object):
    """Request to XML gateway.

    Arguments:
        type        -- request kind: "query", "set", "action" or "event"
        nodes       -- node objects to include; each must implement
                       as_element_tree()
        action      -- action name, required iff type == "action"
        event       -- event name, required iff type == "event"
        flat        -- request a flat response
        values_only -- request values only (requires flat=True)

    Raises:
        TypeError -- on an unknown type or an inconsistent combination of
                     arguments.
    """
    def __init__(self, type="query", nodes=None, action=None, event=None,
                 flat=False, values_only=False):
        if type not in ["query", "set", "action", "event"]:
            raise TypeError("Unknown request type %s." % (type))
        self.type = type
        # BUGFIX: the default used to be a shared mutable list (nodes=[]),
        # so appending to one request's nodes leaked into every other
        # request created with the default. Normalize None to a fresh list.
        self.nodes = nodes if nodes is not None else []
        if values_only and not flat:
            raise TypeError("values_only requires flat = True")
        self.flat = flat
        self.values_only = values_only
        if type == "action" and action is None:
            raise TypeError("Missing action name for action request.")
        if type != "action" and action is not None:
            raise TypeError("Action name specified for non-action request.")
        self.action = action
        if type == "event" and event is None:
            raise TypeError("Missing event name for event request.")
        if type != "event" and event is not None:
            raise TypeError("Event name specified for non-event request.")
        self.event = event
    def __repr__(self):
        return ('<XGRequest type:%s action:%s nodes:%r>'
                % (self.type, self.action, self.nodes))
    def to_xml(self, pretty_print=True):
        """Return an XML document describing this XGRequest.
        Arguments:
        pretty_print -- Get a properly formatted XML doc as opposed
                        to a single-line string with XML tags (bool)
        Returns:
        This request object as an XML string.
        """
        root = ET.Element('xg-request')
        # The request kind becomes the wrapper element, e.g. <query-request>.
        req = ET.SubElement(root, '%s-request' % (self.type,))
        if self.action is not None:
            action = ET.SubElement(req, 'action-name')
            action.text = self.action
        if self.event is not None:
            event = ET.SubElement(req, 'event-name')
            event.text = self.event
        if len(self.nodes) > 0:
            nodes = ET.SubElement(req, 'nodes')
            for n in self.nodes:
                nodes.append(n.as_element_tree(self.type))
        if pretty_print:
            return self._pretty_print(root)
        else:
            return ET.tostring(root)
    def _pretty_print(self, node):
        """Return a properly formatted XML document with newlines and
        spaces.
        Arguments:
        node -- An instance of xml.etree.Element
        Returns:
        A properly formatted XML document.
        """
        reparsed = minidom.parseString(ET.tostring(node))
        return self._tighten_xml(reparsed.toprettyxml(' ', "\n", 'UTF-8'))
    def _tighten_xml(self, xml):
        """Tighten the value and close tags to the opening tag in an
        XML document.
        The XML gateway will not be able to process a document that is
        formatted like so:
        <tag>
          tagValue
        </tag>
        Unfortunately, this is how toprettyxml() outputs the XML. So the
        purpose of this function is to turn the above into this:
        <tag>tagValue</tag>
        Arguments:
        xml -- XML output from the toprettyxml() function
        Returns:
        A properly formatted XML document.
        """
        newxml = []
        prevLeadingSpaces = 0
        leadingSpaces = 0
        ascended = False
        for line in xml.split('\n'):
            leadingSpaces = len(line) - len(line.lstrip())
            if leadingSpaces > prevLeadingSpaces:
                # Increase in indent, just append
                newxml.append(line)
                ascended = True
            elif leadingSpaces < prevLeadingSpaces:
                if ascended:
                    # Single close tag, merge lines
                    value = newxml.pop().lstrip()
                    newxml[-1] += value + line.lstrip()
                else:
                    # Multiple closing tags, so just append
                    newxml.append(line)
                ascended = False
            else:
                # Same indent, just append
                newxml.append(line)
            prevLeadingSpaces = leadingSpaces
        return '\n'.join(newxml)
class XGQuery(XGRequest):
    """Class for XML Gateway queries.

    Convenience subclass of XGRequest with type fixed to "query".
    """
    def __init__(self, nodes=None, flat=False, values_only=False):
        # Pass a fresh list instead of a shared mutable default (nodes=[]).
        super(XGQuery, self).__init__('query',
                                      nodes if nodes is not None else [],
                                      flat=flat, values_only=values_only)
class XGAction(XGRequest):
    """Class for XML Gateway actions.

    Convenience subclass of XGRequest with type fixed to "action".
    """
    def __init__(self, action, nodes=None, flat=False, values_only=False):
        # Pass a fresh list instead of a shared mutable default (nodes=[]).
        super(XGAction, self).__init__('action',
                                       nodes if nodes is not None else [],
                                       action,
                                       flat=flat, values_only=values_only)
class XGEvent(XGRequest):
    """Class for XML Gateway events.

    Placeholder: instantiating always raises, as events are unimplemented.
    """
    def __init__(self, *args, **kwargs):
        # NotImplementedError is the idiomatic signal for unfinished
        # functionality; it subclasses Exception, so existing callers that
        # caught Exception still work.
        raise NotImplementedError("Not yet implemented.")
class XGSet(XGRequest):
    """Class for XML Gateway set operations.

    Convenience subclass of XGRequest with type fixed to "set".
    """
    def __init__(self, nodes=None, flat=False, values_only=False):
        # Pass a fresh list instead of a shared mutable default (nodes=[]).
        super(XGSet, self).__init__('set',
                                    nodes if nodes is not None else [],
                                    flat=flat, values_only=values_only)
class BasicJsonRequest(urllib2.Request):
    """A basic JSON request.
    Certain JSON requests need this type of request, but for the most part,
    this class exists to be subclassed.
    """
    # Headers merged into every request of this type; subclasses extend this.
    _mixins = {'X-Requested-With': 'XMLHttpRequest'}
    def __init__(self, *args, **kwargs):
        # BUGFIX: the headers dict used to be updated in place, silently
        # mutating a dict the caller still owns; work on a copy instead.
        if len(args) > 2:
            # urllib2.Request(url, data, headers, ...): headers is args[2].
            headers = dict(args[2])
            headers.update(self._mixins)
            args = args[:2] + (headers,) + args[3:]
        else:
            headers = dict(kwargs.get('headers', {}))
            headers.update(self._mixins)
            kwargs['headers'] = headers
        urllib2.Request.__init__(self, *args, **kwargs)
class RESTRequest(BasicJsonRequest):
    """Core request type for JSON sessions; adds a JSON content type."""
    _mixins = {
        'Content-Type': 'application/json',
        'X-Requested-With': 'XMLHttpRequest',
    }
class GetRequest(RESTRequest):
    """JSON-session request issued with the HTTP GET verb."""
    def get_method(self):
        # urllib2 selects the HTTP verb via get_method(); force GET.
        return 'GET'
class PostRequest(RESTRequest):
    """JSON-session request for HTTP POST; no method override needed."""
class PutRequest(RESTRequest):
    """JSON-session request issued with the HTTP PUT verb."""
    def get_method(self):
        # urllib2 selects the HTTP verb via get_method(); force PUT.
        return 'PUT'
class DeleteRequest(RESTRequest):
    """JSON-session request issued with the HTTP DELETE verb."""
    def get_method(self):
        # urllib2 selects the HTTP verb via get_method(); force DELETE.
        return 'DELETE'
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineImagesOperations(object):
    """VirtualMachineImagesOperations operations.

    AutoRest-generated operations group: do not edit by hand, changes will
    be lost when the client is regenerated.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-12-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-12-01"
        self.config = config

    def get(
            self, location, publisher_name, offer, skus, version, custom_headers=None, raw=False, **operation_config):
        """Gets a virtual machine image.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param version: A valid image SKU version.
        :type version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachineImage or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImage or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'version': self._serialize.url("version", version, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Fresh per-call request id for server-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # Any status other than 200 is surfaced as a CloudError.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineImage', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list(
            self, location, publisher_name, offer, skus, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of all virtual machine image versions for the specified
        location, publisher, offer, and SKU.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param skus: A valid image SKU.
        :type skus: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param top:
        :type top: int
        :param orderby:
        :type orderby: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # $filter/$top/$orderby are OData options and only sent when given.
        query_parameters = {}
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list_offers(
            self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image offers for the specified location
        and publisher.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list_publishers(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image publishers for the specified Azure
        location.

        :param location: The name of a supported Azure region.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list_skus(
            self, location, publisher_name, offer, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine image SKUs for the specified location,
        publisher, and offer.

        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype:
         list[~azure.mgmt.compute.v2017_12_01.models.VirtualMachineImageResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineImageResource]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| |
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
# Copyright (c) 2009 Domen Kozar <domen@dev.si>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import commands
import tempfile
import logging
import signal
import killableprocess
import subprocess
import sys, os
if not sys.version.startswith('2.4'):
import urlparse
else:
# python 2.4
from windmill.tools import urlparse_25 as urlparse
from StringIO import StringIO
import windmill
logger = logging.getLogger(__name__)
"""
Colossus:/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support mikeal$ ./networksetup -getwebproxy "AirPort" [14:12]
cp: /Library/Preferences/SystemConfiguration/preferences.plist.old: Permission denied
Enabled: No
Server: localhost
Port: 4444
Authenticated Proxy Enabled: 0
Colossus:/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support mikeal$ ./networksetup -setwebproxystate "AirPort" on [14:12]
cp: /Library/Preferences/SystemConfiguration/preferences.plist.old: Permission denied
Colossus:/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support mikeal$ ./networksetup -getwebproxy "AirPort" [14:13]
cp: /Library/Preferences/SystemConfiguration/preferences.plist.old: Permission denied
Enabled: Yes
Server: localhost
Port: 4444
Authenticated Proxy Enabled: 0
Colossus:/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support mikeal$ whoami [14:13]
Usage: networksetup -setwebproxy <networkservice> <domain> <port number> <authenticated> <username> <password>
mikeal
"""
# Minimal page that bounces the browser to another URL; the literal
# "{replace}" placeholder is substituted with the target URL before serving.
# BUGFIX: the final tag was "<html>" instead of the closing "</html>".
html_redirection = """
<html>
<head>
<script type="text/javascript">
var i = function(){
window.location = "{replace}";
}
</script>
</head>
<body onload="i();">
</body>
</html>"""
def getoutput(l):
    """Run command list *l* and return its combined stdout+stderr as a string.

    Output is captured through a temporary file.  Uses tempfile.mkstemp
    (the original used the race-prone tempfile.mktemp) and guarantees the
    file handle is closed and the temp file removed even if the
    subprocess call raises.
    """
    fd, tmp = tempfile.mkstemp()
    try:
        x = os.fdopen(fd, 'w+')
        try:
            subprocess.call(l, stdout=x, stderr=x)
            x.seek(0)
            return x.read()
        finally:
            x.close()
    finally:
        os.remove(tmp)
def dprint(s):
    """Print captured `networksetup` output (if any), plus a remediation
    hint when the well-known preferences.plist permission error appears.
    """
    # BUG FIX: was ``if len(s) is not 0`` — identity comparison against an
    # int is implementation-dependent; plain truthiness is the correct test.
    if s:
        sys.stdout.write(s.rstrip('\n') + '\n')
        if 'Library/Preferences/SystemConfiguration/preferences.plist.old' in s:
            sys.stdout.write("** To remove this error `chmod -R 777` the directory that shows the permission error\n")
def find_default_interface_name():
    """Return the OS X network-service name (e.g. "AirPort") routing to
    the test url.

    An explicit NETWORK_INTERFACE_NAME setting wins.  Otherwise
    `/sbin/route get <host>` yields the device id (e.g. "en1"), which is
    matched against `networksetup -listallhardwareports` output to
    recover the human-readable service name.  On failure the process is
    torn down and exited.
    """
    # Explicit override from the local settings file, if provided.
    if windmill.settings['NETWORK_INTERFACE_NAME'] is not None:
        return windmill.settings['NETWORK_INTERFACE_NAME']
    target_host = urlparse.urlparse(windmill.settings['TEST_URL']).hostname
    x = ['/sbin/route', 'get', target_host]
    # e.g. "  interface: en1" -> "en1"; IndexError here is not caught below
    # (the try only wraps the hardware-port lookup) -- TODO confirm intended.
    interface_id = [l for l in getoutput(x).splitlines() if 'interface' in l][0].split(":")[-1].strip()
    all_inet = getoutput([windmill.settings['NETWORKSETUP_BINARY'], '-listallhardwareports']).splitlines()
    try:
        # In -listallhardwareports output the service name is on the line
        # immediately *preceding* the matching "Device: enX" line.
        i = all_inet.index([l for l in all_inet if 'Device: '+interface_id in l][0])
        interface_name = all_inet[i - 1].split(':')[-1].strip()
        # interface_name = [ l for l in all_inet if l.find(interface_id) is not -1 ][0].split('\n')[0].split(':')[-1]
        # if interface_name[0] == ' ':
        #     interface_name = interface_name.strip()
        # if interface_name[-1] == ' ':
        #     interface_name = interface_name.rstrip()
    except IndexError:
        # No hardware port matched the routed device: bail out cleanly.
        print "ERROR: Cannot figure out interface name, please set NETWORK_INTERFACE_NAME in local settings file"
        from windmill.bin import admin_lib
        admin_lib.teardown(admin_lib.shell_objects_dict)
        sys.exit()
    # interfaces = getoutput().split('\n\n')
    # print 'interfaces::\n', '\n'.join(interfaces)
    # for line in interfaces:
    #     if not line.startswith('(') and line.find('(1)') is not -1:
    #         line = '(1)'+line.split('(1)')[-1]
    #     if line.find('Device: '+interface) is not -1:
    #         interface_name = ' '.join(line.splitlines()[0].split()[1:])
    return interface_name
class Safari(object):
    """Launch and manage a Safari instance for windmill test runs.

    On OS X the system web proxy (and the secure web proxy when SSL is
    enabled) is pointed at the local windmill server via `networksetup`;
    on Windows the shared InternetExplorer proxy helper is reused.
    """

    def __init__(self):
        self.safari_binary = windmill.settings['SAFARI_BINARY']
        self.test_url = windmill.settings['TEST_URL']

    def create_redirect(self):
        """Write a temporary html page that JS-redirects to the test url."""
        self.redirection_page = tempfile.mktemp(suffix='.html')
        f = open(self.redirection_page, 'w')
        test_url = windmill.get_test_url(windmill.settings['TEST_URL'])
        f.write(html_redirection.replace('{replace}', test_url))
        f.flush()
        f.close()

    def set_proxy_mac(self):
        """Point the OS X web proxy (and SSL proxy if enabled) at windmill."""
        self.netsetup_binary = windmill.settings['NETWORKSETUP_BINARY']
        interface_name = find_default_interface_name()
        set_proxy_command = [self.netsetup_binary, '-setwebproxy',
                             interface_name, 'localhost',
                             str(windmill.settings['SERVER_HTTP_PORT'])]
        dprint(getoutput(set_proxy_command))
        enable_proxy_command = [self.netsetup_binary, '-setwebproxystate',
                                interface_name, 'on']
        dprint(getoutput(enable_proxy_command))
        if windmill.has_ssl:
            set_ssl_proxy_command = [
                self.netsetup_binary, '-setsecurewebproxy',
                interface_name, 'localhost',
                str(windmill.settings['SERVER_HTTP_PORT'])]
            # BUG FIX: the ssl command lists were built but the plain-http
            # commands were executed a second time, so the secure web proxy
            # was never actually configured.
            dprint(getoutput(set_ssl_proxy_command))
            enable_ssl_proxy_command = [
                self.netsetup_binary, '-setsecurewebproxystate',
                interface_name, 'on']
            dprint(getoutput(enable_ssl_proxy_command))
        self.create_redirect()
        self.interface_name = interface_name

    def unset_proxy_mac(self):
        """Turn the web and secure-web proxies back off."""
        getoutput([self.netsetup_binary, '-setwebproxystate',
                   self.interface_name, 'off'])
        getoutput([self.netsetup_binary, '-setsecurewebproxystate',
                   self.interface_name, 'off'])

    def set_proxy_windows(self):
        """Configure the IE proxy settings (honoured by Safari on Windows)."""
        self.create_redirect()
        import ie
        self.ie_obj = ie.InternetExplorer()
        self.ie_obj.set_proxy()

    def unset_proxy_windows(self):
        self.ie_obj.unset_proxy()

    def start(self):
        """Configure the platform proxy, then start Safari on the
        redirection page."""
        if sys.platform == 'darwin':
            self.set_proxy_mac()
        elif sys.platform in ('cygwin', 'win32'):
            self.set_proxy_windows()
        # Workaround for bug in nose
        if hasattr(sys.stdout, 'fileno'):
            kwargs = {'stdout': sys.stdout, 'stderr': sys.stderr,
                      'stdin': sys.stdin}
        else:
            kwargs = {'stdout': sys.__stdout__, 'stderr': sys.__stderr__,
                      'stdin': sys.stdin}
        self.p_handle = killableprocess.runCommand(
            [self.safari_binary, self.redirection_page], **kwargs)
        logger.info([self.safari_binary, self.redirection_page])

    def kill(self, kill_signal=None):
        """Stop Safari and restore the system proxy settings.

        kill_signal is accepted for interface compatibility but unused:
        killableprocess kill(group=True) takes no signal argument.
        """
        if sys.platform == 'darwin':
            self.unset_proxy_mac()
        elif sys.platform in ('cygwin', 'win32'):
            self.unset_proxy_windows()
        try:
            self.p_handle.kill(group=True)
        except Exception:
            logger.error('Cannot kill Safari')

    def stop(self):
        self.kill(signal.SIGTERM)

    def is_alive(self):
        """Return True while the Safari process is still running.

        BUG FIX: the result was previously inverted -- poll() returning
        None means the child has not exited yet, i.e. it IS alive.
        """
        if self.p_handle.poll() is None:
            return True
        return False
| |
import PIL.Image
import PIL.ImageColor
import PIL.ImageEnhance
import zeit.cms.repository.folder
import zeit.connector.interfaces
import zeit.content.image.interfaces
import zope.app.appsetup.product
import zope.component
import zope.interface
import zope.security.proxy
class ImageTransform(object):
    """PIL-backed transform adapter for repository images.

    Provides thumbnailing, resizing and variant rendering (crop, zoom,
    focus point, brightness/contrast/saturation/sharpness) on top of the
    adapted image's binary data.
    """
    zope.interface.implements(zeit.content.image.interfaces.ITransform)
    zope.component.adapts(zeit.content.image.interfaces.IImage)

    # Hard upper bound (pixels) applied to each requested output dimension.
    MAXIMUM_IMAGE_SIZE = 5000

    def __init__(self, context):
        self.context = context
        try:
            self.image = PIL.Image.open(
                zope.security.proxy.removeSecurityProxy(context.open()))
            # Force full decoding now so unreadable data raises IOError
            # here instead of in a later transform call.
            self.image.load()
        except IOError:
            raise zeit.content.image.interfaces.ImageProcessingError(
                "Cannot transform image %s" % context.__name__)

    def thumbnail(self, width, height, filter=PIL.Image.ANTIALIAS):
        """Return a new image scaled to fit inside (width, height),
        preserving aspect ratio (PIL thumbnail semantics)."""
        image = self.image.copy()
        image.thumbnail((width, height), filter)
        return self._construct_image(image)

    def resize(self, width=None, height=None, filter=PIL.Image.ANTIALIAS):
        """Return a copy resized to width x height.

        A missing dimension is derived from the source aspect ratio.
        Raises TypeError if both are missing.
        """
        if width is None and height is None:
            raise TypeError('Need at least one of width and height.')
        orig_width, orig_height = self.image.size
        if width is None:
            # NOTE(review): '/' is floor division on ints under Python 2
            # (this module's zope.interface.implements style is py2-era);
            # confirm intended behaviour if ever ported to Python 3.
            width = orig_width * height / orig_height
        elif height is None:
            height = orig_height * width / orig_width
        image = self.image.resize((width, height), filter)
        return self._construct_image(image)

    def create_variant_image(
            self, variant, size=None, fill_color=None, format=None):
        """Create variant image from source image.

        Will crop the image according to the zoom, focus point and size. In
        addition, the image is scaled down to size (if given) and image
        enhancements, like brightness, are applied.

        The default variant skips cropping, but still applies image
        enhancements, so it can be used as a high quality preview of image
        enhancements in the frontend.
        """
        if not variant.is_default:
            image = self._crop_variant_image(variant, size=size)
        else:
            # Alpha channel is usually activated when cropping,
            # so we must do it by hand since we skipped cropping
            image = self._enable_alpha_channel(self.image)
        # Apply enhancements like brightness; each one is skipped when the
        # variant leaves it unset (None).
        if variant.brightness is not None:
            image = PIL.ImageEnhance.Brightness(image).enhance(
                variant.brightness)
        if variant.contrast is not None:
            image = PIL.ImageEnhance.Contrast(image).enhance(
                variant.contrast)
        if variant.saturation is not None:
            image = PIL.ImageEnhance.Color(image).enhance(
                variant.saturation)
        if variant.sharpness is not None:
            image = PIL.ImageEnhance.Sharpness(image).enhance(
                variant.sharpness)
        # Optionally fill the background of transparent images
        if fill_color is not None and self._color_mode == 'RGBA':
            fill_color = PIL.ImageColor.getrgb('#' + fill_color)
            opaque = PIL.Image.new('RGB', image.size, fill_color)
            # Paste with the image itself as mask so its alpha is respected.
            opaque.paste(image, (0, 0), image)
            image = opaque
        return self._construct_image(image, format)

    def _crop_variant_image(self, variant, size=None):
        """Crop variant image from source image.

        Determines crop position using zoom, focus point and size constraint.
        The result image will have the exact dimensions that are predefined by
        the size argument, if provided. Otherwise it depends on the variant
        ratio and zoom only, giving back the best image quality, i.e. will not
        scale down.
        """
        source_width, source_height = self.image.size
        # Degenerate source: nothing to crop.
        if (source_width == 0 or source_height == 0):
            return self.image
        zoomed_width = source_width
        zoomed_height = source_height
        if variant.zoom > 0:
            zoomed_width = int(source_width * variant.zoom)
            zoomed_height = int(source_height * variant.zoom)
        target_ratio = variant.ratio
        if target_ratio is None:
            # No ratio on the variant: keep the source aspect ratio.
            target_ratio = float(source_width) / float(source_height)
        target_width, target_height = self._fit_ratio_to_image(
            zoomed_width, zoomed_height, target_ratio)
        if size:
            # A size constraint may further restrict the crop ratio.
            w, h = size
            override_ratio = float(w) / float(h)
            target_width, target_height = self._fit_ratio_to_image(
                target_width, target_height, override_ratio)
        x, y = self._determine_crop_position(
            variant, target_width, target_height)
        image = self._crop(
            self.image, x, y, x + target_width, y + target_height)
        if size:
            # Scale the crop to the exact requested size, clamped to the
            # MAXIMUM_IMAGE_SIZE safety limit in each dimension.
            w, h = size
            if w > self.MAXIMUM_IMAGE_SIZE:
                w = self.MAXIMUM_IMAGE_SIZE
            if h > self.MAXIMUM_IMAGE_SIZE:
                h = self.MAXIMUM_IMAGE_SIZE
            image = image.resize((w, h), PIL.Image.ANTIALIAS)
        return image

    def _fit_ratio_to_image(self, source_width, source_height, target_ratio):
        """Calculate the biggest (width, height) inside the source that adheres
        to target ratio"""
        original_ratio = float(source_width) / float(source_height)
        if target_ratio > original_ratio:
            # Target is wider than the source: width is the limiting side.
            width = source_width
            height = int(source_width / target_ratio)
        else:
            # Target is taller (or equal): height is the limiting side.
            width = int(source_height * target_ratio)
            height = source_height
        return width, height

    def _determine_crop_position(self, variant, target_width, target_height):
        """Top-left crop corner so the focus point keeps its relative
        position inside the cropped area."""
        width, height = self.image.size
        x = int(width * variant.focus_x - target_width * variant.focus_x)
        y = int(height * variant.focus_y - target_height * variant.focus_y)
        return x, y

    def _crop(self, pil_image, x1, y1, x2, y2):
        pil_image = pil_image.crop((x1, y1, x2, y2))
        pil_image = self._enable_alpha_channel(pil_image)
        return pil_image

    @property
    def _color_mode(self):
        # XXX This is a rather crude heuristic.
        return 'RGBA' if self.context.format == 'PNG' else 'RGB'

    def _enable_alpha_channel(self, pil_image):
        """Enable alpha channel for PNG images by converting to RGBA."""
        if pil_image.mode != self._color_mode:
            pil_image = pil_image.convert(self._color_mode)
        return pil_image

    def _construct_image(self, pil_image, format=None):
        """Serialize *pil_image* into a TemporaryImage, copying mimeType
        and (when available) the source's modification time.

        NOTE(review): zeit.content.image.image and zope.dublincore.interfaces
        are not among this module's visible imports -- confirm they are
        imported elsewhere in the package.
        """
        image = zeit.content.image.image.TemporaryImage()
        if not format:
            format = self.context.format
            image.mimeType = self.context.mimeType
        else:
            image.mimeType = 'image/' + format.lower()  # XXX crude heuristic.
        # XXX Maybe encoder setting should be made configurable.
        if format in ('JPG', 'JPEG'):
            options = {'progressive': True, 'quality': 85, 'optimize': True}
        elif format == 'PNG':
            options = {'optimize': True}
        elif format == 'WEBP':
            options = {'quality': 85}
        else:
            options = {}
        pil_image.save(image.open('w'), format, **options)
        image.__parent__ = self.context
        # Propagate the source's DAV modification time to the derived image.
        image_times = zope.dublincore.interfaces.IDCTimes(self.context, None)
        if image_times and image_times.modified:
            thumb_times = zope.dublincore.interfaces.IDCTimes(image)
            thumb_times.modified = image_times.modified
        return image
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IPersistentThumbnail)
def persistent_thumbnail_factory(context):
    """Return the persistent thumbnail for *context*, creating it on demand.

    Method name and dimensions come from the 'zeit.content.image' product
    configuration; DAV properties (except DAV: namespace and the UUID)
    are copied from the source image onto the new thumbnail.
    """
    config = zope.app.appsetup.product.getProductConfiguration(
        'zeit.content.image') or {}
    method_name = config.get('thumbnail-method', 'thumbnail')

    def _dimension(key):
        # A falsy configured value means "unconstrained" (None).
        value = config.get(key, 50)
        return int(value) if value else None

    width = _dimension('thumbnail-width')
    height = _dimension('thumbnail-height')
    container = zeit.content.image.interfaces.IThumbnailFolder(context)
    name = context.__name__
    if name not in container:
        transform = zeit.content.image.interfaces.ITransform(context)
        thumbnail = getattr(transform, method_name)(width, height)
        target_props = (
            zeit.connector.interfaces.IWebDAVWriteProperties(thumbnail))
        source_props = zeit.connector.interfaces.IWebDAVReadProperties(
            context)
        # Mirror the source's WebDAV properties, but never the DAV:
        # namespace or the unique id.
        for (key, namespace), value in source_props.items():
            if namespace != 'DAV:':
                target_props[(key, namespace)] = value
        target_props.pop(zeit.connector.interfaces.UUID_PROPERTY, None)
        container[name] = thumbnail
    return container[name]
@zope.component.adapter(zeit.content.image.interfaces.IImage)
@zope.interface.implementer(zeit.content.image.interfaces.IThumbnailFolder)
def thumbnail_folder_factory(context):
    """Return the 'thumbnails' folder next to *context*, creating it if
    it does not exist yet."""
    parent = context.__parent__
    folder_name = u'thumbnails'
    if folder_name not in parent:
        parent[folder_name] = zeit.cms.repository.folder.Folder()
    return parent[folder_name]
| |
# Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: Ling Thio <ling.thio@gmail.com>
from flask import redirect, render_template, render_template_string, Blueprint, jsonify
from flask import request, url_for
from flask_user import current_user, login_required, roles_accepted, roles_required
from app import app, db
from app.core.models import UserProfileForm, FeatureRequest, User, Product, UsersRoles, Role
from flask_wtf import csrf
from datetime import datetime
from sqlalchemy import func
# All core views below hang off the site root ('/') via this blueprint.
core_blueprint = Blueprint('core', __name__, url_prefix='/')
# The Home page is accessible to anyone
@core_blueprint.route('')
def home_page():
    """Home: authenticated users get the app home, guests the guest page."""
    if current_user.is_authenticated:
        return render_template('core/home_page.html')
    return render_template('core/guest_page.html')
# The User page is accessible to authenticated users (users that have logged in)
@core_blueprint.route('user')
@login_required  # Limits access to authenticated users
def user_page():
    """Render the per-user landing page."""
    return render_template('core/user_page.html')
# The Admin page is accessible to users with the 'admin' role
@core_blueprint.route('admin')
@roles_accepted('admin')  # Limits access to users with the 'admin' role
def admin_page():
    """Render the admin landing page."""
    return render_template('core/admin_page.html')
@core_blueprint.route('user/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Show the current user's profile form and persist valid edits."""
    form = UserProfileForm(request.form, current_user)
    if request.method == 'POST' and form.validate():
        # Copy the submitted fields onto the logged-in user and save.
        form.populate_obj(current_user)
        db.session.commit()
        return redirect(url_for('core.home_page'))
    # GET, or a POST that failed validation: re-render the form.
    return render_template('core/user_profile_page.html', form=form)
# Register blueprint (at import time; all core views are defined above).
app.register_blueprint(core_blueprint)
# Feature Route
@app.route('/features')
@login_required
def feature_request():
    """List feature requests: clients see only their own, others see all.

    Role is taken from the user's first role; the two original branches
    differed only in the query, so the render call is shared.
    """
    if current_user.roles[0].name == 'client':
        features = FeatureRequest.query.filter(
            FeatureRequest.user_id == current_user.id)
    else:
        features = FeatureRequest.query.all()
    return render_template('core/feature_requests.html', features=features)
@app.route('/new_feature', methods=['POST'])
@login_required
def new_feature():
    """Create a FeatureRequest from the JSON payload and echo it as JSON.

    The owning user is the current user for clients, or the supplied
    'client_id' when an IWS user files on a client's behalf.  The two
    original ~25-line branches differed only in that owner id, so the
    logic is unified.  New requests are appended at the end of both the
    global and the per-client priority orderings.
    """
    if current_user.roles[0].name == 'client':
        owner_id = current_user.id
    else:
        owner_id = request.json['client_id']
    # global priority: one past the current total number of requests
    gp = db.session.query(func.count(FeatureRequest.id)).scalar() + 1
    # client priority: one past the owner's current number of requests
    cp = db.session.query(func.count(FeatureRequest.id)).filter(
        FeatureRequest.user_id == owner_id).scalar() + 1
    date_object = datetime.strptime(request.json['target_date'], '%m-%d-%Y')
    feature = FeatureRequest(
        title=request.json['title'], description=request.json['description'],
        target_date=date_object, ticket_url=request.json['ticket_url'],
        user_id=owner_id, product_id=request.json['product_id'],
        global_priority=gp, client_priority=cp)
    db.session.add(feature)
    db.session.commit()
    return jsonify({"title": request.json['title'],
                    "description": request.json['description'],
                    "client_priority": cp,
                    "global_priority": gp,
                    "target_date": request.json['target_date'],
                    "ticket_url": request.json['ticket_url'],
                    "client_id": request.json['client_id'],
                    "id": feature.id,
                    "product_id": request.json['product_id']})
@app.route('/save_priorities', methods=['POST'])
@login_required
def save_priorities():
    """Update a feature request's global and client priorities from JSON.

    The original client/non-client branches were byte-identical, so the
    dead role check was removed.  (The 'reponse' key spelling is kept for
    API compatibility.)
    """
    fr = FeatureRequest.query.filter_by(id=request.json['id']).first()
    fr.global_priority = request.json['global_priority']
    fr.client_priority = request.json['priority']
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/update_feature', methods=['POST'])
@login_required
def update_feature():
    """Update title, ticket url, target date and product of a feature
    request identified by the JSON 'id'; report ok/error as JSON."""
    payload = request.json
    # Parsed before the lookup so a malformed date raises regardless of
    # whether the request exists (matches prior behaviour).
    date_object = datetime.strptime(payload['target_date'], '%m-%d-%Y')
    fr = FeatureRequest.query.filter_by(id=payload['id']).first()
    if not fr:
        return jsonify(reponse=dict(result="error"))
    fr.title = payload['title']
    fr.ticket_url = payload['ticket_url']
    fr.target_date = date_object
    fr.product_id = payload['product_id']
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/delete_feature', methods=['POST'])
@login_required
def delete_feature():
    """Delete the feature request identified by the JSON 'id'."""
    fr = FeatureRequest.query.filter_by(id=request.json['id']).first()
    if not fr:
        return jsonify(reponse=dict(result="error"))
    # Delete the instance we already fetched instead of issuing a second,
    # redundant filtered DELETE query as before.
    db.session.delete(fr)
    db.session.commit()
    return jsonify(reponse=dict(result="ok"))
@app.route('/features_list')
@login_required
def features_list():
    """Return feature requests as JSON.

    Clients get only their own, ordered by client priority; everyone
    else gets all requests ordered by global priority.  The two original
    branches duplicated the serialization, which is now done once.
    """
    if current_user.roles[0].name == 'client':
        cur = FeatureRequest.query.filter(
            FeatureRequest.user_id == current_user.id).order_by(
                FeatureRequest.client_priority)
    else:
        cur = FeatureRequest.query.order_by(
            FeatureRequest.global_priority).all()
    entries = [dict(id=row.id, title=row.title,
                    target_date=row.target_date,
                    description=row.description,
                    ticket_url=row.ticket_url,
                    client_priority=row.client_priority,
                    global_priority=row.global_priority,
                    client_id=row.user_id,
                    product_id=row.product_id)
               for row in cur]
    return jsonify(features=entries)
# Client Route
@app.route('/clients')
@roles_required('admin')
@login_required
def clients():
    """Render the admin page listing all users with the 'client' role."""
    clients = User.query.join(User.roles).filter(Role.name == 'client').group_by(User).all()
    return render_template('core/clients.html',
                           clients=clients)
@app.route('/clients_list')
@login_required
def clients_list():
    """Return all client users as JSON, ordered by id."""
    client_rows = User.query.join(User.roles).filter(
        Role.name == 'client').group_by(User).order_by(User.id).all()
    payload = [dict(company_name=u.company_name, email=u.email,
                    description="", id=u.id, last_name=u.last_name,
                    first_name=u.first_name, priority=u.priority)
               for u in client_rows]
    return jsonify(clients=payload)
@app.route('/new_client', methods=['POST'])
@roles_required('admin')
@login_required
def new_client():
    """Create a new client user from the JSON payload (admin only).

    Rejects the request when a user with the same email already exists.
    """
    email = request.json['email']
    existing = User.query.filter(User.email == email).first()
    if existing:
        return jsonify({"result": "Error", "msg": "email exist"})
    user = User(email=email,
                first_name=request.json['first_name'],
                last_name=request.json['last_name'],
                password=app.user_manager.hash_password(
                    request.json['password']),
                company_name=request.json['company_name'],
                active=True, confirmed_at=datetime.utcnow())
    user.roles.append(Role.query.filter(Role.name == 'client').first())
    db.session.add(user)
    db.session.commit()
    return jsonify({"email": request.json['email'],
                    "first_name": request.json['first_name'],
                    "result": "OK",
                    "last_name": request.json['last_name'],
                    "company_name": request.json['company_name'],
                    })
# Product Route
@app.route('/products')
@roles_required('admin')
@login_required
def products():
    """Render the admin page listing all products."""
    products = Product.query.all()
    return render_template('core/products.html',
                           products=products)
@app.route('/products_list')
@login_required
def products_list():
    """Return every product as JSON."""
    rows = Product.query.all()
    items = [dict(id=p.id, product_name=p.product_name,
                  description=p.description) for p in rows]
    return jsonify(products=items)
@app.route('/new_product', methods=['POST'])
@roles_required('admin')
@login_required
def new_product():
    """Create a product from the JSON payload unless its name is taken."""
    name = request.json['product_name']
    existing = Product.query.filter(Product.product_name == name).first()
    if existing:
        return jsonify({"result": "Error", "msg": "product name exist"})
    product = Product(product_name=name,
                      description=request.json['description'])
    db.session.add(product)
    db.session.commit()
    return jsonify({"product_name": request.json['product_name'],
                    "description": request.json['description'],
                    "id": product.id,
                    "result": "OK"
                    })
# User Route
@app.route('/users')
@roles_required('admin')
@login_required
def users():
    """Render the admin page listing all users with the 'user' role."""
    users = User.query.join(User.roles).filter(Role.name == 'user').group_by(User).all()
    return render_template('core/users.html',
                           users=users)
@app.route('/users_list')
@roles_required('admin')
@login_required
def users_list():
    """Return all users holding the 'user' role as JSON, ordered by id."""
    rows = User.query.join(User.roles).filter(
        Role.name == 'user').group_by(User).order_by(User.id).all()
    entries = [dict(email=u.email, id=u.id,
                    last_name=u.last_name, first_name=u.first_name)
               for u in rows]
    return jsonify(users=entries)
@app.route('/new_user', methods=['POST'])
@roles_required('admin')
@login_required
def new_user():
    """Create a new IWS user from the JSON payload (admin only).

    Rejects the request when a user with the same email already exists.
    """
    email = request.json['email']
    existing = User.query.filter(User.email == email).first()
    if existing:
        return jsonify({"result": "Error", "msg": "email exist"})
    user = User(email=email,
                first_name=request.json['first_name'],
                last_name=request.json['last_name'],
                password=app.user_manager.hash_password(
                    request.json['password']),
                active=True, confirmed_at=datetime.utcnow())
    user.roles.append(Role.query.filter(Role.name == 'user').first())
    db.session.add(user)
    db.session.commit()
    return jsonify({"email": request.json['email'],
                    "first_name": request.json['first_name'],
                    "result": "OK",
                    "last_name": request.json['last_name'],
                    "id": user.id
                    })
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.