gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Keep track of geo-located stations.
Top modules are generally called by external (to CM) scripts.
Bottom part is the class that does the work.
"""
import copy
import warnings
from sqlalchemy import func
from pyuvdata import utils as uvutils
from numpy import radians
from . import mc, cm_partconnect, cm_utils, geo_location, cm_sysdef
def cofa(session=None):
    """
    Return location class of current COFA.

    Parameters
    ----------
    session: db session to use
    """
    handler = Handling(session)
    result = handler.cofa()
    handler.close()
    return result
def get_location(location_names, query_date='now', session=None):
    """
    Get a GeoLocation object with lon/lat attributes for a location name.

    This is the wrapper for other modules outside cm to call.

    Parameters
    ----------
    location_names : list of str
        location names, either a station (geo_location key) or an antenna
    query_date : str
        Date for query. Anything that `get_astropytime` can translate.
    session : Session object
        Session to use.

    Returns
    -------
    list of GeoLocation objects
        objects corresponding to location_names, with lon/lat attributes added.
    """
    parsed_date = cm_utils.get_astropytime(query_date)
    handler = Handling(session)
    found = handler.get_location(location_names, parsed_date)
    handler.close()
    return found
def show_it_now(fignm=None):  # pragma: no cover
    """
    Show plot.

    Used in scripts to actually make plot (as opposed to within python). Seems to be needed...

    Parameters
    ----------
    fignm: string/int for figure
    """
    import matplotlib.pyplot as plt
    if fignm is None:
        plt.show()
        return
    # Select the named/numbered figure before showing it.
    plt.figure(fignm)
    plt.show()
class Handling:
    """
    Class to allow various manipulations of geo_locations and their properties etc.

    Parameters
    ----------
    session : Session object
        session on current database. If session is None, a new session
        on the default database is created and used.
    """

    # Maps a one-letter axis code (used by the plot methods) to the
    # corresponding GeoLocation attribute name.
    coord = {'E': 'easting', 'N': 'northing', 'Z': 'elevation'}
    # [UTM zone number, latitude band] for the HERA site.
    hera_zone = [34, 'J']
    # Northing correction (m) per latitude band, subtracted before the
    # UTM -> lon/lat transform.  Band 'J' is in the southern hemisphere;
    # presumably this removes the 10,000,000 m false northing -- TODO confirm
    # against the database's northing convention.
    lat_corr = {'J': 10000000}

    def __init__(self, session=None, testing=False):
        if session is None:  # pragma: no cover
            db = mc.connect_to_mc_db(None)
            self.session = db.sessionmaker()
        else:
            self.session = session
        self.get_station_types()
        self.testing = testing
        # Plotting / file-output state used by start_file and the plot_* methods.
        self.axes_set = False
        self.fp_out = None
        self.graph = False
        self.station_types_plotted = False

    def close(self):
        """Close the session."""
        self.session.close()

    def cofa(self):
        """
        Get the current center of array.

        Returns
        -------
        list of GeoLocation objects
            GeoLocation object(s) for the center of the array; normally a
            single-element list (a warning is raised if there are several).
        """
        current_cofa = self.station_types['cofa']['Stations']
        located = self.get_location(current_cofa, 'now')
        if len(located) > 1:  # pragma: no cover
            s = "{} has multiple cofa values.".format(str(current_cofa))
            warnings.warn(s)
        return located

    def get_station_types(self):
        """
        Add a dictionary of sub-arrays (station_types) to the object.

        [station_type_name]{'Prefix', 'Description':'...', 'plot_marker':'...', 'stations':[]}
        """
        self.station_types = {}
        for sta in self.session.query(geo_location.StationType):
            self.station_types[sta.station_type_name.lower()] = {
                'Prefix': sta.prefix.upper(),
                'Description': sta.description,
                'Marker': sta.plot_marker,
                'Stations': set()}
        for loc in self.session.query(geo_location.GeoLocation):
            self.station_types[loc.station_type_name]['Stations'].add(loc.station_name)
            # Sanity check: every station name should start with its
            # station type's declared prefix.
            expected_prefix = self.station_types[loc.station_type_name]['Prefix'].upper()
            actual_prefix = loc.station_name[:len(expected_prefix)].upper()
            if expected_prefix != actual_prefix:  # pragma: no cover
                s = ("Prefixes don't match: expected {} but got {} for {}"
                     .format(expected_prefix, actual_prefix, loc.station_name))
                warnings.warn(s)

    def set_graph(self, graph_it):
        """
        Set the graph attribute.

        Parameters
        ----------
        graph_it : bool
            Flag indicating whether a graph should be made.
        """
        self.graph = graph_it

    def start_file(self, fname):
        """
        Open file for writing.

        Parameters
        ----------
        fname : str
            File name to write to.  The extension selects the output
            format used by _loc_line ('csv' or whitespace-delimited).
        """
        import os.path as op
        self.file_type = fname.split('.')[-1]
        write_header = False
        if op.isfile(fname):  # pragma: no cover
            print("{} exists so appending to it".format(fname))
        else:
            write_header = True
            print("Writing to new {}".format(fname))
        if not self.testing:  # pragma: no cover
            # Always append; the header is only written for a brand-new file.
            self.fp_out = open(fname, 'a')
            if write_header:
                self.fp_out.write("{}\n".format(self._loc_line('header')))

    def is_in_database(self, station_name, db_name='geo_location'):
        """
        Check to see if a station_name is in the specified database table.

        Parameters
        ----------
        station_name : str
            Name of station.
        db_name : str
            Name of database table

        Returns
        -------
        bool
            True if station_name is present in specified table, False otherwise.
        """
        if db_name == 'geo_location':
            station = self.session.query(geo_location.GeoLocation).filter(
                func.upper(geo_location.GeoLocation.station_name) == station_name.upper())
        elif db_name == 'connections':
            station = self.session.query(cm_partconnect.Connections).filter(
                func.upper(cm_partconnect.Connections.upstream_part) == station_name.upper())
        else:
            raise ValueError('db not found.')
        if station.count() > 0:
            station_present = True
        else:
            station_present = False
        return station_present

    def find_antenna_at_station(self, station, query_date):
        """
        Get antenna details for a station.

        Parameters
        ----------
        station : str
            station name
        query_date : string, int, Time, datetime
            Date to get information for, anything that can be parsed by `get_astropytime`.

        Returns
        -------
        tuple of str
            (antenna_name, antenna_revision) for the antenna
            that was active at the date query_date, or (None, None) if no
            antenna was active at the station. Raises a warning if the
            database lists multiple active connections at the station at
            query_date (and returns (None, None) in that case too).
        """
        query_date = cm_utils.get_astropytime(query_date)
        connected_antenna = self.session.query(cm_partconnect.Connections).filter(
            (func.upper(cm_partconnect.Connections.upstream_part) == station.upper())
            & (cm_partconnect.Connections.start_gpstime <= query_date.gps))
        antenna_connected = []
        for conn in connected_antenna:
            # stop_gpstime of None means the connection is still active.
            if conn.stop_gpstime is None or query_date.gps <= conn.stop_gpstime:
                antenna_connected.append(copy.copy(conn))
        if len(antenna_connected) == 0:
            return None, None
        elif len(antenna_connected) > 1:
            warning_string = 'More than one active connection for {}'.format(
                antenna_connected[0].upstream_part)
            warnings.warn(warning_string)
            return None, None
        return antenna_connected[0].downstream_part, antenna_connected[0].down_part_rev

    def find_station_of_antenna(self, antenna, query_date):
        """
        Get station for an antenna.

        Parameters
        ----------
        antenna : float, int, str
            antenna number as float, int, or string. If needed, it prepends the 'A'
        query_date : string, int, Time, datetime
            Date to get information for, anything that can be parsed by `get_astropytime`.

        Returns
        -------
        str
            station the antenna is connected to at query date, or None if
            no active connection is found.

        Raises
        ------
        ValueError
            If more than one active station-antenna connection exists.
        """
        query_date = cm_utils.get_astropytime(query_date)
        # Normalize the antenna designator to the 'A<number>' form.
        if isinstance(antenna, (int, float)):
            antenna = 'A' + str(int(antenna))
        elif antenna[0].upper() != 'A':
            antenna = 'A' + antenna
        connected_antenna = self.session.query(cm_partconnect.Connections).filter(
            (func.upper(cm_partconnect.Connections.downstream_part) == antenna.upper())
            & (cm_partconnect.Connections.start_gpstime <= query_date.gps))
        ctr = 0
        for conn in connected_antenna:
            # stop_gpstime of None means the connection is still active.
            if conn.stop_gpstime is None or query_date.gps <= conn.stop_gpstime:
                antenna_connected = copy.copy(conn)
                ctr += 1
        if ctr == 0:
            return None
        elif ctr > 1:
            raise ValueError('More than one active connection between station and antenna')
        return antenna_connected.upstream_part

    def get_location(self, to_find_list, query_date):
        """
        Get GeoLocation objects for a list of station_names.

        Each returned object gains lon/lat (degrees), desc, and ECEF
        X/Y/Z attributes, and has its times converted via gps2Time.
        Also sets self.query_date as a side effect.

        Parameters
        ----------
        to_find_list : list of str
            station names to find
        query_date : string, int, Time, datetime
            Date to get information for, anything that can be parsed by `get_astropytime`.

        Returns
        -------
        list of GeoLocation objects
            GeoLocation objects corresponding to station names.
        """
        import cartopy.crs as ccrs
        latlon_p = ccrs.Geodetic()
        utm_p = ccrs.UTM(self.hera_zone[0])
        lat_corr = self.lat_corr[self.hera_zone[1]]
        locations = []
        self.query_date = cm_utils.get_astropytime(query_date)
        for station_name in to_find_list:
            # Only stations created strictly before the query date qualify.
            for a in self.session.query(geo_location.GeoLocation).filter(
                    (func.upper(geo_location.GeoLocation.station_name) == station_name.upper())
                    & (geo_location.GeoLocation.created_gpstime < self.query_date.gps)):
                a.gps2Time()
                a.desc = self.station_types[a.station_type_name]['Description']
                # UTM (easting, corrected northing) -> geodetic lon/lat.
                a.lon, a.lat = latlon_p.transform_point(a.easting, a.northing - lat_corr, utm_p)
                a.X, a.Y, a.Z = uvutils.XYZ_from_LatLonAlt(radians(a.lat), radians(a.lon),
                                                           a.elevation)
                locations.append(copy.copy(a))
                if self.fp_out is not None and not self.testing:  # pragma: no cover
                    self.fp_out.write('{}\n'.format(self._loc_line(a)))
        return locations

    def _loc_line(self, loc):
        """
        Return a list or str of the given locations, depending if loc is list or not.

        The output format (csv vs whitespace-delimited) is chosen by
        self.file_type, which start_file sets from the file extension.

        Parameters
        ----------
        loc : geo_location class, list of them, or 'header'
            List of geo_location class (or single)

        Return
        ------
        list-of-str or str : a single line containing the data
            if loc=='header' it returns the header line
        """
        if loc == 'header':
            if self.file_type == 'csv':
                return "name,easting,northing,longitude,latitude,elevation,X,Y,Z"
            else:
                return ("name    easting    northing    longitude    latitude    elevation"
                        "       X          Y          Z")
            # NOTE(review): the non-csv header spacing above is reconstructed;
            # the extraction collapsed the original whitespace.
        is_list = True
        if not isinstance(loc, list):
            loc = [loc]
            is_list = False
        ret = []
        for a in loc:
            if self.file_type == 'csv':
                s = '{},{},{},{},{},{},{},{},{}'.format(
                    a.station_name, a.easting, a.northing, a.lon, a.lat,
                    a.elevation, a.X, a.Y, a.Z)
            else:
                s = '{:6s}  {:.2f}  {:.2f}  {:.6f}  {:.6f}  {:.1f}  {:.6f}  {:.6f}  {:.6f}'.format(
                    a.station_name, a.easting, a.northing, a.lon, a.lat,
                    a.elevation, a.X, a.Y, a.Z)
            ret.append(s)
        # Mirror the input shape: a bare object in gives a bare string out.
        if not is_list:
            ret = ret[0]
        return ret

    def print_loc_info(self, loc_list):
        """
        Print out location information as returned from get_location.

        Parameters
        ----------
        loc_list : list of str
            List of location_names to print information for.
        """
        if loc_list is None or len(loc_list) == 0:
            print("No locations found.")
            return
        for a in loc_list:
            print('station_name: ', a.station_name)
            print('\teasting: ', a.easting)
            print('\tnorthing: ', a.northing)
            print('\tlon/lat: ', a.lon, a.lat)
            print('\televation: ', a.elevation)
            print('\tX, Y, Z: {}, {}, {}'.format(a.X, a.Y, a.Z))
            print('\tstation description ({}): {}'.format(a.station_type_name, a.desc))
            print('\tcreated: ', cm_utils.get_time_for_display(a.created_date))

    def parse_station_types_to_check(self, sttc):
        """
        Parse station strings to list of stations.

        Parameters
        ----------
        sttc : str or list of str
            Stations to check, can be a list of stations or "all" or "default".

        Returns
        -------
        list of str
            List of stations.
        """
        self.get_station_types()
        if isinstance(sttc, str):
            if sttc.lower() == 'all':
                return list(self.station_types.keys())
            elif sttc.lower() == 'default':
                sttc = cm_sysdef.hera_zone_prefixes
            else:
                sttc = [sttc]
        sttypes = set()
        for s in sttc:
            # Accept either a station-type name or a station-name prefix.
            if s.lower() in self.station_types.keys():
                sttypes.add(s.lower())
            else:
                for k, st in self.station_types.items():
                    if s.upper() == st['Prefix'][:len(s)].upper():
                        sttypes.add(k.lower())
        return list(sttypes)

    def get_ants_installed_since(self, query_date, station_types_to_check='all'):
        """
        Get list of antennas installed since query_date.

        Parameters
        ----------
        query_date : astropy Time
            Date to get limit check for installation.
        station_types_to_check : str or list of str
            Stations types to limit check.

        Returns
        -------
        list of GeoLocation objects
            Stations created on/after query_date, with lon/lat and X/Y/Z
            attributes added (same enrichment as get_location).
        """
        import cartopy.crs as ccrs
        station_types_to_check = self.parse_station_types_to_check(station_types_to_check)
        dt = query_date.gps
        latlon_p = ccrs.Geodetic()
        utm_p = ccrs.UTM(self.hera_zone[0])
        lat_corr = self.lat_corr[self.hera_zone[1]]
        found_stations = []
        for a in self.session.query(geo_location.GeoLocation).filter(
                geo_location.GeoLocation.created_gpstime >= dt):
            if a.station_type_name.lower() in station_types_to_check:
                a.gps2Time()
                a.desc = self.station_types[a.station_type_name]['Description']
                a.lon, a.lat = latlon_p.transform_point(a.easting, a.northing - lat_corr, utm_p)
                a.X, a.Y, a.Z = uvutils.XYZ_from_LatLonAlt(radians(a.lat), radians(a.lon),
                                                           a.elevation)
                found_stations.append(copy.copy(a))
                if self.fp_out is not None and not self.testing:  # pragma: no cover
                    self.fp_out.write('{}\n'.format(self._loc_line(a)))
        return found_stations

    def get_antenna_label(self, label_to_show, stn, query_date):
        """
        Get a label for a station.

        Parameters
        ----------
        label_to_show : str
            Specify label type, one of ["name", "num", "ser"]
        stn : GeoLocation object
            station to get label for.
        query_date : string, int, Time, datetime
            Date to get information for, anything that can be parsed by `get_astropytime`.

        Returns
        -------
        str
            station label, or None if no antenna/label is found.
        """
        if label_to_show == 'name':
            return stn.station_name
        ant, rev = self.find_antenna_at_station(stn.station_name, query_date)
        if ant is None:
            return None
        if label_to_show == 'num':
            # strip removes all leading/trailing 'A' characters, leaving the number.
            return ant.strip('A')
        if label_to_show == 'ser':
            p = self.session.query(cm_partconnect.Parts).filter(
                (cm_partconnect.Parts.hpn == ant)
                & (cm_partconnect.Parts.hpn_rev == rev))
            if p.count() == 1:
                return p.first().manufacturer_number.replace('S/N', '')
            else:
                # Ambiguous or missing part record.
                return '-'
        return None

    def plot_stations(self, locations, **kwargs):  # pragma: no cover
        """
        Plot a list of stations.

        Parameters
        ----------
        locations : list of GeoLocation objects
            stations to plot (note: NOT antenna_numbers)
        kwargs : dict
            arguments for marker_color, marker_shape, marker_size, label, xgraph, ygraph
        """
        if not len(locations) or not self.graph or self.testing:
            return
        displaying_label = bool(kwargs['label'])
        if displaying_label:
            label_to_show = kwargs['label'].lower()
        fig_label = "{} vs {} Antenna Positions".format(kwargs['xgraph'], kwargs['ygraph'])
        import matplotlib.pyplot as plt
        for a in locations:
            # Pick the two plotted coordinates via the coord axis-code map.
            pt = {'easting': a.easting, 'northing': a.northing, 'elevation': a.elevation}
            X = pt[self.coord[kwargs['xgraph']]]
            Y = pt[self.coord[kwargs['ygraph']]]
            plt.plot(X, Y, color=kwargs['marker_color'], label=a.station_name,
                     marker=kwargs['marker_shape'], markersize=kwargs['marker_size'])
            if displaying_label:
                labeling = self.get_antenna_label(label_to_show, a, self.query_date)
                if labeling:
                    plt.annotate(labeling, xy=(X, Y), xytext=(X + 2, Y))
        # Axes/titles are set only once per figure.
        if not self.axes_set:
            self.axes_set = True
            if kwargs['xgraph'].upper() != 'Z' and kwargs['ygraph'].upper() != 'Z':
                plt.axis('equal')
            plt.xlabel(kwargs['xgraph'] + ' [m]')
            plt.ylabel(kwargs['ygraph'] + ' [m]')
            plt.title(fig_label)
        return

    def plot_all_stations(self):
        """Plot all stations from the packaged HERA_350.txt layout file.

        Returns
        -------
        int
            Number of stations in the layout file.
        """
        if not self.graph:
            return
        import os.path
        import numpy
        import matplotlib.pyplot as plt
        # Columns 1-3 are easting, northing, elevation.
        p = numpy.loadtxt(os.path.join(mc.data_path, "HERA_350.txt"), usecols=(1, 2, 3))
        if not self.testing:  # pragma: no cover
            plt.plot(p[:, 0], p[:, 1], marker='o', color='0.8', linestyle='none')
        return len(p[:, 0])

    def get_active_stations(self, query_date, station_types_to_use, hookup_type=None):
        """
        Get active stations.

        Parameters
        ----------
        query_date : string, int, Time, datetime
            Date to get active stations for, anything that can be parsed by `get_astropytime`.
        station_types_to_use : str or list of str
            Stations to use, can be a list of stations or "all" or "default".
        hookup_type : str
            hookup_type to use

        Returns
        -------
        list of GeoLocation objects
            List of GeoLocation objects for all active stations.
        """
        from . import cm_hookup, cm_revisions
        query_date = cm_utils.get_astropytime(query_date)
        hookup = cm_hookup.Hookup(self.session)
        hookup_dict = hookup.get_hookup(
            hookup.hookup_list_to_cache, at_date=query_date,
            hookup_type=hookup_type)
        self.station_types_to_use = self.parse_station_types_to_check(station_types_to_use)
        active_stations = []
        for st in self.station_types_to_use:
            for loc in self.station_types[st]['Stations']:
                # A station is "active" when it has a full hookup revision.
                if cm_revisions.get_full_revision(loc, hookup_dict):
                    active_stations.append(loc)
        if len(active_stations):
            print("{}.....".format(12 * '.'))
            print("{:12s} {:3d}".format('active', len(active_stations)))
        return self.get_location(active_stations, query_date)

    def plot_station_types(self, query_date, station_types_to_use, **kwargs):
        """
        Plot the various sub-array types.

        Parameters
        ----------
        query_date : string, int, Time, datetime
            Date to get active stations for, anything that can be parsed by `get_astropytime`.
        station_types_to_use : str or list of str
            station_types or prefixes to plot.
        kwargs : dict
            matplotlib arguments for marker_color, marker_shape, marker_size, label, xgraph, ygraph
        """
        # Guard so the background station types are only drawn once per figure.
        if self.station_types_plotted:
            return
        self.station_types_plotted = True
        self.axes_set = False
        station_types_to_use = self.parse_station_types_to_check(station_types_to_use)
        total_plotted = 0
        for st in sorted(station_types_to_use):
            # Marker string encodes [color, shape] for this station type.
            kwargs['marker_color'] = self.station_types[st]['Marker'][0]
            kwargs['marker_shape'] = self.station_types[st]['Marker'][1]
            kwargs['marker_size'] = 5
            stations_to_plot = self.get_location(self.station_types[st]['Stations'], query_date)
            self.plot_stations(stations_to_plot, **kwargs)
            if len(stations_to_plot):
                print("{:12s} {:3d}".format(st, len(stations_to_plot)))
                total_plotted += len(stations_to_plot)
        print("{:12s} ---".format(' '))
        print("{:12s} {:3d}".format('Total', total_plotted))
| |
"""mrecords
Defines the equivalent of recarrays for maskedarray.
Masked arrays already support named fields, but masking works only by records.
By comparison, mrecarrays support masking individual fields.
:author: Pierre Gerard-Marchant
"""
#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask',
#!!!: or whatever restricted keywords.
#!!!: An idea would be to no bother in the first place, and then rename the
#!!!: invalid fields with a trailing underscore...
#!!!: Maybe we could just overload the parser function ?
__author__ = "Pierre GF Gerard-Marchant"
import sys
import numpy as np
from numpy import bool_, dtype, \
ndarray, recarray, array as narray
import numpy.core.numerictypes as ntypes
from numpy.core.records import fromarrays as recfromarrays, \
fromrecords as recfromrecords
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array,\
getdata, getmaskarray, filled
_check_fill_value = ma.core._check_fill_value
import warnings
__all__ = ['MaskedRecords','mrecarray',
'fromarrays','fromrecords','fromtextfile','addfield',
]
reserved_fields = ['_data','_mask','_fieldmask', 'dtype']
def _getformats(data):
"Returns the formats of each array of arraylist as a comma-separated string."
if hasattr(data,'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += `obj.itemsize`
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
    """Checks that the field names of the descriptor ``descr`` are not some
    reserved keywords. If this is the case, a default 'f%i' is substituted.
    If the argument `names` is not None, updates the field names to valid names.

    Parameters
    ----------
    descr : np.dtype
        Structured dtype whose field names are validated.
    names : {None, tuple, list, str}, optional
        Replacement names; a comma-separated string is split.  Missing
        trailing names fall back to the 'f%i' defaults.

    Raises
    ------
    NameError
        If `names` is neither None, a sequence, nor a string.
    """
    ndescr = len(descr)
    default_names = ['f%i' % i for i in range(ndescr)]
    if names is None:
        new_names = default_names
    else:
        if isinstance(names, (tuple, list)):
            new_names = names
        elif isinstance(names, str):
            new_names = names.split(',')
        else:
            # %r replaces the Python-2-only backtick repr (SyntaxError on py3).
            raise NameError("illegal input names %r" % (names,))
        nnames = len(new_names)
        if nnames < ndescr:
            # Pad with defaults when fewer names than fields were given.
            new_names += default_names[nnames:]
    ndescr = []
    for (n, d, t) in zip(new_names, default_names, descr.descr):
        if n in reserved_fields:
            if t[0] in reserved_fields:
                # Both requested and original names are reserved: use default.
                ndescr.append((d, t[1]))
            else:
                ndescr.append(t)
        else:
            ndescr.append((n, t[1]))
    return np.dtype(ndescr)
def _get_fieldmask(self):
mdescr = [(n,'|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False]*len(mdescr))
return fdmask
class MaskedRecords(MaskedArray, object):
    """
    *IVariables*:
    _data : {recarray}
        Underlying data, as a record array.
    _mask : {boolean array}
        Mask of the records. A record is masked when all its fields are masked.
    _fieldmask : {boolean recarray}
        Record array of booleans, setting the mask of each individual field of each record.
    _fill_value : {record}
        Filling values for each field.

    NOTE(review): this class uses Python 2 only syntax (``raise E, v``,
    ``basestring``) which is preserved verbatim below.
    """
    _defaultfieldmask = nomask
    _defaulthardmask = False

    def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False,
                mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
                copy=False,
                **options):
        # Build the underlying recarray first; the mask is attached afterwards.
        self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                                strides=strides, formats=formats, names=names,
                                titles=titles, byteorder=byteorder,
                                aligned=aligned,)
        # One boolean per field, mirroring the data dtype.
        mdtype = ma.make_mask_descr(self.dtype)
        if mask is nomask or not np.size(mask):
            if not keep_mask:
                self._mask = tuple([False]*len(mdtype))
        else:
            mask = np.array(mask, copy=copy)
            if mask.shape != self.shape:
                (nd, nm) = (self.size, mask.size)
                if nm == 1:
                    mask = np.resize(mask, self.shape)
                elif nm == nd:
                    mask = np.reshape(mask, self.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, "+\
                          "mask size is %i."
                    raise MAError(msg % (nd, nm))
                copy = True
            if not keep_mask:
                self.__setmask__(mask)
                self._sharedmask = True
            else:
                if mask.dtype == mdtype:
                    _mask = mask
                else:
                    # Broadcast a plain boolean mask across every field.
                    _mask = np.array([tuple([m]*len(mdtype)) for m in mask],
                                     dtype=mdtype)
                self._mask = _mask
        return self

    def __array_finalize__(self,obj):
        # Make sure we have a _fieldmask by default ..
        _mask = getattr(obj, '_mask', None)
        if _mask is None:
            objmask = getattr(obj, '_mask', nomask)
            _dtype = ndarray.__getattribute__(self,'dtype')
            if objmask is nomask:
                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
            else:
                mdescr = ma.make_mask_descr(_dtype)
                _mask = narray([tuple([m]*len(mdescr)) for m in objmask],
                               dtype=mdescr).view(recarray)
        # Update some of the attributes
        _dict = self.__dict__
        _dict.update(_mask=_mask, _fieldmask=_mask)
        self._update_from(obj)
        if _dict['_baseclass'] == ndarray:
            _dict['_baseclass'] = recarray
        return

    def _getdata(self):
        "Returns the data as a recarray."
        return ndarray.view(self,recarray)

    # Read-only view of the data portion, without the mask machinery.
    _data = property(fget=_getdata)

    def __len__(self):
        "Returns the length"
        # We have more than one record
        if self.ndim:
            return len(self._data)
        # We have only one record: return the nb of fields
        return len(self.dtype)

    def __getattribute__(self, attr):
        # Regular attributes win; anything else is looked up as a field name.
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:  # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self,'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        # So far, so good...
        _localdict = ndarray.__getattribute__(self,'__dict__')
        _data = ndarray.view(self, _localdict['_baseclass'])
        obj = _data.getfield(*res)
        if obj.dtype.fields:
            raise NotImplementedError("MaskedRecords is currently limited to"\
                                      "simple records...")
        obj = obj.view(MaskedArray)
        obj._baseclass = ndarray
        obj._isfield = True
        # Get some special attributes
        _fill_value = _localdict.get('_fill_value', None)
        _mask = _localdict.get('_mask', None)
        # Reset the object's mask
        if _mask is not None:
            try:
                obj._mask = _mask[attr]
            except IndexError:
                # Couldn't find a mask: use the default (nomask)
                pass
        # Reset the field values
        if _fill_value is not None:
            try:
                obj._fill_value = _fill_value[attr]
            except ValueError:
                obj._fill_value = None
        return obj

    def __setattr__(self, attr, val):
        "Sets the attribute attr to the value val."
        # Should we call __setmask__ first ?
        if attr in ['_mask','mask','_fieldmask','fieldmask']:
            self.__setmask__(val)
            return
        # Create a shortcut (so that we don't have to call getattr all the time)
        _localdict = object.__getattribute__(self, '__dict__')
        # Check whether we're creating a new field
        newattr = attr not in _localdict
        try:
            # Is attr a generic attribute ?
            ret = object.__setattr__(self, attr, val)
        except:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
            optinfo = ndarray.__getattribute__(self,'_optinfo') or {}
            if not (attr in fielddict or attr in optinfo):
                exctype, value = sys.exc_info()[:2]
                raise exctype, value
        else:
            # Get the list of names ......
            fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
            # Check the attribute
            if attr not in fielddict:
                return ret
            if newattr:  # We just added this one
                try:  # or this setattr worked on an internal
                      # attribute.
                    object.__delattr__(self, attr)
                except:
                    return ret
        # Let's try to set the field
        try:
            res = fielddict[attr][:2]
        except (TypeError,KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        # A masked value stores the fill value (if any) and masks the field.
        if val is masked:
            _fill_value = _localdict['_fill_value']
            if _fill_value is not None:
                dval = _localdict['_fill_value'][attr]
            else:
                dval = val
            mval = True
        else:
            dval = filled(val)
            mval = getmaskarray(val)
        obj = ndarray.__getattribute__(self,'_data').setfield(dval, *res)
        _localdict['_mask'].__setitem__(attr, mval)
        return obj

    def __getitem__(self, indx):
        """Returns all the fields sharing the same fieldname base.
        The fieldname base is either `_data` or `_mask`."""
        _localdict = self.__dict__
        _mask = ndarray.__getattribute__(self,'_mask')
        _data = ndarray.view(self, _localdict['_baseclass'])
        # We want a field ........
        if isinstance(indx, basestring):
            #!!!: Make sure _sharedmask is True to propagate back to _fieldmask
            #!!!: Don't use _set_mask, there are some copies being made...
            #!!!: ...that break propagation
            #!!!: Don't force the mask to nomask, that wrecks easy masking
            obj = _data[indx].view(MaskedArray)
            obj._mask = _mask[indx]
            obj._sharedmask = True
            fval = _localdict['_fill_value']
            if fval is not None:
                obj._fill_value = fval[indx]
            # Force to masked if the mask is True
            if not obj.ndim and obj._mask:
                return masked
            return obj
        # We want some elements ..
        # First, the data ........
        obj = np.array(_data[indx], copy=False).view(mrecarray)
        obj._mask = np.array(_mask[indx], copy=False).view(recarray)
        return obj

    def __setitem__(self, indx, value):
        "Sets the given record to value."
        MaskedArray.__setitem__(self, indx, value)
        # For field assignment, keep the per-field mask in sync.
        if isinstance(indx, basestring):
            self._mask[indx] = ma.getmaskarray(value)

    def __str__(self):
        "Calculates the string representation."
        if self.size > 1:
            mstr = ["(%s)" % ",".join([str(i) for i in s])
                    for s in zip(*[getattr(self,f) for f in self.dtype.names])]
            return "[%s]" % ", ".join(mstr)
        else:
            mstr = ["%s" % ",".join([str(i) for i in s])
                    for s in zip([getattr(self,f) for f in self.dtype.names])]
            return "(%s)" % ", ".join(mstr)

    def __repr__(self):
        "Calculates the repr representation."
        _names = self.dtype.names
        fmt = "%%%is : %%s" % (max([len(n) for n in _names])+4,)
        reprstr = [fmt % (f,getattr(self,f)) for f in self.dtype.names]
        reprstr.insert(0,'masked_records(')
        reprstr.extend([fmt % ('    fill_value', self.fill_value),
                        '              )'])
        return str("\n".join(reprstr))

    def view(self, obj):
        """Returns a view of the mrecarray."""
        # A class target goes straight through ndarray.view; a dtype-like
        # target without fields views the plain data instead.
        try:
            if issubclass(obj, ndarray):
                return ndarray.view(self, obj)
        except TypeError:
            pass
        dtype_ = np.dtype(obj)
        if dtype_.fields is None:
            return self.__array__().view(dtype_)
        return ndarray.view(self, obj)

    def harden_mask(self):
        "Forces the mask to hard"
        self._hardmask = True

    def soften_mask(self):
        "Forces the mask to soft"
        self._hardmask = False

    def copy(self):
        """Returns a copy of the masked record."""
        _localdict = self.__dict__
        copied = self._data.copy().view(type(self))
        copied._fieldmask = self._fieldmask.copy()
        return copied

    def tolist(self, fill_value=None):
        """Copy the data portion of the array to a hierarchical python
        list and returns that list.

        Data items are converted to the nearest compatible Python
        type. Masked values are converted to fill_value. If
        fill_value is None, the corresponding entries in the output
        list will be ``None``.
        """
        if fill_value is not None:
            return self.filled(fill_value).tolist()
        result = narray(self.filled().tolist(), dtype=object)
        mask = narray(self._fieldmask.tolist())
        result[mask] = None
        return result.tolist()

    # --------------------------------------------
    # Pickling
    def __getstate__(self):
        """Return the internal state of the masked array, for pickling purposes.
        """
        state = (1,
                 self.shape,
                 self.dtype,
                 self.flags.fnc,
                 self._data.tostring(),
                 self._fieldmask.tostring(),
                 self._fill_value,
                 )
        return state

    def __setstate__(self, state):
        """Restore the internal state of the masked array, for pickling purposes.
        ``state`` is typically the output of the ``__getstate__`` output, and is a
        5-tuple:

        - class name
        - a tuple giving the shape of the data
        - a typecode for the data
        - a binary string for the data
        - a binary string for the mask.
        """
        (ver, shp, typ, isf, raw, msk, flv) = state
        ndarray.__setstate__(self, (shp, typ, isf, raw))
        # Rebuild the field mask with a matching all-boolean dtype.
        mdtype = dtype([(k,bool_) for (k,_) in self.dtype.descr])
        self.__dict__['_fieldmask'].__setstate__((shp, mdtype, isf, msk))
        self.fill_value = flv

    def __reduce__(self):
        """Return a 3-tuple for pickling a MaskedArray.
        """
        return (_mrreconstruct,
                (self.__class__, self._baseclass, (0,), 'b', ),
                self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.

    Creates a minimal (shape ``baseshape``) instance; __setstate__ then
    restores the real data and mask.
    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
    # Earlier variant kept for reference:
    # _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
    # return _data
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
#####---------------------------------------------------------------------------
#---- --- Constructors ---
#####---------------------------------------------------------------------------
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None,
               fill_value=None):
    """Creates a mrecarray from a (flat) list of masked arrays.

    Parameters
    ----------
    arraylist : sequence
        A list of (masked) arrays. Each element of the sequence is first converted
        to a masked array if needed. If a 2D array is passed as argument, it is
        processed line by line
    dtype : {None, dtype}, optional
        Data type descriptor.
    shape : {None, integer}, optional
        Number of records. If None, shape is defined from the shape of the
        first array in the list.
    formats : {None, sequence}, optional
        Sequence of formats for each individual field. If None, the formats will
        be autodetected by inspecting the fields and selecting the highest dtype
        possible.
    names : {None, sequence}, optional
        Sequence of the names of each field.
    fill_value : {None, sequence}, optional
        Sequence of data to be used as filling values.

    Notes
    -----
    Lists of tuples should be preferred over lists of lists for faster processing.
    """
    datalist = [getdata(x) for x in arraylist]
    masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
    _array = recfromarrays(datalist,
                           dtype=dtype, shape=shape, formats=formats,
                           names=names, titles=titles, aligned=aligned,
                           byteorder=byteorder).view(mrecarray)
    # Materialize zip() so this also works on Python 3, where zip returns a
    # lazy iterator that cannot be assigned to .flat.
    _array._mask.flat = list(zip(*masklist))
    if fill_value is not None:
        _array.fill_value = fill_value
    return _array
#..............................................................................
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None,
                fill_value=None, mask=nomask):
    """Creates a MaskedRecords from a list of records.

    Parameters
    ----------
    reclist : sequence
        A list of records (tuples), or an ndarray of records.  If a 2D
        array is passed as argument, it is processed line by line.
    dtype : {None, dtype}, optional
        Data type descriptor.
    shape : {None, int}, optional
        Number of records. If None, ``shape`` is defined from the shape of
        the first array in the list.
    formats : {None, sequence}, optional
        Sequence of formats for each individual field. If None, the formats
        will be autodetected by inspecting the fields and selecting the
        highest dtype possible.
    names : {None, sequence}, optional
        Sequence of the names of each field.
    fill_value : {None, sequence}, optional
        Sequence of data to be used as filling values.
    mask : {nomask, sequence}, optional.
        External mask to apply on the data.

    Notes
    -----
    Lists of tuples should be preferred over lists of lists for faster
    processing.
    """
    # Grab the initial _fieldmask, if needed:
    _fieldmask = getattr(reclist, '_fieldmask', None)
    # Get the list of records.....
    # NOTE(review): nfields is never used afterwards; the lookup appears to
    # double as an early sanity check on reclist's shape — confirm.
    try:
        nfields = len(reclist[0])
    except TypeError:
        nfields = len(reclist[0].dtype)
    if isinstance(reclist, ndarray):
        # Make sure we don't have some hidden mask
        if isinstance(reclist,MaskedArray):
            reclist = reclist.filled().view(ndarray)
        # Grab the initial dtype, just in case
        if dtype is None:
            dtype = reclist.dtype
        reclist = reclist.tolist()
    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
                          names=names, titles=titles,
                          aligned=aligned, byteorder=byteorder).view(mrecarray)
    # Set the fill_value if needed
    if fill_value is not None:
        mrec.fill_value = fill_value
    # Now, let's deal w/ the mask
    if mask is not nomask:
        mask = np.array(mask, copy=False)
        maskrecordlength = len(mask.dtype)
        if maskrecordlength:
            # Structured mask: one boolean per field, per record.
            mrec._fieldmask.flat = mask
        elif len(mask.shape) == 2:
            # 2D plain mask: one row per record, converted to field tuples.
            mrec._fieldmask.flat = [tuple(m) for m in mask]
        else:
            # 1D mask: applies to whole records.
            mrec._mask = mask
    # A fieldmask carried by the input takes precedence over `mask`.
    if _fieldmask is not None:
        mrec._fieldmask[:] = _fieldmask
    return mrec
def _guessvartypes(arr):
"""Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise
conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test is performed
on the first line. An exception is raised if the file is 3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if len(arr.shape) == 2 :
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError, "The array should be 2D at most!"
# Start the conversion loop .......
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
val = complex(f)
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
def openfile(fname):
    """Return an open text-mode file handle for `fname`.

    If `fname` already looks like a file handle (has a ``readline``
    attribute), it is returned unchanged.  Otherwise the file is opened and
    a crude binary sniff is performed on its first line.

    Raises
    ------
    IOError
        If the file cannot be opened.
    NotImplementedError
        If the first line starts with the literal two characters ``\\x``
        (treated as a binary file).
    """
    # Already a file-like object: hand it back untouched.
    if hasattr(fname, 'readline'):
        return fname
    # Try to open the file and guess its type
    try:
        f = open(fname)
    except IOError:
        # Fixed: was the Python-2-only `raise IOError, "..."` syntax.
        raise IOError("No such file: '%s'" % fname)
    if f.readline()[:2] != "\\x":
        # Looks like text: rewind so the caller sees the whole file.
        f.seek(0, 0)
        return f
    raise NotImplementedError("Wow, binary file")
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
                 varnames=None, vartypes=None):
    """Creates a mrecarray from data stored in the file `fname`.

    Ultra simple format: the variable names are on the first non-empty,
    non-comment line; each following line is one record.  Fields equal to
    `missingchar` become masked entries.

    Parameters
    ----------
    fname : {file name/handle}
        Name or handle of the file to read.
    delimitor : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white spacestring(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.
    """
    # Try to open the file ......................
    f = openfile(fname)
    # Get the first non-empty line as the varnames
    # NOTE(review): loops forever if no line splits into >1 field — confirm
    # callers always supply a header line.
    while True:
        line = f.readline()
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames
    # Get the data ..............................
    _variables = masked_array([line.strip().split(delimitor) for line in f
                               if line[0] != commentchar and len(line) > 1])
    (_, nfields) = _variables.shape
    # Try to guess the dtype ....................
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        # Wrong number of explicit dtypes: warn and fall back to guessing.
        if len(vartypes) != nfields:
            # NOTE(review): warning wording is garbled ("Attempting to %i
            # dtypes for %i fields!"), though the format itself is valid.
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields))
            vartypes = _guessvartypes(_variables[0])
    # Construct the descriptor ..................
    mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]
    # Get the data and the mask .................
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a,mask=m,dtype=t,fill_value=f)
                 for (a,m,t,f) in zip(_variables.T, _mask, vartypes, mfillv)]
    return fromarrays(_datalist, dtype=mdescr)
#....................................................................
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array.

    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
    is None (or collides with a reserved field name), the new field name is
    set to 'fi', where `i` is the number of existing fields.

    Returns a new MaskedRecords instance; `mrecord` itself is not modified.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray with the extended dtype.
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing fields (list comp used purely for its side effect).
    [newdata.setfield(_data.getfield(*f),*f)
     for f in _data.dtype.fields.values()]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray (one boolean per field).
    newmdtype = np.dtype([(n,bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [newmask.setfield(_mask.getfield(*f),*f)
     for f in _mask.dtype.fields.values()]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._fieldmask = newmask
    return newdata
| |
from django.db import models
from django.utils import timezone
from abstract import AbstractVote
from core import Makey, Tutorial
# from interactions import UserInteraction, Interaction
from review import ProductReview, ShopReview, SpaceReview
class VoteProductReview(AbstractVote):
    """A user's up/down vote on a ProductReview.

    Saving keeps ``review.votes`` in sync.  Re-casting the same vote toggles
    it off (the old row is deleted and no new row is saved); casting the
    opposite vote replaces the old one.  Upvotes count +1, downvotes -1.
    """
    review = models.ForeignKey(ProductReview)
    class Meta:
        app_label = 'catalog'
        # At most one vote per (user, review) pair.
        unique_together = (("user", "review"),)
    def __unicode__(self):
        if self.vote:
            return unicode(self.user) + " upvoted " + unicode(self.review)
        else:
            return unicode(self.user) + " downvoted " + unicode(self.review)
    # def __new_user_interaction(self):
    #     if self.vote:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.upvote_product_review,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     else:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.downvote_product_review,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     interaction.save()
    def save(self, *args, **kwargs):
        # Look for an earlier vote by this user on the same review.
        # NOTE(review): the offsets below compensate for the count change
        # made by previous_vote.delete(); self.review is a separate, possibly
        # stale, instance — verify the net arithmetic against the DB.
        same_vote = self.__class__.objects.filter(user=self.user,
                                                   review=self.review,)
        if same_vote.exists():
            previous_vote = same_vote[0]
            previous_vote.delete()
            if previous_vote.vote:
                if self.vote:
                    # Same (up)vote again: toggle off, do not re-save.
                    self.review.votes -= 1
                    self.review.save()
                else:
                    # Upvote replaced by downvote.
                    self.review.votes -= 2
                    self.review.save()
                    super(VoteProductReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
            else:
                if self.vote:
                    # Downvote replaced by upvote.
                    self.review.votes += 2
                    self.review.save()
                    super(VoteProductReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
                else:
                    # Same (down)vote again: toggle off, do not re-save.
                    self.review.votes += 1
                    self.review.save()
        else:
            # First vote by this user on this review.
            if self.vote:
                self.review.votes += 1
                self.review.save()
            else:
                self.review.votes -= 1
                self.review.save()
            super(VoteProductReview, self).save(*args, **kwargs)
            # self.__new_user_interaction()
    def delete(self, *args, **kwargs):
        # Undo this vote's contribution to the cached count before deleting.
        if self.vote:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.upvote_product_review,
            #     event_id=self.id)
            self.review.votes -= 1
        else:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.downvote_product_review,
            #     event_id=self.id)
            self.review.votes += 1
        self.review.save()
        # interaction.delete()
        super(VoteProductReview, self).delete(*args, **kwargs)
class VoteShopReview(AbstractVote):
    """A user's up/down vote on a ShopReview.

    Same vote-toggling/replacement semantics as VoteProductReview: upvotes
    count +1, downvotes -1 toward ``review.votes``.
    """
    review = models.ForeignKey(ShopReview)
    class Meta:
        app_label = 'catalog'
        # At most one vote per (user, review) pair.
        unique_together = (("user", "review"),)
    def __unicode__(self):
        if self.vote:
            return unicode(self.user) + " upvoted " + unicode(self.review)
        else:
            return unicode(self.user) + " downvoted " + unicode(self.review)
    # def __new_user_interaction(self):
    #     # if self.vote:
    #     #     interaction = UserInteraction(
    #     #         user=self.user,
    #     #         event=Interaction.upvote_shop_review,
    #     #         event_id=self.id,
    #     #         added_time=timezone.now())
    #     # else:
    #     #     interaction = UserInteraction(
    #     #         user=self.user,
    #     #         event=Interaction.downvote_shop_review,
    #     #         event_id=self.id,
    #     #         added_time=timezone.now())
    #     # interaction.save()
    def save(self, *args, **kwargs):
        # Look for an earlier vote by this user on the same review; the
        # offsets below account for previous_vote.delete()'s own adjustment.
        same_vote = self.__class__.objects.filter(user=self.user,
                                                   review=self.review,)
        if same_vote.exists():
            previous_vote = same_vote[0]
            previous_vote.delete()
            if previous_vote.vote:
                if self.vote:
                    # Same (up)vote again: toggle off, do not re-save.
                    self.review.votes -= 1
                    self.review.save()
                else:
                    # Upvote replaced by downvote.
                    self.review.votes -= 2
                    self.review.save()
                    super(VoteShopReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
            else:
                if self.vote:
                    # Downvote replaced by upvote.
                    self.review.votes += 2
                    self.review.save()
                    super(VoteShopReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
                else:
                    # Same (down)vote again: toggle off, do not re-save.
                    self.review.votes += 1
                    self.review.save()
        else:
            # First vote by this user on this review.
            if self.vote:
                self.review.votes += 1
                self.review.save()
            else:
                self.review.votes -= 1
                self.review.save()
            super(VoteShopReview, self).save(*args, **kwargs)
            # self.__new_user_interaction()
    def delete(self, *args, **kwargs):
        # Undo this vote's contribution to the cached count before deleting.
        if self.vote:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.upvote_shop_review,
            #     event_id=self.id)
            self.review.votes -= 1
        else:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.downvote_shop_review,
            #     event_id=self.id)
            self.review.votes += 1
        self.review.save()
        # interaction.delete()
        super(VoteShopReview, self).delete(*args, **kwargs)
class VoteSpaceReview(AbstractVote):
    """A user's up/down vote on a SpaceReview.

    Same vote-toggling/replacement semantics as VoteProductReview: upvotes
    count +1, downvotes -1 toward ``review.votes``.
    """
    review = models.ForeignKey(SpaceReview)
    class Meta:
        app_label = 'catalog'
        # At most one vote per (user, review) pair.
        unique_together = (("user", "review"),)
    def __unicode__(self):
        if self.vote:
            return unicode(self.user) + " upvoted " + unicode(self.review)
        else:
            return unicode(self.user) + " downvoted " + unicode(self.review)
    # def __new_user_interaction(self):
    #     if self.vote:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.upvote_space_review,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     else:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.downvote_space_review,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     interaction.save()
    def save(self, *args, **kwargs):
        # Look for an earlier vote by this user on the same review; the
        # offsets below account for previous_vote.delete()'s own adjustment.
        same_vote = self.__class__.objects.filter(user=self.user,
                                                   review=self.review,)
        if same_vote.exists():
            previous_vote = same_vote[0]
            previous_vote.delete()
            if previous_vote.vote:
                if self.vote:
                    # Same (up)vote again: toggle off, do not re-save.
                    self.review.votes -= 1
                    self.review.save()
                else:
                    # Upvote replaced by downvote.
                    self.review.votes -= 2
                    self.review.save()
                    super(VoteSpaceReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
            else:
                if self.vote:
                    # Downvote replaced by upvote.
                    self.review.votes += 2
                    self.review.save()
                    super(VoteSpaceReview, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
                else:
                    # Same (down)vote again: toggle off, do not re-save.
                    self.review.votes += 1
                    self.review.save()
        else:
            # First vote by this user on this review.
            if self.vote:
                self.review.votes += 1
                self.review.save()
            else:
                self.review.votes -= 1
                self.review.save()
            super(VoteSpaceReview, self).save(*args, **kwargs)
            # self.__new_user_interaction()
    def delete(self, *args, **kwargs):
        # Undo this vote's contribution to the cached count before deleting.
        if self.vote:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.upvote_space_review,
            #     event_id=self.id)
            self.review.votes -= 1
        else:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.downvote_space_review,
            #     event_id=self.id)
            self.review.votes += 1
        self.review.save()
        # interaction.delete()
        super(VoteSpaceReview, self).delete(*args, **kwargs)
class VoteMakey(AbstractVote):
    """A user's up/down vote on a Makey.

    Same vote-toggling/replacement semantics as the review vote models:
    upvotes count +1, downvotes -1 toward ``makey.votes``.
    """
    makey = models.ForeignKey(Makey)
    class Meta:
        app_label = 'catalog'
        # At most one vote per (user, makey) pair.
        unique_together = (("user", "makey"),)
    def __unicode__(self):
        if self.vote:
            return unicode(self.user) + " upvoted " + unicode(self.makey)
        else:
            return unicode(self.user) + " downvoted " + unicode(self.makey)
    # def __new_user_interaction(self):
    #     if self.vote:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.upvote_makey,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     else:
    #         interaction = UserInteraction(
    #             user=self.user,
    #             event=Interaction.downvote_makey,
    #             event_id=self.id,
    #             added_time=timezone.now())
    #     interaction.save()
    def save(self, *args, **kwargs):
        # Look for an earlier vote by this user on the same makey; the
        # offsets below account for previous_vote.delete()'s own adjustment.
        same_vote = self.__class__.objects.filter(user=self.user,
                                                   makey=self.makey,)
        if same_vote.exists():
            previous_vote = same_vote[0]
            previous_vote.delete()
            if previous_vote.vote:
                if self.vote:
                    # Same (up)vote again: toggle off, do not re-save.
                    self.makey.votes -= 1
                    self.makey.save()
                else:
                    # Upvote replaced by downvote.
                    self.makey.votes -= 2
                    self.makey.save()
                    super(VoteMakey, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
            else:
                if self.vote:
                    # Downvote replaced by upvote.
                    self.makey.votes += 2
                    self.makey.save()
                    super(VoteMakey, self).save(*args, **kwargs)
                    # self.__new_user_interaction()
                else:
                    # Same (down)vote again: toggle off, do not re-save.
                    self.makey.votes += 1
                    self.makey.save()
        else:
            # First vote by this user on this makey.
            if self.vote:
                self.makey.votes += 1
                self.makey.save()
            else:
                self.makey.votes -= 1
                self.makey.save()
            super(VoteMakey, self).save(*args, **kwargs)
            # self.__new_user_interaction()
    def delete(self, *args, **kwargs):
        # Undo this vote's contribution to the cached count before deleting.
        if self.vote:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.upvote_makey,
            #     event_id=self.id)
            self.makey.votes -= 1
        else:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.downvote_makey,
            #     event_id=self.id)
            self.makey.votes += 1
        self.makey.save()
        # interaction.delete()
        super(VoteMakey, self).delete(*args, **kwargs)
class VoteTutorial(AbstractVote):
    """A user's up/down vote on a Tutorial.

    Same vote-toggling/replacement semantics as the review vote models:
    upvotes count +1, downvotes -1 toward ``tutorial.votes``.
    """
    tutorial = models.ForeignKey(Tutorial)
    class Meta:
        app_label = 'catalog'
        # At most one vote per (user, tutorial) pair.
        unique_together = (("user", "tutorial"),)
    def __unicode__(self):
        if self.vote:
            return unicode(self.user) + " upvoted " + self.tutorial.url
        else:
            return unicode(self.user) + " downvoted " + self.tutorial.url
    # def __save_user_interaction(self):
    #     if self.vote:
    #         # interaction = UserInteraction(
    #         #     user=self.user,
    #         #     event=Interaction.upvote_tutorial,
    #         #     event_id=self.id,
    #         #     added_time=timezone.now())
    #     else:
    #         # interaction = UserInteraction(
    #         #     user=self.user,
    #         #     event=Interaction.downvote_tutorial,
    #         #     event_id=self.id,
    #         #     added_time=timezone.now())
    #     interaction.save()
    def save(self, *args, **kwargs):
        # Look for an earlier vote by this user on the same tutorial; the
        # offsets below account for previous_vote.delete()'s own adjustment.
        same_vote = self.__class__.objects.filter(user=self.user,
                                                   tutorial=self.tutorial,)
        if same_vote.exists():
            previous_vote = same_vote[0]
            previous_vote.delete()
            if previous_vote.vote:
                if self.vote:
                    # Same (up)vote again: toggle off, do not re-save.
                    self.tutorial.votes -= 1
                    self.tutorial.save()
                else:
                    # Upvote replaced by downvote.
                    self.tutorial.votes -= 2
                    self.tutorial.save()
                    super(VoteTutorial, self).save(*args, **kwargs)
                    # self.__save_user_interaction()
            else:
                if self.vote:
                    # Downvote replaced by upvote.
                    self.tutorial.votes += 2
                    self.tutorial.save()
                    super(VoteTutorial, self).save(*args, **kwargs)
                    # self.__save_user_interaction()
                else:
                    # Same (down)vote again: toggle off, do not re-save.
                    self.tutorial.votes += 1
                    self.tutorial.save()
        else:
            # First vote by this user on this tutorial.
            if self.vote:
                self.tutorial.votes += 1
                self.tutorial.save()
            else:
                self.tutorial.votes -= 1
                self.tutorial.save()
            super(VoteTutorial, self).save(*args, **kwargs)
            # self.__save_user_interaction()
    def delete(self, *args, **kwargs):
        # Undo this vote's contribution to the cached count before deleting.
        if self.vote:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.upvote_tutorial,
            #     event_id=self.id)
            self.tutorial.votes -= 1
        else:
            # interaction = UserInteraction.objects.get(
            #     user=self.user,
            #     event=Interaction.downvote_tutorial,
            #     event_id=self.id)
            self.tutorial.votes += 1
        self.tutorial.save()
        # interaction.delete()
        super(VoteTutorial, self).delete(*args, **kwargs)
| |
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
# Per-fixture capabilities/constraints used to decide which tests can run
# under which fixture (see compatible()).
FixtureOptions = collections.namedtuple(
    'FixtureOptions',
    'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing')
default_unsecure_fixture_options = FixtureOptions(
    True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'])
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
    'h2_compress': default_unsecure_fixture_options,
    'h2_census': default_unsecure_fixture_options,
    'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
    'h2_full': default_unsecure_fixture_options,
    'h2_full+pipe': default_unsecure_fixture_options._replace(
        platforms=['linux']),
    'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
    'h2_oauth2': default_secure_fixture_options._replace(ci_mac=False),
    'h2_proxy': default_unsecure_fixture_options._replace(includes_proxy=True,
                                                          ci_mac=False),
    'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
        ci_mac=False),
    'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False),
    'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, tracing=True),
    'h2_ssl': default_secure_fixture_options,
    'h2_ssl_proxy': default_secure_fixture_options._replace(includes_proxy=True,
                                                            ci_mac=False),
    'h2_uds': uds_fixture_options,
}
# Per-test requirements matched against FixtureOptions in compatible().
TestOptions = collections.namedtuple(
    'TestOptions', 'needs_fullstack needs_dns proxyable secure traceable cpu_cost')
default_test_options = TestOptions(False, False, True, False, True, 1.0)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
# Relative cpu_cost weight for cheap tests.
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
    'bad_hostname': default_test_options,
    'binary_metadata': default_test_options,
    'call_creds': default_test_options._replace(secure=True),
    'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
    'compressed_payload': default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
    'connectivity': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
    'default_host': default_test_options._replace(needs_fullstack=True,
                                                  needs_dns=True),
    'disappearing_server': connectivity_test_options,
    'empty_batch': default_test_options,
    'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU),
    'hpack_size': default_test_options._replace(proxyable=False,
                                                traceable=False),
    'high_initial_seqno': default_test_options,
    'idempotent_request': default_test_options,
    'invoke_large_request': default_test_options,
    'large_metadata': default_test_options,
    'max_concurrent_streams': default_test_options._replace(proxyable=False),
    'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
    'negative_deadline': default_test_options,
    'no_op': default_test_options,
    'payload': default_test_options._replace(cpu_cost=LOWCPU),
    'ping_pong_streaming': default_test_options,
    'ping': connectivity_test_options._replace(proxyable=False),
    'registered_call': default_test_options,
    'request_with_flags': default_test_options._replace(proxyable=False),
    'request_with_payload': default_test_options,
    'server_finishes_request': default_test_options,
    'shutdown_finishes_calls': default_test_options,
    'shutdown_finishes_tags': default_test_options,
    'simple_delayed_request': connectivity_test_options._replace(cpu_cost=LOWCPU),
    'simple_metadata': default_test_options,
    'simple_request': default_test_options,
    'trailing_metadata': default_test_options,
}
def compatible(f, t):
    """Return True iff test `t` can run under fixture `f`.

    A pair is incompatible when the test needs a capability the fixture
    lacks (fullstack, DNS) or the fixture has a property the test cannot
    tolerate (proxy, tracing).
    """
    fixture = END2END_FIXTURES[f]
    test = END2END_TESTS[t]
    if test.needs_fullstack and not fixture.fullstack:
        return False
    if test.needs_dns and not fixture.dns_resolver:
        return False
    if not test.proxyable and fixture.includes_proxy:
        return False
    if not test.traceable and fixture.tracing:
        return False
    return True
def without(l, e):
    """Return a copy of list `l` with the first occurrence of `e` removed."""
    remaining = list(l)
    remaining.remove(e)
    return remaining
def main():
    """Print (as YAML) the build.json description of all end2end tests.

    Emits a secure 'end2end_tests' library and an insecure
    'end2end_nosec_tests' library, one test target per fixture (plus an
    insecure variant where the fixture allows it), and one test entry for
    every compatible (fixture, test) pair.
    """
    # Dependency sets for the secure and insecure builds.
    sec_deps = [
        'grpc_test_util',
        'grpc',
        'gpr_test_util',
        'gpr'
    ]
    unsec_deps = [
        'grpc_test_util_unsecure',
        'grpc_unsecure',
        'gpr_test_util',
        'gpr'
    ]
    # Renamed from `json` to avoid shadowing the stdlib module name.
    build_json = {
        '#': 'generated with test/end2end/gen_build_json.py',
        'libs': [
            {
                'name': 'end2end_tests',
                'build': 'private',
                'language': 'c',
                'secure': True,
                'src': ['test/core/end2end/end2end_tests.c'] + [
                    'test/core/end2end/tests/%s.c' % t
                    for t in sorted(END2END_TESTS.keys())],
                'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                            'test/core/end2end/end2end_tests.h'],
                'deps': sec_deps,
                'vs_proj_dir': 'test/end2end/tests',
            }
        ] + [
            {
                'name': 'end2end_nosec_tests',
                'build': 'private',
                'language': 'c',
                'secure': False,
                # Insecure library omits the tests that require security.
                'src': ['test/core/end2end/end2end_nosec_tests.c'] + [
                    'test/core/end2end/tests/%s.c' % t
                    for t in sorted(END2END_TESTS.keys())
                    if not END2END_TESTS[t].secure],
                'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                            'test/core/end2end/end2end_tests.h'],
                'deps': unsec_deps,
                'vs_proj_dir': 'test/end2end/tests',
            }
        ],
        'targets': [
            {
                'name': '%s_test' % f,
                'build': 'test',
                'language': 'c',
                'run': False,
                'src': ['test/core/end2end/fixtures/%s.c' % f],
                'platforms': END2END_FIXTURES[f].platforms,
                # Fixtures flagged ci_mac=False are dropped from mac CI.
                'ci_platforms': (END2END_FIXTURES[f].platforms
                                 if END2END_FIXTURES[f].ci_mac else without(
                                     END2END_FIXTURES[f].platforms, 'mac')),
                'deps': [
                    'end2end_tests'
                ] + sec_deps,
                'vs_proj_dir': 'test/end2end/fixtures',
            }
            for f in sorted(END2END_FIXTURES.keys())
        ] + [
            {
                'name': '%s_nosec_test' % f,
                'build': 'test',
                'language': 'c',
                'secure': 'no',
                'src': ['test/core/end2end/fixtures/%s.c' % f],
                'run': False,
                'platforms': END2END_FIXTURES[f].platforms,
                'ci_platforms': (END2END_FIXTURES[f].platforms
                                 if END2END_FIXTURES[f].ci_mac else without(
                                     END2END_FIXTURES[f].platforms, 'mac')),
                'deps': [
                    'end2end_nosec_tests'
                ] + unsec_deps,
                'vs_proj_dir': 'test/end2end/fixtures',
            }
            for f in sorted(END2END_FIXTURES.keys())
            if not END2END_FIXTURES[f].secure
        ],
        'tests': [
            {
                'name': '%s_test' % f,
                'args': [t],
                'exclude_configs': [],
                'platforms': END2END_FIXTURES[f].platforms,
                'ci_platforms': (END2END_FIXTURES[f].platforms
                                 if END2END_FIXTURES[f].ci_mac else without(
                                     END2END_FIXTURES[f].platforms, 'mac')),
                'flaky': False,
                'language': 'c',
                'cpu_cost': END2END_TESTS[t].cpu_cost,
            }
            for f in sorted(END2END_FIXTURES.keys())
            for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
        ] + [
            {
                'name': '%s_nosec_test' % f,
                'args': [t],
                'exclude_configs': [],
                'platforms': END2END_FIXTURES[f].platforms,
                'ci_platforms': (END2END_FIXTURES[f].platforms
                                 if END2END_FIXTURES[f].ci_mac else without(
                                     END2END_FIXTURES[f].platforms, 'mac')),
                'flaky': False,
                'language': 'c',
                'cpu_cost': END2END_TESTS[t].cpu_cost,
            }
            for f in sorted(END2END_FIXTURES.keys())
            if not END2END_FIXTURES[f].secure
            for t in sorted(END2END_TESTS.keys())
            if compatible(f, t) and not END2END_TESTS[t].secure
        ],
        'core_end2end_tests': dict(
            (t, END2END_TESTS[t].secure)
            for t in END2END_TESTS.keys()
        )
    }
    # Fixed: was the Python-2-only `print yaml.dump(...)` statement form;
    # the call form works on both Python 2 and 3.
    print(yaml.dump(build_json))
if __name__ == '__main__':  # script entry point
    main()
| |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#Regression: CodePlex 15715
#Do not move or remove these two lines
# These dir() calls must execute at import time, before any test runs,
# to reproduce the CodePlex 15715 regression.
x = dir(dict)
x = dir(dict.fromkeys)
from iptest.assert_util import *
import operator
def test_sanity():
    """Basic dict behavior: iteration, lookup, get and setdefault."""
    items = 0
    d = {'key1': 'value1', 'key2': 'value2'}
    for key, value in d.iteritems():
        items += 1
        Assert((key, value) == ('key1', 'value1') or (key,value) == ('key2', 'value2'))
    Assert(items == 2)
    Assert(d["key1"] == "value1")
    Assert(d["key2"] == "value2")
    # Helper: a plain subscript so AssertError can capture the KeyError.
    def getitem(d,k):
        d[k]
    AssertError(KeyError, getitem, d, "key3")
    x = d.get("key3")
    Assert(x == None)
    Assert(d["key1"] == d.get("key1"))
    Assert(d["key2"] == d.get("key2"))
    Assert(d.get("key3", "value3") == "value3")
    # get() with a default must not have inserted the key.
    AssertError(KeyError, getitem, d, "key3")
    # setdefault inserts and returns the (default) value.
    Assert(d.setdefault("key3") == None)
    Assert(d.setdefault("key4", "value4") == "value4")
    Assert(d["key3"] == None)
    Assert(d["key4"] == "value4")
    # Keyword-argument construction.
    d2= dict(key1 = 'value1', key2 = 'value2')
    Assert(d2['key1'] == 'value1')
#--inherit from a dictionary---------------------------------------------------
def test_dict_inherit():
    """dict subclasses overriding __setitem__ via super() and via dict."""
    # Override delegating through super().
    class MyDict(dict):
        def __setitem__(self, *args):
            super(MyDict, self).__setitem__(*args)
    a = MyDict()
    a[0] = 'abc'
    AreEqual(a[0], 'abc')
    a[None] = 3
    AreEqual(a[None], 3)
    # Override delegating directly to dict.__setitem__.
    class MyDict(dict):
        def __setitem__(self, *args):
            dict.__setitem__(self, *args)
    a = MyDict()
    a[0] = 'abc'
    AreEqual(a[0], 'abc')
    a[None] = 3
    AreEqual(a[None], 3)
#------------------------------------------------------------------------------
# verify function environments, FieldIdDict,
# custom old class dict, and module environments
# all local identical to normal dictionaries
def test_function_environments():
    """Class __dict__ objects of increasing size expose the same dir() as a
    plain dict, regardless of attribute count (exercises IronPython's
    specialized dictionary implementations)."""
    x = {}
    class C: pass
    AreEqual(dir(x), dir(C.__dict__))
    class C:
        xx = 'abc'
        yy = 'def'
        pass
    AreEqual(dir(x), dir(C.__dict__))
    class C:
        x0 = 'abc'
        x1 = 'def'
        x2 = 'aaa'
        x3 = 'aaa'
        pass
    AreEqual(dir(x), dir(C.__dict__))
    class C:
        x0 = 'abc'
        x1 = 'def'
        x2 = 'aaa'
        x3 = 'aaa'
        x4 = 'abc'
        x5 = 'def'
        x6 = 'aaa'
        x7 = 'aaa'
        x0 = 'abc'
        pass
    AreEqual(dir(x), dir(C.__dict__))
    class C:
        x0 = 'abc'
        x1 = 'def'
        x2 = 'aaa'
        x3 = 'aaa'
        x4 = 'abc'
        x5 = 'def'
        x6 = 'aaa'
        x7 = 'aaa'
        x0 = 'abc'
        x10 = 'abc'
        x11 = 'def'
        x12 = 'aaa'
        x13 = 'aaa'
        x14 = 'abc'
        x15 = 'def'
        x16 = 'aaa'
        x17 = 'aaa'
        x10 = 'abc'
        pass
    AreEqual(dir(x), dir(C.__dict__))
    class C:
        x0 = 'abc'
        x1 = 'def'
        x2 = 'aaa'
        x3 = 'aaa'
        x4 = 'abc'
        x5 = 'def'
        x6 = 'aaa'
        x7 = 'aaa'
        x0 = 'abc'
        x10 = 'abc'
        x11 = 'def'
        x12 = 'aaa'
        x13 = 'aaa'
        x14 = 'abc'
        x15 = 'def'
        x16 = 'aaa'
        x17 = 'aaa'
        x10 = 'abc'
        x20 = 'abc'
        x21 = 'def'
        x22 = 'aaa'
        x23 = 'aaa'
        x24 = 'abc'
        x25 = 'def'
        x26 = 'aaa'
        x27 = 'aaa'
        x20 = 'abc'
        x110 = 'abc'
        x111 = 'def'
        x112 = 'aaa'
        x113 = 'aaa'
        x114 = 'abc'
        x115 = 'def'
        x116 = 'aaa'
        x117 = 'aaa'
        x110 = 'abc'
        pass
    AreEqual(dir(x), dir(C.__dict__))
    # Instance dictionaries must also match, before and after adding
    # instance attributes.
    a = C()
    AreEqual(dir(x), dir(a.__dict__))
    a = C()
    a.abc = 'def'
    a.ghi = 'def'
    AreEqual(dir(x), dir(a.__dict__))
    if is_cli:
        # cpython does not have __dict__ at the module level?
        #AreEqual(dir(x), dir(__dict__))
        pass
#####################################################################
## coverage for CustomFieldIdDict
def contains(d, *attrs):
    """Assert that every name in `attrs` is present in `d`, testing both the
    `in` operator and the __contains__ method directly."""
    for name in attrs:
        message = "didn't find " + str(name) + " in " + repr(d)
        Assert(name in d, message)
        Assert(d.__contains__(name), message)
def repeat_on_class(C):
newStyle = "__class__" in dir(C)
c = C()
d = C.__dict__
contains(d, '__doc__', 'x1', 'f1')
## recursive entries & repr
C.abc = d
if not newStyle:
x = repr(d) # shouldn't stack overflow
else:
x = str(d)
Assert(x.find("'abc'") != -1)
if not newStyle:
Assert(x.find("{...}") != -1)
else:
Assert(x.find("'abc': <dictproxy object at") != -1)
del C.abc
keys, values = d.keys(), d.values()
AreEqual(len(keys), len(values))
contains(keys, '__doc__', 'x1', 'f1')
## initial length
l = len(d)
Assert(l > 3)
# add more attributes
def f2(self): return 22
def f3(self): return 33
if not newStyle:
d['f2'] = f2
d['x2'] = 20
AreEqual(len(d), l + 2)
AreEqual(d.__len__(), l + 2)
if not newStyle:
contains(d, '__doc__', 'x1', 'x2', 'f1', 'f2')
contains(d.keys(), '__doc__', 'x1', 'x2', 'f1', 'f2')
else:
contains(d, '__doc__', 'x1', 'f1')
contains(d.keys(), '__doc__', 'x1', 'f1')
AreEqual(d['x1'], 10)
if not newStyle:
AreEqual(d['x2'], 20)
AreEqual(d['f1'](c), 11)
if not newStyle:
AreEqual(d['f2'](c), 22)
AssertError(KeyError, lambda : d['x3'])
AssertError(KeyError, lambda : d['f3'])
## get
AreEqual(d.get('x1'), 10)
if not newStyle:
AreEqual(d.get('x2'), 20)
AreEqual(d.get('f1')(c), 11)
if not newStyle:
AreEqual(d.get('f2')(c), 22)
AreEqual(d.get('x3'), None)
AreEqual(d.get('x3', 30), 30)
AreEqual(d.get('f3'), None)
AreEqual(d.get('f3', f3)(c), 33)
if not newStyle:
## setdefault
AreEqual(d.setdefault('x1'), 10)
AreEqual(d.setdefault('x1', 30), 10)
AreEqual(d.setdefault('f1')(c), 11)
AreEqual(d.setdefault('f1', f3)(c), 11)
AreEqual(d.setdefault('x2'), 20)
AreEqual(d.setdefault('x2', 30), 20)
AreEqual(d.setdefault('f2')(c), 22)
AreEqual(d.setdefault('f2', f3)(c), 22)
AreEqual(d.setdefault('x3', 30), 30)
AreEqual(d.setdefault('f3', f3)(c), 33)
if not newStyle:
## pop
l1 = len(d)
AreEqual(d.pop('x1', 30), 10)
AreEqual(len(d), l1-1)
l1 = len(d)
AreEqual(d.pop('x2', 30), 20)
AreEqual(len(d), l1-1)
l1 = len(d)
AreEqual(d.pop("xx", 70), 70)
AreEqual(len(d), l1)
## has_key
Assert(d.has_key('f1'))
if not newStyle:
Assert(d.has_key('f2'))
Assert(d.has_key('f3'))
Assert(d.has_key('fx') == False)
# subclassing, overriding __getitem__, and passing to
# eval
dictType = type(d)
try:
class newDict(dictType):
def __getitem__(self, key):
if key == 'abc':
return 'def'
return super(self, dictType).__getitem__(key)
except TypeError, ex:
if not newStyle:
Assert(ex.message.find('cannot derive from sealed or value types') != -1, ex.message)
else:
Assert(ex.message.find('Error when calling the metaclass bases') != -1, ex.message)
else:
try:
nd = newDict()
except TypeError, e:
if sys.platform == 'cli':
import clr
if clr.GetClrType(dictType).ToString() == 'IronPython.Runtime.Types.NamespaceDictionary':
Fail("Error! Threw TypeError when creating newDict deriving from NamespaceDictionary")
else:
AreEqual(eval('abc', {}, nd), 'def')
############### IN THIS POINT, d LOOKS LIKE ###############
## {'f1': f1, 'f2': f2, 'f3': f3, 'x3': 30, '__doc__': 'This is comment', '__module__': '??'}
## iteritems
lk = []
for (k, v) in d.iteritems():
lk.append(k)
exp = None
if k == 'f1': exp = 11
elif k == 'f2': exp == 22
elif k == 'f3': exp == 33
if exp <> None:
AreEqual(v(c), exp)
if not newStyle:
contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
else:
contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
# iterkeys
lk = []
for k in d.iterkeys():
lk.append(k)
if not newStyle:
contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
else:
contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
# itervalues
for v in d.itervalues():
if callable(v):
exp = v(c)
Assert(exp in [11, 22, 33])
elif v is str:
Assert(v == 'This is comment')
elif v is int:
Assert(v == 30)
if not newStyle:
## something fun before destorying it
l1 = len(d)
d[dict] = 3 # object as key
AreEqual(len(d), l1+1)
l1 = len(d)
d[int] = 4 # object as key
if is_cli or is_silverlight:
print "CodePlex 16811"
return
AreEqual(len(d), l1+1)
l1 = len(d)
del d[int]
AreEqual(len(d), l1-1)
l1 = len(d)
del d[dict]
AreEqual(len(d), l1-1)
l1 = len(d)
del d['x3']
AreEqual(len(d), l1-1)
l1 = len(d)
d.popitem()
AreEqual(len(d), l1-1)
## object as key
d[int] = int
d[str] = "str"
AreEqual(d[int], int)
AreEqual(d[str], "str")
d.clear()
AreEqual(len(d), 0)
AreEqual(d.__len__(), 0)
#------------------------------------------------------------------------------
def test_customfieldiddict_old():
    """Run the shared __dict__ protocol checks against an old-style class.

    C's docstring, attribute x1 and method f1 are the fixture values that
    repeat_on_class asserts on -- do not change them independently.
    """
    class C:
        '''This is comment'''
        x1 = 10
        def f1(self): return 11
    repeat_on_class(C)
def test_customfieldiddict_new():
    """Run the shared __dict__ protocol checks against a new-style class.

    Same fixture shape as test_customfieldiddict_old, but deriving from
    object so repeat_on_class exercises the new-style code paths.
    """
    class C(object):
        '''This is comment'''
        x1 = 10
        def f1(self): return 11
    repeat_on_class(C)
#------------------------------------------------------------------------------
def test_customfieldiddict_fromkeys():
    """dict.fromkeys must accept a class __dict__ (old- and new-style) as
    its key source, mapping every key to None or to the supplied value."""
    def new_repeat_on_class(C):
        d1 = C.__dict__
        l1 = len(d1)
        # fromkeys with no value argument: every key maps to None
        d2 = dict.fromkeys(d1)
        l2 = len(d2)
        AreEqual(l1, l2)
        AreEqual(d2['x'], None)
        AreEqual(d2['f'], None)
        # fromkeys with an explicit default value
        d2 = dict.fromkeys(d1, 10)
        l2 = len(d2)
        AreEqual(l1, l2)
        AreEqual(d2['x'], 10)
        AreEqual(d2['f'], 10)
    class C:
        x = 10
        def f(self): pass
    new_repeat_on_class(C)
    class C(object):
        x = 10
        def f(self): pass
    new_repeat_on_class(C)
#------------------------------------------------------------------------------
def test_customfieldiddict_compare():
    """Equality/ordering of class __dict__ objects (Python 2 dict ordering
    semantics), plus: a new-style class __dict__ must reject item writes."""
    def new_repeat_on_class(C1, C2):
        d1 = C1.__dict__
        d2 = C2.__dict__
        # object as key
        d1[int] = int
        d2[int] = int
        # the two 'f' entries are distinct function objects, so d1 != d2
        Assert(d1 <> d2)
        d2['f'] = d1['f']
        Assert([x for x in d1] == [x for x in d2])
        # fromkeys normalizes all values to None so only keys are compared
        Assert(d1.fromkeys([x for x in d1]) >= d2.fromkeys([x for x in d2]))
        Assert(d1.fromkeys([x for x in d1]) <= d2.fromkeys([x for x in d2]))
        # an extra key in d1 makes it strictly "greater" (Py2 dict ordering)
        d1['y'] = 20
        d1[int] = int
        Assert(d1.fromkeys([x for x in d1]) > d2.fromkeys([x for x in d2]))
        Assert(d1.fromkeys([x for x in d1]) >= d2.fromkeys([x for x in d2]))
        Assert(d2.fromkeys([x for x in d2]) < d1.fromkeys([x for x in d1]))
        Assert(d2.fromkeys([x for x in d2]) <= d1.fromkeys([x for x in d1]))
    class C1:
        x = 10
        def f(self): pass
    class C2:
        x = 10
        def f(self): pass
    new_repeat_on_class(C1, C2)
    def t_func():
        class C1(object):
            x = 10
            def f(self): pass
        # writing a non-string key into a new-style __dict__ must raise
        C1.__dict__[1] = 2
    AssertError(TypeError, t_func)
@skip("win32")
def test_dict_to_idict():
    """verify dicts can be converted to IDictionaries

    IronPython-only: DictConversion.ToIDictionary flattens the mapping into
    its keys and values; we compare order-insensitively by sorting.
    """
    load_iron_python_test()
    from IronPythonTest import DictConversion
    class MyDict(dict): pass
    class KOld: pass
    class KNew(object): pass
    class KOldDerived(KOld): pass
    class KNewDerived(KNew): pass
    # plain dicts plus class __dict__ objects of every flavor
    test_dicts = [
        {},
        {1:100},
        {None:None},
        {object:object},
        {1:100, 2:200},
        {1:100, 2:200, 3:300, 4:400},
        MyDict.__dict__,
        KOld.__dict__,
        KNew.__dict__,
        KOldDerived.__dict__,
        KNewDerived.__dict__,
        ]
    for temp_dict in test_dicts:
        # expected contents: all keys plus all values (Py2 keys()/values() are lists)
        expected = temp_dict.keys() + temp_dict.values()
        expected.sort()
        # conversion of the mapping itself...
        to_idict = list(DictConversion.ToIDictionary(temp_dict))
        to_idict.sort()
        AreEqual(to_idict, expected)
        # ...and of a dict subclass built from the same data
        to_idict = list(DictConversion.ToIDictionary(MyDict(temp_dict)))
        to_idict.sort()
        AreEqual(to_idict, expected)
#####################################################################
## coverage for FieldIdDict
def test_fieldiddict():
    """Coverage for FieldIdDict: a function's __dict__ must support the full
    mapping protocol with both string and arbitrary-object keys (here: the
    type objects int/dict/str are used as keys alongside normal strings)."""
    def func(): pass
    d = func.__dict__
    d['x1'] = 10
    d['f1'] = lambda : 11
    d[int] = "int"       # arbitrary object as key
    d[dict] = {2:20}
    keys, values = d.keys(), d.values()
    AreEqual(len(keys), len(values))
    contains(keys, 'x1', 'f1', int, dict)
    ## initial length
    l = len(d)
    Assert(l == 4)
    # add more attributes
    d['x2'] = 20
    d['f2'] = lambda x: 22
    AreEqual(len(d), l + 2)
    AreEqual(d.__len__(), l + 2)
    contains(d, 'x1', 'x2', 'f1', 'f2', int, dict)
    contains(d.keys(), 'x1', 'x2', 'f1', 'f2', int, dict)
    AreEqual(d['x1'], 10)
    AreEqual(d['x2'], 20)
    AreEqual(d['f1'](), 11)
    AreEqual(d['f2'](9), 22)
    # missing keys raise KeyError on subscript...
    AssertError(KeyError, lambda : d['x3'])
    AssertError(KeyError, lambda : d['f3'])
    ## get -- ...but return None/default via get()
    AreEqual(d.get('x1'), 10)
    AreEqual(d.get('x2'), 20)
    AreEqual(d.get('f1')(), 11)
    AreEqual(d.get('f2')(1), 22)
    def f3(): return 33
    AreEqual(d.get('x3'), None)
    AreEqual(d.get('x3', 30), 30)
    AreEqual(d.get('f3'), None)
    AreEqual(d.get('f3', f3)(), 33)
    ## setdefault: existing keys untouched, missing keys inserted
    AreEqual(d.setdefault('x1'), 10)
    AreEqual(d.setdefault('x1', 30), 10)
    AreEqual(d.setdefault('f1')(), 11)
    AreEqual(d.setdefault('f1', f3)(), 11)
    AreEqual(d.setdefault('x2'), 20)
    AreEqual(d.setdefault('x2', 30), 20)
    AreEqual(d.setdefault('f2')(1), 22)
    AreEqual(d.setdefault('f2', f3)(1), 22)
    AreEqual(d.setdefault('x3', 30), 30)
    AreEqual(d.setdefault('f3', f3)(), 33)
    ## pop: present keys shrink the dict by one...
    l1 = len(d); AreEqual(d.pop('x1', 30), 10)
    AreEqual(len(d), l1-1)
    l1 = len(d); AreEqual(d.pop('x2', 30), 20)
    AreEqual(len(d), l1-1)
    l1 = len(d); AreEqual(d.pop(int, 70), "int")
    AreEqual(len(d), l1-1)
    # ...missing keys return the default without shrinking
    l1 = len(d); AreEqual(d.pop("xx", 70), 70)
    AreEqual(len(d), l1)
    ## has_key
    Assert(d.has_key('f1'))
    Assert(d.has_key('f2'))
    Assert(d.has_key('f3'))
    Assert(d.has_key(dict))
    Assert(d.has_key('fx') == False)
    ############### IN THIS POINT, d LOOKS LIKE ###############
    # f1, f2, f3, x3, dict as keys
    ## iteritems
    lk = []
    for (k, v) in d.iteritems():
        lk.append(k)
        if k == 'f1': AreEqual(v(), 11)
        elif k == 'f2': AreEqual(v(1), 22)
        elif k == 'f3': AreEqual(v(), 33)
        elif k == 'x3': AreEqual(v, 30)
        elif k == dict: AreEqual(v, {2:20})
    contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
    # iterkeys
    lk = []
    for k in d.iterkeys():
        lk.append(k)
    contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
    # itervalues
    for v in d.itervalues():
        if callable(v):
            # try both arities; whichever call succeeds sets exp
            try: exp = v(1)
            except: pass
            try: exp = v()
            except: pass
            Assert(exp in [11, 22, 33])
        elif v is dict:
            Assert(v == {2:20})
        elif v is int:
            Assert(v == 30)
    ## something fun before destorying it
    l1 = len(d); d[int] = 4 # object as key
    AreEqual(len(d), l1+1)
    l1 = len(d); del d[int]
    AreEqual(len(d), l1-1)
    l1 = len(d); del d[dict]
    AreEqual(len(d), l1-1)
    l1 = len(d); del d['x3']
    AreEqual(len(d), l1-1)
    l1 = len(d); popped_item = d.popitem()
    AreEqual(len(d), l1-1)
    ## object as key
    d[int] = int
    d[str] = "str"
    AreEqual(d[int], int)
    AreEqual(d[str], "str")
    d.clear()
    AreEqual(len(d), 0)
    AreEqual(d.__len__(), 0)
    d[int] = int
    AreEqual(len(d), 1)
    ## comparison (Python 2 dict ordering semantics)
    def func1(): pass
    def func2(): pass
    d1 = func1.__dict__
    d2 = func2.__dict__
    d1['x'] = 10
    d2['x'] = 30
    d1[int] = int
    d2[int] = int
    # object as key
    Assert(d1 <> d2)
    d2['x'] = 10
    Assert(d1 == d2)
    Assert(d1 >= d2)
    Assert(d1 <= d2)
    # an extra entry makes d1 strictly greater
    d1['y'] = 20
    d1[dict] = "int"
    Assert(d1 > d2)
    Assert(d1 >= d2)
    Assert(d2 < d1)
    Assert(d2 <= d1)
#####################################################################
# subclassing dict, overriding __init__
def test_subclass_dict_override__init__():
    """A dict subclass may replace __init__ entirely; dict.__init__ on an
    existing dict merges entries; keyword construction fills the dict."""
    class foo(dict):
        def __init__(self, abc):
            self.abc = abc
    inst = foo('abc')
    AreEqual(inst.abc, 'abc')
    # make sure dict.__init__ works: it merges, it does not clear
    target = {}
    target.__init__({'abc': 'def'})
    AreEqual(target, {'abc': 'def'})
    target.__init__({'abcd': 'defg'})
    AreEqual(target, {'abc': 'def', 'abcd': 'defg'})
    # keyword arg contruction
    # single kw-arg, should go into dict
    AreEqual(dict(b=2), {'b': 2})
    # dict value to init, Plus kw-arg
    AreEqual(dict({'a': 3}, b=2), {'a': 3, 'b': 2})
    # more than one
    AreEqual(dict({'a': 3}, b=2, c=5), {'a': 3, 'b': 2, 'c': 5})
    # a second positional mapping is rejected
    try:
        dict({'a': 3}, {'b': 2}, c=5)
        AssertUnreachable()
    except TypeError:
        pass
#####################################################################
def test_DictionaryUnionEnumerator():
    """IronPython-only: enumerating an instance __dict__ through .NET's
    IDictionary interface must union string attributes (set via the object)
    and non-string keys (set via the dict), and Key must raise SystemError
    before MoveNext/after exhaustion."""
    if is_cli == False:
        return
    class C(object): pass
    c = C()
    d = c.__dict__
    import System
    # Check empty enumerator
    e = System.Collections.IDictionary.GetEnumerator(d)
    AssertError(SystemError, getattr, e, "Key")
    AreEqual(e.MoveNext(), False)
    AssertError(SystemError, getattr, e, "Key")
    # Add non-string attribute
    d[1] = 100
    e = System.Collections.IDictionary.GetEnumerator(d)
    AssertError(SystemError, getattr, e, "Key")
    AreEqual(e.MoveNext(), True)
    AreEqual(e.Key, 1)
    AreEqual(e.MoveNext(), False)
    AssertError(SystemError, getattr, e, "Key")
    # Add string attribute
    c.attr = 100
    e = System.Collections.IDictionary.GetEnumerator(d)
    AssertError(SystemError, getattr, e, "Key")
    AreEqual(e.MoveNext(), True)
    key1 = e.Key
    AreEqual(e.MoveNext(), True)
    key2 = e.Key
    # both keys present; enumeration order is unspecified
    AreEqual((key1, key2) == (1, "attr") or (key1, key2) == ("attr", 1), True)
    AreEqual(e.MoveNext(), False)
    AssertError(SystemError, getattr, e, "Key")
    # Remove non-string attribute
    del d[1]
    e = System.Collections.IDictionary.GetEnumerator(d)
    AssertError(SystemError, getattr, e, "Key")
    AreEqual(e.MoveNext(), True)
    AreEqual(e.Key, "attr")
    AreEqual(e.MoveNext(), False)
    AssertError(SystemError, getattr, e, "Key")
    # Remove string attribute and check empty enumerator
    del c.attr
    e = System.Collections.IDictionary.GetEnumerator(d)
    AssertError(SystemError, getattr, e, "Key")
    AreEqual(e.MoveNext(), False)
    AssertError(SystemError, getattr, e, "Key")
def test_same_but_different():
    """Test case checks that when two values who are logically different but share hash code & equality
    result in only a single entry"""
    # int -10 and long -10L hash and compare equal, so the literal collapses
    # to one entry whose value is the last one written (1)
    AreEqual({-10:0, -10L:1}, {-10:1})
#####################################################################
def test_module_dict():
    """A module's __dict__ must behave as a mapping and expose the module's
    own top-level names (Py2: operator.isMappingType, list-returning keys())."""
    me = sys.modules[__name__]
    moduleDict = me.__dict__
    AreEqual(operator.isMappingType(moduleDict), True)
    AreEqual(moduleDict.__contains__("test_module_dict"), True)
    AreEqual(moduleDict["test_module_dict"], test_module_dict)
    AreEqual(moduleDict.keys().__contains__("test_module_dict"), True)
def test_eval_locals_simple():
    """eval must route name lookups through a custom locals mapping's
    overridden __getitem__."""
    class Locals(dict):
        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError, e:
                # any unknown name resolves to 'abc'
                return 'abc'
    locs = Locals()
    AreEqual(eval("unknownvariable", globals(), locs), 'abc')
def test_key_error():
    """KeyError raised by dict subscript/delete and set.remove must carry
    the offending key as args[0], for every key type tried here."""
    class c: pass
    class d(object): pass
    for key in ['abc', 1, c(), d(), 1.0, 1L]:
        try:
            {}[key]
        except KeyError, e:
            AreEqual(e.args[0], key)
        try:
            del {}[key]
        except KeyError, e:
            AreEqual(e.args[0], key)
        try:
            set([]).remove(key)
        except KeyError, e:
            AreEqual(e.args[0], key)
def test_contains():
    """'in' on a dict subclass must dispatch to the overridden __contains__;
    plain item assignment must not."""
    class ContainsDict(dict):
        was_called = False
        def __contains__(self, key):
            ContainsDict.was_called = True
            return dict.__contains__(self, key)
    tracked = ContainsDict()
    tracked["stuff"] = 1
    # __setitem__ alone never triggers __contains__
    AreEqual(ContainsDict.was_called, False)
    # both a miss and a hit go through the override
    AreEqual("nothing" in tracked, False)
    AreEqual("stuff" in tracked, True)
    AreEqual(ContainsDict.was_called, True)
def test_stdtypes_dict():
    """The __dict__ of every builtin type must be read-only: item assignment
    with any key type raises TypeError."""
    temp_types = [ int,
                   long,
                   float,
                   complex,
                   bool,
                   str,
                   unicode,
                   basestring,
                   list,
                   tuple,
                   xrange,
                   dict,
                   set,
                   frozenset,
                   type,
                   object,
                   ] #+ [eval("types." + x) for x in dir(types) if x.endswith("Type")]
    if not is_silverlight:
        temp_types.append(file)
    # a spread of key types: hashable and (for [] ) unhashable alike
    temp_keys = [ None, -1, 0, 1, 2.34, "", "None", int, object, test_stdtypes_dict, [], (None,)]
    for temp_type in temp_types:
        for temp_key in temp_keys:
            def tFunc(): temp_type.__dict__[temp_key] = 0
            AssertError(TypeError, tFunc)
@skip("silverlight")
def test_main_dict():
    """Iterating __main__.__dict__ must yield exactly the same names as
    globals().keys() (order-insensitive)."""
    import __main__
    #just make sure this doesn't throw...
    t_list = []
    for w in __main__.__dict__: t_list.append(w)
    t_list.sort()
    g_list = globals().keys()
    g_list.sort()
    AreEqual(t_list, g_list)
def test_update():
    """dict.update with positional mapping and/or keyword args.

    Each case is (start_dict, positional args tuple, kwargs dict, expected
    final dict).
    """
    test_cases = (
        #N changes with an empty dict
        ({}, (), {}, {}),
        ({}, ({'k':'v'},), {}, {'k':'v'}),
        ({}, (), {'k':'v'}, {'k':'v'}),
        ({}, ({'k':'v', 'x':'y'},), {}, {'k':'v', 'x':'y'}),
        ({}, (), {'k':'v', 'x':'y'}, {'k':'v', 'x':'y'}),
        ({}, ({'k':'v'},), {'x':'y'}, {'k':'v', 'x':'y'}),
        #N changes with one pre-existing dict element
        ({'a':'b'}, (), {}, {'a':'b'}),
        ({'a':'b'}, ({'k':'v'},), {}, {'a':'b', 'k':'v'}),
        ({'a':'b'}, (), {'k':'v'}, {'a':'b', 'k':'v'}),
        ({'a':'b'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'k':'v', 'x':'y'}),
        ({'a':'b'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
        ({'a':'b'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
        #N changes with one pre-existing dict element
        ({'a':'b', 'c':'d'}, (), {}, {'a':'b', 'c':'d'}),
        ({'a':'b', 'c':'d'}, ({'k':'v'},), {}, {'a':'b', 'c':'d', 'k':'v'}),
        ({'a':'b', 'c':'d'}, (), {'k':'v'}, {'a':'b', 'c':'d', 'k':'v'}),
        ({'a':'b', 'c':'d'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
        ({'a':'b', 'c':'d'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
        ({'a':'b', 'c':'d'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
    )
    for start_dict, dict_param, kw_params, expected in test_cases:
        try:
            start_dict.update(*dict_param, **kw_params)
        except Exception, e:
            # report which case blew up before re-raising
            print "ERROR:", start_dict, ".update(*", dict_param, ", **", kw_params, ") failed!"
            raise e
        AreEqual(start_dict, expected)
def test_update_argnames():
    """dict.update must accept keyword names that collide with the names of
    its own parameters ('other', 'otherArgs')."""
    # each pair: kwargs passed to update -> expected resulting dict
    for kwargs, expected in (
        ({"b": 1}, {"b": 1}),
        ({"other": 1}, {"other": 1}),
    ):
        result = {}
        result.update(**kwargs)
        AreEqual(result, expected)
    # positional mapping plus a colliding keyword at the same time
    result = {}
    result.update({"other": 1}, otherArgs=2)
    AreEqual(result, {"other": 1, "otherArgs": 2})
def test_update_no_setitem():
    # update doesn't call __setitem__
    class mydict(dict):
        def __init__(self, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)
            # flag flipped (and an exception raised) if __setitem__ ever runs
            self.setcalled = False
        def __setitem__(self, index, value):
            self.setcalled = True
            raise Exception()
    d = mydict()
    # updating from a subclass instance and from a plain dict: neither path
    # may go through the override
    d.update(mydict(abc=2))
    AreEqual(d.setcalled, False)
    d.update({'foo': 2})
    AreEqual(d.setcalled, False)
def test_keys_not_as_property():
    """Iterating over the bound method object itself (mapping.keys without
    calling it) must raise TypeError; either message text is accepted."""
    def f():
        mapping = { 10: 10}
        for k in mapping.keys: pass
    AssertErrorWithMessages(TypeError,
                            "iteration over non-sequence of type builtin_function_or_method",
                            "'builtin_function_or_method' object is not iterable",
                            f)
def test_dict_class_dictionary():
    """dict(K.__dict__) must copy class members and methods for both old-
    and new-style classes; new-style classes also carry the implicit
    __dict__/__weakref__/__doc__/__module__ entries."""
    class KOld:
        KLASS_MEMBER = 3.14
        def aFunc(): pass
        def aMethod(self): pass
    class KNew(object):
        KLASS_MEMBER = 3.14
        def aFunc(): pass
        def aMethod(self): pass
    for K in [KOld, KNew]:
        temp_dict = dict(K.__dict__)
        #class member has the correct value?
        AreEqual(K.__dict__["KLASS_MEMBER"], 3.14)
        AreEqual(temp_dict["KLASS_MEMBER"], 3.14)
        #methods show up?
        for func_name in ["aFunc", "aMethod"]:
            Assert(func_name in K.__dict__.keys())
            Assert(func_name in temp_dict.keys())
    # implicit entries that only a new-style class __dict__ carries
    expected_keys = [ '__module__', 'KLASS_MEMBER', 'aFunc', 'aMethod',
                      '__dict__',
                      '__weakref__', '__doc__']
    for expected_key in expected_keys:
        Assert(KNew.__dict__.has_key(expected_key), expected_key)
        Assert(temp_dict.has_key(expected_key), expected_key)
def test_cp15882():
    """Unhashable objects must be rejected as dict keys; hashable ones must
    round-trip through set/get/delete (both del and __delitem__)."""
    x = {}
    #negative cases: lists and dicts are unhashable
    for bad_stuff in [
            [1],
            {}, {1:1}, {(1,2): 1},
            ]:
        try:
            x[bad_stuff] = 1
            Fail(str(bad_stuff) + " is unhashable")
        except TypeError:
            AreEqual(x, {})
    #positive cases
    # NOTE: entries like (None), (-1), (xrange(3)) are just parenthesized
    # scalars, not tuples -- only (1, 2)-style entries are tuple keys
    for stuff in [
            (), (None),
            (-1), (0), (1), (2),
            (1, 2), (1, 2, 3),
            xrange(3), 1j, object, test_cp15882,
            (xrange(3)), (1j), (object), (test_cp15882),
            (()), ((())),
            ]:
        # twice with del ...
        for i in xrange(2):
            x[stuff] = 1
            AreEqual(x[stuff], 1)
            del x[stuff]
            AreEqual(x, {})
            AssertError(KeyError, x.__delitem__, stuff)
        # ... and twice with explicit __delitem__
        for i in xrange(2):
            x[stuff] = 1
            AreEqual(x[stuff], 1)
            x.__delitem__(stuff)
            AreEqual(x, {})
            AssertError(KeyError, x.__delitem__, stuff)
def test_comparison_operators():
    """Rich ordering comparisons between a dict and any non-dict (or another
    dict, via the dunder directly) must return NotImplemented."""
    x = {2:3}
    y = {2:4}
    for oper in ('__lt__', '__gt__', '__le__', '__ge__'):
        for data in (y, None, 1, 1.0, 1L, (), [], 1j, "abc"):
            AreEqual(getattr(x, oper)(data), NotImplemented)
def test_cp16519():
    """Setting, reading and deleting an attribute must work on __main__,
    on a builtin module (sys) and on a package (testpkg1)."""
    __main__ = __import__(__name__)
    __main__.Dict = {"1": "a"}
    AreEqual(__main__.Dict["1"], "a")
    del __main__.Dict
    import sys
    sys.Dict = {"1": "b"}
    AreEqual(sys.Dict["1"], "b")
    del sys.Dict
    import testpkg1
    testpkg1.Dict = {"1": "c"}
    AreEqual(testpkg1.Dict["1"], "c")
    del testpkg1.Dict
def test_dict_equality_lookup():
    """dictionaries check object equality before running normal equality"""
    # a key that claims never to equal anything, not even itself
    class x(object):
        def __eq__(self, other):
            return False
        def __ne__(self, other):
            return True
    key = x()
    table = {}
    table[key] = 42
    # lookup with the identical object must still succeed via the
    # identity fast-path, despite __eq__ returning False
    AreEqual(table[key], 42)
def test_missing():
    """__missing__ must only be consulted by __getitem__ -- never by
    setdefault/get/pop, and never by equality comparison."""
    class Foo(dict):
        def __missing__(self, key):
            raise TypeError('Foo.__missing__ should not be called')
    probe = Foo()
    # every one of these would explode if __missing__ were invoked
    AreEqual(probe.setdefault(1, 2), 2)
    AreEqual(probe.get(2), None)
    AreEqual(probe.get(2, 3), 3)
    AssertError(KeyError, probe.pop, 3)
    AreEqual(probe.pop(3, 4), 4)
    plain = {2: 3}
    for subject in (Foo({'abc': 3}), Foo()):
        Assert(plain != subject)
        Assert(subject != plain)
        AreEqual(plain.__eq__(subject), False)
        AreEqual(subject.__eq__(plain), False)
def test_cp29914():
    # keyword name 'o' must not collide with dict's own constructor internals
    AreEqual(dict(o=42), {'o':42})
def test_dict_comp():
    # NOTE(review): dead code -- this placeholder is immediately shadowed by
    # the full test_dict_comp definition directly below and never runs;
    # consider deleting it.
    pass
def test_dict_comp():
    """Dict comprehension scoping: the loop variable must not leak into the
    enclosing function or class scope, and locals()/exec interactions must
    resolve names correctly (Python 2 semantics)."""
    AreEqual({locals()['x'] : locals()['x'] for x in (2,3,4)}, {2:2, 3:3, 4:4})
    # the comprehension variable must not clobber an outer x
    x = 100
    {x:x for x in (2,3,4)}
    AreEqual(x, 100)
    # ...nor leak into a class body
    class C:
        {x:x for x in (2,3,4)}
    AreEqual(hasattr(C, 'x'), False)
    class C:
        abc = {locals()['x']:locals()['x'] for x in (2,3,4)}
    AreEqual(C.abc, {2:2,3:3,4:4})
    # same comprehensions run via exec with supplied globals/locals dicts
    d = {}
    exec compile("abc = {locals()['x']:locals()['x'] for x in (2,3,4)}", 'exec', 'exec') in d, d
    AreEqual(d['abc'], {2:2,3:3,4:4})
    d = {'y':42}
    exec compile("abc = {y:y for x in (2,3,4)}", 'exec', 'exec') in d, d
    AreEqual(d['abc'], {42:42})
    d = {'y':42, 't':(2,3,42)}
    exec compile("abc = {y:y for x in t if x == y}", 'exec', 'exec') in d, d
    AreEqual(d['abc'], {42:42})
    # outer names are visible inside the comprehension...
    t = (2,3,4)
    v = 2
    abc = {v:v for x in t}
    AreEqual(abc, {2:2})
    abc = {x:x for x in t if x == v}
    AreEqual(abc, {2:2})
    # ...including from a nested function scope
    def f():
        abc = {x:x for x in t if x == v}
        AreEqual(abc, {2:2})
    f()
    def f():
        abc = {v:v for x in t}
        AreEqual(abc, {2:2})
    # ...and from a class body
    class C:
        abc = {v:v for x in t}
        AreEqual(abc, {2:2})
    class C:
        abc = {x:x for x in t if x == v}
        AreEqual(abc, {2:2})
def test_cp32527():
    '''test for duplicate key in dict under specific hash value conditions'''
    d = {'1': 1, '2': 1, '3': 1, 'a7': 1, 'a8': 1}
    #d now has 7 buckets internally, and computed hash for a7 and a8 keys will land on same starting bucket index
    #recycle the a7 bucket
    d.pop('a7')
    #attempt to update the a8 bucket, which now comes after the recycled a7
    d['a8'] = 5
    #if working properly, there will now be a recycled bucket (former home of a7) and a single a8 bucket
    #if not working properly, there will instead be two a8 buckets
    expected = 1
    # Py2 keys() returns a list, so count() detects a duplicated key
    actual = d.keys().count('a8')
    AreEqual(actual, expected)
# Module entry point: discover and run every test_* function above via the
# iptest runner.
run_test(__name__)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, client configuration and the model
        (de)serializers used by every operation in this group."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE request of the long-running delete
        operation. Accepts 200/202/204; raises HttpResponseError otherwise.
        Returns None, or cls(pipeline_response, None, {}) if a custom
        ``cls`` callback was supplied."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # caller-supplied error_map entries override these defaults
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial DELETE; cls=lambda keeps the
            # raw pipeline response for the poller instead of a deserialized body.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # these kwargs were consumed by the initial call and must not reach polling
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # delete returns no body; only invoke a caller-supplied cls hook
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # resume a previously started LRO from its continuation token
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> "_models.RouteFilterRule":
        """Gets the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RouteFilterRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_07_01.models.RouteFilterRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        # caller-supplied error_map entries override these defaults
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # only 200 carries a RouteFilterRule body
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        route_filter_rule_parameters: "_models.RouteFilterRule",
        **kwargs: Any
    ) -> "_models.RouteFilterRule":
        """Send the initial PUT request of the long-running create-or-update
        operation. Accepts 200 (updated) or 201 (created), deserializing a
        RouteFilterRule from either; raises HttpResponseError otherwise."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
        # caller-supplied error_map entries override these defaults
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # serialize the rule model into the request body
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Creates or updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
     rule operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2020_07_01.models.RouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # A continuation token means we are resuming a previously started LRO, so
    # the initial PUT request is only issued when no token was supplied.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            # Pass the raw pipeline response through untouched; final
            # deserialization happens in get_long_running_output below.
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were only meaningful for the initial request; drop them so
    # they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the LRO's final response into a RouteFilterRule (or
        # hand the raw pieces to the caller-supplied cls, if any).
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Select the polling strategy: True -> default ARM polling (final state
    # fetched via the Azure-AsyncOperation header), False -> no polling,
    # anything else is treated as a user-supplied polling method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from the saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
def list_by_route_filter(
    self,
    resource_group_name: str,
    route_filter_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
    """Gets all RouteFilterRules in a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.RouteFilterRuleListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the first page uses this operation's URL
        # template; later pages use the service-provided next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_route_filter.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page or None, items).
        deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, raising a typed error on non-200 responses.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| |
#! /usr/bin/env python
"""
Module for writing PREMIS metadata
"""
import os
import io
import uuid
from datetime import datetime
import xml.etree.ElementTree as ET
import pytz
from lxml import etree
from isolyzer import isolyzer
from . import config
from .mdaudio import getAudioMetadata
from .shared import makeHumanReadable
from .shared import add_ns_prefix
def addCreationEvent(log):
    """Generate a PREMIS creation event using info from the log file of the
    creation application (IsoBuster or dBpoweramp).

    The full log text becomes the eventOutcomeDetailNote, and the log file's
    last-modified time (localized to Europe/Amsterdam) becomes the
    eventDateTime.

    :param log: path to the creation application's log file
    :returns: lxml ``premis:event`` element
    """
    # Read contents of log to a text string; the 'with' block closes the
    # file for us (an explicit close() here would be redundant).
    with io.open(log, "r", encoding="utf-8") as fLog:
        logContents = fLog.read()
    # Create PREMIS creation event
    eventName = etree.QName(config.premis_ns, "event")
    event = etree.Element(eventName, nsmap=config.NSMAP)
    # Event identifier: UUID, based on host ID and current time
    eventIdentifier = etree.SubElement(
        event, "{%s}eventIdentifier" % (config.premis_ns))
    eventIdentifierType = etree.SubElement(
        eventIdentifier, "{%s}eventIdentifierType" % (config.premis_ns))
    eventIdentifierType.text = "UUID"
    eventIdentifierValue = etree.SubElement(
        eventIdentifier, "{%s}eventIdentifierValue" % (config.premis_ns))
    eventIdentifierValue.text = str(uuid.uuid1())
    # Event type
    eventType = etree.SubElement(event, "{%s}eventType" % (config.premis_ns))
    eventType.text = "creation"
    # Event date/time: taken from timestamp of log file (last-modified).
    # BUGFIX: this used os.path.getctime, which on POSIX systems is the
    # metadata-change time rather than the last-modified time documented
    # here; getmtime matches the stated intent on all platforms.
    eventDateTimeValue = datetime.fromtimestamp(os.path.getmtime(log))
    # Add time zone info
    pst = pytz.timezone('Europe/Amsterdam')
    eventDateTimeValue = pst.localize(eventDateTimeValue)
    eventDateTimeFormatted = eventDateTimeValue.isoformat()
    eventDateTime = etree.SubElement(
        event, "{%s}eventDateTime" % (config.premis_ns))
    eventDateTime.text = eventDateTimeFormatted
    # eventDetailInformation container with eventDetail element
    eventDetailInformation = etree.SubElement(
        event, "{%s}eventDetailInformation" % (config.premis_ns))
    eventDetail = etree.SubElement(
        eventDetailInformation, "{%s}eventDetail" % (config.premis_ns))
    # eventOutcomeInformation container
    eventOutcomeInformation = etree.SubElement(
        event, "{%s}eventOutcomeInformation" % (config.premis_ns))
    eventOutcomeDetail = etree.SubElement(
        eventOutcomeInformation, "{%s}eventOutcomeDetail" % (config.premis_ns))
    eventOutcomeDetailNote = etree.SubElement(
        eventOutcomeDetail, "{%s}eventOutcomeDetailNote" % (config.premis_ns))
    # linkingAgentIdentifier element
    linkingAgentIdentifier = etree.SubElement(
        event, "{%s}linkingAgentIdentifier" % (config.premis_ns))
    linkingAgentIdentifierType = etree.SubElement(
        linkingAgentIdentifier, "{%s}linkingAgentIdentifierType" % (config.premis_ns))
    linkingAgentIdentifierType.text = "URI"
    # Values of linkingAgentIdentifierValue and agentName are set further below
    linkingAgentIdentifierValue = etree.SubElement(
        linkingAgentIdentifier, "{%s}linkingAgentIdentifierValue" % (config.premis_ns))
    # Name of log
    logName = os.path.basename(log)
    eventOutcomeDetailNote.text = logContents
    isoBusterComment = "Isobuster error values:\n \
    0 No Error (success)\n \
    1 No Tracks / Sessions found\n \
    2 Track Index provided but this track is not available\n \
    3 Session Index provided but this Session is not available\n \
    4 No File-system track found\n \
    5 No (or not a matching) File-system found\n \
    6 Folder name is already in use as filename\n \
    7 Not a matching file or folder found\n \
    10xx Extraction aborted by user"
    comment = etree.Comment(isoBusterComment)
    if logName == "isobuster.log":
        eventDetail.text = "Image created with IsoBuster"
        # Embed the error-code legend as an XML comment after the note
        eventOutcomeDetail.insert(1, comment)
        # URI to isoBuster Wikidata page
        linkingAgentIdentifierValue.text = "https://www.wikidata.org/wiki/Q304733"
    elif logName == "dbpoweramp.log":
        eventDetail.text = "Audio ripped with dBpoweramp"
        # URI to dBpoweramp Wikidata page
        linkingAgentIdentifierValue.text = "https://www.wikidata.org/wiki/Q1152133"
    return event
def addAgent(softwareName):
    """Generate a PREMIS agent instance for the creation software.

    :param softwareName: ``"isobuster"`` or ``"dbpoweramp"``
    :returns: lxml ``premis:agent`` element
    """
    # TODO: do we need this function?
    # Create the agent directly as a standalone element. (Previously a
    # throwaway premis:event parent was created and the first agent Element
    # was immediately overwritten by a SubElement of it -- both dead code.)
    agentName = etree.QName(config.premis_ns, "agent")
    agent = etree.Element(agentName, nsmap=config.NSMAP)
    agentIdentifier = etree.SubElement(
        agent, "{%s}agentIdentifier" % (config.premis_ns))
    agentIdentifierType = etree.SubElement(
        agentIdentifier, "{%s}agentIdentifierType" % (config.premis_ns))
    agentIdentifierType.text = "URI"
    # Values of agentIdentifierValue and agentName are set further below
    agentIdentifierValue = etree.SubElement(
        agentIdentifier, "{%s}agentIdentifierValue" % (config.premis_ns))
    agentName = etree.SubElement(agent, "{%s}agentName" % (config.premis_ns))
    agentType = etree.SubElement(agent, "{%s}agentType" % (config.premis_ns))
    agentType.text = "software"
    if softwareName == "isobuster":
        # URI to isoBuster Wikidata page
        agentIdentifierValue.text = "https://www.wikidata.org/wiki/Q304733"
        agentName.text = "isoBuster"
    elif softwareName == "dbpoweramp":
        # URI to dBpoweramp Wikidata page
        agentIdentifierValue.text = "https://www.wikidata.org/wiki/Q1152133"
        agentName.text = "dBpoweramp"
    return agent
def addObjectInstance(fileName, fileSize, mimeType, sha512Sum, sectorOffset, isobusterReportElt):
    """Generate a PREMIS object instance for one file.

    :param fileName: path to the file being described
    :param fileSize: file size as a string (written verbatim to <size>)
    :param mimeType: mimetype string; keys the formatName/fileTypeID lookups
    :param sha512Sum: precomputed SHA-512 hex digest of the file
    :param sectorOffset: offset passed to isolyzer for ISO images
        (presumably sectors to skip before the filesystem -- TODO confirm)
    :param isobusterReportElt: IsoBuster DFXML report element (used for ISOs)
    :returns: lxml ``premis:object`` element
    """
    # Dictionary that links formatName values to mimeTypes
    formatNames = {
        # From LoC: https://www.loc.gov/preservation/digital/formats/fdd/fdd000348.shtml
        'application/x-iso9660-image': 'ISO_Image',
        'audio/wav': 'Wave',  # from DIAS filetypes list
        'audio/flac': 'FLAC'  # Not on DIAS filetypes list
    }
    # Dictionary that links DIAS fileTypeID values to mimeTypes
    fileTypeIDs = {
        'application/x-iso9660-image': 'n/a',  # Not on DIAS filetypes list
        'audio/wav': '60',
        'audio/flac': 'n/a'  # Not on DIAS filetypes list
    }
    # Create PREMIS object instance
    objectName = etree.QName(config.premis_ns, "object")
    pObject = etree.Element(objectName, nsmap=config.NSMAP)
    pObject.attrib["{%s}type" % config.xsi_ns] = "premis:file"
    # Object identifier: freshly minted UUID
    objectIdentifier = etree.SubElement(
        pObject, "{%s}objectIdentifier" % (config.premis_ns))
    objectIdentifierType = etree.SubElement(
        objectIdentifier, "{%s}objectIdentifierType" % (config.premis_ns))
    objectIdentifierType.text = "UUID"
    objectIdentifierValue = etree.SubElement(
        objectIdentifier, "{%s}objectIdentifierValue" % (config.premis_ns))
    objectIdentifierValue.text = str(uuid.uuid1())
    # Object characteristics
    objectCharacteristics = etree.SubElement(
        pObject, "{%s}objectCharacteristics" % (config.premis_ns))
    compositionLevel = etree.SubElement(
        objectCharacteristics, "{%s}compositionLevel" % (config.premis_ns))
    compositionLevel.text = "0"
    # Fixity element for SHA-512 checksum
    fixity1 = etree.SubElement(
        objectCharacteristics, "{%s}fixity" % (config.premis_ns))
    messageDigestAlgorithm = etree.SubElement(
        fixity1, "{%s}messageDigestAlgorithm" % (config.premis_ns))
    messageDigestAlgorithm.text = "SHA-512"
    messageDigest = etree.SubElement(
        fixity1, "{%s}messageDigest" % (config.premis_ns))
    messageDigest.text = sha512Sum
    messageDigestOriginator = etree.SubElement(
        fixity1, "{%s}messageDigestOriginator" % (config.premis_ns))
    # Value more or less follows convention for DM 1.5
    messageDigestOriginator.text = "python.hashlib.sha512.hexdigest"
    # Size
    size = etree.SubElement(objectCharacteristics,
                            "{%s}size" % (config.premis_ns))
    size.text = fileSize
    # Format
    pFormat = etree.SubElement(objectCharacteristics,
                               "{%s}format" % (config.premis_ns))
    formatDesignation = etree.SubElement(
        pFormat, "{%s}formatDesignation" % (config.premis_ns))
    formatName = etree.SubElement(
        formatDesignation, "{%s}formatName" % (config.premis_ns))
    # Lookup formatName for mimeType (None -> empty element for unknown types)
    formatName.text = formatNames.get(mimeType)
    # formatRegistry: DIAS fileTypeID values
    # TODO FLAC and ISO Image fmts have no fileTypeID values. These either have to be added to the
    # DIAS filetypes list or the formatRegistry element should be omitted altogether
    formatRegistry = etree.SubElement(
        pFormat, "{%s}formatRegistry" % (config.premis_ns))
    formatRegistryName = etree.SubElement(
        formatRegistry, "{%s}formatRegistryName" % (config.premis_ns))
    formatRegistryName.text = "DIAS"
    formatRegistryKey = etree.SubElement(
        formatRegistry, "{%s}formatRegistryKey" % (config.premis_ns))
    formatRegistryKey.text = fileTypeIDs.get(mimeType)
    # objectCharacteristicsExtension - EBUCore, isolyzer, Isobuster DFXML
    objectCharacteristicsExtension1 = etree.SubElement(
        objectCharacteristics, "{%s}objectCharacteristicsExtension" % (config.premis_ns))
    # NOTE(review): 'flac'/'FLAC' below lack a leading dot, so any file name
    # merely ending in "flac" matches -- confirm this is intentional.
    if fileName.endswith(('.wav', '.WAV', 'flac', 'FLAC')):
        # Audio file: embed EBUCore metadata from mdaudio
        audioMDOut = getAudioMetadata(fileName)
        audioMD = audioMDOut["outElt"]
        objectCharacteristicsExtension1.append(audioMD)
    elif fileName.endswith(('.iso', '.ISO')):
        # Add Isobuster's DFXML report
        isobusterReportElt = add_ns_prefix(isobusterReportElt, config.dfxml_ns)
        objectCharacteristicsExtension1.append(isobusterReportElt)
        # Add another objectCharacteristicsExtension element for Isolyzer output
        objectCharacteristicsExtension2 = etree.SubElement(
            objectCharacteristics, "{%s}objectCharacteristicsExtension" % (config.premis_ns))
        # Analyze ISO image with isolyzer
        isolyzerOut = isolyzer.processImage(fileName, sectorOffset)
        # Isolyzer output is Elementtree element, which must be converted
        # to lxml element
        makeHumanReadable(isolyzerOut)
        isolyzerOutAsXML = ET.tostring(isolyzerOut, 'UTF-8', 'xml')
        isolyzerOutLXML = etree.fromstring(isolyzerOutAsXML)
        isolyzerOutLXML = add_ns_prefix(isolyzerOutLXML, config.isolyzer_ns)
        # Wrap the isolyzer output together with tool name/version info
        isoMDOut = etree.Element("{%s}isolyzer" % (config.isolyzer_ns), nsmap=config.NSMAP)
        toolInfo = etree.SubElement(isoMDOut, "{%s}toolInfo" % (config.isolyzer_ns))
        toolName = etree.SubElement(toolInfo, "{%s}toolName" % (config.isolyzer_ns))
        toolVersion = etree.SubElement(toolInfo, "{%s}toolVersion" % (config.isolyzer_ns))
        toolName.text = "isolyzer"
        toolVersion.text = isolyzer.__version__
        isoMDOut.append(isolyzerOutLXML)
        objectCharacteristicsExtension2.append(isoMDOut)
    # originalName
    originalName = etree.SubElement(
        pObject, "{%s}originalName" % (config.premis_ns))
    originalName.text = os.path.basename(fileName)
    return pObject
| |
import datetime
import json
import re
import time
from functools import wraps
from hashlib import md5
from textwrap import wrap
from django.contrib.auth.decorators import permission_required
from django.core import validators
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseRedirect
)
from django.utils.dateparse import parse_date
from django.utils.encoding import force_str, force_text
from django.utils.feedgenerator import Atom1Feed
from product_details import product_details
from ratelimit.helpers import is_ratelimited
from rest_framework.throttling import BaseThrottle
from statsd import statsd
from fjord.base.urlresolvers import reverse
class JSONDatetimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes date/datetime values as ISO 8601 strings."""

    def default(self, value):
        # Anything date-like (duck-typed on strftime) becomes isoformat();
        # everything else is delegated to the base encoder.
        if not hasattr(value, 'strftime'):
            return super(JSONDatetimeEncoder, self).default(value)
        return value.isoformat()
def wrap_with_paragraphs(text, width=72):
    """Runs textwrap on text, but keeps pre-existing paragraphs"""
    if not text:
        return text
    wrapped_lines = []
    for line in text.splitlines():
        wrapped_lines.append('\n'.join(wrap(line, width=width)))
    return '\n'.join(wrapped_lines)
def is_url(url):
    """Takes a string and returns whether or not it's a url

    Recognizes about: and chrome:// urls, everything Django's
    URLValidator recognizes and protocol-less urls.

    >>> is_url(u'example.com')
    True
    >>> is_url(u'about:')
    True
    >>> is_url(u'foo')
    False

    """
    url = force_text(url)

    # about: and chrome:// urls are recognized explicitly.
    if url.startswith('about:') or url.startswith('chrome://'):
        return True

    # Django's URLValidator covers the http/https/ftp cases including
    # localhost, ipv4, ipv6 and optional ports.
    if validators.URLValidator.regex.search(url):
        return True

    # Protocol-less urls: retry with a scheme prepended.
    return bool(validators.URLValidator.regex.search('http://' + url))
def translate_country_name(current_language, country_code, country_name,
                           country_name_l10n):
    """Translates country name from product details or gettext

    It might seem a bit weird we're not doing the _lazy gettext
    translation here, but if we did, then we'd be translating a
    variable value rather than a string and then it wouldn't get
    picked up by extract script.

    :arg current_language: the language of the user viewing the page
    :arg country_code: the iso 3166 two-letter country code
    :arg country_name: the country name
    :arg country_name_l10n: the country name wrapped in a lazy gettext call

    :returns: translated country name

    """
    # FIXME: this is a lousy way to alleviate the problem where we
    # have a "locale" and we really need a "language".
    language_fix = {
        'es': 'es-ES',
    }
    current_language = language_fix.get(current_language, current_language)

    # If gettext already produced a different (i.e. translated) name,
    # prefer that translation.
    if unicode(country_name) != unicode(country_name_l10n):
        return country_name_l10n

    # Normalize the language tag into ll-CC form for product_details.
    parts = current_language.split('-')
    parts[0] = parts[0].lower()
    if len(parts) > 1:
        parts[1] = parts[1].upper()
    normalized_language = '-'.join(parts)

    try:
        countries = product_details.get_regions(normalized_language)
    except IOError:
        # No region data for this language: fall back to the untranslated name.
        return country_name

    return countries.get(country_code.lower(), country_name)
def smart_truncate(content, length=100, suffix='...'):
    """Truncate text at space before length bound.

    :arg content: string to truncate
    :arg length: length to truncate at
    :arg suffix: text to append to truncated content

    :returns: string

    Example:

    >>> smart_truncate('abcde fghij', length=8)
    'abcde...'
    >>> smart_truncate('abcde fghij', length=100)
    'abcde fghij'

    """
    if len(content) > length:
        # Cut at the bound, then back up to the last space so we don't
        # split a word in half.
        truncated = content[:length].rsplit(' ', 1)[0]
        return truncated + suffix
    return content
def smart_str(s, fallback=u''):
    """Returns the string or the fallback if it's not a string"""
    return s if isinstance(s, basestring) else fallback
def smart_int(s, fallback=0):
    """Convert a string to int, with fallback for invalid strings or types."""
    try:
        value = float(s)
    except (ValueError, TypeError):
        # Not numeric at all (e.g. 'apple', None).
        return fallback
    try:
        return int(value)
    except (ValueError, OverflowError):
        # inf raises OverflowError, nan raises ValueError.
        return fallback
def smart_timedelta(s, fallback=None):
    """Convert s to a datetime.timedelta with a fallback for invalid input.

    :arg s: The string to convert to a timedelta.
    :arg fallback: Value to use in case of an error. Default: ``None``.

    """
    if isinstance(s, datetime.timedelta):
        return s

    # Only "<positive int>d" strings are accepted, e.g. "7d".
    if not s or not s.endswith('d'):
        return fallback
    try:
        days = int(s[:-1])
    except ValueError:
        return fallback
    return datetime.timedelta(days=days) if days > 0 else fallback
def smart_date(s, fallback=None):
    """Convert a string to a datetime.date with a fallback for invalid input.

    :arg s: The string to convert to a date.
    :arg fallback: Value to use in case of an error. Default: ``None``.

    """
    if isinstance(s, datetime.date):
        return s

    try:
        parsed = parse_date(s)
    except (ValueError, TypeError):
        return fallback

    # The strftime functions require a year >= 1900, so earlier dates are
    # treated as invalid so later processing doesn't get hosed.
    if parsed is not None and parsed.year >= 1900:
        return parsed
    return fallback
def smart_bool(s, fallback=False):
    """Convert a string that has a semantic boolean value to a real boolean.

    Note that this is not the same as ``s`` being "truthy". The string
    ``'False'`` will be returned as False, even though it is Truthy, and non-
    boolean values like ``'apple'`` would return the fallback parameter, since
    it doesn't represent a boolean value.

    """
    truthy = ('true', 't', 'yes', 'y', '1')
    falsy = ('false', 'f', 'no', 'n', '0')
    try:
        lowered = s.lower()
    except AttributeError:
        # Not string-like at all.
        return fallback
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    return fallback
def epoch_milliseconds(d):
    """Convert a datetime to a number of milliseconds since the epoch."""
    # mktime interprets the timetuple in local time and returns seconds.
    seconds = time.mktime(d.timetuple())
    return seconds * 1000
class FakeLogger(object):
    """Fake logger that we can pretend is a Python Logger

    Why? Well, because Django has logging settings that prevent me
    from setting up a logger here that uses the stdout that the Django
    BaseCommand has. At some point p while fiddling with it, I
    figured, 'screw it--I'll just write my own' and did.

    The minor ramification is that this isn't a complete
    implementation so if it's missing stuff, we'll have to add it.

    """

    def __init__(self, stdout):
        self.stdout = stdout

    def _out(self, level, msg, *args):
        # %-interpolate lazily, mirroring the logging module's convention.
        text = msg % args
        timestamp = time.strftime('%H:%M:%S')
        self.stdout.write('%s %-8s: %s\n' % (timestamp, level, text))

    def info(self, msg, *args):
        self._out('INFO', msg, *args)

    def error(self, msg, *args):
        self._out('ERROR', msg, *args)
class Atom1FeedWithRelatedLinks(Atom1Feed):
    """Atom1Feed with related links

    Items may carry a 'link_related' value; when present it is emitted as::

        <link rel="related">url</link>

    """

    def add_item_elements(self, handler, item):
        super(Atom1FeedWithRelatedLinks, self).add_item_elements(handler, item)
        link_related = item.get('link_related')
        if link_related:
            handler.addQuickElement(
                'link',
                attrs={'href': link_related, 'rel': 'related'})
def actual_ip(req):
    """Returns the actual ip address

    Our dev, stage and prod servers are behind a reverse proxy, so the ip
    address in REMOTE_ADDR is the reverse proxy server and not the client
    ip address. The actual client ip address is in HTTP_X_CLUSTER_CLIENT_IP.

    In our local development and test environments, the client ip address
    is in REMOTE_ADDR.

    """
    meta = req.META
    # Prefer the proxy-provided client ip; fall back to REMOTE_ADDR.
    return meta.get('HTTP_X_CLUSTER_CLIENT_IP', meta['REMOTE_ADDR'])
def actual_ip_plus_context(contextfun):
    """Returns a key function that adds md5 hashed context to the key"""
    def _actual_ip_plus_context(req, *args, **kwargs):
        # Force whatever comes out of contextfun to be bytes, md5-hash it,
        # and append the hexdigest to the client ip as "<ip>:<digest>".
        hasher = md5()
        hasher.update(force_str(contextfun(req)))
        return '%s:%s' % (actual_ip(req), hasher.hexdigest())
    return _actual_ip_plus_context
def ratelimit(rulename, keyfun=actual_ip, rate='5/m'):
    """Rate-limiting decorator that keeps metrics via statsd

    This is just like the django-ratelimit ratelimit decorator, but is
    stacking-friendly, performs some statsd fancypants and also has
    Fjord-friendly defaults.

    :arg rulename: rulename for statsd logging---must be a string
        with letters only! look for this in statsd under
        "throttled." + rulename.
    :arg keyfun: (optional) function to generate a key for this
        throttling. defaults to actual_ip.
    :arg rate: (optional) rate to throttle at. defaults to 5/m.

    """
    def decorator(fn):
        @wraps(fn)
        def _wrapped(request, *args, **kwargs):
            was_limited = getattr(request, 'limited', False)
            now_limited = is_ratelimited(
                request=request, increment=True, ip=False, method=['POST'],
                field=None, rate=rate, keys=keyfun)
            # Only count the first rule that trips for this request, so
            # stacked ratelimit decorators don't double-count.
            if now_limited and not was_limited:
                statsd.incr('throttled.' + rulename)
            return fn(request, *args, **kwargs)
        return _wrapped
    return decorator
# Matches rates like "5/m" or "1/10m": (count, optional multiplier, period).
RATE_RE = re.compile(r'^(\d+)/(\d*)([smhd])$')


class RatelimitThrottle(BaseThrottle):
    """This wraps the django-ratelimit ratelimiter in a DRF class

    Django Rest Framework has its own throttling system. That's great,
    but we're already using django-ratelimit. So this wraps
    django-ratelimit throttling in the Django Rest Framework structure
    so I can have a unified throttling backend for regular and API
    views.

    .. Note::

       Return an instance of this in the `get_throttles` method. Don't
       use this with `throttled_classes` property because it requires
       other parameters to instantiate.

       e.g.::

           class MyThrottle(AnonRateThrottle):
               def get_throttles(self):
                   return [
                       RatelimitThrottle(
                           rulename='double_submit',
                           rate='1/10m'
                       )
                   ]

    """
    def __init__(self, rulename, keyfun=None, rate='5/m', methods=('POST',)):
        # rulename: statsd counter suffix; rate: e.g. '1/10m';
        # methods: HTTP methods this throttle applies to.
        self.rulename = rulename
        self.rate = rate
        self.num_requests, self.duration = self.parse_rate(rate)
        self.keyfun = keyfun or actual_ip
        self.methods = methods

    def parse_rate(self, rate):
        """Handles num/(multi * period) like 1/10m"""
        # Returns (number of requests, window duration in seconds).
        num, multiplier, period = RATE_RE.match(rate).groups()
        num = int(num)
        multiplier = int(multiplier or 1)
        period = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period]
        return (num, (multiplier * period))

    def allow_request(self, request, view):
        # Delegate the actual accounting to django-ratelimit.
        ratelimited = is_ratelimited(
            request=request, increment=True, ip=False, method=self.methods,
            field=None, rate=self.rate, keys=self.keyfun)

        if ratelimited:
            # Failed rate-limiting, so this request is not allowed.
            statsd.incr('throttled.' + self.rulename)
            return self.throttle_failure()

        # Did not trigger rate-limiting, so this request is allowed.
        return self.throttle_success()

    def throttle_success(self):
        return True

    def throttle_failure(self):
        """Called when a request has failed due to throttling"""
        return False

    def wait(self):
        # We don't want to calculate the actual wait time, so we cheat
        # here and just return the full duration.
        return self.duration
def check_new_user(fun):
    """View decorator that sends profile-less users to the new-user view."""
    @wraps(fun)
    def _wrapped_view(request, *args, **kwargs):
        # Do this here to avoid circular imports
        from fjord.base.models import Profile

        try:
            request.user.profile
        except AttributeError:
            # No .user/.profile attribute (e.g. anonymous user): let the
            # view handle it.
            pass
        except Profile.DoesNotExist:
            # Known user without a profile: redirect to profile creation,
            # round-tripping the original destination in ?next=.
            next_url = reverse('new-user-view') + '?next=' + request.path
            return HttpResponseRedirect(next_url)

        return fun(request, *args, **kwargs)
    return _wrapped_view
def cors_enabled(origin, methods=['GET']):
    """A simple decorator to enable CORS."""
    def decorator(f):
        @wraps(f)
        def decorated_func(request, *args, **kwargs):
            if request.method == 'OPTIONS':
                # CORS preflight: both the request-method and request-headers
                # headers must be present, otherwise it's a bad request.
                has_preflight_headers = (
                    'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META and
                    'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META)
                if not has_preflight_headers:
                    return HttpResponseBadRequest()
                response = HttpResponse()
                response['Access-Control-Allow-Methods'] = ', '.join(
                    methods)
                # TODO: We might need to change this
                response['Access-Control-Allow-Headers'] = \
                    request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
            elif request.method in methods:
                response = f(request, *args, **kwargs)
            else:
                # Method not enabled for CORS on this view.
                return HttpResponseBadRequest()

            response['Access-Control-Allow-Origin'] = origin
            return response
        return decorated_func
    return decorator
# View decorator: requires the analytics-dashboard permission and raises
# PermissionDenied (rather than redirecting to login) when it's missing.
analyzer_required = permission_required(
    'analytics.can_view_dashboard',
    raise_exception=True)
def class_to_path(cls):
    """Given a class, returns the class path"""
    # "module.path:ClassName" -- the inverse of path_to_class().
    return '%s:%s' % (cls.__module__, cls.__name__)
def path_to_class(path):
    """Given a class path, returns the class"""
    # Expects "module.path:ClassName", as produced by class_to_path().
    module_path, cls_name = path.split(':')
    module = __import__(module_path, fromlist=[cls_name])
    return getattr(module, cls_name)
def instance_to_key(instance):
    """Given an instance, returns a key

    :arg instance: The model instance to generate a key for

    :returns: A string representing that specific instance

    .. Note::

       If you ever make a code change that moves the model to some
       other Python module, then the keys for those model instances
       will fail.

    """
    cls = type(instance)
    return '%s:%s:%s' % (cls.__module__, cls.__name__, instance.pk)
def key_to_instance(key):
    """Given a key, returns the instance

    :raises DoesNotExist: if the instance doesn't exist
    :raises ImportError: if there's an import error
    :raises AttributeError: if the class doesn't exist in the module

    """
    # Keys look like "module.path:ClassName:pk" (see instance_to_key()).
    module_path, cls_name, id_ = key.split(':')
    module = __import__(module_path, fromlist=[cls_name])
    cls = getattr(module, cls_name)
    return cls.objects.get(pk=int(id_))
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper for subprocess to make calling shell commands easier."""
import codecs
import logging
import os
import pipes
import select
import shlex
import signal
import string
import subprocess
import sys
import time
# Repository root: three directory levels up from this file.
CATAPULT_ROOT_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SIX_PATH = os.path.join(CATAPULT_ROOT_PATH, 'third_party', 'six')
# Make the vendored copy of six importable without installation.
if SIX_PATH not in sys.path:
  sys.path.append(SIX_PATH)
import six

from devil import base_error

logger = logging.getLogger(__name__)

# Characters that never require shell quoting; used by DoubleQuote().
_SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')

# Cache the string-escape codec to ensure subprocess can find it
# later. Return value doesn't matter.
if six.PY2:
  codecs.lookup('string-escape')
def SingleQuote(s):
  """Return an shell-escaped version of the string using single quotes.

  Reliably quote a string which may contain unsafe characters (e.g. space,
  quote, or other special characters such as '$').

  The returned value can be used in a shell command line as one token that gets
  to be interpreted literally.

  Args:
    s: The string to quote.

  Return:
    The string quoted using single quotes.
  """
  # pipes.quote was deprecated in Python 3.11 and removed in 3.13;
  # shlex.quote (available since 3.3) is the drop-in replacement. Fall
  # back to pipes.quote on Python 2, where shlex has no quote().
  quote = getattr(shlex, 'quote', None)
  if quote is None:
    quote = pipes.quote  # Python 2 only.
  return quote(s)
def DoubleQuote(s):
  """Return an shell-escaped version of the string using double quotes.

  Reliably quote a string which may contain unsafe characters (e.g. space
  or quote characters), while retaining some shell features such as variable
  interpolation.

  The returned value can be used in a shell command line as one token that gets
  to be further interpreted by the shell.

  The set of characters that retain their special meaning may depend on the
  shell implementation. This set usually includes: '$', '`', '\', '!', '*',
  and '@'.

  Args:
    s: The string to quote.

  Return:
    The string quoted using double quotes.
  """
  if not s:
    return '""'
  # Quote only when some character falls outside the known-safe set.
  if any(ch not in _SafeShellChars for ch in s):
    return '"%s"' % s.replace('"', '\\"')
  return s
def ShrinkToSnippet(cmd_parts, var_name, var_value):
  """Constructs a shell snippet for a command using a variable to shrink it.

  Takes into account all quoting that needs to happen.

  Args:
    cmd_parts: A list of command arguments.
    var_name: The variable that holds var_value.
    var_value: The string to replace in cmd_parts with $var_name

  Returns:
    A shell snippet that does not include setting the variable.
  """
  substitution = '"$%s"' % var_name

  def _shrink(token):
    # Quote each literal fragment; empty fragments stay empty so that
    # adjacent substitutions concatenate correctly.
    fragments = [SingleQuote(piece) if piece else piece
                 for piece in token.split(var_value)]
    return substitution.join(fragments) or "''"

  return ' '.join(_shrink(token) for token in cmd_parts)
def Popen(args, stdin=None, stdout=None, stderr=None, shell=None, cwd=None,
          env=None):
  """Thin wrapper around subprocess.Popen with platform-appropriate defaults.

  On POSIX, restores default SIGPIPE handling in the child; on Python 3 the
  child's streams are opened in text mode so callers always receive str.
  """
  if sys.platform == 'win32':
    # preexec_fn isn't supported on windows.
    # pylint: disable=unexpected-keyword-arg
    preexec_fn = None
    close_fds = stdin is None and stdout is None and stderr is None
  else:
    def preexec_fn():
      signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    close_fds = True

  popen_kwargs = dict(
      args=args,
      cwd=cwd,
      stdin=stdin,
      stdout=stdout,
      stderr=stderr,
      shell=shell,
      close_fds=close_fds,
      env=env,
      preexec_fn=preexec_fn)
  if sys.version_info.major >= 3:
    # Open stdout in text mode so that the caller side always gets 'str' and
    # there is no type mismatch error. Ignore any decoding error so the caller
    # does not crash on an uncaught exception: decoding errors are
    # unavoidable, as we do not know the encoding of the output, and in some
    # output there will be multiple encodings (e.g. adb logcat).
    popen_kwargs.update(
        universal_newlines=True, encoding='utf-8', errors='ignore')
  return subprocess.Popen(**popen_kwargs)
def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
  """Runs a command to completion and returns its exit code.

  Output is discarded unless stdout/stderr handles are provided.
  """
  process = Popen(
      args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env)
  process.communicate()
  return process.wait()
def RunCmd(args, cwd=None):
  """Opens a subprocess to execute a program and returns its return value.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.

  Returns:
    Return code from the command execution.
  """
  logger.debug('%s %s', str(args), cwd or '')
  return Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False, env=None):
  """Open a subprocess to execute a program and returns its output.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command.
    env: If not None, a mapping that defines environment variables for the
      subprocess.

  Returns:
    Captures and returns the command's stdout.
    Prints the command's stderr to logger (which defaults to stdout).
  """
  # The exit status is intentionally dropped; use GetCmdStatusAndOutput when
  # it matters.
  _, output = GetCmdStatusAndOutput(args, cwd, shell, env)
  return output
def _ValidateAndLogCommand(args, cwd, shell):
  """Checks that |args| and |shell| are consistent, then logs the command.

  Raises an Exception when a string command is run without shell=True or a
  sequence command is run with shell=True. Returns the command as a single
  shell-quoted string.
  """
  if isinstance(args, six.string_types):
    if not shell:
      raise Exception('string args must be run with shell=True')
  elif shell:
    raise Exception('array args must be run with shell=False')
  else:
    args = ' '.join(SingleQuote(str(c)) for c in args)
  cwd_label = '' if cwd is None else ':' + cwd
  logger.debug('[host]%s> %s', cwd_label, args)
  return args
def GetCmdStatusAndOutput(args, cwd=None, shell=False, env=None,
                          merge_stderr=False):
  """Executes a subprocess and returns its exit code and output.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command. Must be True if args
      is a string and False if args is a sequence.
    env: If not None, a mapping that defines environment variables for the
      subprocess.
    merge_stderr: If True, captures stderr as part of stdout.

  Returns:
    The 2-tuple (exit code, stdout).
  """
  status, stdout, stderr = GetCmdStatusOutputAndError(
      args, cwd=cwd, shell=shell, env=env, merge_stderr=merge_stderr)

  if stderr:
    logger.critical('STDERR: %s', stderr)
  # Cap the logged output so huge command outputs don't flood the log.
  truncation_marker = '<truncated>' if len(stdout) > 4096 else ''
  logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(), truncation_marker)
  return (status, stdout)
def StartCmd(args, cwd=None, shell=False, env=None):
  """Starts a subprocess and returns a handle to the process.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command. Must be True if args
      is a string and False if args is a sequence.
    env: If not None, a mapping that defines environment variables for the
      subprocess.

  Returns:
    A process handle from subprocess.Popen.
  """
  _ValidateAndLogCommand(args, cwd, shell)
  return Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
               shell=shell, cwd=cwd, env=env)
def GetCmdStatusOutputAndError(args, cwd=None, shell=False, env=None,
                               merge_stderr=False):
  """Executes a subprocess and returns its exit code, output, and errors.

  Args:
    args: A string or a sequence of program arguments. The program to execute
      is the string or the first item in the args sequence.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command. Must be True if args
      is a string and False if args is a sequence.
    env: If not None, a mapping that defines environment variables for the
      subprocess.
    merge_stderr: If True, captures stderr as part of stdout.

  Returns:
    The 3-tuple (exit code, stdout, stderr).
  """
  _ValidateAndLogCommand(args, cwd, shell)
  # Redirect stderr into stdout when the caller wants them interleaved.
  stderr_dest = subprocess.STDOUT if merge_stderr else subprocess.PIPE
  child = Popen(args, stdout=subprocess.PIPE, stderr=stderr_dest, shell=shell,
                cwd=cwd, env=env)
  stdout, stderr = child.communicate()
  return (child.returncode, stdout, stderr)
class TimeoutError(base_error.BaseError):
  """Module-specific timeout exception.

  Carries any output that was collected from the child process before the
  timeout fired, so callers can still inspect partial results.
  """

  def __init__(self, output=None):
    super(TimeoutError, self).__init__('Timeout')
    # Partial stdout/stderr captured before the timeout, or None.
    self._output = output

  @property
  def output(self):
    """Output collected before the timeout, if any (may be None)."""
    return self._output
def _read_and_decode(fd, buffer_size):
data = os.read(fd, buffer_size)
if data and six.PY3:
data = data.decode('utf-8', errors='ignore')
return data
def _IterProcessStdoutFcntl(process,
                            iter_timeout=None,
                            timeout=None,
                            buffer_size=4096,
                            poll_interval=1):
  """An fcntl-based implementation of _IterProcessStdout.

  See the module-level docstring attached to _IterProcessStdout for the
  argument and yield semantics shared by both implementations.
  """
  # pylint: disable=too-many-nested-blocks
  # fcntl is POSIX-only, so import lazily to keep this module importable on
  # Windows (which uses the queue-based implementation instead).
  import fcntl
  try:
    # Enable non-blocking reads from the child's stdout.
    child_fd = process.stdout.fileno()
    fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
    fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    end_time = (time.time() + timeout) if timeout else None
    iter_end_time = (time.time() + iter_timeout) if iter_timeout else None

    while True:
      if end_time and time.time() > end_time:
        raise TimeoutError()
      if iter_end_time and time.time() > iter_end_time:
        # Heartbeat: no data within iter_timeout, yield None and reset.
        yield None
        iter_end_time = time.time() + iter_timeout

      if iter_end_time:
        # Don't sleep past the iteration deadline.
        iter_aware_poll_interval = min(poll_interval,
                                       max(0, iter_end_time - time.time()))
      else:
        iter_aware_poll_interval = poll_interval
      read_fds, _, _ = select.select([child_fd], [], [],
                                     iter_aware_poll_interval)
      if child_fd in read_fds:
        data = _read_and_decode(child_fd, buffer_size)
        if not data:
          # Empty read on a readable fd means EOF.
          break
        yield data
      if process.poll() is not None:
        # If process is closed, keep checking for output data (because of
        # timing issues).
        while True:
          read_fds, _, _ = select.select([child_fd], [], [],
                                         iter_aware_poll_interval)
          if child_fd in read_fds:
            data = _read_and_decode(child_fd, buffer_size)
            if data:
              yield data
              continue
          break
        break
  finally:
    try:
      if process.returncode is None:
        # Make sure the process doesn't stick around if we fail with an
        # exception.
        process.kill()
    except OSError:
      pass
    process.wait()
def _IterProcessStdoutQueue(process,
                            iter_timeout=None,
                            timeout=None,
                            buffer_size=4096,
                            poll_interval=1):
  """A Queue.Queue-based implementation of _IterProcessStdout.

  A background thread reads chunks from the child's stdout and feeds them
  through a queue to this generator. Used on Windows, where fcntl is
  unavailable.

  TODO(jbudorick): Evaluate whether this is a suitable replacement for
  _IterProcessStdoutFcntl on all platforms.
  """
  # poll_interval is accepted for signature parity with the fcntl version
  # but unused here: queue.get's own timeout does the waiting.
  # pylint: disable=unused-argument
  if six.PY3:
    import queue
  else:
    import Queue as queue
  import threading

  stdout_queue = queue.Queue()

  def read_process_stdout():
    # TODO(jbudorick): Pick an appropriate read size here.
    while True:
      try:
        output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)
      except IOError:
        break
      stdout_queue.put(output_chunk, True)
      # An empty chunk with the process exited signals EOF.
      if not output_chunk and process.poll() is not None:
        break

  reader_thread = threading.Thread(target=read_process_stdout)
  reader_thread.start()

  end_time = (time.time() + timeout) if timeout else None

  try:
    while True:
      if end_time and time.time() > end_time:
        raise TimeoutError()
      try:
        s = stdout_queue.get(True, iter_timeout)
        if not s:
          break
        yield s
      except queue.Empty:
        # Heartbeat: no data within iter_timeout.
        yield None
  finally:
    try:
      if process.returncode is None:
        # Make sure the process doesn't stick around if we fail with an
        # exception.
        process.kill()
    except OSError:
      pass
    process.wait()
    reader_thread.join()
_IterProcessStdout = (_IterProcessStdoutQueue
if sys.platform == 'win32' else _IterProcessStdoutFcntl)
"""Iterate over a process's stdout.
This is intentionally not public.
Args:
process: The process in question.
iter_timeout: An optional length of time, in seconds, to wait in
between each iteration. If no output is received in the given
time, this generator will yield None.
timeout: An optional length of time, in seconds, during which
the process must finish. If it fails to do so, a TimeoutError
will be raised.
buffer_size: The maximum number of bytes to read (and thus yield) at once.
poll_interval: The length of time to wait in calls to `select.select`.
If iter_timeout is set, the remaining length of time in the iteration
may take precedence.
Raises:
TimeoutError: if timeout is set and the process does not complete.
Yields:
basestrings of data or None.
"""
def GetCmdStatusAndOutputWithTimeout(args, timeout, cwd=None, shell=False,
                                     logfile=None, env=None):
  """Executes a subprocess with a timeout.

  Args:
    args: List of arguments to the program, the program to execute is the
      first element.
    timeout: the timeout in seconds or None to wait forever.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command. Must be True if args
      is a string and False if args is a sequence.
    logfile: Optional file-like object that will receive output from the
      command as it is running.
    env: If not None, a mapping that defines environment variables for the
      subprocess.

  Returns:
    The 2-tuple (exit code, output).

  Raises:
    TimeoutError on timeout.
  """
  _ValidateAndLogCommand(args, cwd, shell)
  collected = six.StringIO()
  process = Popen(args, cwd=cwd, shell=shell, stdout=subprocess.PIPE,
                  stderr=subprocess.STDOUT, env=env)
  try:
    for chunk in _IterProcessStdout(process, timeout=timeout):
      if logfile:
        logfile.write(chunk)
      collected.write(chunk)
  except TimeoutError:
    # Re-raise with whatever output was gathered before the deadline.
    raise TimeoutError(collected.getvalue())

  text = collected.getvalue()
  logger.debug('STDOUT+STDERR: %s%s', text[:4096].rstrip(),
               '<truncated>' if len(text) > 4096 else '')
  return process.returncode, text
def IterCmdOutputLines(args, iter_timeout=None, timeout=None, cwd=None,
                       shell=False, env=None, check_status=True):
  """Executes a subprocess and continuously yields lines from its output.

  Args:
    args: List of arguments to the program, the program to execute is the
      first element.
    iter_timeout: Timeout for each iteration, in seconds.
    timeout: Timeout for the entire command, in seconds.
    cwd: If not None, the subprocess's current directory will be changed to
      |cwd| before it's executed.
    shell: Whether to execute args as a shell command. Must be True if args
      is a string and False if args is a sequence.
    env: If not None, a mapping that defines environment variables for the
      subprocess.
    check_status: A boolean indicating whether to check the exit status of
      the process after all output has been read.

  Yields:
    The output of the subprocess, line by line.

  Raises:
    CalledProcessError if check_status is True and the process exited with a
    non-zero exit status.
  """
  cmd = _ValidateAndLogCommand(args, cwd, shell)
  child = Popen(args, cwd=cwd, shell=shell, env=env, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
  # Delegate to a generator so the command above starts eagerly, while the
  # line iteration itself is lazy.
  return _IterCmdOutputLines(child, cmd, iter_timeout=iter_timeout,
                             timeout=timeout, check_status=check_status)
def _IterCmdOutputLines(process,
                        cmd,
                        iter_timeout=None,
                        timeout=None,
                        check_status=True):
  """Yields complete lines from |process|'s stdout; backend of IterCmdOutputLines.

  Buffers raw chunks from _IterProcessStdout and splits them into lines,
  holding back any trailing partial line until it is completed. Yields None
  as a heartbeat when an iteration times out.
  """
  buffer_output = ''

  iter_end = None
  cur_iter_timeout = None
  if iter_timeout:
    iter_end = time.time() + iter_timeout
    cur_iter_timeout = iter_timeout

  for data in _IterProcessStdout(
      process, iter_timeout=cur_iter_timeout, timeout=timeout):
    if iter_timeout:
      # Check whether the current iteration has timed out.
      cur_iter_timeout = iter_end - time.time()
      if data is None or cur_iter_timeout < 0:
        yield None
        iter_end = time.time() + iter_timeout
        continue
    else:
      assert data is not None, (
          'Iteration received no data despite no iter_timeout being set. '
          'cmd: %s' % cmd)

    # Construct lines to yield from raw data.
    buffer_output += data
    # A chunk not ending in a newline leaves a partial last line; keep it
    # buffered for the next chunk instead of yielding it.
    has_incomplete_line = buffer_output[-1] not in '\r\n'
    lines = buffer_output.splitlines()
    buffer_output = lines.pop() if has_incomplete_line else ''
    for line in lines:
      yield line
      if iter_timeout:
        # Each yielded line counts as activity, so reset the iteration clock.
        iter_end = time.time() + iter_timeout

  if buffer_output:
    # Flush any final line that had no terminating newline.
    yield buffer_output

  if check_status and process.returncode:
    raise subprocess.CalledProcessError(process.returncode, cmd)
| |
"""The tests for the automation component."""
import asyncio
import logging
from unittest.mock import Mock, patch
import pytest
from homeassistant.components import logbook
import homeassistant.components.automation as automation
from homeassistant.components.automation import (
ATTR_SOURCE,
DOMAIN,
EVENT_AUTOMATION_RELOADED,
EVENT_AUTOMATION_TRIGGERED,
SERVICE_TRIGGER,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
EVENT_HOMEASSISTANT_STARTED,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, CoreState, State, callback
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
mock_restore_cache,
)
from tests.components.logbook.test_init import MockLazyEventPartialState
@pytest.fixture
def calls(hass):
    """Fixture recording calls made to the mock test.automation service."""
    mock_calls = async_mock_service(hass, "test", "automation")
    return mock_calls
async def test_service_data_not_a_dict(hass, calls):
    """Test service data not dict."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "data": 100},
        }
    }
    # Non-dict service data is invalid, so zero automations should be set up.
    with assert_setup_component(0, automation.DOMAIN):
        assert await async_setup_component(hass, automation.DOMAIN, config)
async def test_service_specify_data(hass, calls):
    """Test service data."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {
                "service": "test.automation",
                "data_template": {
                    "some": "{{ trigger.platform }} - "
                    "{{ trigger.event.event_type }}"
                },
            },
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    time = dt_util.utcnow()
    # Freeze the script clock so last_triggered is deterministic.
    with patch("homeassistant.helpers.script.utcnow", return_value=time):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data["some"] == "event - test_event"

    state = hass.states.get("automation.hello")
    assert state is not None
    assert state.attributes.get("last_triggered") == time
async def test_service_specify_entity_id(hass, calls):
    """Test service data."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()

    # A single entity_id string is normalized into a list.
    assert len(calls) == 1
    assert calls[0].data.get(ATTR_ENTITY_ID) == ["hello.world"]
async def test_service_specify_entity_id_list(hass, calls):
    """Test service data."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {
                "service": "test.automation",
                "entity_id": ["hello.world", "hello.world2"],
            },
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data.get(ATTR_ENTITY_ID) == ["hello.world", "hello.world2"]
async def test_two_triggers(hass, calls):
    """Test triggers."""
    config = {
        automation.DOMAIN: {
            "trigger": [
                {"platform": "event", "event_type": "test_event"},
                {"platform": "state", "entity_id": "test.entity"},
            ],
            "action": {"service": "test.automation"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # The event trigger fires the action...
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # ...and so does the state trigger.
    hass.states.async_set("test.entity", "hello")
    await hass.async_block_till_done()
    assert len(calls) == 2
async def test_trigger_service_ignoring_condition(hass, caplog, calls):
    """Test triggers."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "test",
                "trigger": [{"platform": "event", "event_type": "test_event"}],
                # Condition references a non-existent entity, so a normal
                # trigger can never pass it.
                "condition": {
                    "condition": "numeric_state",
                    "entity_id": "non.existing",
                    "above": "1",
                },
                "action": {"service": "test.automation"},
            }
        },
    )

    caplog.clear()
    caplog.set_level(logging.WARNING)

    # Normal event trigger: condition fails, action blocked, one warning logged.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0

    assert len(caplog.record_tuples) == 1
    assert caplog.record_tuples[0][1] == logging.WARNING

    # The trigger service skips conditions by default.
    await hass.services.async_call(
        "automation", "trigger", {"entity_id": "automation.test"}, blocking=True
    )
    assert len(calls) == 1

    # Explicit skip_condition=True behaves the same as the default.
    await hass.services.async_call(
        "automation",
        "trigger",
        {"entity_id": "automation.test", "skip_condition": True},
        blocking=True,
    )
    assert len(calls) == 2

    # skip_condition=False honors the (failing) condition, so no new call.
    await hass.services.async_call(
        "automation",
        "trigger",
        {"entity_id": "automation.test", "skip_condition": False},
        blocking=True,
    )
    assert len(calls) == 2
async def test_two_conditions_with_and(hass, calls):
    """Test two and conditions."""
    entity_id = "test.entity"
    config = {
        automation.DOMAIN: {
            "trigger": [{"platform": "event", "event_type": "test_event"}],
            "condition": [
                {"condition": "state", "entity_id": entity_id, "state": "100"},
                {
                    "condition": "numeric_state",
                    "entity_id": entity_id,
                    "below": 150,
                },
            ],
            "action": {"service": "test.automation"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # Only the first value (100) satisfies both conditions; the call count
    # must stay at 1 for the subsequent values.
    for value in (100, 101, 151):
        hass.states.async_set(entity_id, value)
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
async def test_shorthand_conditions_template(hass, calls):
    """Test shorthand template notation in conditions."""
    config = {
        automation.DOMAIN: {
            "trigger": [{"platform": "event", "event_type": "test_event"}],
            "condition": "{{ is_state('test.entity', 'hello') }}",
            "action": {"service": "test.automation"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    hass.states.async_set("test.entity", "hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Template now evaluates false, so the call count must not grow.
    hass.states.async_set("test.entity", "goodbye")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_automation_list_setting(hass, calls):
    """Event is not a valid condition."""
    config = {
        automation.DOMAIN: [
            {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation"},
            },
            {
                "trigger": {"platform": "event", "event_type": "test_event_2"},
                "action": {"service": "test.automation"},
            },
        ]
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # Each configured automation reacts to its own event.
    for expected_count, event in enumerate(("test_event", "test_event_2"), 1):
        hass.bus.async_fire(event)
        await hass.async_block_till_done()
        assert len(calls) == expected_count
async def test_automation_calling_two_actions(hass, calls):
    """Test if we can call two actions from automation async definition."""
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": [
                {"service": "test.automation", "data": {"position": 0}},
                {"service": "test.automation", "data": {"position": 1}},
            ],
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()

    # Both actions run, in configuration order.
    assert len(calls) == 2
    for position, call in enumerate(calls):
        assert call.data["position"] == position
async def test_shared_context(hass, calls):
    """Test that the shared context is passed down the chain."""
    # Two chained automations: 'hello' fires test_event2, which triggers 'bye'.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "alias": "hello",
                    "trigger": {"platform": "event", "event_type": "test_event"},
                    "action": {"event": "test_event2"},
                },
                {
                    "alias": "bye",
                    "trigger": {"platform": "event", "event_type": "test_event2"},
                    "action": {"service": "test.automation"},
                },
            ]
        },
    )

    context = Context()
    first_automation_listener = Mock()
    event_mock = Mock()

    hass.bus.async_listen("test_event2", first_automation_listener)
    hass.bus.async_listen(EVENT_AUTOMATION_TRIGGERED, event_mock)
    hass.bus.async_fire("test_event", context=context)
    await hass.async_block_till_done()

    # Ensure events were fired
    assert first_automation_listener.call_count == 1
    assert event_mock.call_count == 2

    # Verify automation triggered event for 'hello' automation
    args, _ = event_mock.call_args_list[0]
    first_trigger_context = args[0].context
    assert first_trigger_context.parent_id == context.id
    # Ensure event data has all attributes set
    assert args[0].data.get(ATTR_NAME) is not None
    assert args[0].data.get(ATTR_ENTITY_ID) is not None
    assert args[0].data.get(ATTR_SOURCE) is not None

    # Ensure context set correctly for event fired by 'hello' automation
    args, _ = first_automation_listener.call_args
    assert args[0].context is first_trigger_context

    # Ensure the 'hello' automation state has the right context
    state = hass.states.get("automation.hello")
    assert state is not None
    assert state.context is first_trigger_context

    # Verify automation triggered event for 'bye' automation
    args, _ = event_mock.call_args_list[1]
    second_trigger_context = args[0].context
    assert second_trigger_context.parent_id == first_trigger_context.id
    # Ensure event data has all attributes set
    assert args[0].data.get(ATTR_NAME) is not None
    assert args[0].data.get(ATTR_ENTITY_ID) is not None
    assert args[0].data.get(ATTR_SOURCE) is not None

    # Ensure the service call from the second automation
    # shares the same context
    assert len(calls) == 1
    assert calls[0].context is second_trigger_context
async def test_services(hass, calls):
    """Test the automation services for turning entities on/off."""
    entity_id = "automation.hello"

    # The entity doesn't exist before setup.
    assert hass.states.get(entity_id) is None
    assert not automation.is_on(hass, entity_id)

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation"},
            }
        },
    )

    # Automations start enabled by default.
    assert hass.states.get(entity_id) is not None
    assert automation.is_on(hass, entity_id)

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # turn_off: the trigger no longer fires the action.
    await hass.services.async_call(
        automation.DOMAIN,
        SERVICE_TURN_OFF,
        {
            ATTR_ENTITY_ID: entity_id,
        },
        blocking=True,
    )

    assert not automation.is_on(hass, entity_id)
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # toggle back on: the trigger fires again.
    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )

    assert automation.is_on(hass, entity_id)
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 2

    # toggle off again.
    await hass.services.async_call(
        automation.DOMAIN,
        SERVICE_TOGGLE,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )

    assert not automation.is_on(hass, entity_id)
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 2

    # The trigger service runs the action regardless of the on/off state.
    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    assert len(calls) == 3

    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TRIGGER, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    assert len(calls) == 4

    await hass.services.async_call(
        automation.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id}, blocking=True
    )
    assert automation.is_on(hass, entity_id)
async def test_reload_config_service(hass, calls, hass_admin_user, hass_read_only_user):
    """Test the reload config service."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {
                    "service": "test.automation",
                    "data_template": {"event": "{{ trigger.event.event_type }}"},
                },
            }
        },
    )
    assert hass.states.get("automation.hello") is not None
    assert hass.states.get("automation.bye") is None
    listeners = hass.bus.async_listeners()
    assert listeners.get("test_event") == 1
    assert listeners.get("test_event2") is None

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].data.get("event") == "test_event"

    test_reload_event = async_capture_events(hass, EVENT_AUTOMATION_RELOADED)

    # Reload with a replacement config: 'hello' should be swapped for 'bye'.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        autospec=True,
        return_value={
            automation.DOMAIN: {
                "alias": "bye",
                "trigger": {"platform": "event", "event_type": "test_event2"},
                "action": {
                    "service": "test.automation",
                    "data_template": {"event": "{{ trigger.event.event_type }}"},
                },
            }
        },
    ):
        # A read-only user may not reload.
        with pytest.raises(Unauthorized):
            await hass.services.async_call(
                automation.DOMAIN,
                SERVICE_RELOAD,
                context=Context(user_id=hass_read_only_user.id),
                blocking=True,
            )
        await hass.services.async_call(
            automation.DOMAIN,
            SERVICE_RELOAD,
            context=Context(user_id=hass_admin_user.id),
            blocking=True,
        )
        # De-flake ?!
        await hass.async_block_till_done()

    assert len(test_reload_event) == 1

    # The old automation and its listener are gone; the new ones exist.
    assert hass.states.get("automation.hello") is None
    assert hass.states.get("automation.bye") is not None
    listeners = hass.bus.async_listeners()
    assert listeners.get("test_event") is None
    assert listeners.get("test_event2") == 1

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1

    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data.get("event") == "test_event2"
async def test_reload_config_when_invalid_config(hass, calls):
    """Test the reload config service handling invalid config."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {
                "service": "test.automation",
                "data_template": {"event": "{{ trigger.event.event_type }}"},
            },
        }
    }
    with assert_setup_component(1, automation.DOMAIN):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    assert hass.states.get("automation.hello") is not None

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data.get("event") == "test_event"

    # Reloading with invalid YAML removes the existing automation.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        autospec=True,
        return_value={automation.DOMAIN: "not valid"},
    ):
        await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)

    assert hass.states.get("automation.hello") is None

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_reload_config_handles_load_fails(hass, calls):
    """Test the reload config service."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {
                "service": "test.automation",
                "data_template": {"event": "{{ trigger.event.event_type }}"},
            },
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)
    assert hass.states.get("automation.hello") is not None

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data.get("event") == "test_event"

    # A failing config load keeps the existing automation intact.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        side_effect=HomeAssistantError("bla"),
    ):
        await hass.services.async_call(automation.DOMAIN, SERVICE_RELOAD, blocking=True)

    assert hass.states.get("automation.hello") is not None

    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 2
@pytest.mark.parametrize("service", ["turn_off_stop", "turn_off_no_stop", "reload"])
async def test_automation_stops(hass, calls, service):
    """Test that turning off / reloading stops any running actions as appropriate."""
    entity_id = "automation.hello"
    test_entity = "test.entity"

    # The action blocks on wait_template, so the automation can be interrupted
    # mid-run by the service under test.
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": [
                {"event": "running"},
                {"wait_template": "{{ is_state('test.entity', 'goodbye') }}"},
                {"service": "test.automation"},
            ],
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    running = asyncio.Event()

    @callback
    def running_cb(event):
        running.set()

    hass.bus.async_listen_once("running", running_cb)
    hass.states.async_set(test_entity, "hello")

    hass.bus.async_fire("test_event")
    # Wait until the action sequence has started and is blocked on the template.
    await running.wait()

    if service == "turn_off_stop":
        await hass.services.async_call(
            automation.DOMAIN,
            SERVICE_TURN_OFF,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
    elif service == "turn_off_no_stop":
        await hass.services.async_call(
            automation.DOMAIN,
            SERVICE_TURN_OFF,
            {ATTR_ENTITY_ID: entity_id, automation.CONF_STOP_ACTIONS: False},
            blocking=True,
        )
    else:
        with patch(
            "homeassistant.config.load_yaml_config_file",
            autospec=True,
            return_value=config,
        ):
            await hass.services.async_call(
                automation.DOMAIN, SERVICE_RELOAD, blocking=True
            )

    # Release the wait_template; only the no-stop variant should complete.
    hass.states.async_set(test_entity, "goodbye")
    await hass.async_block_till_done()

    assert len(calls) == (1 if service == "turn_off_no_stop" else 0)
async def test_automation_restore_state(hass):
    """Ensure states are restored on startup."""
    time = dt_util.utcnow()
    # Pre-seed the restore cache: 'hello' was on, 'bye' was off with a
    # previously recorded last_triggered timestamp.
    mock_restore_cache(
        hass,
        (
            State("automation.hello", STATE_ON),
            State("automation.bye", STATE_OFF, {"last_triggered": time}),
        ),
    )

    config = {
        automation.DOMAIN: [
            {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event_hello"},
                "action": {"service": "test.automation"},
            },
            {
                "alias": "bye",
                "trigger": {"platform": "event", "event_type": "test_event_bye"},
                "action": {"service": "test.automation"},
            },
        ]
    }

    assert await async_setup_component(hass, automation.DOMAIN, config)

    state = hass.states.get("automation.hello")
    assert state
    assert state.state == STATE_ON
    assert state.attributes["last_triggered"] is None

    state = hass.states.get("automation.bye")
    assert state
    assert state.state == STATE_OFF
    assert state.attributes["last_triggered"] == time

    calls = async_mock_service(hass, "test", "automation")

    # The restored-off automation must not react to its trigger.
    assert automation.is_on(hass, "automation.bye") is False

    hass.bus.async_fire("test_event_bye")
    await hass.async_block_till_done()
    assert len(calls) == 0

    # The restored-on automation does.
    assert automation.is_on(hass, "automation.hello")

    hass.bus.async_fire("test_event_hello")
    await hass.async_block_till_done()

    assert len(calls) == 1
async def test_initial_value_off(hass):
    """Test initial value off."""
    calls = async_mock_service(hass, "test", "automation")

    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "initial_state": "off",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # The automation starts disabled, so firing its trigger must be a no-op.
    assert not automation.is_on(hass, "automation.hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_initial_value_on(hass):
    """Test initial value on."""
    hass.state = CoreState.not_running
    calls = async_mock_service(hass, "test", "automation")

    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "initial_state": "on",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {
                "service": "test.automation",
                "entity_id": ["hello.world", "hello.world2"],
            },
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)
    assert automation.is_on(hass, "automation.hello")

    # Once Home Assistant has started, the enabled automation must fire.
    await hass.async_start()
    await hass.async_block_till_done()
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_initial_value_off_but_restore_on(hass):
    """Test initial value off and restored state is turned on."""
    hass.state = CoreState.not_running
    calls = async_mock_service(hass, "test", "automation")
    mock_restore_cache(hass, (State("automation.hello", STATE_ON),))

    # Fix: assert on the setup result like every sibling test does, so a
    # broken config fails here rather than via confusing downstream asserts.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "initial_state": "off",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )

    # The explicit initial_state wins over the restored ON state.
    assert not automation.is_on(hass, "automation.hello")

    await hass.async_start()
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_initial_value_on_but_restore_off(hass):
    """Test initial value on and restored state is turned off."""
    calls = async_mock_service(hass, "test", "automation")
    mock_restore_cache(hass, (State("automation.hello", STATE_OFF),))

    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "initial_state": "on",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # The explicit initial_state overrides the restored OFF state.
    assert automation.is_on(hass, "automation.hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_no_initial_value_and_restore_off(hass):
    """Test that a restored OFF state is honored when no initial_state is set."""
    calls = async_mock_service(hass, "test", "automation")
    mock_restore_cache(hass, (State("automation.hello", STATE_OFF),))

    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # Without an initial_state the restored state applies: the trigger is inert.
    assert not automation.is_on(hass, "automation.hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_automation_is_on_if_no_initial_state_or_restore(hass):
    """Test initial value is on when no initial state or restored state."""
    calls = async_mock_service(hass, "test", "automation")

    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # With neither initial_state nor a restored state, automations default to on.
    assert automation.is_on(hass, "automation.hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_automation_not_trigger_on_bootstrap(hass):
    """Test if automation is not trigger on bootstrap."""
    # Simulate Home Assistant still starting up.
    hass.state = CoreState.not_running
    calls = async_mock_service(hass, "test", "automation")
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )
    assert automation.is_on(hass, "automation.hello")
    # While still booting, the trigger must not run the action.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 0
    # After the "started" event the same trigger runs the action.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
    await hass.async_block_till_done()
    assert automation.is_on(hass, "automation.hello")
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert ["hello.world"] == calls[0].data.get(ATTR_ENTITY_ID)
async def test_automation_bad_trigger(hass, caplog):
    """Test bad trigger configuration."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "automation"},
            "action": [],
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # 'automation' is not a trigger platform, so setup must log the error.
    assert "Integration 'automation' does not provide trigger support." in caplog.text
async def test_automation_with_error_in_script(hass, caplog):
    """Test automation with an error in script."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": "test.automation", "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # The service is never registered, so running the action logs a clean
    # "Service not found" error without a traceback.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert "Service not found" in caplog.text
    assert "Traceback" not in caplog.text
async def test_automation_with_error_in_script_2(hass, caplog):
    """Test automation with an error in script."""
    config = {
        automation.DOMAIN: {
            "alias": "hello",
            "trigger": {"platform": "event", "event_type": "test_event"},
            "action": {"service": None, "entity_id": "hello.world"},
        }
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # A None service name fails validation when the action runs.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert "string value is None" in caplog.text
async def test_automation_restore_last_triggered_with_initial_state(hass):
    """Ensure last_triggered is restored, even when initial state is set."""
    time = dt_util.utcnow()
    # Restored states: "hello" has no last_triggered, "bye" and "solong" do.
    mock_restore_cache(
        hass,
        (
            State("automation.hello", STATE_ON),
            State("automation.bye", STATE_ON, {"last_triggered": time}),
            State("automation.solong", STATE_OFF, {"last_triggered": time}),
        ),
    )
    config = {
        automation.DOMAIN: [
            {
                "alias": "hello",
                "initial_state": "off",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation"},
            },
            {
                "alias": "bye",
                "initial_state": "off",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation"},
            },
            {
                "alias": "solong",
                "initial_state": "on",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {"service": "test.automation"},
            },
        ]
    }
    # Fix: assert on the setup result like the sibling tests, so a config
    # failure is reported here rather than as misleading state asserts below.
    assert await async_setup_component(hass, automation.DOMAIN, config)
    # initial_state decides on/off, but last_triggered always comes from the
    # restored state.
    state = hass.states.get("automation.hello")
    assert state
    assert state.state == STATE_OFF
    assert state.attributes["last_triggered"] is None
    state = hass.states.get("automation.bye")
    assert state
    assert state.state == STATE_OFF
    assert state.attributes["last_triggered"] == time
    state = hass.states.get("automation.solong")
    assert state
    assert state.state == STATE_ON
    assert state.attributes["last_triggered"] == time
async def test_extraction_functions(hass):
    """Test extraction functions."""
    # Two automations referencing overlapping entities and devices via
    # triggers, conditions, service data and device actions.
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: [
                {
                    "alias": "test1",
                    "trigger": {"platform": "state", "entity_id": "sensor.trigger_1"},
                    "condition": {
                        "condition": "state",
                        "entity_id": "light.condition_state",
                        "state": "on",
                    },
                    "action": [
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_both"},
                        },
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_first"},
                        },
                        {
                            "domain": "light",
                            "device_id": "device-in-both",
                            "entity_id": "light.bla",
                            "type": "turn_on",
                        },
                    ],
                },
                {
                    "alias": "test2",
                    "trigger": {
                        "platform": "device",
                        "domain": "light",
                        "type": "turned_on",
                        "entity_id": "light.trigger_2",
                        "device_id": "trigger-device-2",
                    },
                    "condition": {
                        "condition": "device",
                        "device_id": "condition-device",
                        "domain": "light",
                        "type": "is_on",
                        "entity_id": "light.bla",
                    },
                    "action": [
                        {
                            "service": "test.script",
                            "data": {"entity_id": "light.in_both"},
                        },
                        {
                            "condition": "state",
                            "entity_id": "sensor.condition",
                            "state": "100",
                        },
                        {"scene": "scene.hello"},
                        {
                            "domain": "light",
                            "device_id": "device-in-both",
                            "entity_id": "light.bla",
                            "type": "turn_on",
                        },
                        {
                            "domain": "light",
                            "device_id": "device-in-last",
                            "entity_id": "light.bla",
                            "type": "turn_on",
                        },
                    ],
                },
            ]
        },
    )
    # Entity extraction must cover triggers, conditions and action data.
    assert set(automation.automations_with_entity(hass, "light.in_both")) == {
        "automation.test1",
        "automation.test2",
    }
    assert set(automation.entities_in_automation(hass, "automation.test1")) == {
        "sensor.trigger_1",
        "light.condition_state",
        "light.in_both",
        "light.in_first",
    }
    # Device extraction must cover device triggers, conditions and actions.
    assert set(automation.automations_with_device(hass, "device-in-both")) == {
        "automation.test1",
        "automation.test2",
    }
    assert set(automation.devices_in_automation(hass, "automation.test2")) == {
        "trigger-device-2",
        "condition-device",
        "device-in-both",
        "device-in-last",
    }
async def test_logbook_humanify_automation_triggered_event(hass):
    """Test humanifying Automation Trigger event."""
    # logbook.humanify expects the recorder component to be present.
    hass.config.components.add("recorder")
    await async_setup_component(hass, automation.DOMAIN, {})
    await async_setup_component(hass, "logbook", {})
    entity_attr_cache = logbook.EntityAttributeCache(hass)
    # Two triggered events: one without and one with ATTR_SOURCE.
    event1, event2 = list(
        logbook.humanify(
            hass,
            [
                MockLazyEventPartialState(
                    EVENT_AUTOMATION_TRIGGERED,
                    {ATTR_ENTITY_ID: "automation.hello", ATTR_NAME: "Hello Automation"},
                ),
                MockLazyEventPartialState(
                    EVENT_AUTOMATION_TRIGGERED,
                    {
                        ATTR_ENTITY_ID: "automation.bye",
                        ATTR_NAME: "Bye Automation",
                        ATTR_SOURCE: "source of trigger",
                    },
                ),
            ],
            entity_attr_cache,
            {},
        )
    )
    assert event1["name"] == "Hello Automation"
    assert event1["domain"] == "automation"
    assert event1["message"] == "has been triggered"
    assert event1["entity_id"] == "automation.hello"
    # When ATTR_SOURCE is provided, it is appended to the message.
    assert event2["name"] == "Bye Automation"
    assert event2["domain"] == "automation"
    assert event2["message"] == "has been triggered by source of trigger"
    assert event2["entity_id"] == "automation.bye"
async def test_automation_variables(hass, caplog):
    """Test automation variables."""
    calls = async_mock_service(hass, "test", "automation")
    # Three automations: one renders config variables into service data, one
    # combines a variable with a trigger-data condition, and one has a
    # variable template that raises unless the event supplies `break`.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "variables": {
                        "test_var": "defined_in_config",
                        "event_type": "{{ trigger.event.event_type }}",
                    },
                    "trigger": {"platform": "event", "event_type": "test_event"},
                    "action": {
                        "service": "test.automation",
                        "data": {
                            "value": "{{ test_var }}",
                            "event_type": "{{ event_type }}",
                        },
                    },
                },
                {
                    "variables": {
                        "test_var": "defined_in_config",
                    },
                    "trigger": {"platform": "event", "event_type": "test_event_2"},
                    "condition": {
                        "condition": "template",
                        "value_template": "{{ trigger.event.data.pass_condition }}",
                    },
                    "action": {
                        "service": "test.automation",
                    },
                },
                {
                    "variables": {
                        "test_var": "{{ trigger.event.data.break + 1 }}",
                    },
                    "trigger": {"platform": "event", "event_type": "test_event_3"},
                    "action": {
                        "service": "test.automation",
                    },
                },
            ]
        },
    )
    # Variables render with access to the trigger data.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["value"] == "defined_in_config"
    assert calls[0].data["event_type"] == "test_event"
    # Condition false -> no new call; true -> one new call.
    hass.bus.async_fire("test_event_2")
    await hass.async_block_till_done()
    assert len(calls) == 1
    hass.bus.async_fire("test_event_2", {"pass_condition": True})
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert "Error rendering variables" not in caplog.text
    # A missing `break` key makes the variable template raise, skipping the
    # action and logging a render error.
    hass.bus.async_fire("test_event_3")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert "Error rendering variables" in caplog.text
    hass.bus.async_fire("test_event_3", {"break": 0})
    await hass.async_block_till_done()
    assert len(calls) == 3
async def test_automation_trigger_variables(hass, caplog):
    """Test automation trigger variables."""
    calls = async_mock_service(hass, "test", "automation")
    # Two automations exercising trigger_variables: the first inherits
    # test_var from trigger_variables, the second overrides it in variables.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "variables": {
                        "event_type": "{{ trigger.event.event_type }}",
                    },
                    "trigger_variables": {
                        "test_var": "defined_in_config",
                    },
                    "trigger": {"platform": "event", "event_type": "test_event"},
                    "action": {
                        "service": "test.automation",
                        "data": {
                            "value": "{{ test_var }}",
                            "event_type": "{{ event_type }}",
                        },
                    },
                },
                {
                    "variables": {
                        "event_type": "{{ trigger.event.event_type }}",
                        "test_var": "overridden_in_config",
                    },
                    "trigger_variables": {
                        "test_var": "defined_in_config",
                    },
                    "trigger": {"platform": "event", "event_type": "test_event_2"},
                    "action": {
                        "service": "test.automation",
                        "data": {
                            "value": "{{ test_var }}",
                            "event_type": "{{ event_type }}",
                        },
                    },
                },
            ]
        },
    )
    # trigger_variables value flows through to the action.
    hass.bus.async_fire("test_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["value"] == "defined_in_config"
    assert calls[0].data["event_type"] == "test_event"
    # `variables` takes precedence over `trigger_variables`.
    hass.bus.async_fire("test_event_2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["value"] == "overridden_in_config"
    assert calls[1].data["event_type"] == "test_event_2"
    assert "Error rendering variables" not in caplog.text
async def test_automation_bad_trigger_variables(hass, caplog):
    """Test automation trigger variables accessing hass is rejected."""
    calls = async_mock_service(hass, "test", "automation")

    config = {
        automation.DOMAIN: [
            {
                "trigger_variables": {
                    "test_var": "{{ states('foo.bar') }}",
                },
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {
                    "service": "test.automation",
                },
            },
        ]
    }
    assert await async_setup_component(hass, automation.DOMAIN, config)

    # trigger_variables render as limited templates, where states() is banned,
    # so the automation never runs its action.
    hass.bus.async_fire("test_event")
    assert "Use of 'states' is not supported in limited templates" in caplog.text

    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_blueprint_automation(hass, calls):
    """Test blueprint automation."""
    config = {
        "automation": {
            "use_blueprint": {
                "path": "test_event_service.yaml",
                "input": {
                    "trigger_event": "blueprint_event",
                    "service_to_call": "test.automation",
                },
            }
        }
    }
    assert await async_setup_component(hass, "automation", config)

    hass.bus.async_fire("blueprint_event")
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Entities referenced inside the blueprint are attributed to the automation.
    assert automation.entities_in_automation(hass, "automation.automation_0") == [
        "light.kitchen"
    ]
async def test_trigger_service(hass, calls):
    """Test the automation trigger service."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event"},
                "action": {
                    "service": "test.automation",
                    "data_template": {"trigger": "{{ trigger }}"},
                },
            }
        },
    )
    context = Context()
    # automation.trigger runs the action directly, bypassing the trigger, so
    # the `trigger` template variable only carries {"platform": None}.
    await hass.services.async_call(
        "automation",
        "trigger",
        {"entity_id": "automation.hello"},
        blocking=True,
        context=context,
    )
    assert len(calls) == 1
    assert calls[0].data.get("trigger") == {"platform": None}
    # The service call's context must propagate to the action call.
    assert calls[0].context.parent_id is context.id
| |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Role(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # API operations this model participates in (used by the client dispatcher).
    operations = [
        {
            'class': 'OapiV1',
            'type': 'create',
            'method': 'create_namespaced_role',
            'namespaced': True
        },
        {
            'class': 'OapiV1',
            'type': 'create',
            'method': 'create_role',
            'namespaced': False
        },
        {
            'class': 'OapiV1',
            'type': 'update',
            'method': 'replace_namespaced_role',
            'namespaced': True
        },
        {
            'class': 'OapiV1',
            'type': 'delete',
            'method': 'delete_namespaced_role',
            'namespaced': True
        },
        {
            'class': 'OapiV1',
            'type': 'read',
            'method': 'get_namespaced_role',
            'namespaced': True
        },
    ]

    # The key is attribute name
    # and the value is attribute type.
    swagger_types = {
        'kind': 'str',
        'api_version': 'str',
        'metadata': 'V1ObjectMeta',
        'rules': 'list[V1PolicyRule]'
    }

    # The key is attribute name
    # and the value is json key in definition.
    attribute_map = {
        'kind': 'kind',
        'api_version': 'apiVersion',
        'metadata': 'metadata',
        'rules': 'rules'
    }

    def __init__(self, kind=None, api_version=None, metadata=None, rules=None):
        """
        V1Role - a model defined in Swagger

        :param kind: REST resource kind string.
        :param api_version: versioned schema identifier.
        :param metadata: standard object metadata (V1ObjectMeta).
        :param rules: list of V1PolicyRule held by this role.
        """
        self._kind = kind
        self._api_version = api_version
        self._metadata = metadata
        self._rules = rules

    @property
    def kind(self):
        """
        Gets the kind of this V1Role.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1Role.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1Role.

        :param kind: The kind of this V1Role.
        :type: str
        """
        self._kind = kind

    @property
    def api_version(self):
        """
        Gets the api_version of this V1Role.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1Role.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1Role.

        :param api_version: The api_version of this V1Role.
        :type: str
        """
        self._api_version = api_version

    @property
    def metadata(self):
        """
        Gets the metadata of this V1Role.
        Standard object's metadata.

        :return: The metadata of this V1Role.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1Role.

        :param metadata: The metadata of this V1Role.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def rules(self):
        """
        Gets the rules of this V1Role.
        Rules holds all the PolicyRules for this Role

        :return: The rules of this V1Role.
        :rtype: list[V1PolicyRule]
        """
        return self._rules

    @rules.setter
    def rules(self, rules):
        """
        Sets the rules of this V1Role.

        :param rules: The rules of this V1Role.
        :type: list[V1PolicyRule]
        """
        self._rules = rules

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # dict.items() works on Python 2 and 3, so six.iteritems is not needed.
        for attr in V1Role.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x
                                for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fix: guard on the type first.  The generated code compared
        self.__dict__ to other.__dict__ unconditionally, which raised
        AttributeError for objects without a __dict__ and considered
        unrelated model types with identical fields equal.
        """
        if not isinstance(other, V1Role):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
# --input_dir=/home/torch/cleverhans/toolkit/dataset/images --output_dir=output --max_epsilon=16
from os.path import join
import numpy as np
from os import listdir
import argparse
from time import time
from keras.layers import Input, Dense, Reshape, Lambda, Add, Activation
from keras.models import Model
import keras.backend as K
from keras.constraints import Constraint
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.callbacks import LambdaCallback, Callback
from keras.models import load_model
from PIL import Image
import json
import tensorflow as tf
# Attack/search hyper-parameters.
NUM_CLASSES = 1001  # model outputs; labels are offset by 1 below when 1001 (extra background class)
DATA_SIZE = 120  # dummy samples per image used to fit the adversarial layer
BATCH_SIZE = 12  # mini-batch size for model.fit on the dummy samples
GOOD_ACC = 0.96  # target-class accuracy at which per-image training stops early
LR = 0.2  # Adam learning rate for the adversarial layer
MIRROR = True  # randomly mirror inputs inside the rotation layer
TARGET_TIME = 4.5  # sec/image
MIN_EPOCH = 1
MAX_EPOCH = 5
WARM_UP = 5  # images seen before nb_epoch starts being calibrated against TARGET_TIME
# INCEPTION_PATH = "inception_v3_weights_tf_dim_ordering_tf_kernels.h5"
INCEPTION_PATH = "inception_v3_adv_from_tf.h5"
SIDE = 299  # Inception v3 input resolution
SHAPE = (3, SIDE, SIDE) if K.image_data_format() == "channels_first" else (SIDE, SIDE, 3)
VERBOSE = True
def preprocess_input(x):
    """Map raw pixel values in [0, 255] to the [-1, 1] range the model expects.

    Uses in-place augmented assignment, so a numpy array argument is modified
    as well as returned.
    """
    x /= 255.
    x -= 0.5
    x *= 2.
    return x
def show_weights(w):
    """Display a flattened weight vector as an image (debug helper)."""
    pixels = np.clip((w.reshape(SHAPE) / 2 + 0.5) * 255, 0, 255)
    Image.fromarray(np.array(pixels, dtype=np.uint8)).show()
def _finalize_image_batch(images):
    """Stack loaded images, apply Inception preprocessing, fix data layout."""
    batch = np.stack(images)
    batch = preprocess_input(batch)
    if K.image_data_format() == "channels_first":
        batch = np.rollaxis(batch, 3, 1)
    return batch


def load_images(input_dir, batch_size):
    """Yield (batch, filenames) pairs of preprocessed PNGs from input_dir.

    The final batch may be smaller than batch_size.  The batch-finalization
    code was duplicated for the full-batch and trailing-batch cases; it is
    now factored into _finalize_image_batch.
    """
    images, batch_names = [], []
    file_names = [filename for filename in listdir(input_dir) if filename.endswith(".png")]
    for f in file_names:
        img = Image.open(join(input_dir, f))
        img = img.resize((SIDE, SIDE))
        images.append(np.array(img, dtype=K.floatx()))
        batch_names.append(f)
        if len(batch_names) == batch_size:
            yield _finalize_image_batch(images), batch_names
            images, batch_names = [], []
    if batch_names:
        yield _finalize_image_batch(images), batch_names
class TerminateOnGoodAccuracy(Callback):
    """Callback that terminates training when a good accuracy is reached."""

    def __init__(self, limit_acc=0.9):
        self.limit_acc = limit_acc
        super(TerminateOnGoodAccuracy, self).__init__()

    def on_batch_end(self, batch, logs=None):
        acc = (logs or {}).get("acc")
        if acc is None:
            return
        if acc >= self.limit_acc:
            print("Batch %d: accuracy %0.2f reached, terminating training" % (batch, acc))
            self.model.stop_training = True
# Debug helper: a Keras callback that prints loss/accuracy after every batch.
batch_print_callback = LambdaCallback(
    on_batch_end=lambda batch, logs: print(batch, logs["loss"], logs["acc"]))
def process_batch(model, picture_layer, adversarial_layer, base_model, batch, seen, matched, ts, nb_epoch):
    """Craft an adversarial image for every image in `batch`.

    For each image, the base model's second most likely class is used as the
    attack target, the perturbation weights are reset to zero, and the
    wrapper `model` is trained until TerminateOnGoodAccuracy stops it.  A
    running sec/image average is kept and `nb_epoch` is nudged to keep the
    average near TARGET_TIME.

    Returns the tuple (adversarials, seen, matched, nb_epoch).
    """
    adversarials = []
    for d in batch:
        pred = base_model.predict(d[None, ...])[0]
        actual = pred.argsort()[-1]
        target = pred.argsort()[-2]
        # With 1001 outputs, index 0 is an extra class, so the human-readable
        # ImageNet label table is offset by one.
        if NUM_CLASSES == 1001:
            actual_imnet = actual - 1
            target_imnet = target - 1
        else:
            actual_imnet = actual
            target_imnet = target
        if VERBOSE:
            print("Actual {}, label: {} | Target {}, label: {}".format(actual, IMAGENET_LABELS[actual_imnet],
                                                                       target, IMAGENET_LABELS[target_imnet]))
        # One dummy input row per training sample; the real image lives in the
        # frozen "picture" layer weights, the perturbation in "adversarial".
        dummy = np.ones((DATA_SIZE, 1))
        target = [target] * DATA_SIZE
        target_cat = to_categorical(target, NUM_CLASSES)
        picture_layer.set_weights([d.reshape((1, -1))])
        adversarial_layer.set_weights([np.zeros((1, SIDE * SIDE * 3))])
        model.fit(x=dummy, y=target_cat, epochs=nb_epoch,
                  batch_size=BATCH_SIZE,
                  callbacks=[TerminateOnGoodAccuracy(GOOD_ACC)],
                  verbose=0,
                  )
        # The learned perturbation is the adversarial layer's weight vector.
        w = model.get_layer("adversarial").get_weights()[0]
        adversarials.append(d + w.reshape(SHAPE))
        seen += 1
        te = time()
        elapsed_time = (te - ts)
        average_time = elapsed_time / seen
        print("Runtime: {} sec/image. {} images seen.".format(average_time, seen))
        # Simple controller: after WARM_UP images, nudge nb_epoch by one so
        # the running sec/image average tracks TARGET_TIME.
        if seen > WARM_UP:
            if average_time > TARGET_TIME:
                nb_epoch = max(MIN_EPOCH, nb_epoch - 1)
                print("Decrease nb_epoch to", nb_epoch)
            if average_time < TARGET_TIME:
                nb_epoch = min(MAX_EPOCH, nb_epoch + 1)
                print("Increase nb_epoch to", nb_epoch)
        if VERBOSE:
            # NOTE(review): the matched/seen success accounting only runs when
            # VERBOSE is True -- confirm this is intended.
            diff = (adversarials[-1] - d) / 2 * 255
            print("min {}, max {}".format(diff.min(), diff.max()))
            pred = model.predict(dummy[:1])[0]
            topk = pred.argsort()[-10:][::-1]
            if actual != topk[0]:
                matched += 1
            print("Running accuracy {:0.3f}. Target {}, Predicted {}".format(matched / seen, target[0], topk[0]))
            if NUM_CLASSES == 1001:
                print([IMAGENET_LABELS[i][:15] for i in topk - 1])
            else:
                print([IMAGENET_LABELS[i][:15] for i in topk])
    return adversarials, seen, matched, nb_epoch
def save_images(adversarials, batch_names, output_dir):
    """Write each adversarial image to output_dir as PNG, undoing the [-1, 1] scaling."""
    for perturbed, filename in zip(adversarials, batch_names):
        pixels = np.clip((perturbed.reshape(SHAPE) + 1.0) / 2 * 255, 0, 255)
        Image.fromarray(pixels.astype(np.uint8)).save(join(output_dir, filename), format="PNG")
def rotation_layer(rotation_range, mirror=MIRROR):
    """Return a Keras Lambda layer applying random mirroring and rotation.

    rotation_range is the maximum absolute rotation in degrees; when mirror
    is True the whole batch is flipped horizontally with probability 0.5.
    """
    def rotation(x):
        if mirror:
            # Reverse the width axis; its position depends on the data format.
            r = x[:, :, :, ::-1] if K.image_data_format() == "channels_first" else x[:, :, ::-1, :]
            # One coin flip for the entire batch, not per sample.
            x = K.switch(K.greater_equal(K.random_uniform((1,), 0, 1)[0], 0.5), lambda: r, x)
        # Per-sample random angle, converted from degrees to radians.
        ang = np.pi / 180 * K.random_uniform(K.shape(x)[:1], -rotation_range, rotation_range)
        x = tf.contrib.image.rotate(x, ang)
        return x
    def rotation_output_shape(input_shape):
        # Rotation does not change the tensor shape.
        return input_shape
    return Lambda(rotation, output_shape=rotation_output_shape)
class MinMax(Constraint):
    """Keras weight constraint clamping every weight to [-eps_value, eps_value]."""

    def __init__(self, eps_value=0.0):
        self.eps_value = eps_value

    def __call__(self, w):
        bound = self.eps_value
        return K.clip(w, -bound, bound)
def clip(x):
    # Keep network inputs inside the valid [-1, 1] range after perturbation.
    return K.clip(x, -1, 1)
def create_model(epsilon, rotation_range):
    """Build the trainable attack model around a frozen Inception classifier.

    The trainable "adversarial" Dense layer holds the perturbation (clipped
    to +/- epsilon via the MinMax constraint); the frozen "picture" Dense
    layer holds the flattened source image.  Their sum is reshaped, randomly
    rotated/mirrored, clipped to [-1, 1] and fed to the base model.

    Returns (model, picture_layer, adversarial_layer, base_model).
    """
    # Convert epsilon from pixel units [0, 255] to the [-1, 1] input scale.
    eps_ = epsilon / 255. * 2.
    base_model = load_model(INCEPTION_PATH)
    base_model.trainable = False
    dummy_input = Input(shape=(1,))
    x0 = Dense(3 * SIDE * SIDE, use_bias=False,
               kernel_initializer="zero",
               kernel_constraint=MinMax(eps_),
               name="adversarial")(dummy_input)
    x1 = Dense(3 * SIDE * SIDE, use_bias=False,
               kernel_initializer="zero",
               trainable=False,
               name="picture")(dummy_input)
    x = Add()([x0, x1])
    x = Reshape(SHAPE)(x)
    x = rotation_layer(rotation_range)(x)
    x = Activation(clip)(x)
    model_output = base_model(x)
    model = Model(inputs=dummy_input, outputs=model_output)
    optimizer = Adam(lr=LR)
    model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
    picture_layer = model.get_layer("picture")
    adversarial_layer = model.get_layer("adversarial")
    return model, picture_layer, adversarial_layer, base_model
if __name__ == "__main__":
    ts = time()
    parser = argparse.ArgumentParser()
    # NOTE(review): `default` is meaningless alongside required=True.
    parser.add_argument("--input_dir",
                        required=True,
                        default="input_dir")
    parser.add_argument("--output_dir",
                        required=True,
                        default="output_dir")
    parser.add_argument("--max_epsilon",
                        required=True,
                        type=int,
                        default=10)
    args = parser.parse_args()
    INPUT_DIRECTORY = args.input_dir
    OUTPUT_DIRECTORY = args.output_dir
    MAX_EPSILON = args.max_epsilon
    with open("imagenet.json") as f:
        IMAGENET_LABELS = json.load(f)
    # Bucket the epsilon budget into a rotation range: small budgets get 15
    # degrees, larger ones (eps > 8) get 25 degrees.
    rotation_range = [15, 15, 25, 25][max(np.digitize([MAX_EPSILON], [4, 8, 12, 16])[0] - 1, 0)]
    print("Epsilon {}. Rotation range {}. Mirror {}".format(MAX_EPSILON, rotation_range, MIRROR))
    model, picture_layer, adversarial_layer, base_model = create_model(MAX_EPSILON, rotation_range)
    seen, matched = 0, 0
    nb_epoch = MAX_EPOCH
    # Images are loaded 30 files at a time and written out batch by batch,
    # so partial progress survives if the run is cut short.
    for batch, batch_names in load_images(INPUT_DIRECTORY, 30):
        adversarials, seen, matched, nb_epoch = process_batch(model, picture_layer, adversarial_layer,
                                                              base_model, batch, seen, matched, ts, nb_epoch)
        save_images(adversarials, batch_names, OUTPUT_DIRECTORY)
    te = time()
    elapsed_time = (te - ts)
    print("Total time: {} sec. {} images seen.".format(elapsed_time, seen))
| |
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import logging
import optparse
import os
import re
import subprocess
import sys
import time
from pylib import android_commands
from pylib import constants
from pylib import device_settings
from pylib.device import device_blacklist
from pylib.device import device_errors
from pylib.device import device_utils
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT,
'third_party', 'android_testrunner'))
import errors
def KillHostHeartbeat():
ps = subprocess.Popen(['ps', 'aux'], stdout = subprocess.PIPE)
stdout, _ = ps.communicate()
matches = re.findall('\\n.*host_heartbeat.*', stdout)
for match in matches:
print 'An instance of host heart beart running... will kill'
pid = re.findall('(\d+)', match)[0]
subprocess.call(['kill', str(pid)])
def LaunchHostHeartbeat():
# Kill if existing host_heartbeat
KillHostHeartbeat()
# Launch a new host_heartbeat
print 'Spawning host heartbeat...'
subprocess.Popen([os.path.join(constants.DIR_SOURCE_ROOT,
'build/android/host_heartbeat.py')])
def PushAndLaunchAdbReboot(devices, target):
  """Pushes and launches the adb_reboot binary on the device.

  Arguments:
    devices: The list of serial numbers of the device to which the
             adb_reboot binary should be pushed.
    target : The build target (example, Debug or Release) which helps in
             locating the adb_reboot binary.
  """
  for device_serial in devices:
    print 'Will push and launch adb_reboot on %s' % device_serial
    device = device_utils.DeviceUtils(device_serial)
    # Kill if adb_reboot is already running.
    try:
      # Don't try to kill adb_reboot more than once. We don't expect it to be
      # running at all.
      device.KillAll('adb_reboot', blocking=True, timeout=2, retries=0)
    except device_errors.CommandFailedError:
      # We can safely ignore the exception because we don't expect adb_reboot
      # to be running.
      pass
    # Push adb_reboot
    print '  Pushing adb_reboot ...'
    adb_reboot = os.path.join(constants.DIR_SOURCE_ROOT,
                              'out/%s/adb_reboot' % target)
    device.PushChangedFiles(adb_reboot, '/data/local/tmp/')
    # Launch adb_reboot from its pushed location on the device.
    print '  Launching adb_reboot ...'
    device.old_interface.GetAndroidToolStatusAndOutput(
        '/data/local/tmp/adb_reboot')
  # Restart the host-side heartbeat that pairs with adb_reboot.
  LaunchHostHeartbeat()
def _ConfigureLocalProperties(device, is_perf):
  """Set standard readonly testing device properties prior to reboot."""
  # Properties that put the device into a quiet, test-friendly state.
  local_props = [
      'ro.monkey=1',
      'ro.test_harness=1',
      'ro.audio.silent=1',
      'ro.setupwizard.mode=DISABLED',
      ]
  if not is_perf:
    # Java asserts and JNI checking add overhead, so skip them on perf bots.
    local_props.append('%s=all' % android_commands.JAVA_ASSERT_PROPERTY)
    local_props.append('debug.checkjni=1')
  try:
    device.WriteFile(
        constants.DEVICE_LOCAL_PROPERTIES_PATH,
        '\n'.join(local_props), as_root=True)
    # Android will not respect the local props file if it is world writable.
    device.RunShellCommand(
        'chmod 644 %s' % constants.DEVICE_LOCAL_PROPERTIES_PATH,
        as_root=True)
  except device_errors.CommandFailedError as e:
    # Best effort: log and continue provisioning if the props can't be set.
    logging.warning(str(e))

# LOCAL_PROPERTIES_PATH = '/data/local.prop'
def WipeDeviceData(device):
  """Wipes data from device, keeping only the adb_keys for authorization.

  After wiping data on a device that has been authorized, adb can still
  communicate with the device, but after reboot the device will need to be
  re-authorized because the adb keys file is stored in /data/misc/adb/.
  Thus, adb_keys file is rewritten so the device does not need to be
  re-authorized.

  Arguments:
    device: the device to wipe
  """
  device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
  if device_authorized:
    # Save the current adb keys before the wipe so they can be restored.
    adb_keys = device.RunShellCommand('cat %s' % constants.ADB_KEYS_FILE,
                                      as_root=True)
  device.RunShellCommand('wipe data', as_root=True)
  if device_authorized:
    # Recreate the keys directory with correct SELinux context, then write
    # the saved keys back one line at a time.
    path_list = constants.ADB_KEYS_FILE.split('/')
    dir_path = '/'.join(path_list[:len(path_list)-1])
    device.RunShellCommand('mkdir -p %s' % dir_path, as_root=True)
    device.RunShellCommand('restorecon %s' % dir_path, as_root=True)
    # NOTE(review): keys are echoed back unquoted; assumes key lines contain
    # no shell metacharacters -- confirm RunShellCommand quoting semantics.
    device.RunShellCommand('echo %s > %s' %
                           (adb_keys[0], constants.ADB_KEYS_FILE), as_root=True)
    for adb_key in adb_keys[1:]:
      device.RunShellCommand(
          'echo %s >> %s' % (adb_key, constants.ADB_KEYS_FILE), as_root=True)
    device.RunShellCommand('restorecon %s' % constants.ADB_KEYS_FILE,
                           as_root=True)
def WipeDevicesIfPossible(devices):
  """Wipes data on every attached device whose adb can run as root.

  Devices that cannot be rooted are skipped. All wiped devices are then
  rebooted in parallel and each one is waited on until fully booted.

  Arguments:
    devices: list of device serial numbers.
  """
  devices_to_reboot = []
  for device_serial in devices:
    device = device_utils.DeviceUtils(device_serial)
    if not device.old_interface.EnableAdbRoot():
      # Wiping requires root; leave non-rootable devices untouched.
      continue
    WipeDeviceData(device)
    devices_to_reboot.append(device)

  if devices_to_reboot:
    try:
      device_utils.DeviceUtils.parallel(devices_to_reboot).Reboot(True)
    except errors.DeviceUnresponsiveError:
      pass
    # BUG FIX: the original loop iterated devices_to_reboot but called
    # WaitUntilFullyBooted on the stale `device` binding left over from the
    # loop above, so it waited on the last rooted device every iteration.
    for rebooted_device in devices_to_reboot:
      rebooted_device.WaitUntilFullyBooted(timeout=90)
def ProvisionDevice(device_serial, is_perf, disable_location):
  """Provisions one device: root, props, content settings, charge, clock.

  Arguments:
    device_serial: serial number of the device to provision.
    is_perf: True when running on a perf builder; disables network and
        waits for the battery to charge.
    disable_location: True to turn Google location services off.
  """
  device = device_utils.DeviceUtils(device_serial)
  device.old_interface.EnableAdbRoot()
  _ConfigureLocalProperties(device, is_perf)
  device_settings_map = device_settings.DETERMINISTIC_DEVICE_SETTINGS
  if disable_location:
    device_settings_map.update(device_settings.DISABLE_LOCATION_SETTING)
  else:
    device_settings_map.update(device_settings.ENABLE_LOCATION_SETTING)
  device_settings.ConfigureContentSettingsDict(device, device_settings_map)
  device_settings.SetLockScreenSettings(device)
  if is_perf:
    # TODO(tonyg): We eventually want network on. However, currently radios
    # can cause perfbots to drain faster than they charge.
    device_settings.ConfigureContentSettingsDict(
        device, device_settings.NETWORK_DISABLED_SETTINGS)
    # Some perf bots run benchmarks with USB charging disabled which leads
    # to gradual draining of the battery. We must wait for a full charge
    # before starting a run in order to keep the devices online.
    try:
      battery_info = device.old_interface.GetBatteryInfo()
    except Exception as e:
      # Missing battery info defaults 'level' to 100 below, skipping the wait.
      battery_info = {}
      logging.error('Unable to obtain battery info for %s, %s',
                    device_serial, e)

    # Poll once a minute until the battery reaches at least 95%.
    while int(battery_info.get('level', 100)) < 95:
      if not device.old_interface.IsDeviceCharging():
        if device.old_interface.CanControlUsbCharging():
          device.old_interface.EnableUsbCharging()
        else:
          # Cannot force charging; give up rather than loop forever.
          logging.error('Device is not charging')
          break
      logging.info('Waiting for device to charge. Current level=%s',
                   battery_info.get('level', 0))
      time.sleep(60)
      battery_info = device.old_interface.GetBatteryInfo()
  # Sync the device clock to the host ('-u' = UTC, time.time() = epoch secs).
  device.RunShellCommand('date -u %f' % time.time(), as_root=True)
def ProvisionDevices(options):
  """Wipes and provisions all attached devices (or just options.device).

  Devices that time out are blacklisted and dropped; raises NoDevicesError
  when no usable device remains.
  """
  is_perf = 'perf' in os.environ.get('BUILDBOT_BUILDERNAME', '').lower()
  # TODO(jbudorick): Parallelize provisioning of all attached devices after
  # switching from AndroidCommands.
  if options.device is not None:
    devices = [options.device]
  else:
    devices = android_commands.GetAttachedDevices()

  # Wipe devices (unless --skip-wipe was specified)
  if not options.skip_wipe:
    WipeDevicesIfPossible(devices)

  bad_devices = []
  # Provision devices
  for device_serial in devices:
    try:
      ProvisionDevice(device_serial, is_perf, options.disable_location)
    except errors.WaitForResponseTimedOutError:
      logging.info('Timed out waiting for device %s. Adding to blacklist.',
                   device_serial)
      bad_devices.append(device_serial)
      # Device black list is reset by bb_device_status_check.py per build.
      device_blacklist.ExtendBlacklist([device_serial])
  devices = [device for device in devices if device not in bad_devices]

  # If there are no good devices
  if not devices:
    raise device_errors.NoDevicesError
  try:
    device_utils.DeviceUtils.parallel(devices).Reboot(True)
  except errors.DeviceUnresponsiveError:
    pass

  # Second pass: wait for reboot to finish and dump properties for the log.
  bad_devices = []
  for device_serial in devices:
    device = device_utils.DeviceUtils(device_serial)
    try:
      device.WaitUntilFullyBooted(timeout=90)
      (_, prop) = device.old_interface.GetShellCommandStatusAndOutput('getprop')
      for p in prop:
        print p
    except errors.WaitForResponseTimedOutError:
      logging.info('Timed out waiting for device %s. Adding to blacklist.',
                   device_serial)
      bad_devices.append(device_serial)
      # Device black list is reset by bb_device_status_check.py per build.
      device_blacklist.ExtendBlacklist([device_serial])
  devices = [device for device in devices if device not in bad_devices]

  # If there are no good devices
  if not devices:
    raise device_errors.NoDevicesError

  if options.auto_reconnect:
    PushAndLaunchAdbReboot(devices, options.target)
def main(argv):
  """Parses command-line flags and provisions devices.

  Returns 1 on unused positional arguments; None (success) otherwise.
  """
  logging.basicConfig(level=logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option('--skip-wipe', action='store_true', default=False,
                    help="Don't wipe device data during provisioning.")
  parser.add_option('--disable-location', action='store_true', default=False,
                    help="Disallow Google location services on devices.")
  parser.add_option('-d', '--device',
                    help='The serial number of the device to be provisioned')
  parser.add_option('-t', '--target', default='Debug', help='The build target')
  parser.add_option(
      '-r', '--auto-reconnect', action='store_true',
      help='Push binary which will reboot the device on adb disconnections.')
  options, args = parser.parse_args(argv[1:])
  constants.SetBuildType(options.target)
  if args:
    # Positional arguments are not supported; complain and bail out.
    print >> sys.stderr, 'Unused args %s' % args
    return 1

  ProvisionDevices(options)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
from oslo_config import cfg
from oslo_log import log
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import service
from oslo_utils import importutils
from manila import context
from manila import db
from manila import exception
from manila.i18n import _LE
from manila.i18n import _LI
from manila.i18n import _LW
from manila import rpc
from manila import version
from manila import wsgi
LOG = log.getLogger(__name__)

# Tunables shared by all manila services; registered on the global CONF below.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='Seconds between nodes reporting state to datastore.'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='Seconds between running periodic tasks.'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='Range of seconds to randomly delay when starting the '
                    'periodic task scheduler to reduce stampeding. '
                    '(Disable by setting to 0)'),
    cfg.StrOpt('osapi_share_listen',
               default="::",
               help='IP address for OpenStack Share API to listen on.'),
    cfg.IntOpt('osapi_share_listen_port',
               default=8786,
               help='Port for OpenStack Share API to listen on.'),
    cfg.IntOpt('osapi_share_workers',
               default=1,
               help='Number of workers for OpenStack Share API service.'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)
class Service(service.Service):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table.
    """

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        """Import and instantiate the manager; timers start later in start().

        :param manager: dotted import path of the manager class.
        """
        super(Service, self).__init__()
        # RPC must be usable before any server or endpoint is created.
        if not rpc.initialized():
            rpc.init(CONF)
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.saved_args, self.saved_kwargs = args, kwargs
        # Looping-call timers created in start(); stopped/cleared in stop().
        self.timers = []

    def start(self):
        """Register in the DB, start the RPC server and periodic timers."""
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            # First start on this host/binary: create the services row.
            self._create_service_ref(ctxt)

        LOG.debug("Creating RPC server for service %s.", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        self.rpcserver = rpc.get_server(target, endpoints)
        self.rpcserver.start()

        self.manager.init_host()
        if self.report_interval:
            # Liveness heartbeat into the services table.
            pulse = loopingcall.FixedIntervalLoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                # Random initial delay spreads periodic-task stampedes
                # across services (see the periodic_fuzzy_delay option).
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)

    def _create_service_ref(self, context):
        """Insert a fresh row for this service into the services table."""
        zone = CONF.storage_availability_zone
        service_ref = db.service_create(context,
                                        {'host': self.host,
                                         'binary': self.binary,
                                         'topic': self.topic,
                                         'report_count': 0,
                                         'availability_zone': zone})
        self.service_id = service_ref['id']

    def __getattr__(self, key):
        # Delegate unknown attribute lookups to the manager instance.
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None, service_name=None):
        """Instantiates class and passes back application object.

        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'manila-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_interval: defaults to CONF.periodic_interval
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        """
        if not host:
            host = CONF.host
        if not binary:
            # Basename of the file at the bottom of the call stack,
            # i.e. the executable that started this process.
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary
        if not manager:
            subtopic = topic.rpartition('manila-')[2]
            manager = CONF.get('%s_manager' % subtopic, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_interval is None:
            periodic_interval = CONF.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          service_name=service_name)

        return service_obj

    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            db.service_destroy(context.get_admin_context(), self.service_id)
        except exception.NotFound:
            LOG.warn(_LW('Service killed that has no database entry'))

    def stop(self):
        """Stop the RPC server and all timers, ignoring shutdown errors."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.rpcserver.stop()
        except Exception:
            pass
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []
        super(Service, self).stop()

    def wait(self):
        """Block until every timer has finished; errors are ignored."""
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = CONF.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                # Row was deleted out from under us; recreate and re-read it.
                LOG.debug('The service database object disappeared, '
                          'Recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                              self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('model server went away'))
class WSGIService(service.ServiceBase):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None):
        """Initialize, but do not start the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None
        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        if not rpc.initialized():
            rpc.init(CONF)
        self.app = self.loader.load_app(name)
        # Listen address/port/worker count come from '<name>_listen*' options.
        self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(CONF, '%s_listen_port' % name, 0)
        self.workers = getattr(CONF, '%s_workers' % name, None)
        if self.workers is not None and self.workers < 1:
            # BUG FIX: the condition rejects values below 1 (requires >= 1),
            # so the message must say "greater than 0", not "greater than 1".
            # Also use LOG.warning: LOG.warn is a deprecated alias.
            LOG.warning(
                _LW("Value of config option %(name)s_workers must be integer "
                    "greater than 0. Input value ignored.") % {'name': name})
            # Reset workers to default
            self.workers = None
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port)

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Use the service name to look up a Manager subclass from the
        configuration and initialize an instance. If no class name
        is configured, just return None.

        :returns: a Manager instance, or None.
        """
        fl = '%s_manager' % self.name
        if fl not in CONF:
            return None

        manager_class_name = CONF.get(fl, None)
        if not manager_class_name:
            return None

        manager_class = importutils.import_class(manager_class_name)
        return manager_class()

    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None
        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        self.port = self.server.port

    def stop(self):
        """Stop serving this API.

        :returns: None
        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None
        """
        self.server.wait()

    def reset(self):
        """Reset server greenpool size to default.

        :returns: None
        """
        self.server.reset()
def process_launcher():
    """Return a new oslo.service ProcessLauncher bound to the global CONF."""
    launcher = service.ProcessLauncher(CONF)
    return launcher
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
# Module-level launcher shared by serve()/wait(); created on first serve().
_launcher = None


def serve(server, workers=None):
    """Launch *server* under the global launcher.

    :param server: service object to launch.
    :param workers: optional worker-process count passed to the launcher.
    :raises RuntimeError: if called more than once.
    """
    global _launcher
    if _launcher:
        # BUG FIX: the original wrapped this message in _(), but this module
        # only imports _LE/_LI/_LW from manila.i18n -- `_` is undefined and
        # the raise path would have died with NameError instead.
        raise RuntimeError('serve() can only be called once')

    _launcher = service.launch(CONF, server, workers=workers)
def wait():
    """Log the effective configuration (masking secrets) and block until the
    global launcher finishes; cleans up RPC on the way out."""
    LOG.debug('Full set of CONF:')
    for flag in CONF:
        flag_get = CONF.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        secret = "_password" in flag or "_key" in flag
        if not secret and flag == "sql_connection" and "mysql:" in flag_get:
            secret = True
        if secret:
            LOG.debug('%(flag)s : FLAG SET ', {"flag": flag})
        else:
            LOG.debug('%(flag)s : %(flag_get)s',
                      {"flag": flag, "flag_get": flag_get})
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        # Ctrl-C: ask the launcher to stop, then fall through to cleanup.
        _launcher.stop()
    rpc.cleanup()
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-14 19:13:49 by Brian McFee <brian.mcfee@nyu.edu>
'''Unit tests for time and frequency conversion'''
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except KeyError:
pass
import matplotlib
matplotlib.use('Agg')
import librosa
import numpy as np
from nose.tools import raises, eq_
def test_frames_to_samples():
    """frames_to_samples: samples = frames * hop_length (+ n_fft // 2 if set)."""

    def __test(x, y, hop_length, n_fft):
        y_test = librosa.frames_to_samples(x,
                                           hop_length=hop_length,
                                           n_fft=n_fft)
        assert np.allclose(y, y_test)

    x = np.arange(2)
    for hop_length in [512, 1024]:
        for n_fft in [None, 1024]:
            y = x * hop_length
            if n_fft is not None:
                y += n_fft // 2
            yield __test, x, y, hop_length, n_fft
def test_samples_to_frames():
    """samples_to_frames should invert frames_to_samples on exact multiples."""

    def __test(x, y, hop_length, n_fft):
        y_test = librosa.samples_to_frames(x,
                                           hop_length=hop_length,
                                           n_fft=n_fft)
        assert np.allclose(y, y_test)

    x = np.arange(2)
    for hop_length in [512, 1024]:
        for n_fft in [None, 1024]:
            y = x * hop_length
            if n_fft is not None:
                y += n_fft // 2
            # Arguments are swapped relative to test_frames_to_samples:
            # samples `y` go in, frame indices `x` are expected out.
            yield __test, y, x, hop_length, n_fft
def test_frames_to_time():
    """frames_to_time should map frame indices back to whole-second times."""

    def __test(sr, hop_length, n_fft):
        # Generate frames at times 0s, 1s, 2s
        frames = np.arange(3) * sr // hop_length
        if n_fft:
            # Shift by n_fft // 2 samples worth of frames -- presumably
            # compensating the centering offset frames_to_time adds back
            # (cf. test_frames_to_samples).
            frames -= n_fft // (2 * hop_length)

        times = librosa.frames_to_time(frames,
                                       sr=sr,
                                       hop_length=hop_length,
                                       n_fft=n_fft)

        # we need to be within one frame
        assert np.all(np.abs(times - np.asarray([0, 1, 2])) * sr
                      < hop_length)

    for sr in [22050, 44100]:
        for hop_length in [256, 512]:
            for n_fft in [None, 2048]:
                yield __test, sr, hop_length, n_fft
def test_time_to_samples():
    """time_to_samples should scale seconds by the sample rate."""

    def __test(sr):
        expected = [0, sr, 2 * sr]
        computed = librosa.time_to_samples([0, 1, 2], sr=sr)
        assert np.allclose(computed, expected)

    for sr in [22050, 44100]:
        yield __test, sr
def test_samples_to_time():
    """samples_to_time should divide sample indices by the sample rate."""

    def __test(sr):
        expected = [0, 1, 2]
        computed = librosa.samples_to_time([0, sr, 2 * sr], sr=sr)
        assert np.allclose(computed, expected)

    for sr in [22050, 44100]:
        yield __test, sr
def test_time_to_frames():
    """Exercise time_to_frames over several sr / hop_length / n_fft combos."""

    def __test(sr, hop_length, n_fft):
        # Generate frames at times 0s, 1s, 2s
        times = np.arange(3)

        frames = librosa.time_to_frames(times,
                                        sr=sr,
                                        hop_length=hop_length,
                                        n_fft=n_fft)

        if n_fft:
            frames -= n_fft // (2 * hop_length)

        # NOTE(review): this assertion never references `frames` -- `times`
        # is still exactly [0, 1, 2], so the check is vacuously true and the
        # test only verifies that time_to_frames runs without raising.
        # It should assert on the computed `frames` instead (compare with
        # test_frames_to_time above).
        # we need to be within one frame
        assert np.all(np.abs(times - np.asarray([0, 1, 2])) * sr
                      < hop_length)

    for sr in [22050, 44100]:
        for hop_length in [256, 512]:
            for n_fft in [None, 2048]:
                yield __test, sr, hop_length, n_fft
def test_octs_to_hz():
    """Octaves 1-4 give the A pitches 55/110/220/440 Hz, scaled by A440."""

    def __test(a440):
        # Expected A pitches, rescaled for the alternate tuning reference.
        freq = np.asarray([55, 110, 220, 440]) * (float(a440) / 440.0)
        freq_out = librosa.octs_to_hz([1, 2, 3, 4], A440=a440)

        assert np.allclose(freq, freq_out)

    for a440 in [415, 430, 435, 440, 466]:
        yield __test, a440
def test_hz_to_octs():
    """Inverse of test_octs_to_hz: A pitches map back to octaves 1-4."""

    def __test(a440):
        # A pitches rescaled for the alternate tuning reference.
        freq = np.asarray([55, 110, 220, 440]) * (float(a440) / 440.0)
        octs = [1, 2, 3, 4]
        oct_out = librosa.hz_to_octs(freq, A440=a440)

        assert np.allclose(octs, oct_out)

    for a440 in [415, 430, 435, 440, 466]:
        yield __test, a440
def test_note_to_midi():
    """Spell C notes with accidental/octave/tuning suffixes, check MIDI nums."""

    def __test(tuning, accidental, octave, round_midi):
        note = 'C{:s}'.format(accidental)

        if octave is not None:
            note = '{:s}{:d}'.format(note, octave)
        else:
            octave = 0

        if tuning is not None:
            note = '{:s}{:+d}'.format(note, tuning)
        else:
            tuning = 0

        # C in octave N is MIDI 12 * (N + 1); tuning suffix is in cents.
        midi_true = 12 * (octave + 1) + tuning * 0.01

        if accidental == '#':
            midi_true += 1
        elif accidental in list('b!'):
            midi_true -= 1

        midi = librosa.note_to_midi(note, round_midi=round_midi)
        if round_midi:
            midi_true = np.round(midi_true)
        eq_(midi, midi_true)

        # A scalar wrapped in a list should behave identically.
        midi = librosa.note_to_midi([note], round_midi=round_midi)
        eq_(midi[0], midi_true)

    @raises(librosa.ParameterError)
    def __test_fail():
        librosa.note_to_midi('does not pass')

    for tuning in [None, -25, 0, 25]:
        for octave in [None, 1, 2, 3]:
            if octave is None and tuning is not None:
                continue
            for accidental in ['', '#', 'b', '!']:
                for round_midi in [False, True]:
                    yield __test, tuning, accidental, octave, round_midi

    yield __test_fail
def test_note_to_hz():
    """Spell A notes with accidental/octave/tuning suffixes, check frequencies
    against the A440-relative equal-tempered formula."""

    def __test(tuning, accidental, octave, round_midi):
        note = 'A{:s}'.format(accidental)

        if octave is not None:
            note = '{:s}{:d}'.format(note, octave)
        else:
            octave = 0

        if tuning is not None:
            note = '{:s}{:+d}'.format(note, tuning)
        else:
            tuning = 0

        if round_midi:
            # Rounding to the nearest semitone zeroes out +/-25 cent offsets.
            tuning = np.around(tuning, -2)

        # A4 = 440 Hz; one octave doubles, one cent is 1/1200 of an octave.
        hz_true = 440.0 * (2.0**(tuning * 0.01 / 12)) * (2.0**(octave - 4))

        if accidental == '#':
            hz_true *= 2.0**(1./12)
        elif accidental in list('b!'):
            hz_true /= 2.0**(1./12)

        hz = librosa.note_to_hz(note, round_midi=round_midi)
        assert np.allclose(hz[0], hz_true)

    @raises(librosa.ParameterError)
    def __test_fail():
        # BUG FIX: the original called note_to_midi here (copy-paste from
        # test_note_to_midi); the failure path of note_to_hz itself is what
        # this test should exercise.
        librosa.note_to_hz('does not pass')

    for tuning in [None, -25, 0, 25]:
        for octave in [None, 1, 2, 3]:
            if octave is None and tuning is not None:
                continue
            for accidental in ['', '#', 'b', '!']:
                for round_midi in [False, True]:
                    yield __test, tuning, accidental, octave, round_midi

    yield __test_fail
def test_midi_to_note():
    """midi_to_note formatting with/without octave digits and cent offsets."""

    def __test(midi_num, note, octave, cents):
        note_out = librosa.midi_to_note(midi_num, octave=octave, cents=cents)

        eq_(note_out, note)

    midi_num = 24.25

    yield __test, midi_num, 'C', False, False
    yield __test, midi_num, 'C1', True, False
    # cents=True without octave=True is expected to raise ParameterError.
    yield raises(librosa.ParameterError)(__test), midi_num, 'C+25', False, True
    yield __test, midi_num, 'C1+25', True, True
    yield __test, [midi_num], ['C'], False, False
def test_midi_to_hz():
    """MIDI numbers 33/45/57/69 are the A pitches 55/110/220/440 Hz."""
    expected = [55, 110, 220, 440]
    computed = librosa.midi_to_hz([33, 45, 57, 69])
    assert np.allclose(computed, expected)
def test_hz_to_midi():
    """The A pitches 55/110/220/440 Hz map to MIDI numbers 33/45/57/69."""
    expected = [33, 45, 57, 69]
    computed = librosa.hz_to_midi([55, 110, 220, 440])
    assert np.allclose(computed, expected)
def test_hz_to_note():
    """hz_to_note formatting of 440 Hz with octave/cents options."""

    def __test(hz, note, octave, cents):
        note_out = librosa.hz_to_note(hz, octave=octave, cents=cents)

        eq_(list(note_out), list([note]))

    hz = 440
    yield __test, hz, 'A', False, False
    yield __test, hz, 'A4', True, False
    # cents=True without octave=True is expected to raise ParameterError.
    yield raises(librosa.ParameterError)(__test), hz, 'A+0', False, True
    yield __test, hz, 'A4+0', True, True
def test_fft_frequencies():
    """fft_frequencies spans DC to Nyquist on a linearly spaced grid."""

    def __test(sr, n_fft):
        freqs = librosa.fft_frequencies(sr=sr, n_fft=n_fft)

        # DC
        eq_(freqs[0], 0)

        # Nyquist, positive here for more convenient display purposes
        eq_(freqs[-1], sr / 2.0)

        # Ensure that the frequencies increase linearly
        dels = np.diff(freqs)
        assert np.allclose(dels, dels[0])

    for n_fft in [1024, 2048]:
        for sr in [8000, 22050]:
            yield __test, sr, n_fft
def test_cqt_frequencies():
    """cqt_frequencies: right bin count, tuned fmin, geometric spacing."""

    def __test(n_bins, fmin, bins_per_octave, tuning):
        freqs = librosa.cqt_frequencies(n_bins,
                                        fmin,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning)

        # Make sure we get the right number of bins
        eq_(len(freqs), n_bins)

        # And that the first bin matches fmin by tuning
        assert np.allclose(freqs[0],
                           fmin * 2.0**(float(tuning) / bins_per_octave))

        # And that we have constant Q
        # (constant spacing in log2-frequency of 1 / bins_per_octave).
        Q = np.diff(np.log2(freqs))
        assert np.allclose(Q, 1./bins_per_octave)

    for n_bins in [12, 24, 36]:
        for fmin in [440.0]:
            for bins_per_octave in [12, 24, 36]:
                for tuning in [-0.25, 0.0, 0.25]:
                    yield __test, n_bins, fmin, bins_per_octave, tuning
def test_tempo_frequencies():
    """tempo_frequencies: bin 0 is infinite; inverse BPMs step by hop_length."""

    def __test(n_bins, hop_length, sr):
        freqs = librosa.tempo_frequencies(n_bins, hop_length=hop_length, sr=sr)

        # Verify the length
        eq_(len(freqs), n_bins)

        # 0-bin should be infinite
        assert not np.isfinite(freqs[0])

        # remaining bins should be spaced by 1/hop_length
        if n_bins > 1:
            # Convert BPM back to lag in samples: 60 * sr / bpm.
            invdiff = (freqs[1:]**-1) * (60.0 * sr)
            assert np.allclose(invdiff[0], hop_length)
            assert np.allclose(np.diff(invdiff), np.asarray(hop_length)), np.diff(invdiff)

    for n_bins in [1, 16, 128]:
        for hop_length in [256, 512, 1024]:
            for sr in [11025, 22050, 44100]:
                yield __test, n_bins, hop_length, sr
def test_A_weighting():
    """A_weighting is ~0 dB at 1 kHz and never drops below the min_db floor."""

    def __test(min_db):
        # Check that 1KHz is around 0dB
        a_khz = librosa.A_weighting(1000.0, min_db=min_db)
        assert np.allclose(a_khz, 0, atol=1e-3)

        # Sweep the audible range 20 Hz .. 20 kHz.
        a_range = librosa.A_weighting(np.linspace(2e1, 2e4),
                                      min_db=min_db)
        # Check that the db cap works
        if min_db is not None:
            assert not np.any(a_range < min_db)

    for min_db in [None, -40, -80]:
        yield __test, min_db
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Bitmap is a basic wrapper for image pixels. It includes some basic processing
tools: crop, find bounding box of a color and compute histogram of color values.
"""
import array
import base64
import cStringIO
import struct
import subprocess
from telemetry.core import platform
from telemetry.core import util
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from telemetry.util import support_binaries
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'png')
import png # pylint: disable=F0401
class _BitmapTools(object):
  """Wraps a child process of bitmaptools and allows for one command."""
  # Command opcodes understood by the bitmaptools binary.
  CROP_PIXELS = 0
  HISTOGRAM = 1
  BOUNDING_BOX = 2

  def __init__(self, dimensions, pixels):
    """Spawns bitmaptools and streams it the bitmap header and pixel data."""
    binary = support_binaries.FindPath(
        'bitmaptools',
        platform.GetHostPlatform().GetArchName(),
        platform.GetHostPlatform().GetOSName())
    assert binary, 'You must build bitmaptools first!'

    self._popen = subprocess.Popen([binary],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

    # dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
    packed_dims = struct.pack('iiiiiii', *dimensions)
    self._popen.stdin.write(packed_dims)
    # If we got a list of ints, we need to convert it into a byte buffer.
    if type(pixels) is not bytearray:
      pixels = bytearray(pixels)
    self._popen.stdin.write(pixels)

  def _RunCommand(self, *command):
    """Sends one packed int command, closes stdin, and returns the response.

    The child answers with a length header followed by that many bytes.
    Raises an Exception carrying the child's stderr if no header arrives.
    """
    assert not self._popen.stdin.closed, (
        'Exactly one command allowed per instance of tools.')
    packed_command = struct.pack('i' * len(command), *command)
    self._popen.stdin.write(packed_command)
    self._popen.stdin.close()
    length_packed = self._popen.stdout.read(struct.calcsize('i'))
    if not length_packed:
      raise Exception(self._popen.stderr.read())
    length = struct.unpack('i', length_packed)[0]
    return self._popen.stdout.read(length)

  def CropPixels(self):
    """Returns the raw pixel bytes of the crop box passed to __init__."""
    return self._RunCommand(_BitmapTools.CROP_PIXELS)

  def Histogram(self, ignore_color, tolerance):
    """Returns a ColorHistogram; pixels matching ignore_color are skipped."""
    # -1 signals "ignore nothing" to the child process.
    ignore_color_int = -1 if ignore_color is None else int(ignore_color)
    response = self._RunCommand(_BitmapTools.HISTOGRAM,
                                ignore_color_int, tolerance)
    out = array.array('i')
    out.fromstring(response)
    # 256 buckets per channel: r, g, b.
    assert len(out) == 768, (
        'The ColorHistogram has the wrong number of buckets: %s' % len(out))
    return histogram.ColorHistogram(out[:256], out[256:512], out[512:],
                                    ignore_color)

  def BoundingBox(self, color, tolerance):
    """Returns ((left, top, width, height) or None, matching-pixel count)."""
    response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
                                tolerance)
    unpacked = struct.unpack('iiiii', response)
    box, count = unpacked[:4], unpacked[-1]
    # Negative width/height means no pixel matched the color.
    if box[2] < 0 or box[3] < 0:
      box = None
    return box, count
class Bitmap(object):
  """Utilities for parsing and inspecting a bitmap."""

  def __init__(self, bpp, width, height, pixels, metadata=None):
    assert bpp in [3, 4], 'Invalid bytes per pixel'
    assert width > 0, 'Invalid width'
    assert height > 0, 'Invalid height'
    assert pixels, 'Must specify pixels'
    assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'

    self._bpp = bpp
    self._width = width
    self._height = height
    self._pixels = pixels
    self._metadata = metadata or {}
    # Pending crop rectangle (left, top, width, height); applied lazily
    # by the `pixels` property.
    self._crop_box = None

  @property
  def bpp(self):
    # Bytes per pixel: 3 (RGB) or 4 (RGBA).
    return self._bpp

  @property
  def width(self):
    # Width of the pending crop, if any, else the full bitmap width.
    return self._crop_box[2] if self._crop_box else self._width

  @property
  def height(self):
    # Height of the pending crop, if any, else the full bitmap height.
    return self._crop_box[3] if self._crop_box else self._height

  def _PrepareTools(self):
    """Prepares an instance of _BitmapTools which allows exactly one command.
    """
    crop_box = self._crop_box or (0, 0, self._width, self._height)
    return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
                        self._pixels)

  @property
  def pixels(self):
    # Accessing pixels applies any pending crop: pixel data, width and
    # height are replaced by the cropped values and the box is cleared.
    if self._crop_box:
      self._pixels = self._PrepareTools().CropPixels()
      # pylint: disable=unpacking-non-sequence
      _, _, self._width, self._height = self._crop_box
      self._crop_box = None

    if type(self._pixels) is not bytearray:
      self._pixels = bytearray(self._pixels)

    return self._pixels

  @property
  def metadata(self):
    # Keep the metadata dict in sync with (possibly cropped) dimensions.
    self._metadata['size'] = (self.width, self.height)
    self._metadata['alpha'] = self.bpp == 4
    self._metadata['bitdepth'] = 8

    return self._metadata

  def GetPixelColor(self, x, y):
    """Returns the RgbaColor of the pixel at (x, y)."""
    pixels = self.pixels
    base = self._bpp * (y * self._width + x)
    if self._bpp == 4:
      return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
                                  pixels[base + 2], pixels[base + 3])
    return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
                                pixels[base + 2])

  @staticmethod
  def FromPng(png_data):
    """Decodes an in-memory PNG byte string into a Bitmap."""
    width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
    return Bitmap(4 if meta['alpha'] else 3, width, height, pixels, meta)

  @staticmethod
  def FromPngFile(path):
    """Reads a PNG file from disk into a Bitmap."""
    with open(path, "rb") as f:
      return Bitmap.FromPng(f.read())

  def WritePngFile(self, path):
    """Encodes this bitmap as PNG and writes it to |path|."""
    with open(path, "wb") as f:
      png.Writer(**self.metadata).write_array(f, self.pixels)

  def IsEqual(self, other, tolerance=0):
    """Returns True if |other| matches this bitmap within |tolerance|."""
    # Dimensions must be equal
    if self.width != other.width or self.height != other.height:
      return False

    # Loop over each pixel and test for equality
    if tolerance or self.bpp != other.bpp:
      for y in range(self.height):
        for x in range(self.width):
          c0 = self.GetPixelColor(x, y)
          c1 = other.GetPixelColor(x, y)
          if not c0.IsEqual(c1, tolerance):
            return False
    else:
      # Same bpp and exact comparison requested: compare the raw buffers.
      return self.pixels == other.pixels

    return True

  def Diff(self, other):
    """Returns a new RGB Bitmap of per-channel absolute differences."""
    # Output dimensions will be the maximum of the two input dimensions
    out_width = max(self.width, other.width)
    out_height = max(self.height, other.height)

    diff = [[0 for x in xrange(out_width * 3)] for x in xrange(out_height)]

    # Loop over each pixel and write out the difference
    for y in range(out_height):
      for x in range(out_width):
        if x < self.width and y < self.height:
          c0 = self.GetPixelColor(x, y)
        else:
          # Out-of-range pixels are treated as transparent black.
          c0 = rgba_color.RgbaColor(0, 0, 0, 0)

        if x < other.width and y < other.height:
          c1 = other.GetPixelColor(x, y)
        else:
          c1 = rgba_color.RgbaColor(0, 0, 0, 0)

        offset = x * 3
        diff[y][offset] = abs(c0.r - c1.r)
        diff[y][offset+1] = abs(c0.g - c1.g)
        diff[y][offset+2] = abs(c0.b - c1.b)

    # This particular method can only save to a file, so the result will be
    # written into an in-memory buffer and read back into a Bitmap
    diff_img = png.from_array(diff, mode='RGB')
    output = cStringIO.StringIO()
    try:
      diff_img.save(output)
      diff = Bitmap.FromPng(output.getvalue())
    finally:
      output.close()

    return diff

  def GetBoundingBox(self, color, tolerance=0):
    """Returns (bounding box or None, count) of pixels matching |color|."""
    return self._PrepareTools().BoundingBox(color, tolerance)

  def Crop(self, left, top, width, height):
    """Records a crop rectangle (applied lazily); returns self for chaining."""
    cur_box = self._crop_box or (0, 0, self._width, self._height)
    cur_left, cur_top, cur_width, cur_height = cur_box

    if (left < 0 or top < 0 or
        (left + width) > cur_width or
        (top + height) > cur_height):
      raise ValueError('Invalid dimensions')

    # Compose the new rectangle with any existing (not yet applied) crop.
    self._crop_box = cur_left + left, cur_top + top, width, height
    return self

  def ColorHistogram(self, ignore_color=None, tolerance=0):
    """Returns a ColorHistogram of this bitmap (optionally ignoring a color)."""
    return self._PrepareTools().Histogram(ignore_color, tolerance)
| |
#!/usr/bin/env python
"""Web server for the Trendy Lights application.
The overall architecture looks like:
server.py script.js
______ ____________ _________
| | | | | |
| EE | <-> | App Engine | <-> | Browser |
|______| |____________| |_________|
\ /
'- - - - - - - - - - - - - - -'
The code in this file runs on App Engine. It's called when the user loads the
web page and when details about a polygon are requested.
Our App Engine code does most of the communication with EE. It uses the
EE Python library and the service account specified in config.py. The
exception is that when the browser loads map tiles it talks directly with EE.
The basic flows are:
1. Initial page load
When the user first loads the application in their browser, their request is
routed to the get() function in the MainHandler class by the framework we're
using, webapp2.
The get() function sends back the main web page (from index.html) along
with information the browser needs to render an Earth Engine map and
the IDs of the polygons to show on the map. This information is injected
into the index.html template through a templating engine called Jinja2,
which puts information from the Python context into the HTML for the user's
browser to receive.
Note: The polygon IDs are determined by looking at the static/polygons
folder. To add support for another polygon, just add another GeoJSON file to
that folder.
2. Getting details about a polygon
When the user clicks on a polygon, our JavaScript code (in static/script.js)
running in their browser sends a request to our backend. webapp2 routes this
request to the get() method in the DetailsHandler.
This method checks to see if the details for this polygon are cached. If
yes, it returns them right away. If no, we generate a Wikipedia URL and use
Earth Engine to compute the brightness trend for the region. We then store
these results in a cache and return the result.
Note: The brightness trend is a list of points for the chart drawn by the
Google Visualization API in a time series e.g. [[x1, y1], [x2, y2], ...].
Note: memcache, the cache we are using, is a service provided by App Engine
that temporarily stores small values in memory. Using it allows us to avoid
needlessly requesting the same data from Earth Engine over and over again,
which in turn helps us avoid exceeding our quota and respond to user
requests more quickly.
"""
import json
import os
import config
import ee
import jinja2
import webapp2
from google.appengine.api import memcache
###############################################################################
# Web request handlers. #
###############################################################################
class MainHandler(webapp2.RequestHandler):
    """Serves the main Trendy Lights web page with EE map and polygon info."""

    def get(self, path=''):
        """Renders index.html populated with the EE map ID and polygon IDs."""
        mapid = GetTrendyMapId()
        # Values injected into the Jinja2 template for the browser-side code.
        context = {
            'eeMapId': mapid['mapid'],
            'eeToken': mapid['token'],
            'serializedPolygonIds': json.dumps(POLYGON_IDS),
        }
        page = JINJA2_ENVIRONMENT.get_template('index.html')
        self.response.out.write(page.render(context))
class DetailsHandler(webapp2.RequestHandler):
    """Serves JSON details about a single polygon."""

    def get(self):
        """Returns details about a polygon."""
        polygon_id = self.request.get('polygon_id')
        if polygon_id not in POLYGON_IDS:
            content = json.dumps(
                {'error': 'Unrecognized polygon ID: ' + polygon_id})
        else:
            content = GetPolygonTimeSeries(polygon_id)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(content)
# Define webapp2 routing from URL paths to web request handlers. See:
# http://webapp-improved.appspot.com/tutorials/quickstart.html
# Routes are tried in order: '/details' serves polygon JSON, '/' serves the
# main page.
app = webapp2.WSGIApplication([
    ('/details', DetailsHandler),
    ('/', MainHandler),
])
###############################################################################
# Helpers. #
###############################################################################
def GetTrendyMapId():
    """Returns the MapID for the night-time lights trend map."""

    def CreateTimeBand(img):
        # Prepend a band holding the image date as years since 1991.
        year = ee.Date(img.get('system:time_start')).get('year').subtract(1991)
        return ee.Image(year).byte().addBands(img)

    lights = ee.ImageCollection(IMAGE_COLLECTION_ID)
    lights = lights.select('stable_lights').map(CreateTimeBand)
    # Fit a linear trend to the nighttime lights collection.
    trend = lights.reduce(ee.Reducer.linearFit())
    vis_params = {
        'min': '0',
        'max': '0.18,20,-0.18',
        'bands': 'scale,offset,scale',
    }
    return trend.getMapId(vis_params)
def GetPolygonTimeSeries(polygon_id):
    """Returns details about the polygon with the passed-in ID."""
    cached = memcache.get(polygon_id)
    if cached is not None:
        # Cache hit: the details were stored as already-serialized JSON.
        return cached
    details = {'wikiUrl': WIKI_URL + polygon_id.replace('-', '%20')}
    try:
        details['timeSeries'] = ComputePolygonTimeSeries(polygon_id)
        # Cache successful results so repeat requests skip Earth Engine.
        memcache.add(polygon_id, json.dumps(details), MEMCACHE_EXPIRATION)
    except ee.EEException as e:
        # Surface Earth Engine client-library errors to the browser.
        details['error'] = str(e)
    return json.dumps(details)
def ComputePolygonTimeSeries(polygon_id):
    """Returns a series of brightness over time for the polygon.

    Args:
        polygon_id: ID of a polygon under static/polygons (without ".json").

    Returns:
        A list of [time_start, mean_stable_lights] pairs, one per image in
        the collection, ordered by acquisition time.
    """
    collection = ee.ImageCollection(IMAGE_COLLECTION_ID)
    collection = collection.select('stable_lights').sort('system:time_start')
    feature = GetFeature(polygon_id)

    # Compute the mean brightness in the region in each image.
    def ComputeMean(img):
        reduction = img.reduceRegion(
            ee.Reducer.mean(), feature.geometry(), REDUCTION_SCALE_METERS)
        return ee.Feature(None, {
            'stable_lights': reduction.get('stable_lights'),
            'system:time_start': img.get('system:time_start')
        })

    chart_data = collection.map(ComputeMean).getInfo()

    # Extract the results as a concrete list of [time, value] pairs. Using a
    # list comprehension (instead of the builtin `map`) keeps the value
    # json-serializable under Python 3, where `map` returns a lazy iterator;
    # under Python 2 the result is identical.
    return [[f['properties']['system:time_start'],
             f['properties']['stable_lights']]
            for f in chart_data['features']]
def GetFeature(polygon_id):
    """Returns an ee.Feature for the polygon with the given ID."""
    # Polygon IDs mirror file names: "sample-id" corresponds to
    # "static/polygons/sample-id.json" (see the initialization section).
    relative = POLYGON_PATH + polygon_id + '.json'
    full_path = os.path.join(os.path.split(__file__)[0], relative)
    with open(full_path) as handle:
        return ee.Feature(json.load(handle))
###############################################################################
#                               Constants.                                    #
###############################################################################
# Memcache is used to avoid exceeding our EE quota. Entries in the cache expire
# 24 hours after they are added. See:
# https://cloud.google.com/appengine/docs/python/memcache/
MEMCACHE_EXPIRATION = 60 * 60 * 24
# The ImageCollection of the night-time lights dataset. See:
# https://earthengine.google.org/#detail/NOAA%2FDMSP-OLS%2FNIGHTTIME_LIGHTS
IMAGE_COLLECTION_ID = 'NOAA/DMSP-OLS/NIGHTTIME_LIGHTS'
# The file system folder path to the folder with GeoJSON polygon files.
POLYGON_PATH = 'static/polygons/'
# The scale (in meters) at which to reduce the polygons for the brightness
# time series.
REDUCTION_SCALE_METERS = 20000
# The Wikipedia URL prefix.
WIKI_URL = 'http://en.wikipedia.org/wiki/'
###############################################################################
#                               Initialization.                               #
###############################################################################
# Everything below runs once, at module import time.
# Use our App Engine service account's credentials.
EE_CREDENTIALS = ee.ServiceAccountCredentials(
    config.EE_ACCOUNT, config.EE_PRIVATE_KEY_FILE)
# Read the polygon IDs from the file system.
# NOTE(review): assumes every entry in static/polygons is a .json file —
# any other file would produce a bogus polygon ID; verify folder contents.
POLYGON_IDS = [name.replace('.json', '') for name in os.listdir(POLYGON_PATH)]
# Create the Jinja templating system we use to dynamically generate HTML. See:
# http://jinja.pocoo.org/docs/dev/
JINJA2_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    autoescape=True,
    extensions=['jinja2.ext.autoescape'])
# Initialize the EE API.
ee.Initialize(EE_CREDENTIALS)
| |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import jax
import jax.numpy as jnp
from jax import config
import tensorflow as tf
import torch
import pytest
import tensornetwork
from tensornetwork.backends import abstract_backend
from tensornetwork import backends, backend_contextmanager
from tensornetwork.tests import testing_utils
from tensornetwork import ncon_interface
#pylint: disable=no-member
config.update("jax_enable_x64", True)
BaseBackend = abstract_backend.AbstractBackend
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_init_tensor_from_numpy_array(backend, dtype):
    """Initializes a Tensor from a numpy array and verifies its members."""
    tensor, array = testing_utils.safe_zeros((2, 3, 1), backend, dtype)
    if tensor is None:
        return  # Unsupported backend/dtype combination.
    assert tensor.backend.name == backend
    np.testing.assert_allclose(tensor.array, array)
    assert tensor.shape == array.shape
    assert tensor.size == array.size
    assert tensor.ndim == array.ndim
@pytest.mark.parametrize("dtype", testing_utils.torch_supported_dtypes)
def test_init_tensor_default_backend(dtype):
    """Builds a Tensor without an explicit backend and checks the default."""
    default_name = backend_contextmanager.get_default_backend()
    default_backend = backends.backend_factory.get_backend(default_name)
    shape = (3, 5, 2)
    expected = default_backend.zeros(shape, dtype=dtype)
    tensor = tensornetwork.Tensor(np.zeros(shape, dtype=dtype))
    assert tensor.backend.name == default_name
    np.testing.assert_allclose(tensor.array, expected)
    assert tensor.shape == expected.shape
    assert tensor.size == expected.size
    assert tensor.ndim == expected.ndim
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_init_tensor_from_backend_array(backend, dtype):
    """Initializes a Tensor from a native backend array and checks members."""
    shape = (2, 3, 1)
    zeros_by_backend = {
        "pytorch": torch.zeros,
        "numpy": np.zeros,
        "jax": jnp.zeros,
        "tensorflow": tf.zeros,
    }
    if backend not in zeros_by_backend:
        raise ValueError("Unexpected backend ", backend)
    if (backend == "pytorch"
            and dtype not in testing_utils.torch_supported_dtypes):
        # Conversion of unsupported torch dtypes must raise.
        with pytest.raises(TypeError):
            dtype = testing_utils.np_dtype_to_backend(backend, dtype)
        return
    dtype = testing_utils.np_dtype_to_backend(backend, dtype)
    init = zeros_by_backend[backend](shape, dtype=dtype)
    A = tensornetwork.Tensor(init, backend=backend)
    assert A.backend.name == backend
    np.testing.assert_allclose(A.array, init)
    assert A.shape == init.shape
    assert A.size == np.prod(init.shape)
    assert A.ndim == init.ndim
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_dtype(backend, dtype):
    """Checks that Tensor.dtype works."""
    A, init = testing_utils.safe_zeros((2, 3, 1), backend, dtype)
    if A is None:
        return
    # pytorch reports torch dtype objects rather than numpy dtypes.
    expected = init.dtype if backend != "pytorch" else torch.tensor(init).dtype
    assert A.dtype == expected
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_T(backend, dtype):
    """Checks that Tensor.T works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.T.array, init.T)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_H(backend, dtype):
    """Checks that Tensor.H works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.H.array, init.conj().T)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_conj(backend, dtype):
    """Checks that Tensor.conj() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.conj().array, A.backend.conj(init))
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_conjugate(backend, dtype):
    """Checks that Tensor.conjugate() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.conjugate().array, A.backend.conj(init))
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_copy(backend, dtype):
    """Checks that Tensor.copy() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.copy().array, init.copy())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_reshape(backend, dtype):
    """Checks that Tensor.reshape() works."""
    newshape = (6, 1)
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.reshape(newshape).array,
                               init.reshape(newshape))
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_transpose(backend, dtype):
    """Checks that Tensor.transpose() works."""
    perm = (1, 2, 0)
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    raw = A.backend.convert_to_tensor(init)
    expected = A.backend.transpose(raw, perm=perm)
    np.testing.assert_allclose(A.transpose(perm=perm).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_squeeze(backend, dtype):
    """Checks that Tensor.squeeze() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.squeeze().array, init.squeeze())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_ravel(backend, dtype):
    """Checks that Tensor.ravel() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.ravel().array, init.ravel())
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_tensor_flatten(backend, dtype):
    """Checks that Tensor.flatten() works."""
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    np.testing.assert_allclose(A.flatten().array, init.flatten())
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensor_hconj(backend, dtype):
    """Checks that Tensor.hconj() works."""
    perm = (1, 2, 0)
    A, init = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    raw = A.backend.convert_to_tensor(init)
    expected = A.backend.transpose(A.backend.conj(raw), perm=perm)
    np.testing.assert_allclose(A.hconj(perm=perm).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_multiply(backend, dtype):
    """Checks that Tensor*Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    B, initB = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    expected = A.backend.multiply(A.backend.convert_to_tensor(initA),
                                  B.backend.convert_to_tensor(initB))
    np.testing.assert_allclose((A * B).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_multiply(backend, dtype):
    """Checks that Tensor*scalar works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.multiply(A.backend.convert_to_tensor(initA), scalar)
    np.testing.assert_allclose((A * scalar).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_rmultiply(backend, dtype):
    """Checks that scalar*Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.multiply(scalar, A.backend.convert_to_tensor(initA))
    np.testing.assert_allclose((scalar * A).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_divide(backend, dtype):
    """Checks that Tensor/Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    B, _ = testing_utils.safe_zeros((2, 3, 1), backend, dtype)
    if A is None:
        return
    B = B + 1  # shift the all-zeros divisor away from zero
    expected = A.backend.divide(A.backend.convert_to_tensor(initA), B.array)
    np.testing.assert_allclose((A / B).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_divide(backend, dtype):
    """Checks that Tensor/scalar works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.divide(A.backend.convert_to_tensor(initA), scalar)
    np.testing.assert_allclose((A / scalar).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_addition(backend, dtype):
    """Checks that Tensor+Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    B, initB = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    expected = A.backend.addition(A.backend.convert_to_tensor(initA),
                                  B.backend.convert_to_tensor(initB))
    np.testing.assert_allclose((A + B).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_addition(backend, dtype):
    """Checks that Tensor+scalar works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.addition(A.backend.convert_to_tensor(initA), scalar)
    np.testing.assert_allclose((A + scalar).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_raddition(backend, dtype):
    """Checks that scalar+Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.addition(scalar, A.backend.convert_to_tensor(initA))
    np.testing.assert_allclose((scalar + A).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_subtraction(backend, dtype):
    """Checks that Tensor-Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    B, initB = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    if A is None:
        return
    expected = A.backend.subtraction(A.backend.convert_to_tensor(initA),
                                     B.backend.convert_to_tensor(initB))
    np.testing.assert_allclose((A - B).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_subtraction(backend, dtype):
    """Checks that Tensor-scalar works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.subtraction(A.backend.convert_to_tensor(initA),
                                     scalar)
    np.testing.assert_allclose((A - scalar).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_scalar_rsubtraction(backend, dtype):
    """Checks that scalar-Tensor works."""
    A, initA = testing_utils.safe_randn((2, 3, 1), backend, dtype)
    scalar = 2.
    if A is None:
        return
    expected = A.backend.subtraction(scalar,
                                     A.backend.convert_to_tensor(initA))
    np.testing.assert_allclose((scalar - A).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_matmul(backend, dtype):
    """Checks that Tensor@Tensor works."""
    A, initA = testing_utils.safe_randn((3, 3), backend, dtype)
    B, initB = testing_utils.safe_randn((3, 3), backend, dtype)
    if A is None or B is None:
        return
    expected = A.backend.matmul(A.backend.convert_to_tensor(initA),
                                B.backend.convert_to_tensor(initB))
    np.testing.assert_allclose((A @ B).array, expected)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_tensor_ops_raise(dtype):
    """Checks that mixing backends in tensor operators raises ValueError."""
    A, _ = testing_utils.safe_randn((2, 3, 1), "numpy", dtype)
    B, _ = testing_utils.safe_randn((2, 3, 1), "jax", dtype)
    mixed_ops = (lambda: A * B,
                 lambda: A + B,
                 lambda: A - B,
                 lambda: A / B,
                 lambda: A @ B)
    for op in mixed_ops:
        with pytest.raises(ValueError):
            op()
def test_ncon_builder(backend):
    """Checks the ncon builder syntax against a direct ncon call."""
    a, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
    b, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
    c, _ = testing_utils.safe_randn((2, 2, 2), backend, np.float32)
    # Calling a tensor records (tensor, axes) for later contraction.
    tmp = a(2, 1, -1)
    assert tmp.tensors[0] is a
    assert tmp.axes[0] == [2, 1, -1]
    builder = a(2, 1, -1) @ b(2, 3, -2) @ c(1, 3, -3)
    assert builder.tensors == [a, b, c]
    assert builder.axes == [[2, 1, -1], [2, 3, -2], [1, 3, -3]]
    direct = ncon_interface.ncon([a, b, c],
                                 [[2, 1, -1], [2, 3, -2], [1, 3, -3]],
                                 backend=backend)
    np.testing.assert_allclose(direct.array,
                               ncon_interface.finalize(builder).array)
| |
# Copyright 2013 Metacloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
from dogpile.cache import api
from dogpile.cache import proxy
from dogpile.cache import util
import mock
import six
from testtools import matchers
from keystone.common.kvs.backends import inmemdb
from keystone.common.kvs.backends import memcached
from keystone.common.kvs import core
from keystone import exception
from keystone.tests import unit
NO_VALUE = api.NO_VALUE
class MutexFixture(object):
    """Minimal dict-backed mutex: presence of the key means "locked"."""

    def __init__(self, storage_dict, key, timeout):
        # `timeout` is accepted for interface compatibility but unused here.
        self.database = storage_dict
        self.key = '_lock' + key

    def acquire(self, wait=True):
        # The backing store is expected to raise KeyError on duplicate keys,
        # which signals that the lock is already held.
        try:
            self.database[self.key] = 1
            return True
        except KeyError:
            return False

    def release(self):
        self.database.pop(self.key, None)
class KVSBackendFixture(inmemdb.MemoryBackend):
    """In-memory KVS backend whose store refuses to overwrite keys."""

    def __init__(self, arguments):
        class _NoOverwriteDict(dict):
            # Raising on duplicate keys lets MutexFixture.acquire fail when
            # the lock key already exists.
            def __setitem__(self, key, value):
                if key in self:
                    raise KeyError('Key %s already exists' % key)
                super(_NoOverwriteDict, self).__setitem__(key, value)

        self._db = _NoOverwriteDict()
        self.lock_timeout = arguments.pop('lock_timeout', 5)
        self.test_arg = arguments.pop('test_arg', None)

    def get_mutex(self, key):
        return MutexFixture(self._db, key, self.lock_timeout)

    @classmethod
    def key_mangler(cls, key):
        return 'KVSBackend_' + key
class KVSBackendForcedKeyMangleFixture(KVSBackendFixture):
    """Variant of KVSBackendFixture that forces its own key mangler."""

    use_backend_key_mangler = True

    @classmethod
    def key_mangler(cls, key):
        return 'KVSBackendForcedKeyMangle_' + key
class RegionProxyFixture(proxy.ProxyBackend):
    """A no-op dogpile.cache proxy used to verify proxy wiring."""
class RegionProxy2Fixture(proxy.ProxyBackend):
    """A second no-op dogpile.cache proxy used to verify proxy ordering."""
class TestMemcacheDriver(api.CacheBackend):
    """A test dogpile.cache backend that conforms to the mixin-mechanism for
    overriding set and set_multi methods on dogpile memcached drivers.
    """

    class test_client(object):
        # FIXME(morganfainberg): Convert this test client over to using mock
        # and/or mock.MagicMock as appropriate

        def __init__(self):
            self.__name__ = 'TestingMemcacheDriverClientObject'
            # Keyword arguments received by the most recent set/set_multi.
            self.set_arguments_passed = None
            # Simulated memcache storage; cleared on every set/set_multi.
            self.keys_values = {}
            # Timestamp at which the current (fake) lock was acquired.
            self.lock_set_time = None
            # Lifetime (in seconds) of the current (fake) lock.
            self.lock_expiry = None

        def set(self, key, value, **set_arguments):
            # Only the most recent write is retained; that is enough to
            # assert the set arguments were passed through.
            self.keys_values.clear()
            self.keys_values[key] = value
            self.set_arguments_passed = set_arguments

        def set_multi(self, mapping, **set_arguments):
            self.keys_values.clear()
            # NOTE(review): rebinds to the caller's mapping rather than
            # copying it — later mutation of `mapping` would be visible here.
            self.keys_values = mapping
            self.set_arguments_passed = set_arguments

        def add(self, key, value, expiry_time):
            # NOTE(morganfainberg): `add` is used in this case for the
            # memcache lock testing. If further testing is required around the
            # actual memcache `add` interface, this method should be
            # expanded to work more like the actual memcache `add` function
            if self.lock_expiry is not None and self.lock_set_time is not None:
                if time.time() - self.lock_set_time < self.lock_expiry:
                    # The previous lock has not expired yet, so `add` fails.
                    return False
            self.lock_expiry = expiry_time
            self.lock_set_time = time.time()
            return True

        def delete(self, key):
            # NOTE(morganfainberg): `delete` is used in this case for the
            # memcache lock testing. If further testing is required around the
            # actual memcache `delete` interface, this method should be
            # expanded to work more like the actual memcache `delete` function.
            self.lock_expiry = None
            self.lock_set_time = None
            return True

    def __init__(self, arguments):
        self.client = self.test_client()
        self.set_arguments = {}
        # NOTE(morganfainberg): This is the same logic as the dogpile backend
        # since we need to mirror that functionality for the `set_argument`
        # values to appear on the actual backend.
        if 'memcached_expire_time' in arguments:
            self.set_arguments['time'] = arguments['memcached_expire_time']

    def set(self, key, value):
        self.client.set(key, value, **self.set_arguments)

    def set_multi(self, mapping):
        self.client.set_multi(mapping, **self.set_arguments)
class KVSTest(unit.TestCase):
    def setUp(self):
        """Generates unique test keys/values and registers the test driver."""
        super(KVSTest, self).setUp()
        self.key_foo = 'foo_' + uuid.uuid4().hex
        self.value_foo = uuid.uuid4().hex
        self.key_bar = 'bar_' + uuid.uuid4().hex
        self.value_bar = {'complex_data_structure': uuid.uuid4().hex}
        # Unregister the test driver afterwards so other tests never see it.
        self.addCleanup(memcached.VALID_DOGPILE_BACKENDS.pop,
                        'TestDriver',
                        None)
        memcached.VALID_DOGPILE_BACKENDS['TestDriver'] = TestMemcacheDriver
def _get_kvs_region(self, name=None):
if name is None:
name = uuid.uuid4().hex
return core.get_key_value_store(name)
    def test_kvs_basic_configuration(self):
        """Basic configuration options pass through to the backend."""
        region_one = uuid.uuid4().hex
        region_two = uuid.uuid4().hex
        test_arg = 100
        kvs = self._get_kvs_region(region_one)
        kvs.configure('openstack.kvs.Memory')
        self.assertIsInstance(kvs._region.backend, inmemdb.MemoryBackend)
        self.assertEqual(region_one, kvs._region.name)
        kvs = self._get_kvs_region(region_two)
        kvs.configure('openstack.kvs.KVSBackendFixture',
                      test_arg=test_arg)
        self.assertEqual(region_two, kvs._region.name)
        self.assertEqual(test_arg, kvs._region.backend.test_arg)
    def test_kvs_proxy_configuration(self):
        """Proxies are applied correctly and in reverse order to the region."""
        kvs = self._get_kvs_region()
        kvs.configure(
            'openstack.kvs.Memory',
            proxy_list=['keystone.tests.unit.test_kvs.RegionProxyFixture',
                        'keystone.tests.unit.test_kvs.RegionProxy2Fixture'])
        # The first proxy in the list ends up outermost.
        self.assertIsInstance(kvs._region.backend, RegionProxyFixture)
        self.assertIsInstance(kvs._region.backend.proxied, RegionProxy2Fixture)
        self.assertIsInstance(kvs._region.backend.proxied.proxied,
                              inmemdb.MemoryBackend)
    def test_kvs_key_mangler_fallthrough_default(self):
        """Defaults to the standard dogpile sha1 hashing key_mangler."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        self.assertIs(kvs._region.key_mangler, util.sha1_mangle_key)
        # The backend should also have the keymangler set the same as the
        # region now.
        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
    def test_kvs_key_mangler_configuration_backend(self):
        """A backend-declared key_mangler is used by the region."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendFixture')
        expected = KVSBackendFixture.key_mangler(self.key_foo)
        self.assertEqual(expected, kvs._region.key_mangler(self.key_foo))
    def test_kvs_key_mangler_configuration_forced_backend(self):
        """A backend forcing its mangler overrides the configured one."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendForcedKeyMangleFixture',
                      key_mangler=util.sha1_mangle_key)
        expected = KVSBackendForcedKeyMangleFixture.key_mangler(self.key_foo)
        self.assertEqual(expected, kvs._region.key_mangler(self.key_foo))
    def test_kvs_key_mangler_configuration_disabled(self):
        """No key_mangler is set when enable_key_mangler is false."""
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        self.assertIsNone(kvs._region.key_mangler)
        self.assertIsNone(kvs._region.backend.key_mangler)
    def test_kvs_key_mangler_set_on_backend(self):
        """Setting a mangler on the region propagates to the backend."""
        def test_key_mangler(key):
            return key

        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
        kvs._set_key_mangler(test_key_mangler)
        self.assertIs(kvs._region.backend.key_mangler, test_key_mangler)
    def test_kvs_basic_get_set_delete(self):
        """Tests the basic get/set/delete actions on the KVS region."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        # Not found should be raised if the key doesn't exist
        self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar)
        kvs.set(self.key_bar, self.value_bar)
        returned_value = kvs.get(self.key_bar)
        # The returned value should be the same value as the value in .set
        self.assertEqual(self.value_bar, returned_value)
        # The value should not be the exact object used in .set
        self.assertIsNot(returned_value, self.value_bar)
        kvs.delete(self.key_bar)
        # Second delete should raise NotFound
        self.assertRaises(exception.NotFound, kvs.delete, key=self.key_bar)
    def _kvs_multi_get_set_delete(self, kvs):
        """Exercises set_multi/get_multi/delete_multi on a configured region."""
        keys = [self.key_foo, self.key_bar]
        expected = [self.value_foo, self.value_bar]
        kvs.set_multi({self.key_foo: self.value_foo,
                       self.key_bar: self.value_bar})
        # Returned value from get_multi should be a list of the values of the
        # keys
        self.assertEqual(expected, kvs.get_multi(keys))
        # Delete both keys
        kvs.delete_multi(keys)
        # make sure that NotFound is properly raised when trying to get the now
        # deleted keys
        self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys)
        self.assertRaises(exception.NotFound, kvs.get, key=self.key_foo)
        self.assertRaises(exception.NotFound, kvs.get, key=self.key_bar)
        # Make sure get_multi raises NotFound if one of the keys isn't found
        kvs.set(self.key_foo, self.value_foo)
        self.assertRaises(exception.NotFound, kvs.get_multi, keys=keys)
    def test_kvs_multi_get_set_delete(self):
        """Runs the multi-key get/set/delete checks on the Memory backend."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        self._kvs_multi_get_set_delete(kvs)
    def test_kvs_locking_context_handler(self):
        """The lock key exists exactly while the lock context is held."""
        # Make sure we're creating the correct key/value pairs for the backend
        # distributed locking mutex.
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendFixture')
        lock_key = '_lock' + self.key_foo
        self.assertNotIn(lock_key, kvs._region.backend._db)
        with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo):
            self.assertIn(lock_key, kvs._region.backend._db)
            self.assertIs(kvs._region.backend._db[lock_key], 1)
        self.assertNotIn(lock_key, kvs._region.backend._db)
    def test_kvs_locking_context_handler_locking_disabled(self):
        """No lock key is ever created when locking is disabled."""
        # Make sure no creation of key/value pairs for the backend
        # distributed locking mutex occurs if locking is disabled.
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendFixture', locking=False)
        lock_key = '_lock' + self.key_foo
        self.assertNotIn(lock_key, kvs._region.backend._db)
        with core.KeyValueStoreLock(kvs._mutex(self.key_foo), self.key_foo,
                                    False):
            self.assertNotIn(lock_key, kvs._region.backend._db)
        self.assertNotIn(lock_key, kvs._region.backend._db)
    def test_kvs_with_lock_action_context_manager_timeout(self):
        """_action_with_lock raises LockTimeout once the lock has expired."""
        kvs = self._get_kvs_region()
        lock_timeout = 5
        kvs.configure('openstack.kvs.Memory', lock_timeout=lock_timeout)

        def do_with_lock_action_timeout(kvs_region, key, offset):
            with kvs_region.get_lock(key) as lock_in_use:
                self.assertTrue(lock_in_use.active)
                # Subtract the offset from the acquire_time. If this puts the
                # acquire_time difference from time.time() at >= lock_timeout
                # this should raise a LockTimeout exception. This is because
                # there is a built-in 1-second overlap where the context
                # manager thinks the lock is expired but the lock is still
                # active. This is to help mitigate race conditions on the
                # time-check itself.
                lock_in_use.acquire_time -= offset
                with kvs_region._action_with_lock(key, lock_in_use):
                    pass

        # This should succeed, we are not timed-out here.
        do_with_lock_action_timeout(kvs, key=uuid.uuid4().hex, offset=2)
        # Try it now with an offset equal to the lock_timeout
        self.assertRaises(core.LockTimeout,
                          do_with_lock_action_timeout,
                          kvs_region=kvs,
                          key=uuid.uuid4().hex,
                          offset=lock_timeout)
        # Final test with offset significantly greater than the lock_timeout
        self.assertRaises(core.LockTimeout,
                          do_with_lock_action_timeout,
                          kvs_region=kvs,
                          key=uuid.uuid4().hex,
                          offset=100)
    def test_kvs_with_lock_action_mismatched_keys(self):
        """_action_with_lock rejects a lock held for a different key."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')

        def do_with_lock_action(kvs_region, lock_key, target_key):
            with kvs_region.get_lock(lock_key) as lock_in_use:
                self.assertTrue(lock_in_use.active)
                with kvs_region._action_with_lock(target_key, lock_in_use):
                    pass

        # Ensure we raise a ValueError if the lock key mismatches from the
        # target key.
        self.assertRaises(ValueError,
                          do_with_lock_action,
                          kvs_region=kvs,
                          lock_key=self.key_foo,
                          target_key=self.key_bar)
    def test_kvs_with_lock_action_context_manager(self):
        """Entering _action_with_lock writes the backend's '_lock' marker and
        leaving both context managers removes it again."""
        # Make sure we're creating the correct key/value pairs for the backend
        # distributed locking mutex.
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendFixture')
        lock_key = '_lock' + self.key_foo
        self.assertNotIn(lock_key, kvs._region.backend._db)
        with kvs.get_lock(self.key_foo) as lock:
            with kvs._action_with_lock(self.key_foo, lock):
                self.assertTrue(lock.active)
                self.assertIn(lock_key, kvs._region.backend._db)
                self.assertIs(kvs._region.backend._db[lock_key], 1)
        # Marker must be gone once the lock is released.
        self.assertNotIn(lock_key, kvs._region.backend._db)
    def test_kvs_with_lock_action_context_manager_no_lock(self):
        """No backend lock marker is ever written when lock is None."""
        # Make sure we're not locking unless an actual lock is passed into the
        # context manager
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.KVSBackendFixture')
        lock_key = '_lock' + self.key_foo
        lock = None
        self.assertNotIn(lock_key, kvs._region.backend._db)
        with kvs._action_with_lock(self.key_foo, lock):
            self.assertNotIn(lock_key, kvs._region.backend._db)
        self.assertNotIn(lock_key, kvs._region.backend._db)
    def test_kvs_backend_registration_does_not_reregister_backends(self):
        """Calling _register_backends a second time must be a harmless no-op."""
        # SetUp registers the test backends. Running this again would raise an
        # exception if re-registration of the backends occurred.
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memory')
        core._register_backends()
    def test_kvs_memcached_manager_valid_dogpile_memcached_backend(self):
        """A registered memcached_backend name resolves to its driver class."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memcached',
                      memcached_backend='TestDriver')
        self.assertIsInstance(kvs._region.backend.driver,
                              TestMemcacheDriver)
    def test_kvs_memcached_manager_invalid_dogpile_memcached_backend(self):
        """An unknown memcached_backend name makes configure() fail."""
        # Invalid dogpile memcache backend should raise ValueError
        kvs = self._get_kvs_region()
        self.assertRaises(ValueError,
                          kvs.configure,
                          backing_store='openstack.kvs.Memcached',
                          memcached_backend=uuid.uuid4().hex)
    def test_kvs_memcache_manager_no_expiry_keys(self):
        """no_expiry_hashed_keys track the active key mangler; raw keys don't."""
        # Make sure the memcache backend recalculates the no-expiry keys
        # correctly when a key-mangler is set on it.
        def new_mangler(key):
            return '_mangled_key_' + key

        kvs = self._get_kvs_region()
        no_expiry_keys = set(['test_key'])
        kvs.configure('openstack.kvs.Memcached',
                      memcached_backend='TestDriver',
                      no_expiry_keys=no_expiry_keys)
        calculated_keys = set([kvs._region.key_mangler(key)
                               for key in no_expiry_keys])
        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
        self.assertSetEqual(calculated_keys,
                            kvs._region.backend.no_expiry_hashed_keys)
        self.assertSetEqual(no_expiry_keys,
                            kvs._region.backend.raw_no_expiry_keys)
        # Installing a different mangler must recompute the hashed set while
        # leaving the raw key set untouched.
        calculated_keys = set([new_mangler(key) for key in no_expiry_keys])
        kvs._region.backend.key_mangler = new_mangler
        self.assertSetEqual(calculated_keys,
                            kvs._region.backend.no_expiry_hashed_keys)
        self.assertSetEqual(no_expiry_keys,
                            kvs._region.backend.raw_no_expiry_keys)
    def test_kvs_memcache_key_mangler_set_to_none(self):
        """Clearing the mangler makes hashed no-expiry keys equal raw keys."""
        kvs = self._get_kvs_region()
        no_expiry_keys = set(['test_key'])
        kvs.configure('openstack.kvs.Memcached',
                      memcached_backend='TestDriver',
                      no_expiry_keys=no_expiry_keys)
        # sha1 mangling is the configured default before we clear it.
        self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
        kvs._region.backend.key_mangler = None
        self.assertSetEqual(kvs._region.backend.raw_no_expiry_keys,
                            kvs._region.backend.no_expiry_hashed_keys)
        self.assertIsNone(kvs._region.backend.key_mangler)
    def test_noncallable_key_mangler_set_on_driver_raises_type_error(self):
        """Assigning a non-callable key_mangler raises TypeError."""
        kvs = self._get_kvs_region()
        kvs.configure('openstack.kvs.Memcached',
                      memcached_backend='TestDriver')
        self.assertRaises(TypeError,
                          setattr,
                          kvs._region.backend,
                          'key_mangler',
                          'Non-Callable')
    def test_kvs_memcache_set_arguments_and_memcache_expires_ttl(self):
        """set()/set_multi() pass the memcache TTL, except for no-expiry keys,
        which get empty set_arguments."""
        # Test the "set_arguments" (arguments passed on all set calls) logic
        # and the no-expiry-key modifications of set_arguments for the explicit
        # memcache TTL.
        self.config_fixture.config(group='kvs', enable_key_mangler=False)
        kvs = self._get_kvs_region()
        memcache_expire_time = 86400

        expected_set_args = {'time': memcache_expire_time}
        expected_no_expiry_args = {}

        expected_foo_keys = [self.key_foo]
        expected_bar_keys = [self.key_bar]

        mapping_foo = {self.key_foo: self.value_foo}
        mapping_bar = {self.key_bar: self.value_bar}

        # key_bar is a no-expiry key, key_foo is an ordinary key.
        kvs.configure(backing_store='openstack.kvs.Memcached',
                      memcached_backend='TestDriver',
                      memcached_expire_time=memcache_expire_time,
                      some_other_arg=uuid.uuid4().hex,
                      no_expiry_keys=[self.key_bar])
        kvs_driver = kvs._region.backend.driver

        # Ensure the set_arguments are correct
        self.assertDictEqual(
            expected_set_args,
            kvs._region.backend._get_set_arguments_driver_attr())

        # Set a key that would have an expiry and verify the correct result
        # occurred and that the correct set_arguments were passed.
        kvs.set(self.key_foo, self.value_foo)
        self.assertDictEqual(
            expected_set_args,
            kvs._region.backend.driver.client.set_arguments_passed)
        observed_foo_keys = list(kvs_driver.client.keys_values.keys())
        self.assertEqual(expected_foo_keys, observed_foo_keys)
        self.assertEqual(
            self.value_foo,
            kvs._region.backend.driver.client.keys_values[self.key_foo][0])

        # Set a key that would not have an expiry and verify the correct result
        # occurred and that the correct set_arguments were passed.
        kvs.set(self.key_bar, self.value_bar)
        self.assertDictEqual(
            expected_no_expiry_args,
            kvs._region.backend.driver.client.set_arguments_passed)
        observed_bar_keys = list(kvs_driver.client.keys_values.keys())
        self.assertEqual(expected_bar_keys, observed_bar_keys)
        self.assertEqual(
            self.value_bar,
            kvs._region.backend.driver.client.keys_values[self.key_bar][0])

        # set_multi a dict that would have an expiry and verify the correct
        # result occurred and that the correct set_arguments were passed.
        kvs.set_multi(mapping_foo)
        self.assertDictEqual(
            expected_set_args,
            kvs._region.backend.driver.client.set_arguments_passed)
        observed_foo_keys = list(kvs_driver.client.keys_values.keys())
        self.assertEqual(expected_foo_keys, observed_foo_keys)
        self.assertEqual(
            self.value_foo,
            kvs._region.backend.driver.client.keys_values[self.key_foo][0])

        # set_multi a dict that would not have an expiry and verify the correct
        # result occurred and that the correct set_arguments were passed.
        kvs.set_multi(mapping_bar)
        self.assertDictEqual(
            expected_no_expiry_args,
            kvs._region.backend.driver.client.set_arguments_passed)
        observed_bar_keys = list(kvs_driver.client.keys_values.keys())
        self.assertEqual(expected_bar_keys, observed_bar_keys)
        self.assertEqual(
            self.value_bar,
            kvs._region.backend.driver.client.keys_values[self.key_bar][0])
    def test_memcached_lock_max_lock_attempts(self):
        """Exceeding max_lock_attempts surfaces as UnexpectedError."""
        kvs = self._get_kvs_region()
        max_lock_attempts = 1
        test_key = uuid.uuid4().hex
        kvs.configure(backing_store='openstack.kvs.Memcached',
                      memcached_backend='TestDriver',
                      max_lock_attempts=max_lock_attempts)
        self.assertEqual(max_lock_attempts,
                         kvs._region.backend.max_lock_attempts)
        # Simple Lock success test
        with kvs.get_lock(test_key) as lock:
            kvs.set(test_key, 'testing', lock)

        # Re-acquiring the same key while it is held exhausts the single
        # allowed attempt and must fail.
        def lock_within_a_lock(key):
            with kvs.get_lock(key) as first_lock:
                kvs.set(test_key, 'lock', first_lock)
                with kvs.get_lock(key) as second_lock:
                    kvs.set(key, 'lock-within-a-lock', second_lock)

        self.assertRaises(exception.UnexpectedError,
                          lock_within_a_lock,
                          key=test_key)
class TestMemcachedBackend(unit.TestCase):
    """Direct construction tests for the memcached KVS backend."""

    # Patch the i18n translation hook so the raised message is plain text.
    @mock.patch('keystone.common.kvs.backends.memcached._', six.text_type)
    def test_invalid_backend_fails_initialization(self):
        """An unknown memcached_backend name fails with a ValueError naming it."""
        raises_valueerror = matchers.Raises(matchers.MatchesException(
            ValueError, r'.*FakeBackend.*'))
        options = {
            'url': 'needed to get to the focus of this test (the backend)',
            'memcached_backend': 'FakeBackend',
        }
        self.assertThat(lambda: memcached.MemcachedBackend(options),
                        raises_valueerror)
| |
import functools
import os
from django import http
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect
import commonware.log
import jingo
import caching.base as caching
from tower import ugettext_lazy as _lazy, ugettext as _
import amo
from amo import messages
import sharing.views
from amo.decorators import login_required, post_required, json_view, write
from amo.urlresolvers import reverse
from amo.utils import urlparams, paginate
from access import acl
from addons.models import Addon
from addons.views import BaseFilter
from tags.models import Tag
from translations.query import order_by_translation
from users.models import UserProfile
from .models import (Collection, CollectionAddon, CollectionWatcher,
CollectionVote, SPECIAL_SLUGS)
from . import forms, tasks
log = commonware.log.getLogger('z.collections')
def get_collection(request, username, slug):
    """Return the collection identified by (username, slug).

    A user's own "special" collections (see SPECIAL_SLUGS) are produced by
    dedicated accessor methods on the profile; anything else is a plain
    lookup that 404s when missing.
    """
    special = slug in SPECIAL_SLUGS.values()
    if (special and request.user.is_authenticated()
            and request.amo_user.username == username):
        return getattr(request.amo_user, slug + '_collection')()
    return get_object_or_404(Collection.objects,
                             author__username=username, slug=slug)
def owner_required(f=None, require_owner=True):
    """Decorator that resolves the collection and enforces ownership.

    The wrapped view is called as ``func(request, collection, username,
    slug, ...)``.  ``require_owner`` is forwarded to
    ``acl.check_collection_ownership``; failures get a 403.  Works both
    bare (``@owner_required``) and with arguments
    (``@owner_required(require_owner=False)``).
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(request, username, slug, *args, **kw):
            collection = get_collection(request, username, slug)
            if acl.check_collection_ownership(request, collection,
                                              require_owner=require_owner):
                return func(request, collection, username, slug, *args, **kw)
            else:
                return http.HttpResponseForbidden()
        return wrapper
    # Called bare: f is the view. Called with args: return the decorator.
    return decorator(f) if f else decorator
def legacy_redirect(request, uuid, edit=False):
    """Redirect legacy /collection/<uuid-or-nickname> URLs to the new ones."""
    # Nicknames are capped at 30 characters, so a 36-char value is a uuid.
    lookup = {'uuid': uuid} if len(uuid) == 36 else {'nickname': uuid}
    collection = get_object_or_404(Collection.objects, **lookup)
    if edit:
        return redirect(collection.edit_url())
    return redirect(collection.get_url_path() + '?' +
                    request.GET.urlencode())
def legacy_directory_redirects(request, page):
    """Map old directory page names onto the new listing URLs."""
    sort_map = {'editors_picks': 'featured', 'popular': 'popular',
                'users': 'followers'}
    base = reverse('collections.list')
    destination = base
    if page in sort_map:
        destination = urlparams(base, sort=sort_map[page])
    elif request.user.is_authenticated():
        # 'mine' and 'favorites' only make sense for a logged-in user.
        if page == 'mine':
            destination = reverse('collections.user',
                                  args=[request.amo_user.username])
        elif page == 'favorites':
            destination = reverse('collections.following')
    return redirect(destination)
class CollectionFilter(BaseFilter):
    """Sort filter for collection listings, driven by the ?sort= param."""
    # Primary sort options shown in the UI.
    opts = (('featured', _lazy(u'Featured')),
            ('followers', _lazy(u'Most Followers')),
            ('created', _lazy(u'Newest')))
    # Secondary options available but not featured.
    extras = (('name', _lazy(u'Name')),
              ('updated', _lazy(u'Recently Updated')),
              ('popular', _lazy(u'Recently Popular')))

    def filter(self, field):
        # NOTE(review): an unknown field falls through every branch and
        # returns None -- presumably BaseFilter validates first; confirm.
        qs = self.base_queryset
        if field == 'featured':
            return qs.filter(type=amo.COLLECTION_FEATURED)
        elif field == 'followers':
            return qs.order_by('-subscribers')
        elif field == 'popular':
            return qs.order_by('-weekly_subscribers')
        elif field == 'updated':
            return qs.order_by('-modified')
        elif field == 'created':
            return qs.order_by('-created')
        elif field == 'name':
            return order_by_translation(qs, 'name')
def get_filter(request, base=None):
    """Build the default CollectionFilter, scoped to the current app."""
    if base is None:
        base = Collection.objects.listed()
    # Collections either target this app specifically or target no app.
    app_q = Q(application=request.APP.id) | Q(application=None)
    return CollectionFilter(request, base.filter(app_q),
                            key='sort', default='featured')
def render(request, template, data=None, extra=None):
    """Render *template* with 'collections' preset as the search category.

    ``data`` is the template context dict; ``extra`` holds extra keyword
    arguments forwarded to ``jingo.render``.  The original signature used
    mutable defaults (``data={}, extra={}``) and then mutated ``data`` via
    ``update``, so the default dict was shared (and polluted) across every
    call that omitted ``data``.  Defaulting to None fixes that while staying
    backward-compatible for callers that pass their own dicts.
    """
    data = {} if data is None else data
    extra = {} if extra is None else extra
    data.update(dict(search_cat='collections'))
    return jingo.render(request, template, data, **extra)
# TODO (potch): restore this when we do mobile bandwagon
# @mobile_template('bandwagon/{mobile/}collection_listing.html')
def collection_listing(request, base=None):
    """Main collection directory, sortable via the ?sort= parameter."""
    sort = request.GET.get('sort')
    # We turn users into followers.
    if sort == 'users':
        return redirect(urlparams(reverse('collections.list'),
                                  sort='followers'), permanent=True)
    filter = get_filter(request, base)
    collections = paginate(request, filter.qs)
    # NOTE(review): 11950 is a hard-coded add-on id (presumably the
    # "Add-on Collector" promo); tolerate its absence -- confirm the id.
    try:
        addon_collector = Addon.objects.get(id=11950)
    except Addon.DoesNotExist:
        addon_collector = None
    return render(request, 'bandwagon/impala/collection_listing.html',
                  dict(collections=collections, src='co-hc-sidebar',
                       dl_src='co-dp-sidebar', filter=filter, sort=sort,
                       sorting=filter.field, addon_collector=addon_collector))
def get_votes(request, collections):
    """Map collection id -> this user's CollectionVote for *collections*.

    Anonymous users always get an empty dict.
    """
    if not request.user.is_authenticated():
        return {}
    wanted = [c.id for c in collections]
    votes = CollectionVote.objects.filter(user=request.amo_user,
                                          collection__in=wanted)
    return dict((v.collection_id, v) for v in votes)
def user_listing(request, username):
    """All collections by *username*; unlisted ones only for the owner."""
    author = get_object_or_404(UserProfile, username=username)
    qs = (Collection.objects.filter(author__username=username)
          .order_by('-created'))
    # Hide unlisted collections from everyone except their author.
    if not (request.user.is_authenticated() and
            request.amo_user.username == username):
        qs = qs.filter(listed=True)
    collections = paginate(request, qs)
    votes = get_votes(request, collections.object_list)
    return render(request, 'bandwagon/user_listing.html',
                  dict(collections=collections, collection_votes=votes,
                       page='mine', author=author, filter=get_filter(request)))
class CollectionAddonFilter(BaseFilter):
    """Sort filter for the add-ons listed on a collection detail page."""
    opts = (('added', _lazy(u'Added')),
            ('popular', _lazy(u'Popularity')),
            ('name', _lazy(u'Name')))

    def filter(self, field):
        # 'added' = the order add-ons were put into the collection.
        if field == 'added':
            return self.base_queryset.order_by('collectionaddon__created')
        elif field == 'name':
            return order_by_translation(self.base_queryset, 'name')
        elif field == 'popular':
            return (self.base_queryset.order_by('-weekly_downloads')
                    .with_index(addons='downloads_type_idx'))
def collection_detail(request, username, slug):
    """Detail page: the collection's add-ons (filtered/paginated), notes,
    the author's other collections, tags and the viewer's permissions."""
    c = get_collection(request, username, slug)
    # Unlisted collections are only visible to users with ownership rights.
    if not (c.listed or acl.check_collection_ownership(request, c)):
        return http.HttpResponseForbidden()
    if request.GET.get('format') == 'rss':
        return redirect(c.feed_url(), permanent=True)
    base = Addon.objects.valid() & c.addons.all()
    filter = CollectionAddonFilter(request, base,
                                   key='sort', default='popular')
    notes = get_notes(c)
    # Go directly to CollectionAddon for the count to avoid joins.
    count = CollectionAddon.objects.filter(
        Addon.objects.valid_q(amo.VALID_STATUSES, prefix='addon__'),
        collection=c.id)
    addons = paginate(request, filter.qs, per_page=15,
                      count=count.count())
    # The add-on query is not related to the collection, so we need to manually
    # hook them up for invalidation. Bonus: count invalidation.
    keys = [addons.object_list.flush_key(),
            count.flush_key()]
    caching.invalidator.add_to_flush_list({c.flush_key(): keys})
    if c.author_id:
        qs = Collection.objects.listed().filter(author=c.author)
        # Up to 4 random other collections by the same author.
        others = amo.utils.randslice(qs, limit=4, exclude=c.id)
    else:
        others = []
    perms = {
        'view_stats': acl.check_ownership(request, c, require_owner=False),
    }
    tags = Tag.objects.filter(id__in=c.top_tags) if c.top_tags else []
    return render(request, 'bandwagon/collection_detail.html',
                  {'collection': c, 'filter': filter, 'addons': addons,
                   'notes': notes, 'author_collections': others, 'tags': tags,
                   'perms': perms})
def get_notes(collection, raw=False):
    """Yield (exactly once) a dict of addon_id -> comment for *collection*.

    Implemented as a generator so the queryset isn't evaluated until the
    caller iterates it (templates) or calls ``.next()`` explicitly (see
    edit()).  With ``raw=True`` the untranslated localized_string is used.
    """
    # This might hurt in a big collection with lots of notes.
    # It's a generator so we don't evaluate anything by default.
    notes = CollectionAddon.objects.filter(collection=collection,
                                           comments__isnull=False)
    rv = {}
    for note in notes:
        # Watch out for comments in a language we didn't pick up.
        if note.comments:
            rv[note.addon_id] = (note.comments.localized_string if raw
                                 else note.comments)
    yield rv
@write
@login_required
def collection_vote(request, username, slug, direction):
    """Record an up/down vote; voting the same direction twice cancels it.

    ``direction`` must be 'up' or 'down' (anything else raises KeyError).
    Non-POST requests just bounce back to the collection page.
    """
    c = get_collection(request, username, slug)
    if request.method != 'POST':
        return redirect(c.get_url_path())
    vote = {'up': 1, 'down': -1}[direction]
    # Query the 'default' database alias directly so we see our own write.
    qs = (CollectionVote.objects.using('default')
          .filter(collection=c, user=request.amo_user))
    if qs:
        cv = qs[0]
        if vote == cv.vote:  # Double vote => cancel.
            cv.delete()
        else:
            cv.vote = vote
            cv.save(force_update=True)
    else:
        CollectionVote.objects.create(collection=c, user=request.amo_user,
                                      vote=vote)
    if request.is_ajax():
        return http.HttpResponse()
    else:
        return redirect(c.get_url_path())
def initial_data_from_request(request):
    """Initial form data tying a new collection to the user and current app."""
    return {'author': request.amo_user, 'application_id': request.APP.id}
@write
@login_required
def add(request):
    """Display and process the form to create a new collection."""
    data = {}
    if request.method == 'POST':
        form = forms.CollectionForm(
            request.POST, request.FILES,
            initial=initial_data_from_request(request))
        aform = forms.AddonsForm(request.POST)
        if form.is_valid():
            collection = form.save(default_locale=request.LANG)
            collection.save()
            # Add-on errors are non-fatal: the collection is created anyway.
            if aform.is_valid():
                aform.save(collection)
            title = _("Collection created!")
            msg = _("""Your new collection is shown below. You can <a
                       href="%(url)s">edit additional settings</a> if you'd
                       like.""") % {'url': collection.edit_url()}
            messages.success(request, title, msg, extra_tags='collection',
                             message_safe=True)
            log.info('Created collection %s' % collection.id)
            return http.HttpResponseRedirect(collection.get_url_path())
        else:
            # Round-trip the add-ons the user had already picked.
            data['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
            data['comments'] = aform.clean_addon_comment()
    else:
        form = forms.CollectionForm()
    data.update(form=form, filter=get_filter(request))
    return render(request, 'bandwagon/add.html', data)
@write
@login_required(redirect=False)
def ajax_new(request):
    """AJAX endpoint: create a collection and add a single add-on to it."""
    form = forms.CollectionForm(request.POST or None,
                                initial={'author': request.amo_user,
                                         'application_id': request.APP.id},
                                )
    if request.method == 'POST':
        if form.is_valid():
            collection = form.save()
            # NOTE(review): a missing addon_id raises KeyError (HTTP 500)
            # instead of a 400 -- confirm callers always send it.
            addon_id = request.REQUEST['addon_id']
            a = Addon.objects.get(pk=addon_id)
            collection.add_addon(a)
            log.info('Created collection %s' % collection.id)
            return http.HttpResponseRedirect(reverse('collections.ajax_list')
                                             + '?addon_id=%s' % addon_id)
    return jingo.render(request, 'bandwagon/ajax_new.html', {'form': form})
@login_required(redirect=False)
def ajax_list(request):
    """AJAX endpoint: list the user's collections, flagging the ones that
    already contain the requested add-on."""
    # Get collections associated with this user
    collections = Collection.objects.publishable_by(request.amo_user)
    try:
        addon_id = int(request.GET['addon_id'])
    except (KeyError, ValueError):
        return http.HttpResponseBadRequest()
    for collection in collections:
        # See if the collections contains the addon
        if addon_id in collection.addons.values_list('id', flat=True):
            collection.has_addon = True
    return jingo.render(request, 'bandwagon/ajax_list.html',
                        {'collections': collections})
@write
@login_required
@post_required
def collection_alter(request, username, slug, action):
    """Look up a collection by owner/slug and apply *action* to it."""
    collection = get_collection(request, username, slug)
    return change_addon(request, collection, action)
def change_addon(request, collection, action):
    """Apply *action* ('add'/'remove' style) for a POSTed add-on.

    The action name is resolved to a ``<action>_addon`` method on the
    collection.  Returns 400 for a missing/invalid addon_id and 403 when
    the user may not edit the collection.
    """
    if not acl.check_collection_ownership(request, collection):
        return http.HttpResponseForbidden()
    try:
        addon = get_object_or_404(Addon.objects, pk=request.POST['addon_id'])
    except (ValueError, KeyError):
        return http.HttpResponseBadRequest()
    getattr(collection, action + '_addon')(addon)
    log.info(u'%s: %s %s to collection %s' %
             (request.amo_user, action, addon.id, collection.id))
    # AJAX callers get bounced to the list endpoint for re-rendering.
    if request.is_ajax():
        url = '%s?addon_id=%s' % (reverse('collections.ajax_list'), addon.id)
    else:
        url = collection.get_url_path()
    return redirect(url)
@write
@login_required
@post_required
def ajax_collection_alter(request, action):
    """Like collection_alter, but the collection arrives as a POSTed id."""
    try:
        collection = get_object_or_404(Collection.objects,
                                       pk=request.POST['id'])
    except (ValueError, KeyError):
        return http.HttpResponseBadRequest()
    return change_addon(request, collection, action)
@write
@login_required
@owner_required(require_owner=False)
def edit(request, collection, username, slug):
    """Edit a collection's details; admins additionally get an AdminForm."""
    is_admin = acl.action_allowed(request, 'Admin', '%')
    if request.method == 'POST':
        initial = initial_data_from_request(request)
        if collection.author_id:  # Don't try to change the author.
            initial['author'] = collection.author
        form = forms.CollectionForm(request.POST, request.FILES,
                                    initial=initial,
                                    instance=collection)
        if form.is_valid():
            collection = form.save()
            title = _("Collection updated!")
            msg = _(("""<a href="%(url)s">View your collection</a> to see the
                changes.""")) % {'url': collection.get_url_path()}
            messages.success(request, title, msg, extra_tags='collection',
                             message_safe=True)
            log.info(u'%s edited collection %s' %
                     (request.amo_user, collection.id))
            return http.HttpResponseRedirect(collection.edit_url())
    else:
        form = forms.CollectionForm(instance=collection)
    # Bypass the cache and read the 'default' DB so we see our own writes.
    qs = (CollectionAddon.uncached.using('default')
          .filter(collection=collection))
    meta = dict((c.addon_id, c) for c in qs)
    addons = collection.addons.no_cache().all()
    # get_notes is a generator yielding a single dict; pull it out.
    comments = get_notes(collection, raw=True).next()
    if is_admin:
        initial = dict(type=collection.type,
                       application=collection.application_id)
        admin_form = forms.AdminForm(initial=initial)
    else:
        admin_form = None
    data = dict(collection=collection,
                form=form,
                user=request.amo_user,
                username=username,
                slug=slug,
                meta=meta,
                filter=get_filter(request),
                is_admin=is_admin,
                admin_form=admin_form,
                addons=addons,
                comments=comments)
    return render(request, 'bandwagon/edit.html', data)
@write
@login_required
@owner_required(require_owner=False)
@post_required
def edit_addons(request, collection, username, slug):
    """Process the add-ons form on the collection edit page."""
    # NOTE(review): @post_required already guarantees POST, so this check is
    # redundant (and a hypothetical non-POST would return None); kept as-is.
    if request.method == 'POST':
        form = forms.AddonsForm(request.POST)
        if form.is_valid():
            form.save(collection)
            messages.success(request, _('Your collection has been updated.'))
            log.info(u'%s added add-ons to %s' %
                     (request.amo_user, collection.id))
    return http.HttpResponseRedirect(collection.edit_url() + '#addons-edit')
@write
@login_required
@owner_required
@post_required
def edit_contributors(request, collection, username, slug):
    """Process the contributors form (plus the admin form for admins)."""
    is_admin = acl.action_allowed(request, 'Admin', '%')
    if is_admin:
        admin_form = forms.AdminForm(request.POST)
        if admin_form.is_valid():
            admin_form.save(collection)
    form = forms.ContributorsForm(request.POST)
    if form.is_valid():
        form.save(collection)
        messages.success(request, _('Your collection has been updated.'))
        # Ownership was transferred: the former owner may no longer edit,
        # so send them to the public page instead of the edit page.
        if form.cleaned_data['new_owner']:
            return http.HttpResponseRedirect(collection.get_url_path())
    return http.HttpResponseRedirect(collection.edit_url() + '#users-edit')
@write
@login_required
@owner_required
@post_required
def edit_privacy(request, collection, username, slug):
    """Toggle whether the collection is publicly listed."""
    collection.listed = not collection.listed
    collection.save()
    log.info(u'%s changed privacy on collection %s' %
             (request.amo_user, collection.id))
    return redirect(collection.get_url_path())
@write
@login_required
def delete(request, username, slug):
    """Confirm and delete a collection owned by the request user.

    GET renders the confirmation page; POST with sure=1 deletes and sends
    the user to their listing, any other POST bounces back to the
    collection page.  Non-owners get a 403 (and the attempt is logged).
    """
    collection = get_object_or_404(Collection, author__username=username,
                                   slug=slug)
    if not acl.check_collection_ownership(request, collection, True):
        log.info(u'%s is trying to delete collection %s'
                 % (request.amo_user, collection.id))
        return http.HttpResponseForbidden()
    data = dict(collection=collection, username=username, slug=slug)
    if request.method == 'POST':
        # .get() instead of ['sure']: a malformed POST without 'sure' is
        # treated as a cancel rather than raising KeyError (HTTP 500).
        if request.POST.get('sure') == '1':
            collection.delete()
            log.info(u'%s deleted collection %s' %
                     (request.amo_user, collection.id))
            url = reverse('collections.user', args=[username])
            return http.HttpResponseRedirect(url)
        else:
            return http.HttpResponseRedirect(collection.get_url_path())
    return render(request, 'bandwagon/delete.html', data)
@write
@login_required
@owner_required
@json_view
def delete_icon(request, collection, username, slug):
    """Remove the collection's custom icon; AJAX gets the fallback URL."""
    log.debug(u"User deleted collection (%s) icon " % slug)
    # File removal happens asynchronously in a task.
    tasks.delete_icon(os.path.join(collection.get_img_dir(),
                                   '%d.png' % collection.id))
    collection.icontype = ''
    collection.save()
    if request.is_ajax():
        return {'icon': collection.icon_url}
    else:
        messages.success(request, _('Icon Deleted'))
        return redirect(collection.edit_url())
@login_required
@post_required
@json_view
def watch(request, username, slug):
    """
    POST /collections/:user/:slug/watch to toggle the user's watching status.

    For ajax, return {watching: true|false}. (reflects the new value)
    Otherwise, redirect to the collection page.
    """
    collection = get_collection(request, username, slug)
    d = dict(user=request.amo_user, collection=collection)
    # Bypass the cache and read the 'default' DB so the toggle is accurate.
    qs = CollectionWatcher.uncached.using('default').filter(**d)
    watching = not qs  # Flip the bool since we're about to change it.
    if qs:
        qs.delete()
    else:
        CollectionWatcher.objects.create(**d)
    if request.is_ajax():
        return {'watching': watching}
    else:
        return redirect(collection.get_url_path())
def share(request, username, slug):
    """Hand the collection off to the generic sharing views."""
    c = get_collection(request, username, slug)
    return sharing.views.share(request, c, name=c.name,
                               description=c.description)
@login_required
def following(request):
    """List the collections the current user is following."""
    qs = (Collection.objects.filter(following__user=request.amo_user)
          .order_by('-following__created'))
    collections = paginate(request, qs)
    votes = get_votes(request, collections.object_list)
    # user_listing() renders this same template and passes the votes dict
    # as 'collection_votes'; use the same context key here so the template
    # actually sees the user's votes (the old 'votes' key didn't match).
    return render(request, 'bandwagon/user_listing.html',
                  dict(collections=collections, collection_votes=votes,
                       page='following', filter=get_filter(request)))
@login_required
def mine(request, slug=None):
    """Redirect /collections/mine[/:slug] to the user's own listing/detail."""
    username = request.amo_user.username
    if slug is None:
        view, args = 'collections.user', [username]
    else:
        view, args = 'collections.detail', [username, slug]
    return redirect(reverse(view, args=args))
| |
"""Tests for tools for manipulating of large commutative expressions. """
from sympy import (S, Add, sin, Mul, Symbol, oo, Integral, sqrt, Tuple, I,
Interval, O, symbols, simplify, collect, Sum, Basic, Dict,
root, exp, cos, sin, oo, Dummy, log)
from sympy.core.exprtools import (decompose_power, Factors, Term, _gcd_terms,
gcd_terms, factor_terms, factor_nc,
_monotonic_sign)
from sympy.core.mul import _keep_coeff as _keep_coeff
from sympy.simplify.cse_opts import sub_pre
from sympy.utilities.pytest import raises
from sympy.abc import a, b, t, x, y, z
def test_decompose_power():
    """decompose_power splits a power into (base, integer exponent)."""
    cases = [
        (x, (x, 1)),
        (x**2, (x, 2)),
        (x**(2*y), (x**y, 2)),
        (x**(2*y/3), (x**(y/3), 2)),
    ]
    for expr, expected in cases:
        assert decompose_power(expr) == expected
def test_Factors():
    """Exercise the Factors class: construction, arithmetic (mul/div/quo/
    rem/pow), gcd/lcm, normal(), and edge cases with 0, I, negative and
    float bases."""
    assert Factors() == Factors({}) == Factors(S(1))
    assert Factors().as_expr() == S.One
    assert Factors({x: 2, y: 3, sin(x): 4}).as_expr() == x**2*y**3*sin(x)**4
    assert Factors(S.Infinity) == Factors({oo: 1})
    assert Factors(S.NegativeInfinity) == Factors({oo: 1, -1: 1})
    a = Factors({x: 5, y: 3, z: 7})
    b = Factors({      y: 4, z: 3, t: 10})
    assert a.mul(b) == a*b == Factors({x: 5, y: 7, z: 10, t: 10})
    assert a.div(b) == divmod(a, b) == \
        (Factors({x: 5, z: 4}), Factors({y: 1, t: 10}))
    assert a.quo(b) == a/b == Factors({x: 5, z: 4})
    assert a.rem(b) == a % b == Factors({y: 1, t: 10})
    assert a.pow(3) == a**3 == Factors({x: 15, y: 9, z: 21})
    assert b.pow(3) == b**3 == Factors({y: 12, z: 9, t: 30})
    assert a.gcd(b) == Factors({y: 3, z: 3})
    assert a.lcm(b) == Factors({x: 5, y: 4, z: 7, t: 10})
    a = Factors({x: 4, y: 7, t: 7})
    b = Factors({z: 1, t: 3})
    assert a.normal(b) == (Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
    assert Factors(sqrt(2)*x).as_expr() == sqrt(2)*x
    assert Factors(-I)*I == Factors()
    assert Factors({S(-1): S(3)})*Factors({S(-1): S(1), I: S(5)}) == \
        Factors(I)
    assert Factors(S(2)**x).div(S(3)**x) == \
        (Factors({S(2): x}), Factors({S(3): x}))
    assert Factors(2**(2*x + 2)).div(S(8)) == \
        (Factors({S(2): 2*x + 2}), Factors({S(8): S(1)}))
    # coverage
    # /!\ things break if this is not True
    assert Factors({S(-1): S(3)/2}) == Factors({I: S.One, S(-1): S.One})
    assert Factors({I: S(1), S(-1): S(1)/3}).as_expr() == I*(-1)**(S(1)/3)
    assert Factors(-1.) == Factors({S(-1): S(1), S(1.): 1})
    assert Factors(-2.) == Factors({S(-1): S(1), S(2.): 1})
    assert Factors((-2.)**x) == Factors({S(-2.): x})
    assert Factors(S(-2)) == Factors({S(-1): S(1), S(2): 1})
    assert Factors(S.Half) == Factors({S(2): -S.One})
    assert Factors(S(3)/2) == Factors({S(3): S.One, S(2): S(-1)})
    assert Factors({I: S(1)}) == Factors(I)
    assert Factors({-1.0: 2, I: 1}) == Factors({S(1.0): 1, I: 1})
    assert Factors({S.NegativeOne: -S(3)/2}).as_expr() == I
    A = symbols('A', commutative=False)
    assert Factors(2*A**2) == Factors({S(2): 1, A**2: 1})
    assert Factors(I) == Factors({I: S.One})
    assert Factors(x).normal(S(2)) == (Factors(x), Factors(S(2)))
    assert Factors(x).normal(S(0)) == (Factors(), Factors(S(0)))
    raises(ZeroDivisionError, lambda: Factors(x).div(S(0)))
    assert Factors(x).mul(S(2)) == Factors(2*x)
    assert Factors(x).mul(S(0)).is_zero
    assert Factors(x).mul(1/x).is_one
    assert Factors(x**sqrt(2)**3).as_expr() == x**(2*sqrt(2))
    assert Factors(x)**Factors(S(2)) == Factors(x**2)
    assert Factors(x).gcd(S(0)) == Factors(x)
    assert Factors(x).lcm(S(0)).is_zero
    assert Factors(S(0)).div(x) == (Factors(S(0)), Factors())
    assert Factors(x).div(x) == (Factors(), Factors())
    assert Factors({x: .2})/Factors({x: .2}) == Factors()
    assert Factors(x) != Factors()
    assert Factors(S(0)).normal(x) == (Factors(S(0)), Factors())
    # div/normal/gcd with symbolic exponents on the same base
    n, d = x**(2 + y), x**2
    f = Factors(n)
    assert f.div(d) == f.normal(d) == (Factors(x**y), Factors())
    assert f.gcd(d) == Factors()
    d = x**y
    assert f.div(d) == f.normal(d) == (Factors(x**2), Factors())
    assert f.gcd(d) == Factors(d)
    n = d = 2**x
    f = Factors(n)
    assert f.div(d) == f.normal(d) == (Factors(), Factors())
    assert f.gcd(d) == Factors(d)
    n, d = 2**x, 2**y
    f = Factors(n)
    assert f.div(d) == f.normal(d) == (Factors({S(2): x}), Factors({S(2): y}))
    assert f.gcd(d) == Factors()
    # extraction of constant only
    n = x**(x + 3)
    assert Factors(n).normal(x**-3) == (Factors({x: x + 6}), Factors({}))
    assert Factors(n).normal(x**3) == (Factors({x: x}), Factors({}))
    assert Factors(n).normal(x**4) == (Factors({x: x}), Factors({x: 1}))
    assert Factors(n).normal(x**(y - 3)) == \
        (Factors({x: x + 6}), Factors({x: y}))
    assert Factors(n).normal(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
    assert Factors(n).normal(x**(y + 4)) == \
        (Factors({x: x}), Factors({x: y + 1}))
    assert Factors(n).div(x**-3) == (Factors({x: x + 6}), Factors({}))
    assert Factors(n).div(x**3) == (Factors({x: x}), Factors({}))
    assert Factors(n).div(x**4) == (Factors({x: x}), Factors({x: 1}))
    assert Factors(n).div(x**(y - 3)) == \
        (Factors({x: x + 6}), Factors({x: y}))
    assert Factors(n).div(x**(y + 3)) == (Factors({x: x}), Factors({x: y}))
    assert Factors(n).div(x**(y + 4)) == \
        (Factors({x: x}), Factors({x: y + 1}))
def test_Term():
    """Exercise the Term class: construction from expressions, inversion,
    mul/quo/pow, gcd/lcm, and factoring of composite coefficients."""
    a = Term(4*x*y**2/z/t**3)
    b = Term(2*x**3*y**5/t**3)
    assert a == Term(4, Factors({x: 1, y: 2}), Factors({z: 1, t: 3}))
    assert b == Term(2, Factors({x: 3, y: 5}), Factors({t: 3}))
    assert a.as_expr() == 4*x*y**2/z/t**3
    assert b.as_expr() == 2*x**3*y**5/t**3
    assert a.inv() == \
        Term(S(1)/4, Factors({z: 1, t: 3}), Factors({x: 1, y: 2}))
    assert b.inv() == Term(S(1)/2, Factors({t: 3}), Factors({x: 3, y: 5}))
    assert a.mul(b) == a*b == \
        Term(8, Factors({x: 4, y: 7}), Factors({z: 1, t: 6}))
    assert a.quo(b) == a/b == Term(2, Factors({}), Factors({x: 2, y: 3, z: 1}))
    assert a.pow(3) == a**3 == \
        Term(64, Factors({x: 3, y: 6}), Factors({z: 3, t: 9}))
    assert b.pow(3) == b**3 == Term(8, Factors({x: 9, y: 15}), Factors({t: 9}))
    assert a.pow(-3) == a**(-3) == \
        Term(S(1)/64, Factors({z: 3, t: 9}), Factors({x: 3, y: 6}))
    assert b.pow(-3) == b**(-3) == \
        Term(S(1)/8, Factors({t: 9}), Factors({x: 9, y: 15}))
    assert a.gcd(b) == Term(2, Factors({x: 1, y: 2}), Factors({t: 3}))
    assert a.lcm(b) == Term(4, Factors({x: 3, y: 5}), Factors({z: 1, t: 3}))
    a = Term(4*x*y**2/z/t**3)
    b = Term(2*x**3*y**5*t**7)
    assert a.mul(b) == Term(8, Factors({x: 4, y: 7, t: 4}), Factors({z: 1}))
    # composite arguments are factored on construction
    assert Term((2*x + 2)**3) == Term(8, Factors({x + 1: 3}), Factors({}))
    assert Term((2*x + 2)*(3*x + 6)**2) == \
        Term(18, Factors({x + 1: 1, x + 2: 2}), Factors({}))
def test_gcd_terms():
    """gcd_terms/_gcd_terms pull the common factor out of a sum; also checks
    container handling and a couple of regression issues."""
    f = 2*(x + 1)*(x + 4)/(5*x**2 + 5) + (2*x + 2)*(x + 5)/(x**2 + 1)/5 + \
        (2*x + 2)*(x + 6)/(5*x**2 + 5)
    assert _gcd_terms(f) == ((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
    assert _gcd_terms(Add.make_args(f)) == \
        ((S(6)/5)*((1 + x)/(1 + x**2)), 5 + x, 1)
    newf = (S(6)/5)*((1 + x)*(5 + x)/(1 + x**2))
    assert gcd_terms(f) == newf
    args = Add.make_args(f)
    # non-Basic sequences of terms treated as terms of Add
    assert gcd_terms(list(args)) == newf
    assert gcd_terms(tuple(args)) == newf
    assert gcd_terms(set(args)) == newf
    # but a Basic sequence is treated as a container
    assert gcd_terms(Tuple(*args)) != newf
    assert gcd_terms(Basic(Tuple(1, 3*y + 3*x*y), Tuple(1, 3))) == \
        Basic((1, 3*y*(x + 1)), (1, 3))
    # but we shouldn't change keys of a dictionary or some may be lost
    assert gcd_terms(Dict((x*(1 + y), 2), (x + x*y, y + x*y))) == \
        Dict({x*(y + 1): 2, x + x*y: y*(1 + x)})
    assert gcd_terms((2*x + 2)**3 + (2*x + 2)**2) == 4*(x + 1)**2*(2*x + 3)
    # trivial inputs pass through
    assert gcd_terms(0) == 0
    assert gcd_terms(1) == 1
    assert gcd_terms(x) == x
    assert gcd_terms(2 + 2*x) == Mul(2, 1 + x, evaluate=False)
    arg = x*(2*x + 4*y)
    garg = 2*x*(x + 2*y)
    assert gcd_terms(arg) == garg
    # gcd_terms recurses into function arguments
    assert gcd_terms(sin(arg)) == sin(garg)
    # issue 6139-like
    alpha, alpha1, alpha2, alpha3 = symbols('alpha:4')
    a = alpha**2 - alpha*x**2 + alpha + x**3 - x*(alpha + 1)
    rep = (alpha, (1 + sqrt(5))/2 + alpha1*x + alpha2*x**2 + alpha3*x**3)
    s = (a/(x - alpha)).subs(*rep).series(x, 0, 1)
    assert simplify(collect(s, x)) == -sqrt(5)/2 - S(3)/2 + O(x)
    # issue 5917
    assert _gcd_terms([S.Zero, S.Zero]) == (0, 0, 1)
    assert _gcd_terms([2*x + 4]) == (2, x + 2, 1)
    eq = x/(x + 1/x)
    assert gcd_terms(eq, fraction=False) == eq
def test_factor_terms():
    """Exercise ``factor_terms``: common-factor extraction with options."""
    A = Symbol('A', commutative=False)
    assert factor_terms(9*(x + x*y + 1) + (3*x + 3)**(2 + 2*x)) == \
        9*x*y + 9*x + _keep_coeff(S(3), x + 1)**_keep_coeff(S(2), x + 1) + 9
    assert factor_terms(9*(x + x*y + 1) + (3)**(2 + 2*x)) == \
        _keep_coeff(S(9), 3**(2*x) + x*y + x + 1)
    assert factor_terms(3**(2 + 2*x) + a*3**(2 + 2*x)) == \
        9*3**(2*x)*(a + 1)
    # commutative part is factored even next to noncommutative symbols
    assert factor_terms(x + x*A) == \
        x*(1 + A)
    assert factor_terms(sin(x + x*A)) == \
        sin(x*(1 + A))
    assert factor_terms((3*x + 3)**((2 + 2*x)/3)) == \
        _keep_coeff(S(3), x + 1)**_keep_coeff(S(2)/3, x + 1)
    assert factor_terms(x + (x*y + x)**(3*x + 3)) == \
        x + (x*(y + 1))**_keep_coeff(S(3), x + 1)
    assert factor_terms(a*(x + x*y) + b*(x*2 + y*x*2)) == \
        x*(a + 2*b)*(y + 1)
    # unevaluated objects without Add structure pass through unchanged
    i = Integral(x, (x, 0, oo))
    assert factor_terms(i) == i
    # check radical extraction
    eq = sqrt(2) + sqrt(10)
    assert factor_terms(eq) == eq
    assert factor_terms(eq, radical=True) == sqrt(2)*(1 + sqrt(5))
    eq = root(-6, 3) + root(6, 3)
    assert factor_terms(eq, radical=True) == 6**(S(1)/3)*(1 + (-1)**(S(1)/3))
    # plain containers are processed element-wise, preserving the type
    eq = [x + x*y]
    ans = [x*(y + 1)]
    for c in [list, tuple, set]:
        assert factor_terms(c(eq)) == c(ans)
    assert factor_terms(Tuple(x + x*y)) == Tuple(x*(y + 1))
    assert factor_terms(Interval(0, 1)) == Interval(0, 1)
    e = 1/sqrt(a/2 + 1)
    assert factor_terms(e, clear=False) == 1/sqrt(a/2 + 1)
    assert factor_terms(e, clear=True) == sqrt(2)/sqrt(a + 2)
    eq = x/(x + 1/x) + 1/(x**2 + 1)
    assert factor_terms(eq, fraction=False) == eq
    assert factor_terms(eq, fraction=True) == 1
    assert factor_terms((1/(x**3 + x**2) + 2/x**2)*y) == \
        y*(2 + 1/(x + 1))/x**2
    # if not True, then processing for this in factor_terms is not necessary
    assert gcd_terms(-x - y) == -x - y
    assert factor_terms(-x - y) == Mul(-1, x + y, evaluate=False)
    # if not True, then "special" processing in factor_terms is not necessary
    assert gcd_terms(exp(Mul(-1, x + 1))) == exp(-x - 1)
    e = exp(-x - 2) + x
    assert factor_terms(e) == exp(Mul(-1, x + 2, evaluate=False)) + x
    assert factor_terms(e, sign=False) == e
    assert factor_terms(exp(-4*x - 2) - x) == -x + exp(Mul(-2, 2*x + 1, evaluate=False))
def test_xreplace():
    """xreplace must leave an unevaluated Mul unevaluated."""
    expr = Mul(2, 1 + x, evaluate=False)
    # an empty map and a map that touches nothing both return an equal expression
    assert expr.xreplace({}) == expr
    assert expr.xreplace({y: x}) == expr
def test_factor_nc():
    """Exercise ``factor_nc``: factoring expressions with noncommutative symbols."""
    x, y = symbols('x,y')
    k = symbols('k', integer=True)
    n, m, o = symbols('n,m,o', commutative=False)
    # mul and multinomial expansion is needed
    from sympy.core.function import _mexpand
    e = x*(1 + y)**2
    assert _mexpand(e) == x + x*2*y + x*y**2
    def factor_nc_test(e):
        # round-trip check: expand, factor, re-expand must give the expansion back
        ex = _mexpand(e)
        assert ex.is_Add
        f = factor_nc(ex)
        assert not f.is_Add and _mexpand(f) == ex
    factor_nc_test(x*(1 + y))
    factor_nc_test(n*(x + 1))
    factor_nc_test(n*(x + m))
    factor_nc_test((x + m)*n)
    factor_nc_test(n*m*(x*o + n*o*m)*n)
    s = Sum(x, (x, 1, 2))
    factor_nc_test(x*(1 + s))
    factor_nc_test(x*(1 + s)*s)
    factor_nc_test(x*(1 + sin(s)))
    factor_nc_test((1 + n)**2)
    factor_nc_test((x + n)*(x + m)*(x + y))
    factor_nc_test(x*(n*m + 1))
    factor_nc_test(x*(n*m + x))
    factor_nc_test(x*(x*n*m + 1))
    factor_nc_test(x*n*(x*m + 1))
    factor_nc_test(x*(m*n + x*n*m))
    factor_nc_test(n*(1 - m)*n**2)
    factor_nc_test((n + m)**2)
    factor_nc_test((n - m)*(n + m)**2)
    factor_nc_test((n + m)**2*(n - m))
    factor_nc_test((m - n)*(n + m)**2*(n - m))
    # left/right factors must respect noncommutative order
    assert factor_nc(n*(n + n*m)) == n**2*(1 + m)
    assert factor_nc(m*(m*n + n*m*n**2)) == m*(m + n*m*n)*n
    eq = m*sin(n) - sin(n)*m
    assert factor_nc(eq) == eq
    # for coverage:
    from sympy.physics.secondquant import Commutator
    from sympy import factor
    eq = 1 + x*Commutator(m, n)
    assert factor_nc(eq) == eq
    eq = x*Commutator(m, n) + x*Commutator(m, o)*Commutator(m, n)
    assert factor(eq) == x*(1 + Commutator(m, o))*Commutator(m, n)
    # issue 6534
    assert (2*n + 2*m).factor() == 2*(n + m)
    # issue 6701
    assert factor_nc(n**k + n**(k + 1)) == n**k*(1 + n)
    assert factor_nc((m*n)**k + (m*n)**(k + 1)) == (1 + m*n)*(m*n)**k
    # issue 6918
    assert factor_nc(-n*(2*x**2 + 2*x)) == -2*n*x*(x + 1)
def test_issue_6360():
    """sub_pre followed by factor_terms must collapse the (a + b) powers."""
    a, b = symbols("a b")
    s = a + b
    expr = s + s**2*(-2*a - 2*b)
    assert factor_terms(sub_pre(expr)) == a + b - 2*(a + b)**3
def test_issue_7903():
    """simplify() must return a truthy (non-crashing) result for this sum."""
    a = symbols('a', real=True)
    expr = exp(I*cos(a)) + exp(-I*sin(a))
    assert expr.simplify()
def test_monotonic_sign():
    """Exercise ``_monotonic_sign``: sign/bound inference from assumptions."""
    F = _monotonic_sign
    x = symbols('x')
    # with no assumptions nothing can be inferred
    assert F(x) is None
    assert F(-x) is None
    # for assumed domains the tightest representative bound is returned
    assert F(Dummy(prime=True)) == 2
    assert F(Dummy(prime=True, odd=True)) == 3
    assert F(Dummy(positive=True, integer=True)) == 1
    assert F(Dummy(positive=True, even=True)) == 2
    assert F(Dummy(negative=True, integer=True)) == -1
    assert F(Dummy(negative=True, even=True)) == -2
    assert F(Dummy(zero=True)) == 0
    assert F(Dummy(nonnegative=True)) == 0
    assert F(Dummy(nonpositive=True)) == 0
    # shifted symbols: only integer assumptions allow tight conclusions
    assert F(Dummy(positive=True) + 1).is_positive
    assert F(Dummy(positive=True, integer=True) - 1).is_nonnegative
    assert F(Dummy(positive=True) - 1) is None
    assert F(Dummy(negative=True) + 1) is None
    assert F(Dummy(negative=True, integer=True) - 1).is_nonpositive
    assert F(Dummy(negative=True) - 1).is_negative
    assert F(-Dummy(positive=True) + 1) is None
    assert F(-Dummy(positive=True, integer=True) - 1).is_negative
    assert F(-Dummy(positive=True) - 1).is_negative
    assert F(-Dummy(negative=True) + 1).is_positive
    assert F(-Dummy(negative=True, integer=True) - 1).is_nonnegative
    assert F(-Dummy(negative=True) - 1) is None
    # univariate polynomials in a signed symbol
    x = Dummy(negative=True)
    assert F(x**3).is_nonpositive
    assert F(x**3 + log(2)*x - 1).is_negative
    x = Dummy(positive=True)
    assert F(-x**3).is_nonpositive
    # simple rational functions of a signed symbol
    p = Dummy(positive=True)
    assert F(1/p).is_positive
    assert F(p/(p + 1)).is_positive
    p = Dummy(nonnegative=True)
    assert F(p/(p + 1)).is_nonnegative
    p = Dummy(positive=True)
    assert F(-1/p).is_negative
    p = Dummy(nonpositive=True)
    assert F(p/(-p + 1)).is_nonpositive
    # multivariate combinations of signed symbols
    p = Dummy(positive=True, integer=True)
    q = Dummy(positive=True, integer=True)
    assert F(-2/p/q).is_negative
    # (p - 1) may be zero, so division by it yields no conclusion
    assert F(-2/(p - 1)/q) is None
    assert F((p - 1)*q + 1).is_positive
    assert F(-(p - 1)*q - 1).is_negative
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import struct
import sys
class VMDK(object):
    """Inspector for stream-optimized VMDK images (Python 2).

    Walks the marker stream of a stream-optimized sparse extent, rebuilds
    the grain directory from the grain-table markers it encounters, and
    compares it against the grain directory stored in the file.
    """
    # A VMDK sector is always 512 bytes.
    _SECT = 512
    # Sparse-extent header layout (fields in on-disk order).
    # NOTE(review): 'rgdOffet' looks like a typo for 'rgdOffset'; the field is
    # only ever filled positionally, so the name is left untouched here.
    _HEADER = namedtuple('Header', 'magicNumber version flags capacity '
        'grainSize descriptorOffset descriptorSize numGTEsPerGT rgdOffet '
        'gdOffset overHead uncleanShutdown singleEndLineChar nonEndLineChar '
        'doubleEndLineChar1 doubleEndLineChar2 compressAlgorithm pad')
    # Raw 16-byte marker as read from disk.
    _MARKER = namedtuple('Marker', 'val size data')
    # Marker for a compressed grain (size != 0); offset is in sectors.
    _GRAIN_MARKER = namedtuple('GrainMarker', 'lba size data offset')
    # Marker for metadata (GT/GD/footer/EOS); offset is in sectors.
    _METADATA_MARKER = namedtuple('Metadata', 'numSectors size type pad metadata offset')
    class Marker(object):
        """Metadata-marker type codes and helpers."""
        EOS = 0
        GT = 1
        GD = 2
        FOOTER = 3
        # 496 zero bytes of padding that follow a metadata marker header.
        Pad = '\0' * 496
        _StringRepr = [ 'EOS', 'GT', 'GD', 'FOOTER', ]
        @classmethod
        def DecodeType(cls, data):
            # The type is a little-endian unsigned int in the marker's data field.
            return struct.unpack("<I", data)[0]
        @classmethod
        def toTypeString(cls, intVal):
            return cls._StringRepr[intVal]
    class BaseGrainTable(object):
        """Shared storage/reset behavior for grain tables and directories."""
        __slots__ = [ 'map' ]
        def __init__(self):
            self.reset()
        def reset(self):
            # Subclasses define empty() to supply the initial mapping.
            self.map = self.empty()
        def asTuple(self):
            return tuple(self.map)
    class GrainTable(BaseGrainTable):
        """One grain table: 512 entries mapping grains to sector offsets."""
        __slots__ = [ 'offset', 'lba', ]
        _format = "<512I"
        # Sectors of data covered by one grain table (512 entries * 128 sectors).
        BLOCK_SIZE = 65536
        def __init__(self):
            VMDK.BaseGrainTable.__init__(self)
            self.offset = 0
            self.lba = -1
        def add(self, marker):
            # Python 2 integer division: grain index within this table.
            idx = (marker.lba % self.BLOCK_SIZE) / 128
            self.map[idx] = marker.offset
            # Which grain table (i.e. GD slot) this lba belongs to.
            self.lba = marker.lba / self.BLOCK_SIZE
        def fromMarker(self, marker):
            # The table's data starts one sector after its marker.
            self.offset = marker.offset + 1
        @classmethod
        def decode(cls, marker, data):
            # A full grain table is exactly 512 * 4 = 2048 bytes.
            if len(data) != 2048:
                return []
            return struct.unpack(cls._format, data)
        @classmethod
        def empty(cls):
            return [ 0 ] * 512
        def isEmpty(self):
            # True when no grain in this table has been written.
            for i in self.map:
                if i != 0:
                    return False
            return True
    class GrainDirectory(BaseGrainTable):
        """Grain directory: variable-length list of grain-table offsets."""
        __slots__ = []
        def add(self, grainTable):
            # All-zero tables are not recorded in the directory.
            if grainTable.isEmpty():
                return
            idx = grainTable.lba
            mapLen = len(self.map)
            # We may have skipped some tables, but we need to add a full
            # block of 128
            while mapLen <= idx:
                self.map.extend([ 0 ] * 128)
                mapLen += 128
            self.map[idx] = grainTable.offset
        @classmethod
        def decode(cls, marker, data):
            # marker.val counts sectors of GD data; 4 bytes per entry.
            sformat = "<%sI" % (marker.val * VMDK._SECT / 4)
            metadata = struct.unpack(sformat, data)
            return metadata
        def empty(self):
            return []
    class MarkerFooter(object):
        """Decoder for the footer metadata (same layout as the header)."""
        _format = "<4sIIQQQQIQQQBccccI431s"
        @classmethod
        def decode(cls, marker, data):
            return struct.unpack(cls._format, data)
    def __init__(self, fobj):
        # fobj: a seekable file-like object opened on the VMDK image.
        self._fobj = fobj
    def inspect(self):
        """Validate the image: rebuild the GD from GT markers and compare."""
        headerData = self._fobj.read(self._SECT)
        self.header = self._HEADER(*struct.unpack("<4sIIQQQQIQQQBccccI431s", headerData))
        self.assertEquals(self.header.magicNumber, 'KDMV')
        # The footer marker lives 3 sectors from the end of the file.
        self._fobj.seek(-3 * self._SECT, 2)
        footerMarker = self._readMarker()
        self.assertEquals(footerMarker.type, self.Marker.FOOTER)
        # Look for the footer
        footer = footerMarker.metadata
        self.assertEquals(footer.magicNumber, 'KDMV')
        # Find the gd marker
        self._fobj.seek(self._SECT * (footer.gdOffset - 1))
        gdMarker = self._readMarker()
        self.assertEquals(gdMarker.type, self.Marker.GD)
        # Look for the last non-zero GT
        for gtOffset in reversed(gdMarker.metadata):
            if gtOffset != 0:
                break
        else: # for
            raise Exception("All-zero GTs")
        # Seek to the gt marker
        self._fobj.seek((gtOffset - 1) * self._SECT)
        marker = self._readMarker()
        assert marker.type == self.Marker.GT
        grainTable = self.GrainTable()
        # Build a GD by copying the previous GD
        grainDirectory = self.GrainDirectory()
        grainDirectory.map = list(gdMarker.metadata)
        correct = True
        while 1:
            marker = self._readMarker(withData=False)
            self._align()
            if isinstance(marker, self._METADATA_MARKER):
                if marker.type == self.Marker.GD:
                    # Reached the trailing GD: compare against what we rebuilt.
                    self.assertEquals(marker.metadata, grainDirectory.asTuple())
                    if correct:
                        print "Image is correctly built"
                        return
                    break
                correct = False
                if marker.type == self.Marker.GT:
                    # Fold the finished grain table into the directory.
                    grainTable.fromMarker(marker)
                    grainDirectory.add(grainTable)
                    self.assertEquals(marker.metadata, grainTable.asTuple())
                    grainTable.reset()
                    continue
                continue
            # Grain marker: record the grain in the current table.
            grainTable.add(marker)
        # XXX Write back gd marker + gd, footer marker, EOS
    def assertEquals(self, first, second):
        """Assert equality with a readable message (unittest-style name)."""
        assert first == second, "%s != %s" % (first, second)
    def _readMarker(self, withData=False):
        """Read one marker at the current position; return a grain or metadata tuple."""
        offset = self._fobj.tell()
        assert offset % self._SECT == 0
        # Convert the byte offset to a sector number (Python 2 int division).
        offset /= self._SECT
        markerData = self._fobj.read(16)
        marker, markerType = self._readMarkerFromData(markerData)
        if marker.size:
            # Compressed grain: marker.size bytes of data, 4 already consumed.
            if withData:
                grainData = marker.data + self._fobj.read(marker.size - 4)
            else:
                grainData = "..."
                # Seek from current position, to pretend we're reading
                self._fobj.seek(marker.size - 4, 1)
            marker = self._GRAIN_MARKER(marker.val, marker.size, grainData, offset)
        else:
            # Realign to read the metadata
            self._align()
            metadata = self._fobj.read(marker.val * self._SECT)
            if markerType == self.Marker.GD:
                # Grain directories have a variable number of
                # entries, depending on the extent size, and contain
                # an unsigned int (4 byte)
                metadata = self.GrainDirectory.decode(marker, metadata)
            elif markerType == self.Marker.GT:
                metadata = self.GrainTable.decode(marker, metadata)
            elif markerType == self.Marker.FOOTER:
                metadata = self._HEADER(*self.MarkerFooter.decode(marker, metadata))
            marker = self._METADATA_MARKER(marker.val, marker.size, markerType,
                self.Marker.Pad, metadata, offset)
        return marker
    @classmethod
    def _readMarkerFromData(cls, markerData, checkMarkerType=True):
        """Decode a raw 16-byte marker; return (marker, type code or -1 for grains)."""
        marker = cls._MARKER(*struct.unpack("<QI4s", markerData[:16]))
        if marker.size:
            # Compressed grain. Type not needed
            markerType = -1
        else:
            markerType = cls.Marker.DecodeType(marker.data)
            if checkMarkerType:
                assert 0 <= markerType < len(cls.Marker._StringRepr)
        return marker, markerType
    def _align(self):
        "Align to 512 byte boundary"
        pos = self._fobj.tell()
        padding = pos % self._SECT
        if padding:
            # Consume the pad bytes up to the next sector boundary.
            self._fobj.read(self._SECT - padding)
def main():
    """Entry point: inspect the VMDK image named on the command line.

    Returns 1 (usage error) when the argument count is wrong; otherwise
    runs VMDK.inspect() on the file and returns None.
    """
    if len(sys.argv) != 2:
        # Single-argument print() behaves the same under Python 2 and 3.
        print("Usage: %s <file>" % sys.argv[0])
        return 1
    # open() instead of the Python-2-only file() builtin, and binary mode:
    # the inspector reads raw sectors, so text mode would corrupt the data
    # on platforms that translate line endings.
    vmdkFile = open(sys.argv[1], 'rb')
    try:
        vmdk = VMDK(vmdkFile)
        vmdk.inspect()
    finally:
        # The original never closed the file; make cleanup explicit.
        vmdkFile.close()

if __name__ == '__main__':
    sys.exit(main())
| |
from math import isnan
import warnings
import unittest
from unittest.mock import MagicMock
import numpy as np
from numpy.testing import assert_array_equal
from Orange.data import \
Instance, Domain, Unknown, Value, \
DiscreteVariable, ContinuousVariable, StringVariable
class TestInstance(unittest.TestCase):
    """Unit tests for Orange.data.Instance: construction from various value
    sources, item access/assignment, string forms, equality and ids."""
    # Fixture names used to build mock and real domains below.
    attributes = ["Feature %i" % i for i in range(10)]
    class_vars = ["Class %i" % i for i in range(1)]
    metas = [DiscreteVariable("Meta 1", values="XYZ"),
             ContinuousVariable("Meta 2"),
             StringVariable("Meta 3")]
    def mock_domain(self, with_classes=False, with_metas=False):
        """Return a MagicMock standing in for a Domain built from the fixtures."""
        attributes = self.attributes
        class_vars = self.class_vars if with_classes else []
        metas = self.metas if with_metas else []
        variables = attributes + class_vars
        return MagicMock(Domain,
                         attributes=attributes,
                         class_vars=class_vars,
                         metas=metas,
                         variables=variables)
    def create_domain(self, attributes=(), classes=(), metas=()):
        """Build a real Domain; bare strings become Continuous (attrs/classes)
        or 5-valued Discrete (metas) variables."""
        attr_vars = [ContinuousVariable(name=a) if isinstance(a, str) else a
                     for a in attributes]
        class_vars = [ContinuousVariable(name=c) if isinstance(c, str) else c
                      for c in classes]
        meta_vars = [DiscreteVariable(name=m, values=map(str, range(5)))
                     if isinstance(m, str) else m
                     for m in metas]
        domain = Domain(attr_vars, class_vars, meta_vars)
        return domain
    def test_init_x_no_data(self):
        """Instance from a domain with only attributes starts all-NaN."""
        domain = self.mock_domain()
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
    def test_init_xy_no_data(self):
        """Instance with class variables also gets NaN class values."""
        domain = self.mock_domain(with_classes=True)
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (len(self.class_vars), ))
        self.assertEqual(inst._metas.shape, (0, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
        self.assertTrue(all(isnan(x) for x in inst._y))
    def test_init_xym_no_data(self):
        """Instance with metas fills meta values with each variable's Unknown."""
        domain = self.mock_domain(with_classes=True, with_metas=True)
        inst = Instance(domain)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (len(self.attributes), ))
        self.assertEqual(inst._y.shape, (len(self.class_vars), ))
        self.assertEqual(inst._metas.shape, (3, ))
        self.assertTrue(all(isnan(x) for x in inst._x))
        self.assertTrue(all(isnan(x) for x in inst._y))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            assert_array_equal(inst._metas,
                               np.array([var.Unknown for var in domain.metas],
                                        dtype=object))
    def test_init_x_arr(self):
        """Attribute values can be supplied as a numpy array."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        vals = np.array([42, 0])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals)
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        domain = self.create_domain()
        inst = Instance(domain, np.empty((0,)))
        self.assertEqual(inst._x.shape, (0, ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_x_list(self):
        """Attribute values can be supplied as a plain list."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        lst = [42, 0]
        vals = np.array(lst)
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals)
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
        domain = self.create_domain()
        inst = Instance(domain, [])
        self.assertEqual(inst._x.shape, (0, ))
        self.assertEqual(inst._y.shape, (0, ))
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xy_arr(self):
        """Array input is split between attributes and class values."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        vals = np.array([42, 0, 1])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals[:2])
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._y[0], 1)
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xy_list(self):
        """Discrete values given as strings are converted to their indices."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        lst = [42, "M", "C"]
        vals = np.array([42, 0, 2])
        inst = Instance(domain, vals)
        assert_array_equal(inst._x, vals[:2])
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._y[0], 2)
        self.assertEqual(inst._metas.shape, (0, ))
    def test_init_xym_arr(self):
        """Object-dtype array input also populates meta values."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = np.array([42, "M", "B", "X", 43, "Foo"], dtype=object)
        inst = Instance(domain, vals)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (2, ))
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._metas.shape, (3, ))
        assert_array_equal(inst._x, np.array([42, 0]))
        self.assertEqual(inst._y[0], 1)
        assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
    def test_init_xym_list(self):
        """List input with attribute, class and meta values."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        self.assertIsInstance(inst, Instance)
        self.assertIs(inst.domain, domain)
        self.assertEqual(inst._x.shape, (2, ))
        self.assertEqual(inst._y.shape, (1, ))
        self.assertEqual(inst._metas.shape, (3, ))
        assert_array_equal(inst._x, np.array([42, 0]))
        self.assertEqual(inst._y[0], 1)
        assert_array_equal(inst._metas, np.array([0, 43, "Foo"], dtype=object))
    def test_init_inst(self):
        """An Instance can be converted into another (rearranged) domain."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst2 = Instance(domain, inst)
        assert_array_equal(inst2._x, np.array([42, 0]))
        self.assertEqual(inst2._y[0], 1)
        assert_array_equal(inst2._metas, np.array([0, 43, "Foo"], dtype=object))
        # Conversion into a domain that reshuffles attrs/metas and adds
        # variables unknown to the source domain.
        domain2 = self.create_domain(["z", domain[1], self.metas[1]],
                                     domain.class_vars,
                                     [self.metas[0], "w", domain[0]])
        inst2 = Instance(domain2, inst)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            assert_array_equal(inst2._x, np.array([Unknown, 0, 43]))
            self.assertEqual(inst2._y[0], 1)
            assert_array_equal(inst2._metas, np.array([0, Unknown, 42],
                                                      dtype=object))
    def test_get_item(self):
        """Values are retrievable by index, variable name and variable object."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        val = inst[0]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[0], 42)
        self.assertEqual(inst["x"], 42)
        self.assertEqual(inst[domain[0]], 42)
        val = inst[1]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[1], "M")
        self.assertEqual(inst["g"], "M")
        self.assertEqual(inst[domain[1]], "M")
        val = inst[2]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[2], "B")
        self.assertEqual(inst["y"], "B")
        self.assertEqual(inst[domain.class_var], "B")
        # Negative indices address meta values.
        val = inst[-2]
        self.assertIsInstance(val, Value)
        self.assertEqual(inst[-2], 43)
        self.assertEqual(inst["Meta 2"], 43)
        self.assertEqual(inst[self.metas[1]], 43)
        # Unknown variables raise ValueError.
        with self.assertRaises(ValueError):
            inst["asdf"] = 42
        with self.assertRaises(ValueError):
            inst[ContinuousVariable("asdf")] = 42
    def test_list(self):
        """Instance.list flattens attrs, classes and metas into one list."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        l = inst.list
        self.assertIsInstance(l, list)
        self.assertEqual(l, [42, "M", "B", "X", 43, "Foo"])
        # len(inst) excludes metas, so the flat list is strictly longer.
        self.assertGreater(len(l), len(inst))
        self.assertEqual(len(l), 6)
    def test_set_item(self):
        """Values are assignable by index, variable name and variable object."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst[0] = 43
        self.assertEqual(inst[0], 43)
        inst["x"] = 44
        self.assertEqual(inst[0], 44)
        inst[domain[0]] = 45
        self.assertEqual(inst[0], 45)
        inst[1] = "F"
        self.assertEqual(inst[1], "F")
        inst["g"] = "M"
        self.assertEqual(inst[1], "M")
        # Assigning a value outside the discrete variable's values fails.
        with self.assertRaises(ValueError):
            inst[1] = "N"
        with self.assertRaises(ValueError):
            inst["asdf"] = 42
        inst[2] = "C"
        self.assertEqual(inst[2], "C")
        inst["y"] = "A"
        self.assertEqual(inst[2], "A")
        inst[domain.class_var] = "B"
        self.assertEqual(inst[2], "B")
        inst[-1] = "Y"
        self.assertEqual(inst[-1], "Y")
        inst["Meta 1"] = "Z"
        self.assertEqual(inst[-1], "Z")
        inst[domain.metas[0]] = "X"
        self.assertEqual(inst[-1], "X")
    def test_str(self):
        """str() renders [attrs | classes] {metas} honoring number_of_decimals."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")])
        inst = Instance(domain, [42, 0])
        self.assertEqual(str(inst), "[42.000, M]")
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")])
        inst = Instance(domain, [42, "M", "B"])
        self.assertEqual(str(inst), "[42.000, M | B]")
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        inst = Instance(domain, [42, "M", "B", "X", 43, "Foo"])
        self.assertEqual(str(inst), "[42.000, M | B] {X, 43.000, Foo}")
        domain = self.create_domain([],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        inst = Instance(domain, ["B", "X", 43, "Foo"])
        self.assertEqual(str(inst), "[ | B] {X, 43.000, Foo}")
        domain = self.create_domain([],
                                    [],
                                    self.metas)
        inst = Instance(domain, ["X", 43, "Foo"])
        self.assertEqual(str(inst), "[] {X, 43.000, Foo}")
        domain = self.create_domain(self.attributes)
        inst = Instance(domain, range(len(self.attributes)))
        self.assertEqual(
            str(inst),
            "[{}]".format(", ".join("{:.3f}".format(x)
                                    for x in range(len(self.attributes)))))
        for attr in domain:
            attr.number_of_decimals = 0
        self.assertEqual(
            str(inst),
            "[{}]".format(", ".join("{}".format(x)
                                    for x in range(len(self.attributes)))))
    def test_repr(self):
        """repr() shows at most five values followed by an ellipsis."""
        domain = self.create_domain(self.attributes)
        inst = Instance(domain, range(len(self.attributes)))
        self.assertEqual(repr(inst), "[0.000, 1.000, 2.000, 3.000, 4.000, ...]")
        for attr in domain:
            attr.number_of_decimals = 0
        self.assertEqual(repr(inst), "[0, 1, 2, 3, 4, ...]")
    def test_eq(self):
        """Equality compares attributes, classes and metas; any change breaks it."""
        domain = self.create_domain(["x", DiscreteVariable("g", values="MF")],
                                    [DiscreteVariable("y", values="ABC")],
                                    self.metas)
        vals = [42, "M", "B", "X", 43, "Foo"]
        inst = Instance(domain, vals)
        inst2 = Instance(domain, vals)
        self.assertTrue(inst == inst2)
        self.assertTrue(inst2 == inst)
        inst2[0] = 43
        self.assertFalse(inst == inst2)
        inst2[0] = Unknown
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[2] = "C"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-1] = "Y"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-2] = "33"
        self.assertFalse(inst == inst2)
        inst2 = Instance(domain, vals)
        inst2[-3] = "Bar"
        self.assertFalse(inst == inst2)
    def test_instance_id(self):
        """Explicit ids are honored; auto-assigned ids are unique."""
        domain = self.create_domain(["x"])
        vals = [42]
        inst = Instance(domain, vals, id=42)
        self.assertEqual(inst.id, 42)
        inst2 = Instance(domain, vals)
        inst3 = Instance(domain, vals)
        self.assertNotEqual(inst2.id, inst3.id)
| |
# -*- coding: utf-8 -*-
"""
lantz.drivers.ni.daqmx.channels
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implementation of specialized channel classes.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from .base import Channel, Task
from .constants import Constants
class VoltageInputChannel(Channel):
    """Creates channel(s) to measure voltage and adds the channel(s)
    to the task you specify with taskHandle.
    If your measurement requires the use of internal excitation or you need
    the voltage to be scaled by excitation, call DAQmxCreateAIVoltageChanWithExcit.
    :param phys_channel: The names of the physical channels to use
                         to create virtual channels. You can specify
                         a list or range of physical channels.
    :param channel_name: The name(s) to assign to the created virtual channel(s).
                    If you do not specify a name, NI-DAQmx uses the physical
                    channel name as the virtual channel name. If you specify
                    your own names for nameToAssignToChannel, you must use the
                    names when you refer to these channels in other NI-DAQmx
                    functions.
                    If you create multiple virtual channels with one call to
                    this function, you can specify a list of names separated by
                    commas. If you provide fewer names than the number of
                    virtual channels you create, NI-DAQmx automatically assigns
                    names to the virtual channels.
    :param terminal: {'default', 'rse', 'nrse', 'diff', 'pseudodiff'}
                    The input terminal configuration for the channel:
                    'default'
                        At run time, NI-DAQmx chooses the default terminal
                        configuration for the channel.
                    'rse'
                        Referenced single-ended mode
                    'nrse'
                        Nonreferenced single-ended mode
                    'diff'
                        Differential mode
                    'pseudodiff'
                        Pseudodifferential mode
    :param min_val: The minimum value, in units, that you expect to measure.
    :param max_val: The maximum value, in units, that you expect to measure.
    :param units: units to use to return the voltage measurements
    """
    # Identifies the channel as analog input; CREATE_FUN names the driver
    # call the base class invokes with _create_args.
    IO_TYPE = 'AI'
    CREATE_FUN = 'CreateAIVoltageChan'
    # Maps user-facing terminal strings to driver constants.
    terminal_map = dict (default = Constants.Val_Cfg_Default,
                         rse = Constants.Val_RSE,
                         nrse = Constants.Val_NRSE,
                         diff = Constants.Val_Diff,
                         pseudodiff = Constants.Val_PseudoDiff)
    def __init__(self, phys_channel, name='', terminal='default',
                 min_max=(-10., 10.), units='volts', task=None):
        if not name:
            # NOTE(review): this keeps the name empty rather than defaulting
            # to phys_channel (the commented-out value); NI-DAQmx then uses
            # the physical channel name. Confirm this is intended.
            name = ''#phys_channel
        terminal_val = self.terminal_map[terminal]
        if units != 'volts':
            # Any units string other than 'volts' names a custom scale.
            custom_scale_name = units
            units = Constants.Val_FromCustomScale
        else:
            custom_scale_name = None
            units = Constants.Val_Volts
        # Arguments for the deferred CREATE_FUN driver call.
        self._create_args = (phys_channel, name, terminal_val,
                             min_max[0], min_max[1], units, custom_scale_name)
        super().__init__(task=task, name=name)
class VoltageOutputChannel(Channel):
    """Creates channel(s) to generate voltage and adds the channel(s)
    to the task you specify with taskHandle.

    Parameters mirror :class:`VoltageInputChannel` (``phys_channel``,
    ``channel_name``, ``terminal``, ``min_max``, ``units``); the channels
    generate rather than measure voltage.
    """
    # Follow the VoltageInputChannel pattern: IO_TYPE/CREATE_FUN tell the
    # base class which driver function to call with _create_args.
    IO_TYPE = 'AO'
    CREATE_FUN = 'CreateAOVoltageChan'
    # Kept for backward compatibility with code inspecting this attribute.
    CHANNEL_TYPE = 'AO'
    # Same mapping as VoltageInputChannel; the old code read
    # self.terminal_map without ever defining it on this class.
    terminal_map = dict(default=Constants.Val_Cfg_Default,
                       rse=Constants.Val_RSE,
                       nrse=Constants.Val_NRSE,
                       diff=Constants.Val_Diff,
                       pseudodiff=Constants.Val_PseudoDiff)
    def __init__(self, phys_channel, channel_name='', terminal='default',
                 min_max=(-1, -1), units='volts', task=None):
        # Validate the terminal string early (DAQmxCreateAOVoltageChan itself
        # takes no terminal argument, so this is a sanity check only).
        self.terminal_map[terminal]
        if units != 'volts':
            # Any units string other than 'volts' names a custom scale.
            custom_scale_name = units
            # Use the Val_* constant names, consistent with VoltageInputChannel.
            units = Constants.Val_FromCustomScale
        else:
            custom_scale_name = None
            units = Constants.Val_Volts
        # Defer the driver call to the base class via CREATE_FUN; the old
        # code called self.lib before any base initialization and discarded
        # the returned error code.
        self._create_args = (phys_channel, channel_name,
                             min_max[0], min_max[1], units, custom_scale_name)
        super().__init__(task=task, name=channel_name)
# Not implemented:
# DAQmxCreateAIAccelChan, DAQmxCreateAICurrentChan, DAQmxCreateAIFreqVoltageChan,
# DAQmxCreateAIMicrophoneChan, DAQmxCreateAIResistanceChan, DAQmxCreateAIRTDChan,
# DAQmxCreateAIStrainGageChan, DAQmxCreateAITempBuiltInSensorChan,
# DAQmxCreateAIThrmcplChan, DAQmxCreateAIThrmstrChanIex, DAQmxCreateAIThrmstrChanVex,
# DAQmxCreateAIVoltageChanWithExcit
# DAQmxCreateAIPosLVDTChan, DAQmxCreateAIPosRVDTChan/
# DAQmxCreateTEDSAI*
# Not implemented: DAQmxCreateAOCurrentChan
# DAQmxCreateDIChan, DAQmxCreateDOChan
# DAQmxCreateCI*, DAQmxCreateCO*
class DigitalInputChannel(Channel):
    """Virtual channel(s) for measuring digital signals.

    Wraps DAQmxCreateDIChan: the given digital lines become either one
    virtual channel per line or a single channel covering all lines.
    Whole ports specified in ``lines`` cannot be split across channels;
    to split ports, create this channel once per port.

    Parameters
    ----------
    lines : str
        Names of the digital lines (a list or range specification) used
        to build the virtual channel(s).
    name : str
        Name(s) for the created virtual channel(s). When empty, NI-DAQmx
        uses the physical channel names; custom names must then be used
        in other NI-DAQmx calls referring to these channels.
    group_by : {'line', 'all_lines'}
        'line' makes one channel per line; 'all_lines' makes a single
        channel spanning every line (required when whole ports are given).
    """

    def __init__(self, lines, name='', group_by='line'):
        per_line = (group_by == 'line')
        self.one_channel_for_all_lines = not per_line
        grouping_val = (Constants.ChanPerLine if per_line
                        else Constants.ChanForAllLines)
        self.lib.CreateDIChan(lines, name, grouping_val)
class DigitalOutputChannel(Channel):
    """Virtual channel(s) for generating digital signals.

    Wraps DAQmxCreateDOChan. Grouping semantics are identical to
    DigitalInputChannel: lines become one channel each ('line') or a
    single channel for all of them ('all_lines'); whole ports require
    'all_lines'.
    """

    def __init__(self, lines, name='', group_by='line'):
        per_line = (group_by == 'line')
        self.one_channel_for_all_lines = not per_line
        grouping_val = (Constants.ChanPerLine if per_line
                        else Constants.ChanForAllLines)
        self.lib.CreateDOChan(lines, name, grouping_val)
class CountEdgesChannel(Channel):
    """Counter-input channel that counts rising or falling edges.

    Wraps DAQmxCreateCICountEdgesChan. A task can hold only one
    counter-input channel; to read several counters simultaneously,
    use one task per counter. The signal is taken from the counter's
    default input terminal unless configured otherwise.

    Parameters
    ----------
    counter : str
        Name of the counter used to create the virtual channel.
    name : str
        Name(s) for the created virtual channel(s); when empty, NI-DAQmx
        uses the physical channel name.
    edge : {'rising', 'falling'}
        Which edges of the input signal change the count.
    init : int
        The value from which to start counting.
    direction : {'up', 'down', 'ext'}
        'up' increments and 'down' decrements the count register on each
        edge; 'ext' lets a digital line control the direction.
    """

    CHANNEL_TYPE = 'CI'

    def __init__(self, counter, name="", edge='rising', init=0, direction='up'):
        edge_val = Constants.RISING if edge == 'rising' else Constants.FALLING
        direction_val = (Constants.COUNT_UP if direction == 'up'
                         else Constants.COUNT_DOWN)
        # NOTE(review): `init` is accepted but never forwarded to the driver
        # call below; confirm whether CreateCICountEdgesChan should take it.
        self.lib.CreateCICountEdgesChan(counter, name, edge_val, direction_val)
class LinearEncoderChannel(Channel):
    """
    Counter-input channel that measures linear position with a linear encoder.

    A task can hold only one counter input channel, so use one task per
    counter when reading several counters simultaneously.  Input signals
    are taken from the counter's default terminals unless other terminals
    are selected.

    Parameters
    ----------
    counter : str
        The name of the counter to use to create virtual channels.
    name : str
        Name(s) for the created virtual channel(s).  Defaults to the
        physical channel name.  Comma-separate multiple names; missing
        names are assigned automatically by NI-DAQmx.
    decodingType : {'X1', 'X2', 'X4', 'TwoPulseCounting'}
        How pulses on signal A and signal B are counted and interpreted.
        X1/X2/X4 apply to quadrature encoders only; TwoPulseCounting to
        two-pulse encoders only.  Higher decoding (X2, X4) resolves
        smaller position changes but is more sensitive to signal noise.
    ZidxEnable : bool
        Whether z indexing is enabled for the measurement.
    ZidxVal : float
        The value, in units, to which the measurement is reset when
        signal Z is high and A/B are in the states given by ZidxPhase.
    ZidxPhase : {'AHighBHigh', 'AHighBLow', 'ALowBHigh', 'ALowBLow'}
        Required states of signals A and B while Z is high for the reset
        to occur.  Pick a phase that your encoder actually produces;
        consult the encoder documentation for Z timing relative to A/B.
    units : {'Meters', 'Inches', 'Ticks', 'FromCustomScale'}
        Units for returned linear position measurements.
    distPerPulse : float
        Distance (in units) measured per encoder pulse.
    init : float
        Encoder position (in units) when the measurement begins.
    customScaleName : str
        Name of a custom scale; only meaningful when units is
        'FromCustomScale', otherwise it is forced to None.
    """
    def __init__(self, counter, name="", decodingType='X1', ZidxEnable=False,
                 ZidxVal=0.0, ZidxPhase='AHighBHigh', units='Ticks',
                 distPerPulse=1.0, init=0.0, customScaleName=None):
        counter, name = str(counter), str(name)
        # Translate keyword arguments into the corresponding DAQmx constants.
        decoding_map = {
            'X1': Constants.Val_X1,
            'X2': Constants.Val_X2,
            'X4': Constants.Val_X4,
            'TwoPulseCounting': Constants.Val_TwoPulseCounting,
        }
        phase_map = {
            'AHighBHigh': Constants.Val_AHighBHigh,
            'AHighBLow': Constants.Val_AHighBLow,
            'ALowBHigh': Constants.Val_ALowBHigh,
            'ALowBLow': Constants.Val_ALowBLow,
        }
        unit_map = {
            'Meters': Constants.Val_Meters,
            'Inches': Constants.Val_Inches,
            'Ticks': Constants.Val_Ticks,
            'FromCustomScale': Constants.Val_FromCustomScale,
        }
        decoding_val = self._get_map_value('decodingType', decoding_map, decodingType)
        phase_val = self._get_map_value('ZidxPhase', phase_map, ZidxPhase)
        unit_val = self._get_map_value('units', unit_map, units)
        # A custom scale is only valid together with FromCustomScale units.
        if unit_val != Constants.Val_FromCustomScale:
            customScaleName = None
        CALL(
            'CreateCILinEncoderChan',
            self,
            counter,
            name,
            decoding_val,
            bool32(ZidxEnable),
            float64(ZidxVal),
            phase_val,
            unit_val,
            float64(distPerPulse),
            float64(init),
            customScaleName,
        )
class MeasureFrequencyChannel(Channel):
    """
    Creates a channel to measure the frequency of a digital signal
    and adds the channel to the task.  You can create only one
    counter input channel at a time with this function because a
    task can include only one counter input channel.  To read from
    multiple counters simultaneously, use a separate task for each
    counter.  Connect the input signal to the default input
    terminal of the counter unless you select a different input
    terminal.

    Parameters
    ----------
    counter : str
        The name of the counter to use to create virtual channels.
    name : str
        The name(s) to assign to the created virtual channel(s).
        Defaults to the physical channel name.  Comma-separate
        multiple names; missing names are assigned automatically.
    min_val : float
        The minimum value, in units, that you expect to measure.
    max_val : float
        The maximum value, in units, that you expect to measure.
    units : {'hertz', 'ticks', 'custom'}
        Units for the measurement and the min/max expected values.
        'hertz' - cycles per second; 'ticks' - timebase ticks;
        'custom' - use custom_scale_name to specify units.
    edge : {'rising', 'falling'}
        Which edges of the signal to measure the frequency/period of.
    method : {'low_freq', 'high_freq', 'large_range'}
        Method used to calculate the period/frequency of the signal:
        'low_freq'    - one counter with a constant timebase;
        'high_freq'   - two counters, counting pulses during meas_time;
        'large_range' - one counter divides the input by `divisor`,
                        the second counter measures the result.
    meas_time : float
        Measurement duration in seconds when method is 'high_freq'.
        Must be low enough that the count register does not overflow.
    divisor : int
        Division factor when method is 'large_range'.  Larger values
        are more accurate but risk count-register rollover.
    custom_scale_name : str
        Name of a custom scale; required when units is 'custom' and
        forced to None otherwise.

    Raises
    ------
    ValueError
        If `divisor` is not positive or `method` is not recognised.
    """
    def __init__(self, counter, name='', min_val=1e2, max_val=1e3,
                 units="hertz", edge="rising", method="low_freq",
                 meas_time=1.0, divisor=1, custom_scale_name=None):
        # Frequency measurements are read back as floats.
        self.data_type = float
        # Validate with an exception rather than assert (asserts vanish
        # under `python -O`).
        if divisor <= 0:
            raise ValueError("divisor must be a positive integer")
        # Measurement method -> DAQmx constant; reject unknown methods
        # instead of leaving meas_meth_val unbound.
        if method == 'low_freq':
            meas_meth_val = Constants.LOW_FREQ1_CTR
        elif method == 'high_freq':
            meas_meth_val = Constants.HIGH_FREQ2_CTR
        elif method == 'large_range':
            meas_meth_val = Constants.LARGE_RANGE2_CTR
        else:
            raise ValueError(
                "method must be 'low_freq', 'high_freq' or 'large_range', "
                "got {0!r}".format(method))
        # Edge selector -> DAQmx constant ('falling' for anything else,
        # matching the convention used by the other CI channel classes).
        edge_val = Constants.RISING if edge == 'rising' else Constants.FALLING
        # Units -> DAQmx constant.  Note: the previous code compared
        # `units != ('hertz', 'ticks')` (str vs tuple, always True) and
        # then referenced a misspelled `Contstants`, so it always failed.
        if units == 'hertz':
            units_val = Constants.HZ
            custom_scale_name = None
        elif units == 'ticks':
            units_val = Constants.TICKS
            custom_scale_name = None
        else:
            # 'custom': the caller supplies custom_scale_name directly.
            units_val = Constants.FROM_CUSTOM_SCALE
        self.lib.CreateCIFreqChan(counter, name, min_val, max_val,
                                  units_val, edge_val, meas_meth_val,
                                  meas_time, divisor, custom_scale_name)
def create_channel_frequency(self, counter, name="", units='hertz', idle_state='low',
                             delay=0.0, freq=1.0, duty_cycle=0.5):
    """
    Creates channel(s) to generate digital pulses that freq and
    duty_cycle define and adds the channel to the task.  The
    pulses appear on the default output terminal of the counter
    unless you select a different output terminal.

    Parameters
    ----------
    counter : str
        The name of the counter to use to create virtual
        channels.  You can specify a list or range of physical
        channels.
    name : str
        The name(s) to assign to the created virtual channel(s).
        Defaults to the physical channel name.  Comma-separate
        multiple names; missing names are assigned automatically.
    units : {'hertz'}
        The units in which to specify freq: 'hertz' - hertz.
    idle_state : {'low', 'high'}
        The resting state of the output terminal.
    delay : float
        The amount of time in seconds to wait before generating the
        first pulse.
    freq : float
        The frequency at which to generate pulses.
    duty_cycle : float
        The width of the pulse divided by the pulse period.  NI-DAQmx
        uses this ratio, combined with frequency, to determine pulse
        width and the interval between pulses.

    Returns
    -------
    success_status : bool
    """
    counter = str(counter)
    name = str(name)
    units_map = dict(hertz=Constants.Val_Hz)
    idle_state_map = dict(low=Constants.Val_Low, high=Constants.Val_High)
    units_val = self._get_map_value('units', units_map, units)
    idle_state_val = self._get_map_value('idle_state', idle_state_map, idle_state)
    # Use the typed CALL wrapper and return a success flag, consistent
    # with create_channel_ticks / create_channel_time (the previous code
    # called self.lib directly and never returned the documented
    # success_status).
    return CALL('CreateCOPulseChanFreq', self, counter, name, units_val,
                idle_state_val, float64(delay), float64(freq),
                float64(duty_cycle)) == 0
def create_channel_ticks(self, counter, name="", source="", idle_state='low',
                         delay=0, low_ticks=1, high_ticks=1):
    """
    Creates channel(s) that generate digital pulses whose high and low
    durations are given in timebase ticks, and adds the channel to the
    task.  Pulses appear on the counter's default output terminal unless
    a different output terminal is selected.

    Parameters
    ----------
    counter : str
        The counter (or list/range of physical channels) used to create
        the virtual channel(s).
    name : str
        Name(s) for the created virtual channel(s).  Defaults to the
        physical channel name.  Comma-separate multiple names; missing
        names are assigned automatically by NI-DAQmx.
    source : str
        The terminal connected to an external timebase; a terminal name
        may also be used.
    idle_state : {'low', 'high'}
        The resting state of the output terminal.
    delay : int
        Timebase ticks to wait before the first pulse.
    low_ticks : int
        Timebase ticks the pulse stays low.
    high_ticks : int
        Timebase ticks the pulse stays high.

    Returns
    -------
    success_status : bool
    """
    counter, name = str(counter), str(name)
    state_map = dict(low=Constants.Val_Low, high=Constants.Val_High)
    state_val = self._get_map_value('idle_state', state_map, idle_state)
    status = CALL('CreateCOPulseChanTicks', self, counter, name, source,
                  state_val, int32(delay), int32(low_ticks),
                  int32(high_ticks))
    return status == 0
def create_channel_time(self, counter, name="", units="seconds", idle_state='low',
                        delay = 0, low_time=1, high_time=1):
    """
    Creates channel(s) to generate digital pulses defined by the
    amount of time the pulse is at a high state and the amount of
    time the pulse is at a low state, and also adds the channel to
    the task.  The pulses appear on the default output terminal of
    the counter unless you select a different output terminal.

    Parameters
    ----------
    counter : str
        The name of the counter to use to create virtual
        channels. You can specify a list or range of physical
        channels.
    name : str
        The name(s) to assign to the created virtual channel(s). If
        you do not specify a name, NI-DAQmx uses the physical
        channel name as the virtual channel name. If you specify
        your own names for nameToAssignToChannel, you must use the
        names when you refer to these channels in other NI-DAQmx
        functions.
        If you create multiple virtual channels with one call to
        this function, you can specify a list of names separated by
        commas. If you provide fewer names than the number of
        virtual channels you create, NI-DAQmx automatically assigns
        names to the virtual channels.
    units : {'seconds'}
        The units in which to specify high and low time.
    idle_state : {'low', 'high'}
        The resting state of the output terminal.
    delay : float
        The amount of time in seconds to wait before generating the
        first pulse.
    low_time : float
        The amount of time the pulse is low, in seconds.
    high_time : float
        The amount of time the pulse is high, in seconds.

    Returns
    -------
    success_status : bool
    """
    counter = str(counter)
    name = str(name)
    # Translate keyword selectors into DAQmx constants.
    units_map = dict (seconds = Constants.Val_Seconds)
    idle_state_map = dict (low=Constants.Val_Low, high=Constants.Val_High)
    units_val = self._get_map_value('units', units_map, units)
    idle_state_val = self._get_map_value('idle_state', idle_state_map, idle_state)
    # CALL returns 0 on success, hence the == 0 success flag.
    return CALL('CreateCOPulseChanTime', self, counter, name, units_val, idle_state_val,
                float64 (delay), float64(low_time), float64(high_time))==0
| |
import re
import sys
import argparse
from IPython.nbformat.v3.rwbase import NotebookReader
from IPython.nbformat.v3.nbjson import JSONWriter
import IPython.nbformat.v3.nbbase as nbbase
class MarkdownReader(NotebookReader):
    """Import markdown to IPython Notebook.

    The markdown is split into blocks: code and not-code. These
    blocks are used as the source for cells in the notebook. Code
    blocks become code cells; not-code blocks become markdown cells.

    Only supports two kinds of notebook cell: code and markdown.
    """
    ## type identifiers
    code = u'code'
    markdown = u'markdown'
    python = u'python'

    ## regular expressions to match a code block, splitting into groups
    ## N.B you can't share group names between these patterns.
    ## this is necessary for format agnostic code block detection.
    ## These two pattern strings are ORed to create a master pattern
    ## and the python re module doesn't allow sharing group names
    ## in a single regular expression.

    # fenced code
    fenced_regex = r"""
    \n*                     # any number of newlines followed by
    ^(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
    \{*(?P<language>        # followed by the group 'language',
    [\w+-]*)                # a word of alphanumerics, _, - or +
    [ ]*                    # followed by spaces
    (?P<options>.*)         # followed by any text
    \n                      # followed by a newline
    (?P<content>            # start a group 'content'
    [\s\S]*?)               # that includes anything
    \n(?P=fence)$           # up until the same fence that we started with
    \n*                     # followed by any number of newlines
    """

    # indented code
    indented_regex = r"""
    \n*                        # any number of newlines
    (?P<icontent>              # start group 'icontent'
    (?P<indent>^([ ]{4,}|\t))  # an indent of at least four spaces or one tab
    [\s\S]*?)                  # any code
    \n*                        # any number of newlines
    ^(?!(?P=indent))           # stop when there is a line without at least
                               # the indent of the first one
    """

    def __init__(self, code_regex=None):
        """
        code_regex - Either 'fenced' or 'indented' or
                     a regular expression that matches code blocks in
                     markdown text. Will be passed to re.compile with
                     re.VERBOSE and re.MULTILINE flags.

                     Default is to look for both indented and fenced
                     code blocks.
        """
        if not code_regex:
            self.code_regex = r"({}|{})".format(self.fenced_regex,
                                                self.indented_regex)
        elif code_regex == 'fenced':
            self.code_regex = self.fenced_regex
        elif code_regex == 'indented':
            self.code_regex = self.indented_regex
        else:
            self.code_regex = code_regex

        re_flags = re.MULTILINE | re.VERBOSE
        self.code_pattern = re.compile(self.code_regex, re_flags)

    def reads(self, s, **kwargs):
        """Read string s to notebook. Returns a notebook."""
        return self.to_notebook(s, **kwargs)

    def to_notebook(self, s, **kwargs):
        """Convert the markdown string s to an IPython notebook.

        Returns a notebook.
        """
        all_blocks = self.parse_blocks(s)

        cells = []
        for block in all_blocks:
            if block['type'] == self.code:
                kwargs = {'input': block['content'],
                          'language': block['language']}
                code_cell = nbbase.new_code_cell(**kwargs)
                cells.append(code_cell)
            elif block['type'] == self.markdown:
                kwargs = {'cell_type': block['type'],
                          'source': block['content']}
                markdown_cell = nbbase.new_text_cell(**kwargs)
                cells.append(markdown_cell)
            else:
                raise NotImplementedError("{} is not supported as a cell"
                                          "type".format(block['type']))

        ws = nbbase.new_worksheet(cells=cells)
        nb = nbbase.new_notebook(worksheets=[ws])
        return nb

    def parse_blocks(self, text):
        """Extract the code and non-code blocks from given markdown text.

        Returns a list of block dictionaries.

        Each dictionary has at least the keys 'type' and 'content',
        containing the type of the block ('markdown', 'code') and
        the contents of the block.

        Additional keys may be parsed as well.

        We should switch to an external markdown library if this
        gets much more complicated!
        """
        code_matches = list(self.code_pattern.finditer(text))

        # determine where the limits of the non code bits are
        # based on the code block edges
        text_starts = [0] + [m.end() for m in code_matches]
        text_stops = [m.start() for m in code_matches] + [len(text)]
        text_limits = list(zip(text_starts, text_stops))

        # list of the groups from the code blocks, tagged with a type.
        # dict(d, type=...) works on both Python 2 and 3; the former
        # `dict(d.items() + [...])` raises TypeError on Python 3,
        # where items() returns a view, not a list.
        code_blocks = [dict(m.groupdict(), type=self.code)
                       for m in code_matches]

        # remove indents, add code magic, etc.  Use an explicit loop:
        # map() is lazy on Python 3 and would silently skip this step.
        for block in code_blocks:
            self.pre_process_code_block(block)

        text_blocks = [{'content': text[i:j], 'type': self.markdown}
                       for i, j in text_limits]

        # interleave text and code blocks: cells must alternate in
        # order, text first.  Pre-size a real list -- slice-assigning
        # into range() only worked on Python 2.
        all_blocks = [None] * (len(text_blocks) + len(code_blocks))
        all_blocks[::2] = text_blocks
        all_blocks[1::2] = code_blocks

        # remove possible empty first, last text cells
        all_blocks = [cell for cell in all_blocks if cell['content']]

        return all_blocks

    def pre_process_code_block(self, block):
        """Preprocess the content of a code block, modifying the code
        block in place.

        Remove indentation and do magic with the cell language
        if applicable.

        If nothing else, we need to deal with the 'content', 'icontent'
        difference.
        """
        # homogenise content attribute of fenced and indented blocks
        block['content'] = block.get('content') or block['icontent']

        # dedent indented code blocks
        if 'indent' in block and block['indent']:
            indent = r"^" + block['indent']
            content = block['content'].splitlines()
            dedented = [re.sub(indent, '', line) for line in content]
            block['content'] = '\n'.join(dedented)

        # alternate descriptions for python code
        python_aliases = ['python', 'py', '', None]
        # ensure one identifier for python code
        if 'language' in block and block['language'] in python_aliases:
            block['language'] = self.python

        # add alternate language execution magic
        if 'language' in block and block['language'] != self.python:
            code_magic = "%%{}\n".format(block['language'])
            block['content'] = code_magic + block['content']
def cli():
    """Execute for command line usage.

    Reads markdown from `input_file` (or stdin) and writes an IPython
    notebook (JSON) to `--output` (or stdout).  Prints help and exits
    when run interactively with no input file.
    """
    description = "Create an IPython notebook from markdown."
    example_use = "Example: notedown some_markdown.md > new_notebook.ipynb"
    parser = argparse.ArgumentParser(description=description,
                                     epilog=example_use)
    parser.add_argument('input_file',
                        help="markdown input file (default STDIN)",
                        nargs="?",
                        type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('--output',
                        help="output file, (default STDOUT)",
                        type=argparse.FileType('w'),
                        default=sys.stdout)
    parser.add_argument('--code_block',
                        help=("choose to match only 'fenced' or 'indented' "
                              "code blocks or give a regular expression to "
                              "match code blocks. Will be compiled with "
                              "re.MULTILINE | re.VERBOSE."
                              "Default is to match both "
                              "fenced and indented code blocks."),
                        default=None)
    args = parser.parse_args()

    with args.input_file as ip, args.output as op:
        # No input file and stdin is a terminal: nothing to convert,
        # show usage instead of blocking on interactive input.
        if args.input_file.isatty():
            parser.print_help()
            # sys.exit is always available; the bare exit() builtin is
            # injected by the site module and absent under `python -S`.
            sys.exit()

        reader = MarkdownReader(code_regex=args.code_block)
        writer = JSONWriter()

        notebook = reader.read(ip)
        writer.write(notebook, op)
| |
#!/usr/bin/env python
# arguments are:
# - cluster scope
# - cluster role
# - master connection string
# for the AWS, the following environment variables should be defined:
# - WALE_ENV_DIR: directory where WAL-E environment is kept
# - WAL_S3_BUCKET: a name of the S3 bucket for WAL-E
# - WALE_BACKUP_THRESHOLD_MEGABYTES if WAL amount is above that - use pg_basebackup
# - WALE_BACKUP_THRESHOLD_PERCENTAGE if WAL size exceeds a certain percentage of the
# latest backup size
from collections import namedtuple
import logging
import os
import psycopg2
import subprocess
import sys
if sys.hexversion >= 0x03000000:
long = int
logger = logging.getLogger(__name__)
class Restore(object):
    """Create a replica of a PostgreSQL master using pg_basebackup.

    Subclasses can provide an alternative (faster) replica-creation
    method and use pg_basebackup as a fallback.
    """

    def __init__(self, scope, role, datadir, connstring, env=None):
        self.scope = scope
        self.role = role
        self.master_connection = Restore.parse_connstring(connstring)
        self.data_dir = datadir
        # Copy os.environ so later mutations do not leak into the
        # process environment; an explicit env dict is used as-is.
        self.env = os.environ.copy() if not env else env

    @staticmethod
    def parse_connstring(connstring):
        """Parse a libpq-style connection string (host= port= user=).

        Returns a dict with each component as a separate key.  Splits on
        the first '=' only, so values that themselves contain '=' (for
        example passwords) are preserved intact.
        """
        result = {}
        if connstring:
            for x in connstring.split():
                if x and '=' in x:
                    key, val = x.split('=', 1)
                    result[key.strip()] = val.strip()
        return result

    def setup(self):
        """Hook for subclass initialization; the base class needs none."""
        pass

    def replica_method(self):
        """Return the preferred replica-creation callable."""
        return self.create_replica_with_pg_basebackup

    def replica_fallback_method(self):
        """Return a fallback callable, or None when there is no fallback."""
        return None

    def run(self):
        """Create a new replica; return a process-style exit code.

        Tries the preferred method first and, on a non-zero result,
        falls back to the fallback method when one is defined.
        """
        method_fn = self.replica_method()
        ret = method_fn() if method_fn else 1
        if ret != 0 and self.replica_fallback_method() is not None:
            ret = (self.replica_fallback_method())()
        return ret

    def create_replica_with_pg_basebackup(self):
        """Stream a base backup from the master with pg_basebackup.

        Returns the pg_basebackup exit code, or 1 when the command
        could not be run at all.
        """
        try:
            ret = subprocess.call(['pg_basebackup', '-R', '-D',
                                   self.data_dir,
                                   '--host=' + self.master_connection['host'],
                                   '--port=' + str(self.master_connection['port']),
                                   '-U', self.master_connection['user']],
                                  env=self.env)
        except Exception as e:
            logger.error('Error when fetching backup with pg_basebackup: {0}'.format(e))
            return 1
        return ret
class WALERestore(Restore):
    """Create a replica from a WAL-E base backup stored in S3.

    Falls back to pg_basebackup when the S3 restore is not configured,
    not sensible (too much WAL accumulated since the last backup), or
    fails.
    """

    def __init__(self, scope, role, datadir, connstring, env=None):
        super(WALERestore, self).__init__(scope, role, datadir, connstring, env)
        # Set by setup(); True disables the S3 restore path entirely.
        self.init_error = False

    def setup(self):
        """Validate the WAL-E environment and prepare the env directory."""
        # Every required variable must be present AND non-empty.  The
        # previous check `(a and b and c) is None` let empty-string
        # values slip through, because '' and b is '' -- not None.
        required = ('WAL_S3_BUCKET',
                    'WALE_BACKUP_THRESHOLD_PERCENTAGE',
                    'WALE_BACKUP_THRESHOLD_MEGABYTES')
        if not all(self.env.get(name) for name in required):
            self.init_error = True
        else:
            self.wal_e = namedtuple('WALE',
                                    'threshold_megabytes threshold_backup_size_percentage s3_bucket cmd dir env_file')
            self.wal_e.dir = self.env.get('WALE_ENV_DIR', '/home/postgres/etc/wal-e.d/env')
            self.wal_e.env_file = os.path.join(self.wal_e.dir, 'WALE_S3_PREFIX')
            self.wal_e.cmd = 'envdir {} wal-e --aws-instance-profile '.\
                format(self.wal_e.dir)
            self.wal_e.s3_bucket = self.env['WAL_S3_BUCKET']
            self.wal_e.threshold_megabytes = self.env['WALE_BACKUP_THRESHOLD_MEGABYTES']
            self.wal_e.threshold_backup_size_percentage = self.env['WALE_BACKUP_THRESHOLD_PERCENTAGE']
            # check that the env file exists, create it otherwise
            try:
                if not os.path.exists(self.wal_e.dir):
                    os.makedirs(self.wal_e.dir)
                # if this is a directory - make sure we have full access there
                elif not (os.path.isdir(self.wal_e.dir) and
                          os.access(self.wal_e.dir, os.R_OK | os.W_OK | os.X_OK)):
                    logger.error("Unable to access {} or not a directory".format(self.wal_e.dir))
                    self.init_error = True
                # if WALE_S3_PREFIX is not there - create it and write the full path to bucket
                if not self.init_error and not os.path.exists(self.wal_e.env_file):
                    with open(self.wal_e.env_file, 'w') as f:
                        f.write("s3://{0}/spilo/{1}/wal/\n".format(self.wal_e.s3_bucket, self.scope))
            except (os.error, IOError) as e:
                logger.error("{0}: WAL-e archiving is disabled".format(e))
                self.init_error = True

    def replica_method(self):
        """Prefer the S3 restore when it is configured and worthwhile."""
        if self.should_use_s3_to_create_replica():
            return self.create_replica_with_s3
        return None

    def replica_fallback_method(self):
        """pg_basebackup is the fallback when S3 is unusable or fails."""
        return self.create_replica_with_pg_basebackup

    def should_use_s3_to_create_replica(self):
        """Determine whether it makes sense to use S3 and not pg_basebackup.

        Restoring from S3 is only worthwhile when the WAL accumulated
        since the latest backup stays below both the absolute megabyte
        threshold and the percentage-of-backup-size threshold.
        """
        if self.init_error:
            return False

        threshold_megabytes = self.wal_e.threshold_megabytes
        threshold_backup_size_percentage = self.wal_e.threshold_backup_size_percentage

        try:
            latest_backup = subprocess.check_output(self.wal_e.cmd.split() + ['backup-list', '--detail', 'LATEST'],
                                                    env=self.env)
            # name last_modified expanded_size_bytes wal_segment_backup_start wal_segment_offset_backup_start
            # wal_segment_backup_stop wal_segment_offset_backup_stop
            # base_00000001000000000000007F_00000040 2015-05-18T10:13:25.000Z
            # 20310671 00000001000000000000007F 00000040
            # 00000001000000000000007F 00000240
            backup_strings = latest_backup.splitlines() if latest_backup else ()
            # expect exactly a header line and one data line
            if len(backup_strings) != 2:
                return False

            names = backup_strings[0].split()
            vals = backup_strings[1].split()
            if (len(names) != len(vals)) or (len(names) != 7):
                return False

            backup_info = dict(zip(names, vals))
        except subprocess.CalledProcessError as e:
            logger.error("could not query wal-e latest backup: {}".format(e))
            return False

        try:
            backup_size = backup_info['expanded_size_bytes']
            backup_start_segment = backup_info['wal_segment_backup_start']
            backup_start_offset = backup_info['wal_segment_offset_backup_start']
        except Exception as e:
            logger.error("unable to get some of S3 backup parameters: {}".format(e))
            return False

        # WAL filename is XXXXXXXXYYYYYYYY000000ZZ, where X - timeline, Y - LSN logical log file,
        # ZZ - 2 high digits of LSN offset. The rest of the offset is the provided decimal offset,
        # that we have to convert to hex and 'prepend' to the high offset digits.
        lsn_segment = backup_start_segment[8:16]
        # first 2 characters of the result are 0x and the last one is L
        lsn_offset = hex((long(backup_start_segment[16:32], 16) << 24) + long(backup_start_offset))[2:-1]

        # construct the LSN from the segment and offset
        backup_start_lsn = '{}/{}'.format(lsn_segment, lsn_offset)

        conn = None
        cursor = None
        diff_in_bytes = long(backup_size)
        try:
            # get the difference in bytes between the current WAL location and the backup start offset
            conn = psycopg2.connect(**(self.master_connection))
            conn.autocommit = True
            cursor = conn.cursor()
            cursor.execute("SELECT pg_xlog_location_diff(pg_current_xlog_location(), %s)", (backup_start_lsn,))
            diff_in_bytes = long(cursor.fetchone()[0])
        except psycopg2.Error as e:
            logger.error('could not determine difference with the master location: {}'.format(e))
            return False
        finally:
            cursor and cursor.close()
            conn and conn.close()

        # if the size of the accumulated WAL segments is more than a certain percentage of the backup size
        # or exceeds the pre-determined size - pg_basebackup is chosen instead.
        return (diff_in_bytes < long(threshold_megabytes) * 1048576) and\
            (diff_in_bytes < long(backup_size) * float(threshold_backup_size_percentage) / 100)

    def create_replica_with_s3(self):
        """Fetch the latest base backup from S3 with WAL-E.

        Returns the wal-e exit code, or 1 on configuration/launch error.
        """
        if self.init_error:
            return 1
        try:
            # Pass an argument list: the previous code passed the whole
            # command line as a single string without shell=True, which
            # makes subprocess treat it as the program name and fail.
            ret = subprocess.call(self.wal_e.cmd.split() +
                                  ['backup-fetch', self.data_dir, 'LATEST'],
                                  env=self.env)
        except Exception as e:
            logger.error('Error when fetching backup with WAL-E: {0}'.format(e))
            return 1
        return ret
if __name__ == '__main__':
    # Positional arguments: scope, role, datadir, connstring
    # (argv[0] is the script name, hence the count of 5).
    if len(sys.argv) == 5:
        # scope, role, datadir, connstring
        restore = WALERestore(*(sys.argv[1:]))
        restore.setup()
        # run() returns a process-style exit code (0 = success)
        sys.exit(restore.run())
    # wrong argument count: sys.exit with a string prints it to stderr
    # and exits with status 1
    sys.exit("Usage: {0} scope role datadir connstring".format(sys.argv[0]))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import sys
import warnings
import numpy
from pyspark import RDD, since
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import SparseVector, _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint, LinearModel, _regression_train_wrapper,
StreamingLinearAlgorithm)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'LogisticRegressionWithLBFGS',
'SVMModel', 'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes',
'StreamingLogisticRegressionWithSGD']
class LinearClassificationModel(LinearModel):
    """
    A private abstract base class for multiclass linear classification
    models.  Categories are encoded as the int values 0, 1, 2, etc.
    """

    def __init__(self, weights, intercept):
        super(LinearClassificationModel, self).__init__(weights, intercept)
        self._threshold = None

    @since('1.4.0')
    def setThreshold(self, value):
        """
        Sets the threshold separating positive predictions from
        negative ones.  An example whose prediction score is at least
        this threshold is classified positive, otherwise negative.
        Binary classification only.
        """
        self._threshold = value

    @property
    @since('1.4.0')
    def threshold(self):
        """
        The threshold (if any) used to convert raw prediction scores
        into 0/1 predictions.  Binary classification only.
        """
        return self._threshold

    @since('1.4.0')
    def clearThreshold(self):
        """
        Clears the threshold so that `predict` outputs raw prediction
        scores instead of 0/1 labels.  Binary classification only.
        """
        self._threshold = None

    @since('1.4.0')
    def predict(self, test):
        """
        Predict values for a single data point or an RDD of points
        using the trained model.  Subclasses must override.
        """
        raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
:param weights:
Weights computed for every feature.
:param intercept:
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
not bea single value, so the intercepts will be part of the
weights.)
:param numFeatures:
The dimension of the features.
:param numClasses:
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(numpy.array([0.0, 1.0]))
1
>>> lrm.predict(numpy.array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(numpy.array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
.. versionadded:: 0.9.0
"""
def __init__(self, weights, intercept, numFeatures, numClasses):
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // (self._numClasses - 1)
self._weightsMatrix = self._coeff.toArray().reshape(self._numClasses - 1,
self._dataWithBiasSize)
    @property
    @since('1.4.0')
    def numFeatures(self):
        """
        Dimension of the features.

        Set from the ``numFeatures`` constructor argument.
        """
        return self._numFeatures
    @property
    @since('1.4.0')
    def numClasses(self):
        """
        Number of possible outcomes for k classes classification problem
        in Multinomial Logistic Regression.

        Set from the ``numClasses`` constructor argument (2 for binary).
        """
        return self._numClasses
@since('0.9.0')
def predict(self, x):
"""
Predict values for a single data point or an RDD of points
using the model trained.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
self._weightsMatrix[i][x.size]
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i])
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class
    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.

        :param sc: SparkContext used to reach the JVM.
        :param path: path to persist the model under.
        """
        # Mirror this Python model as a JVM LogisticRegressionModel and
        # delegate the actual persistence to the Scala implementation.
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
            _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
        java_model.save(sc._jsc.sc(), path)
    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.

        :param sc: SparkContext used to reach the JVM.
        :param path: path the model was saved under.
        :return: a LogisticRegressionModel with the persisted weights,
                 intercept, dimensions and threshold restored.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
            sc._jsc.sc(), path)
        # Pull the scalar/vector state back from the JVM model.
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        numFeatures = java_model.numFeatures()
        numClasses = java_model.numClasses()
        threshold = java_model.getThreshold().get()
        model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
        # Restore the decision threshold separately (not a constructor arg).
        model.setThreshold(threshold)
        return model
    def __repr__(self):
        # Delegate to the JVM model's toString for a readable summary.
        # NOTE(review): relies on a _call_java helper inherited from a JVM
        # wrapper base class -- confirm it is available on this class.
        return self._call_java("toString")
class LogisticRegressionWithSGD(object):
    """
    Train a binary logistic regression model using Stochastic Gradient
    Descent (this class only supports binary classification).

    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or
        LogisticRegressionWithLBFGS.
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.01, regType="l2", intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        :return: a trained LogisticRegressionModel (built by
          _regression_train_wrapper).
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
            "LogisticRegressionWithLBFGS.", DeprecationWarning)

        # Callback for _regression_train_wrapper: rdd is the training data,
        # i the initial weights vector supplied by the wrapper.
        def train(rdd, i):
            return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
                                 float(step), float(miniBatchFraction), i, float(regParam), regType,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS(object):
    """
    Train a logistic regression model (binary or multinomial) using the
    L-BFGS optimizer.

    .. versionadded:: 1.2.0
    """
    @classmethod
    @since('1.2.0')
    def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
              intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param corrections:
          The number of corrections used in the LBFGS update.
          If a known updater is used for binary classification,
          it calls the ml implementation and this parameter will
          have no effect. (default: 10)
        :param tolerance:
          The convergence tolerance of iterations for L-BFGS.
          (default: 1e-6)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param numClasses:
          The number of classes (i.e., outcomes) a label can take in
          Multinomial Logistic Regression.
          (default: 2)

        >>> data = [
        ...     LabeledPoint(0.0, [0.0, 1.0]),
        ...     LabeledPoint(1.0, [1.0, 0.0]),
        ... ]
        >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
        >>> lrm.predict([1.0, 0.0])
        1
        >>> lrm.predict([0.0, 1.0])
        0
        """
        # Callback for _regression_train_wrapper: rdd is the training data,
        # i the initial weights vector supplied by the wrapper.
        def train(rdd, i):
            return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i,
                                 float(regParam), regType, bool(intercept), int(corrections),
                                 float(tolerance), bool(validateData), int(numClasses))

        if initialWeights is None:
            if numClasses == 2:
                initialWeights = [0.0] * len(data.first().features)
            else:
                # Multinomial: one weight row per non-baseline class; with an
                # intercept, each row carries one extra bias term.
                if intercept:
                    initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
                else:
                    initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
    """
    Model for Support Vector Machines (SVMs).

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(1.0, [2.0]),
    ...     LabeledPoint(1.0, [3.0])
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
    >>> svm.predict([1.0])
    1
    >>> svm.predict(sc.parallelize([[1.0]])).collect()
    [1]
    >>> svm.clearThreshold()
    >>> svm.predict(numpy.array([1.0]))
    1.44...

    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> svm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> svm.predict(SparseVector(2, {0: -1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> svm.save(sc, path)
    >>> sameModel = SVMModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {1: 1.0}))
    1
    >>> sameModel.predict(SparseVector(2, {0: -1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept):
        super(SVMModel, self).__init__(weights, intercept)
        # Default decision threshold applied to the raw margin.
        self._threshold = 0.0

    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        # Distribute the prediction when given an RDD of points.
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        x = _convert_to_vector(x)
        margin = self.weights.dot(x) + self.intercept
        # With no threshold set (clearThreshold), return the raw margin.
        if self._threshold is None:
            return margin
        else:
            return 1 if margin > self._threshold else 0

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        # Mirror this Python model as a JVM SVMModel and delegate persistence.
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        threshold = java_model.getThreshold().get()
        model = SVMModel(weights, intercept)
        # Restore the decision threshold separately (not a constructor arg).
        model.setThreshold(threshold)
        return model
class SVMWithSGD(object):
    """
    Train a Support Vector Machine using Stochastic Gradient Descent.

    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, regType="l2",
              intercept=False, validateData=True, convergenceTol=0.001):
        """
        Train a support vector machine on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regType:
          The type of regularizer used for training our model.
          Allowed values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        :return: a trained SVMModel (built by _regression_train_wrapper).
        """
        # Callback for _regression_train_wrapper: rdd is the training data,
        # i the initial weights vector supplied by the wrapper.
        def train(rdd, i):
            return callMLlibFunc("trainSVMModelWithSGD", rdd, int(iterations), float(step),
                                 float(regParam), float(miniBatchFraction), i, regType,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(train, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader):
    """
    Model for Naive Bayes classifiers.

    :param labels:
      List of labels.
    :param pi:
      Log of class priors, whose dimension is C, number of labels.
    :param theta:
      Log of class conditional probabilities, whose dimension is C-by-D,
      where D is number of features.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 0.0]),
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(data))
    >>> model.predict(numpy.array([0.0, 1.0]))
    0.0
    >>> model.predict(numpy.array([1.0, 0.0]))
    1.0
    >>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
    [1.0]
    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(sparse_data))
    >>> model.predict(SparseVector(2, {1: 1.0}))
    0.0
    >>> model.predict(SparseVector(2, {0: 1.0}))
    1.0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = NaiveBayesModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, labels, pi, theta):
        # labels: class labels; pi: log class priors; theta: log class
        # conditional probabilities (see class docstring for shapes).
        self.labels = labels
        self.pi = pi
        self.theta = theta

    @since('0.9.0')
    def predict(self, x):
        """
        Return the most likely class for a data vector
        or an RDD of vectors
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))
        x = _convert_to_vector(x)
        # Pick the label maximizing the log-posterior: log prior + log likelihood.
        return self.labels[numpy.argmax(self.pi + x.dot(self.theta.transpose()))]

    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        # Convert numpy state to plain lists so it crosses the Py4J bridge.
        java_labels = _py2java(sc, self.labels.tolist())
        java_pi = _py2java(sc, self.pi.tolist())
        java_theta = _py2java(sc, self.theta.tolist())
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
            java_labels, java_pi, java_theta)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
            sc._jsc.sc(), path)
        # Can not unpickle array.array from Pyrolite in Python3 with "bytes"
        py_labels = _java2py(sc, java_model.labels(), "latin1")
        py_pi = _java2py(sc, java_model.pi(), "latin1")
        py_theta = _java2py(sc, java_model.theta(), "latin1")
        return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes(object):
    """
    Trainer for multinomial Naive Bayes models.

    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, lambda_=1.0):
        """
        Train a Naive Bayes model given an RDD of (label, features)
        vectors.

        This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
        can handle all kinds of discrete data. For example, by
        converting documents into TF-IDF vectors, it can be used for
        document classification. By making every vector a 0-1 vector,
        it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
        The input feature values must be nonnegative.

        :param data:
          RDD of LabeledPoint.
        :param lambda_:
          The smoothing parameter.
          (default: 1.0)
        :return: a NaiveBayesModel built from the JVM training result.
        """
        # Cheap sanity check on the first element only; full validation
        # happens on the JVM side.
        first = data.first()
        if not isinstance(first, LabeledPoint):
            raise ValueError("`data` should be an RDD of LabeledPoint")
        labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
        return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a logistic regression model on streaming data.
    Training uses Stochastic Gradient Descent to update the model based on
    each new batch of incoming data from a DStream.

    Each batch of data is assumed to be an RDD of LabeledPoints.
    The number of data points per batch can vary, but the number
    of features must be constant. An initial weight
    vector must be provided.

    :param stepSize:
      Step size for each iteration of gradient descent.
      (default: 0.1)
    :param numIterations:
      Number of iterations run for each batch of data.
      (default: 50)
    :param miniBatchFraction:
      Fraction of each batch of data to use for updates.
      (default: 1.0)
    :param regParam:
      L2 Regularization parameter.
      (default: 0.0)
    :param convergenceTol:
      Value used to determine when to terminate iterations.
      (default: 0.001)

    .. versionadded:: 1.5.0
    """
    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.0,
                 convergenceTol=0.001):
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.regParam = regParam
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        # Model stays None until setInitialWeights is called.
        self._model = None
        super(StreamingLogisticRegressionWithSGD, self).__init__(
            model=self._model)

    @since('1.5.0')
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.

        This must be set before running trainOn and predictOn.
        """
        initialWeights = _convert_to_vector(initialWeights)

        # LogisticRegressionWithSGD does only binary classification.
        self._model = LogisticRegressionModel(
            initialWeights, 0, initialWeights.size, 2)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def update(rdd):
            # LogisticRegressionWithSGD.train raises an error for an empty RDD.
            if not rdd.isEmpty():
                # Warm-start each batch from the current model's weights.
                self._model = LogisticRegressionWithSGD.train(
                    rdd, self.numIterations, self.stepSize,
                    self.miniBatchFraction, self._model.weights,
                    regParam=self.regParam, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(update)
def _test():
    """Run this module's doctests against a local SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.classification

    globs = pyspark.mllib.classification.__dict__.copy()
    spark = (SparkSession.builder
             .master("local[4]")
             .appName("mllib.classification tests")
             .getOrCreate())
    globs['sc'] = spark.sparkContext
    failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    _test()
| |
from solr import Solr
import os, sys
# NOTE(review): Python 2-only hack -- reload() and sys.setdefaultencoding()
# do not exist in Python 3. Forces UTF-8 as the implicit str codec.
reload(sys)
sys.setdefaultencoding('UTF8')   # making UTF8 as default encoding
from argparse import ArgumentParser
from indexer import parse_lpsc_from_path
import re
from utils import canonical_name, canonical_target_name
# Functions to perform reference removal (assumes [n] reference style)
# Written by Karanjeet Singh
def extract_references(content):
    """
    Extract references from text (assumes [n] reference style).

    :param content: text
    :return: dictionary of references with reference id ([N]) as key
    """
    references = {}
    # Fold newlines into a literal "\n" escape so a reference that wraps
    # across lines can be matched by one regex pass over a single line.
    content = content.replace("\n", "\\n")
    # A reference runs from "[N]" up to (but not including) the next "[" or a
    # section heading that typically ends the reference list.
    # Raw string avoids invalid-escape SyntaxWarnings on modern Python.
    matches = re.findall(
        r'(\[[0-9]+\][^\[]*?(?=\[|Acknowledge|Fig|Table|Conclusion|pdf))',
        content)
    for match in matches:
        ref_id = get_reference_id(match)
        # No reference id exists -- skip it
        if ref_id != -1:
            # Restore the original newlines in the stored reference text.
            references[ref_id] = match.replace('\\n', '\n')
    return references
def get_reference_id(reference):
    """
    Extract the numeric reference id ([N]) from a reference string.

    :param reference: any possible reference text
    :return: the integer id N, or -1 when no "[N]" marker is present
    """
    # Raw string avoids invalid-escape SyntaxWarnings on modern Python.
    match = re.search(r'\[[0-9]+\]', reference)
    if match:
        return int(match.group(0).strip('[]'))
    return -1
class BratAnnIndexer():
    '''
    This class reads/parses brat annotations from file system and indexes them
    into Solr.
    '''

    def parse_ann_line(self, ann_line):
        '''
        parses each annotation line

        @param ann_line a single tab-separated line from a brat .ann file
        @return a Solr document dict, or None for unknown annotation types
        '''
        parts = ann_line.strip().split('\t')
        res = {
            'annotation_id_s': parts[0],
            #'source': 'brat',
            'source': 'reviewed',
        }
        # The leading character of the annotation id encodes its kind.
        if parts[0][0] == 'T':  # anchors (for targets, components, events)
            args = parts[1].split()[1:]
            res.update({
                'mainType': 'anchor',
                'type': parts[1].split()[0],
                'span_start': int(args[0]),
                'span_end': int(args[-1]),
                'name': parts[2]
            })
        elif parts[0][0] == 'E':  # event
            args = parts[1].split()
            # Each argument after the trigger is "Role:AnnotationId".
            subargs = [a.split(':') for a in args[1:]]
            res.update({
                'mainType' : 'event',
                'type' : args[0].split(':')[0],
                'anchor_s' : args[0].split(':')[1],
                'targets_ss' : [v for (t,v) in subargs if t.startswith('Targ')],
                'cont_ss' : [v for (t,v) in subargs if t.startswith('Cont')]
            })
        elif parts[0][0] == 'R':  # relation
            label, arg1, arg2 = parts[1].split()  # assumes 2 args
            res.update({
                'mainType' : 'relation',
                'type': label,
                'arg1_s': arg1.split(':')[1],
                'arg2_s': arg2.split(':')[1]
            })
        elif parts[0][0] == 'A':  # attribute
            label, arg, value = parts[1].split()
            res.update({
                'mainType': 'attribute',
                'type': label,
                'arg1_s': arg,
                'value_s': value
            })
        else:
            # NOTE(review): Python 2 print statement -- this module is not
            # Python 3 compatible as written.
            print 'Unknown annotation type:', parts[0]
            return None
        res['type'] = res['type'].lower()
        # Solr nested-document bookkeeping fields.
        res['_path'] = '/%s' % res['type']
        res['_depth'] = 1
        return res

    def extract_excerpt(self, content, ann):
        '''
        Extracts excerpt of an annotation from content

        @param content - text content of document
        @param ann annotation having span_start and span_end
        @return excerpt text
        '''
        (anchor_start, anchor_end) = ann['span_start'], ann['span_end']
        # Start: first capital letter after last period before last capital letter!
        sent_start = 0
        # Last preceding capital
        m = [m for m in re.finditer('[A-Z]', content[:anchor_start])]
        if m:
            sent_start = m[-1].start()
        # Last preceding period
        sent_start = max(content[:sent_start].rfind('.'), 0)
        # Next capital
        m = re.search('[A-Z]', content[sent_start:])
        if m:
            sent_start = sent_start + m.start()
        # End: next period followed by {space,newline}, or end of document.
        # Better: skip "wt.", "ig." (for Figure), "(e" or ".g"
        m = re.search('(?<!(wt|ig|\(e|\.g))\.[ \n]', content[anchor_end:])
        if m != None:
            sent_end = anchor_end + m.start() + 1
        else:
            sent_end = len(content)
        return content[sent_start:sent_end]

    def read_records(self, in_file):
        '''
        Reads brat annotations as solr input documents

        Yields one parent document per input row, followed by its child
        annotation documents.

        @param in_file Input CSV file having text file and annotation file paths
        '''
        with open(in_file) as inp:
            for line in inp:  # assumption: input file is a csv having .txt,.ann paths
                txt_f, ann_f = line.strip().split(',')
                doc_id, doc_year, doc_url = parse_lpsc_from_path(ann_f)
                venue = "LPSC-%d" % doc_year
                if doc_id:
                    # NOTE(review): this expression's result is discarded --
                    # it looks like a leftover; doc_id is unchanged. Confirm intent.
                    ann_f.split('/')[-1].replace('.ann', '')
                with open(txt_f) as txtp:
                    txt = txtp.read()
                with open(ann_f) as annp:
                    anns = list(map(self.parse_ann_line, annp.readlines()))
                children = []
                index = {}  # index all annotations by its ids
                for ann in filter(lambda x: x is not None, anns):
                    ann_id = ann['annotation_id_s']
                    ann['id'] = '%s_%s_%s_%s' % (doc_id, ann['source'], ann['type'], ann_id)
                    ann['p_id'] = doc_id
                    index[ann_id] = ann
                    children.append(ann)
                # resolve references from Events to Targets and Contains
                contains = filter(lambda a: a.get('mainType') == 'event'\
                                  and a.get('type') == 'contains', children)
                for ch in contains:
                    targets_anns = ch.get('targets_ss', [])
                    cont_anns = ch.get('cont_ss', [])
                    ch['target_ids_ss'] = list(map(lambda t: index[t]['id'], targets_anns))
                    ch['target_ann_ids_ss'] = list(map(lambda t: index[t]['annotation_id_s'], targets_anns))
                    ch['target_names_ss'] = list(map(lambda t: index[t]['name'], targets_anns))
                    ch['cont_ids_ss'] = list(map(lambda c: index[c]['id'], cont_anns))
                    ch['cont_names_ss'] = list(map(lambda c: index[c]['name'], cont_anns))
                    # extract excerpt from anchor annotation
                    anc_doc = index[ch['anchor_s']]
                    ch['excerpt_t'] = self.extract_excerpt(txt, anc_doc)
                # Track aliases
                targets = [a for a in children if a.get('type') == 'target']
                aliases = [a for a in children if a.get('type') == 'alias']
                # Extract references
                references = extract_references(txt)
                # Remove references from the content
                # (blank them with spaces so annotation offsets stay valid).
                for ref_id in references:
                    txt = txt.replace(references[ref_id], ' ' * len(references[ref_id]))
                yield {
                    'id' : doc_id,
                    'content_ann_s': {'set': txt},
                    'references': {'set': references.values()},
                    'type': {'set': 'doc'},
                    'url': {'set': doc_url},
                    'year': {'set': doc_year},
                    'venue': {'set': venue}
                }
                for child in children:
                    # Normalize names to canonical forms before indexing.
                    if 'name' in child:
                        if child['type'] == 'target':
                            child['can_name'] = \
                                canonical_target_name(child['name'],
                                                      child['annotation_id_s'],
                                                      targets, aliases)
                        else:
                            child['can_name'] = canonical_name(child['name'])
                    if 'target_names_ss' in child:
                        child['target_names_ss'] = \
                            [canonical_target_name(t, i, targets, aliases) \
                             for (t,i) in zip(child['target_names_ss'],
                                              child['target_ann_ids_ss'])]
                    if 'cont_names_ss' in child:
                        child['cont_names_ss'] = \
                            [canonical_name(c) for c in child['cont_names_ss']]
                    yield child

    def index(self, solr_url, in_file):
        '''
        Reads annotations at the specified path and indexes them to solr

        @param solr_url Target Solr URL to index
        @param in_file CSV file having text file and annotation file paths
        '''
        solr = Solr(solr_url)
        recs = self.read_records(in_file)
        count, success, = solr.post_iterator(recs)
        if success:
            print("Indexed %d docs" % count)
        else:
            print("Error: Failed. Check solr logs")
if __name__ == '__main__':
    # CLI entry point: read the input CSV and push the annotations into Solr.
    parser = ArgumentParser()
    parser.add_argument('-i', '--in', help="Path to input csv file having .txt,.ann records", required=True)
    parser.add_argument('-s', '--solr-url', help="Solr URL", default="http://localhost:8983/solr/docsdev")
    opts = vars(parser.parse_args())
    BratAnnIndexer().index(solr_url=opts['solr_url'], in_file=opts['in'])
| |
from __future__ import annotations
from typing import List, Tuple, Callable, Optional
import pytest
from itertools import product
from numpy.testing import assert_allclose, suppress_warnings
from scipy import special
from scipy.special import cython_special
# Sample inputs per scalar kind used to exercise the cython_special wrappers.
bint_points = [True, False]
int_points = [-10, -1, 1, 10]
real_points = [float(p) for p in int_points]
# All (real, imag) combinations of the real sample points.
complex_points = [complex(re_, im_) for re_, im_ in product(real_points, repeat=2)]
# Maps one-letter ufunc typecodes to the corresponding Cython scalar type
# names used in cython_special signatures.
CYTHON_SIGNATURE_MAP = {
    'b': 'bint',
    'f': 'float',
    'd': 'double',
    'g': 'long double',
    'F': 'float complex',
    'D': 'double complex',
    'G': 'long double complex',
    'i': 'int',
    'l': 'long'
}
# Maps each typecode to the list of sample points used to evaluate arguments
# of that type (see bint_points/int_points/real_points/complex_points above).
TEST_POINTS = {
    'b': bint_points,
    'f': real_points,
    'd': real_points,
    'g': real_points,
    'F': complex_points,
    'D': complex_points,
    'G': complex_points,
    'i': int_points,
    'l': int_points,
}
PARAMS: List[Tuple[Callable, Callable, Tuple[str, ...], Optional[str]]] = [
(special.agm, cython_special.agm, ('dd',), None),
(special.airy, cython_special._airy_pywrap, ('d', 'D'), None),
(special.airye, cython_special._airye_pywrap, ('d', 'D'), None),
(special.bdtr, cython_special.bdtr, ('dld', 'ddd'), None),
(special.bdtrc, cython_special.bdtrc, ('dld', 'ddd'), None),
(special.bdtri, cython_special.bdtri, ('dld', 'ddd'), None),
(special.bdtrik, cython_special.bdtrik, ('ddd',), None),
(special.bdtrin, cython_special.bdtrin, ('ddd',), None),
(special.bei, cython_special.bei, ('d',), None),
(special.beip, cython_special.beip, ('d',), None),
(special.ber, cython_special.ber, ('d',), None),
(special.berp, cython_special.berp, ('d',), None),
(special.besselpoly, cython_special.besselpoly, ('ddd',), None),
(special.beta, cython_special.beta, ('dd',), None),
(special.betainc, cython_special.betainc, ('ddd',), None),
(special.betaincinv, cython_special.betaincinv, ('ddd',), None),
(special.betaln, cython_special.betaln, ('dd',), None),
(special.binom, cython_special.binom, ('dd',), None),
(special.boxcox, cython_special.boxcox, ('dd',), None),
(special.boxcox1p, cython_special.boxcox1p, ('dd',), None),
(special.btdtr, cython_special.btdtr, ('ddd',), None),
(special.btdtri, cython_special.btdtri, ('ddd',), None),
(special.btdtria, cython_special.btdtria, ('ddd',), None),
(special.btdtrib, cython_special.btdtrib, ('ddd',), None),
(special.cbrt, cython_special.cbrt, ('d',), None),
(special.chdtr, cython_special.chdtr, ('dd',), None),
(special.chdtrc, cython_special.chdtrc, ('dd',), None),
(special.chdtri, cython_special.chdtri, ('dd',), None),
(special.chdtriv, cython_special.chdtriv, ('dd',), None),
(special.chndtr, cython_special.chndtr, ('ddd',), None),
(special.chndtridf, cython_special.chndtridf, ('ddd',), None),
(special.chndtrinc, cython_special.chndtrinc, ('ddd',), None),
(special.chndtrix, cython_special.chndtrix, ('ddd',), None),
(special.cosdg, cython_special.cosdg, ('d',), None),
(special.cosm1, cython_special.cosm1, ('d',), None),
(special.cotdg, cython_special.cotdg, ('d',), None),
(special.dawsn, cython_special.dawsn, ('d', 'D'), None),
(special.ellipe, cython_special.ellipe, ('d',), None),
(special.ellipeinc, cython_special.ellipeinc, ('dd',), None),
(special.ellipj, cython_special._ellipj_pywrap, ('dd',), None),
(special.ellipkinc, cython_special.ellipkinc, ('dd',), None),
(special.ellipkm1, cython_special.ellipkm1, ('d',), None),
(special.ellipk, cython_special.ellipk, ('d',), None),
(special.entr, cython_special.entr, ('d',), None),
(special.erf, cython_special.erf, ('d', 'D'), None),
(special.erfc, cython_special.erfc, ('d', 'D'), None),
(special.erfcx, cython_special.erfcx, ('d', 'D'), None),
(special.erfi, cython_special.erfi, ('d', 'D'), None),
(special.erfinv, cython_special.erfinv, ('d',), None),
(special.erfcinv, cython_special.erfcinv, ('d',), None),
(special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None),
(special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_hermite, cython_special.eval_hermite, ('ld',), None),
(special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None),
(special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None),
(special.exp1, cython_special.exp1, ('d', 'D'), None),
(special.exp10, cython_special.exp10, ('d',), None),
(special.exp2, cython_special.exp2, ('d',), None),
(special.expi, cython_special.expi, ('d', 'D'), None),
(special.expit, cython_special.expit, ('f', 'd', 'g'), None),
(special.expm1, cython_special.expm1, ('d', 'D'), None),
(special.expn, cython_special.expn, ('ld', 'dd'), None),
(special.exprel, cython_special.exprel, ('d',), None),
(special.fdtr, cython_special.fdtr, ('ddd',), None),
(special.fdtrc, cython_special.fdtrc, ('ddd',), None),
(special.fdtri, cython_special.fdtri, ('ddd',), None),
(special.fdtridfd, cython_special.fdtridfd, ('ddd',), None),
(special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None),
(special.gamma, cython_special.gamma, ('d', 'D'), None),
(special.gammainc, cython_special.gammainc, ('dd',), None),
(special.gammaincc, cython_special.gammaincc, ('dd',), None),
(special.gammainccinv, cython_special.gammainccinv, ('dd',), None),
(special.gammaincinv, cython_special.gammaincinv, ('dd',), None),
(special.gammaln, cython_special.gammaln, ('d',), None),
(special.gammasgn, cython_special.gammasgn, ('d',), None),
(special.gdtr, cython_special.gdtr, ('ddd',), None),
(special.gdtrc, cython_special.gdtrc, ('ddd',), None),
(special.gdtria, cython_special.gdtria, ('ddd',), None),
(special.gdtrib, cython_special.gdtrib, ('ddd',), None),
(special.gdtrix, cython_special.gdtrix, ('ddd',), None),
(special.hankel1, cython_special.hankel1, ('dD',), None),
(special.hankel1e, cython_special.hankel1e, ('dD',), None),
(special.hankel2, cython_special.hankel2, ('dD',), None),
(special.hankel2e, cython_special.hankel2e, ('dD',), None),
(special.huber, cython_special.huber, ('dd',), None),
(special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None),
(special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None),
(special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None),
(special.hyperu, cython_special.hyperu, ('ddd',), None),
(special.i0, cython_special.i0, ('d',), None),
(special.i0e, cython_special.i0e, ('d',), None),
(special.i1, cython_special.i1, ('d',), None),
(special.i1e, cython_special.i1e, ('d',), None),
(special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None),
(special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None),
(special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None),
(special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None),
(special.it2struve0, cython_special.it2struve0, ('d',), None),
(special.itairy, cython_special._itairy_pywrap, ('d',), None),
(special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None),
(special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None),
(special.itmodstruve0, cython_special.itmodstruve0, ('d',), None),
(special.itstruve0, cython_special.itstruve0, ('d',), None),
(special.iv, cython_special.iv, ('dd', 'dD'), None),
(special.ive, cython_special.ive, ('dd', 'dD'), None),
(special.j0, cython_special.j0, ('d',), None),
(special.j1, cython_special.j1, ('d',), None),
(special.jv, cython_special.jv, ('dd', 'dD'), None),
(special.jve, cython_special.jve, ('dd', 'dD'), None),
(special.k0, cython_special.k0, ('d',), None),
(special.k0e, cython_special.k0e, ('d',), None),
(special.k1, cython_special.k1, ('d',), None),
(special.k1e, cython_special.k1e, ('d',), None),
(special.kei, cython_special.kei, ('d',), None),
(special.keip, cython_special.keip, ('d',), None),
(special.kelvin, cython_special._kelvin_pywrap, ('d',), None),
(special.ker, cython_special.ker, ('d',), None),
(special.kerp, cython_special.kerp, ('d',), None),
(special.kl_div, cython_special.kl_div, ('dd',), None),
(special.kn, cython_special.kn, ('ld', 'dd'), None),
(special.kolmogi, cython_special.kolmogi, ('d',), None),
(special.kolmogorov, cython_special.kolmogorov, ('d',), None),
(special.kv, cython_special.kv, ('dd', 'dD'), None),
(special.kve, cython_special.kve, ('dd', 'dD'), None),
(special.log1p, cython_special.log1p, ('d', 'D'), None),
(special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None),
(special.ndtri_exp, cython_special.ndtri_exp, ('d',), None),
(special.loggamma, cython_special.loggamma, ('D',), None),
(special.logit, cython_special.logit, ('f', 'd', 'g'), None),
(special.lpmv, cython_special.lpmv, ('ddd',), None),
(special.mathieu_a, cython_special.mathieu_a, ('dd',), None),
(special.mathieu_b, cython_special.mathieu_b, ('dd',), None),
(special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None),
(special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None),
(special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None),
(special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None),
(special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None),
(special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None),
(special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None),
(special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None),
(special.modstruve, cython_special.modstruve, ('dd',), None),
(special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None),
(special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None),
(special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None),
(special.nbdtrik, cython_special.nbdtrik, ('ddd',), None),
(special.nbdtrin, cython_special.nbdtrin, ('ddd',), None),
(special.ncfdtr, cython_special.ncfdtr, ('dddd',), None),
(special.ncfdtri, cython_special.ncfdtri, ('dddd',), None),
(special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None),
(special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None),
(special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None),
(special.nctdtr, cython_special.nctdtr, ('ddd',), None),
(special.nctdtridf, cython_special.nctdtridf, ('ddd',), None),
(special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None),
(special.nctdtrit, cython_special.nctdtrit, ('ddd',), None),
(special.ndtr, cython_special.ndtr, ('d', 'D'), None),
(special.ndtri, cython_special.ndtri, ('d',), None),
(special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None),
(special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None),
(special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None),
(special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None),
(special.obl_cv, cython_special.obl_cv, ('ddd',), None),
(special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pbdv, cython_special._pbdv_pywrap, ('dd',), None),
(special.pbvv, cython_special._pbvv_pywrap, ('dd',), None),
(special.pbwa, cython_special._pbwa_pywrap, ('dd',), None),
(special.pdtr, cython_special.pdtr, ('dd', 'dd'), None),
(special.pdtrc, cython_special.pdtrc, ('dd', 'dd'), None),
(special.pdtri, cython_special.pdtri, ('ld', 'dd'), None),
(special.pdtrik, cython_special.pdtrik, ('dd',), None),
(special.poch, cython_special.poch, ('dd',), None),
(special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None),
(special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None),
(special.pro_cv, cython_special.pro_cv, ('ddd',), None),
(special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None),
(special.psi, cython_special.psi, ('d', 'D'), None),
(special.radian, cython_special.radian, ('ddd',), None),
(special.rel_entr, cython_special.rel_entr, ('dd',), None),
(special.rgamma, cython_special.rgamma, ('d', 'D'), None),
(special.round, cython_special.round, ('d',), None),
(special.spherical_jn, cython_special.spherical_jn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_yn, cython_special.spherical_yn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_in, cython_special.spherical_in, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_kn, cython_special.spherical_kn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None),
(special.sici, cython_special._sici_pywrap, ('d', 'D'), None),
(special.sindg, cython_special.sindg, ('d',), None),
(special.smirnov, cython_special.smirnov, ('ld', 'dd'), None),
(special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None),
(special.spence, cython_special.spence, ('d', 'D'), None),
(special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None),
(special.stdtr, cython_special.stdtr, ('dd',), None),
(special.stdtridf, cython_special.stdtridf, ('dd',), None),
(special.stdtrit, cython_special.stdtrit, ('dd',), None),
(special.struve, cython_special.struve, ('dd',), None),
(special.tandg, cython_special.tandg, ('d',), None),
(special.tklmbda, cython_special.tklmbda, ('dd',), None),
(special.voigt_profile, cython_special.voigt_profile, ('ddd',), None),
(special.wofz, cython_special.wofz, ('D',), None),
(special.wright_bessel, cython_special.wright_bessel, ('ddd',), None),
(special.wrightomega, cython_special.wrightomega, ('D',), None),
(special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None),
(special.xlogy, cython_special.xlogy, ('dd', 'DD'), None),
(special.y0, cython_special.y0, ('d',), None),
(special.y1, cython_special.y1, ('d',), None),
(special.yn, cython_special.yn, ('ld', 'dd'), None),
(special.yv, cython_special.yv, ('dd', 'dD'), None),
(special.yve, cython_special.yve, ('dd', 'dD'), None),
(special.zetac, cython_special.zetac, ('d',), None),
(special.owens_t, cython_special.owens_t, ('dd',), None)
]
# Human-readable test IDs: the scipy.special function name for each case.
IDS = [pyfunc.__name__ for pyfunc, _, _, _ in PARAMS]
def _generate_test_points(typecodes):
    """Return the Cartesian product of the TEST_POINTS axes for *typecodes*."""
    axes = [TEST_POINTS[code] for code in typecodes]
    return list(product(*axes))
def test_cython_api_completeness():
    """Every public callable in cython_special must be covered by PARAMS."""
    for name in dir(cython_special):
        func = getattr(cython_special, name)
        if not callable(func) or name.startswith('_'):
            continue
        if not any(cyfun is func for _, cyfun, _, _ in PARAMS):
            raise RuntimeError(f"{name} missing from tests!")
@pytest.mark.parametrize("param", PARAMS, ids=IDS)
def test_cython_api(param):
    """Compare each cython_special binding against its scipy.special original
    for every declared type-specialization."""
    pyfunc, cyfunc, specializations, knownfailure = param
    if knownfailure:
        pytest.xfail(reason=knownfailure)

    # Check which parameters are expected to be fused types
    # (a position is "fused" when different specializations list
    # different type codes for it).
    max_params = max(len(spec) for spec in specializations)
    values = [set() for _ in range(max_params)]
    for typecodes in specializations:
        for j, v in enumerate(typecodes):
            values[j].add(v)
    seen = set()
    is_fused_code = [False] * len(values)
    for j, v in enumerate(values):
        vv = tuple(sorted(v))
        # NOTE(review): a position whose code-set repeats an earlier
        # position is left non-fused; presumably this mirrors how the
        # generated fused signature collapses duplicate type sets --
        # confirm against the cython_special signatures.
        if vv in seen:
            continue
        is_fused_code[j] = (len(v) > 1)
        seen.add(vv)

    # Check results
    for typecodes in specializations:
        # Pick the correct specialized function by indexing the fused
        # callable with the concrete Cython signature types.
        signature = [CYTHON_SIGNATURE_MAP[code]
                     for j, code in enumerate(typecodes)
                     if is_fused_code[j]]

        if signature:
            cy_spec_func = cyfunc[tuple(signature)]
        else:
            signature = None
            cy_spec_func = cyfunc

        # Test it
        pts = _generate_test_points(typecodes)
        for pt in pts:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning)
                pyval = pyfunc(*pt)
                cyval = cy_spec_func(*pt)
            assert_allclose(cyval, pyval, err_msg="{} {} {}".format(pt, typecodes, signature))
| |
# Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import os.path
import random
import shutil
import string
import pytest
from molecule import util
# Silence sub-WARNING chatter from the "sh" logger.
logging.getLogger("sh").setLevel(logging.WARNING)
def random_string(l=5):
    """Return a string of ``l`` random uppercase ASCII letters."""
    letters = [random.choice(string.ascii_uppercase) for _ in range(l)]
    return ''.join(letters)
@pytest.fixture()
def temp_files(tmpdir, request):
    """Return a factory that writes named config fixtures as ``.yml`` files.

    The factory creates a fresh temporary directory, writes each requested
    fixture's content to ``<fixture>.yml`` inside it, chdirs into the
    directory, and registers a finalizer that removes it.

    :returns: factory callable; the factory returns the list of file paths.
    """
    def wrapper(fixtures=None):
        # Fixed: the previous default was ``fixtures=[]``, a mutable
        # default shared across calls. Use None as the sentinel.
        if fixtures is None:
            fixtures = []
        d = tmpdir.mkdir(random_string())
        confs = []
        for fixture in fixtures:
            c = d.join(os.extsep.join((fixture, 'yml')))
            # NOTE(review): getfuncargvalue is deprecated in newer pytest
            # in favour of getfixturevalue -- confirm before upgrading.
            c.write(request.getfuncargvalue(fixture))
            confs.append(c.strpath)

        # TODO(retr0h): Remove - belongs elsewhere
        pbook = d.join(os.extsep.join(('playbook', 'yml')))
        data = [{'hosts': 'all', 'tasks': [{'command': 'echo'}]}]
        pbook.write(data)
        os.chdir(d.strpath)
        # TODO(retr0h): Remove - belongs elsewhere

        def cleanup():
            shutil.rmtree(d.strpath)
        request.addfinalizer(cleanup)

        return confs
    return wrapper
@pytest.fixture()
def molecule_vagrant_config(molecule_section_data, vagrant_section_data,
                            ansible_section_data):
    """Merge the molecule, vagrant and ansible sections into one config.

    Fixed: the original used the bare name ``reduce``, which is not a
    builtin on Python 3 and raised NameError. The explicit loop below
    has identical left-fold semantics.
    """
    config = molecule_section_data
    for section in (vagrant_section_data, ansible_section_data):
        config = util.merge_dicts(config, section)
    return config
@pytest.fixture()
def molecule_proxmox_config(molecule_section_data, proxmox_section_data,
                            ansible_section_data):
    """Merge the molecule, proxmox and ansible sections into one config.

    Fixed: ``reduce`` is not a builtin on Python 3 (NameError); replaced
    with an explicit left-fold loop.
    """
    config = molecule_section_data
    for section in (proxmox_section_data, ansible_section_data):
        config = util.merge_dicts(config, section)
    return config
@pytest.fixture()
def molecule_docker_config(molecule_section_data, docker_section_data,
                           ansible_section_data):
    """Merge the molecule, docker and ansible sections into one config.

    Fixed: ``reduce`` is not a builtin on Python 3 (NameError); replaced
    with an explicit left-fold loop.
    """
    config = molecule_section_data
    for section in (docker_section_data, ansible_section_data):
        config = util.merge_dicts(config, section)
    return config
@pytest.fixture()
def molecule_openstack_config(molecule_section_data, openstack_section_data,
                              ansible_section_data):
    """Merge the molecule, openstack and ansible sections into one config.

    Fixed: ``reduce`` is not a builtin on Python 3 (NameError); replaced
    with an explicit left-fold loop.
    """
    config = molecule_section_data
    for section in (openstack_section_data, ansible_section_data):
        config = util.merge_dicts(config, section)
    return config
@pytest.fixture()
def molecule_section_data(state_path):
    """Return the 'molecule' section of a test configuration."""
    section = {
        'ignore_paths': ['.git', '.vagrant', '.molecule'],
        'serverspec_dir': 'spec',
        'testinfra_dir': 'tests',
        'molecule_dir': 'test',
        'state_file': state_path,
        'vagrantfile_file': 'vagrantfile_file',
        'rakefile_file': 'rakefile_file',
        'vagrantfile_template': 'vagrantfile.j2',
        'ansible_config_template': 'ansible.cfg.j2',
        'rakefile_template': 'rakefile.j2',
        'raw_ssh_args': ['-o StrictHostKeyChecking=no',
                         '-o UserKnownHostsFile=/dev/null'],
        'test': {'sequence': ['destroy', 'syntax', 'create', 'converge',
                              'idempotence', 'verify']},
    }
    return {'molecule': section}
@pytest.fixture()
def vagrant_section_data():
    """Return the 'vagrant' section with one platform/provider/instance."""
    platform = {'name': 'ubuntu', 'box': 'ubuntu/trusty64'}
    provider = {'name': 'virtualbox', 'type': 'virtualbox'}
    instance = {'name': 'aio-01',
                'ansible_groups': ['example', 'example1'],
                'options': {'append_platform_to_hostname': True}}
    return {'vagrant': {'platforms': [platform],
                        'providers': [provider],
                        'instances': [instance]}}
@pytest.fixture()
def proxmox_section_data():
    """Return an empty 'proxmox' section."""
    section = {}
    return {'proxmox': section}
@pytest.fixture()
def docker_section_data():
    """Return a 'docker' section describing two containers."""
    container_one = {
        'name': 'test1',
        'image': 'ubuntu',
        'image_version': 'latest',
        'port_bindings': {80: 80, 443: 443},
        'options': {'append_platform_to_hostname': True},
        'volume_mounts': ['/tmp/test1:/inside:rw'],
        'ansible_groups': ['group1'],
    }
    container_two = {
        'name': 'test2',
        'image': 'ubuntu',
        'image_version': 'latest',
        'ansible_groups': ['group2'],
        'command': '/bin/sh',
        'options': {'append_platform_to_hostname': True},
    }
    return {'docker': {'containers': [container_one, container_two]}}
@pytest.fixture()
def openstack_section_data():
    """Return an 'openstack' section with a single instance."""
    instance = {'name': 'aio-01',
                'ansible_groups': ['example', 'example1'],
                'options': {'append_platform_to_hostname': True}}
    return {'openstack': {'instances': [instance]}}
@pytest.fixture()
def ansible_section_data():
    """Return the 'ansible' section plus an empty 'testinfra' section."""
    ansible = {
        'timeout': 30,
        'sudo': True,
        'sudo_user': False,
        'ask_sudo_pass': False,
        'ask_vault_pass': False,
        'vault_password_file': False,
        'limit': 'all',
        'verbose': True,
        'diff': True,
        'tags': False,
        'host_key_checking': False,
        'raw_ssh_args': ['-o UserKnownHostsFile=/dev/null',
                         '-o IdentitiesOnly=yes',
                         '-o ControlMaster=auto',
                         '-o ControlPersist=60s'],
        'galaxy': {},
        'config_file': 'config_file',
        'inventory_file': 'inventory_file',
        'playbook': 'playbook.yml',
        'raw_env_vars': {'FOO': 'bar'},
    }
    return {'ansible': ansible, 'testinfra': {}}
@pytest.fixture()
def state_data():
    """Return an empty initial state mapping."""
    return dict()
@pytest.fixture()
def state_path(temp_files):
    """Write the 'state_data' fixture to disk and return its file path."""
    written = temp_files(fixtures=['state_data'])
    return written[0]
| |
import re
import pytest
from aiohttp import web
async def test_middleware_modifies_response(loop, test_client):
    """A middleware may change the status and append to the body text."""
    async def handler(request):
        return web.Response(body=b'OK')

    @web.middleware
    async def middleware(request, handler):
        response = await handler(request)
        assert response.status == 200
        response.set_status(201)
        response.text = response.text + '[MIDDLEWARE]'
        return response

    app = web.Application()
    app.middlewares.append(middleware)
    app.router.add_route('GET', '/', handler)
    client = await test_client(app)

    resp = await client.get('/')
    assert resp.status == 201
    body = await resp.text()
    assert body == 'OK[MIDDLEWARE]'
async def test_middleware_handles_exception(loop, test_client):
    """A middleware can trap a handler exception and build its own response."""
    async def handler(request):
        raise RuntimeError('Error text')

    @web.middleware
    async def middleware(request, handler):
        with pytest.raises(RuntimeError) as excinfo:
            await handler(request)
        return web.Response(status=501,
                            text=str(excinfo.value) + '[MIDDLEWARE]')

    app = web.Application()
    app.middlewares.append(middleware)
    app.router.add_route('GET', '/', handler)
    client = await test_client(app)

    resp = await client.get('/')
    assert resp.status == 501
    body = await resp.text()
    assert body == 'Error text[MIDDLEWARE]'
async def test_middleware_chain(loop, test_client):
    """Later-registered middlewares run closer to the handler."""
    async def handler(request):
        return web.Response(text='OK')

    def make_middleware(num):
        @web.middleware
        async def middleware(request, handler):
            response = await handler(request)
            response.text = response.text + '[{}]'.format(num)
            return response
        return middleware

    app = web.Application()
    for num in (1, 2):
        app.middlewares.append(make_middleware(num))
    app.router.add_route('GET', '/', handler)
    client = await test_client(app)

    resp = await client.get('/')
    assert resp.status == 200
    body = await resp.text()
    assert body == 'OK[2][1]'
@pytest.fixture
def cli(loop, test_client):
    """Factory fixture: build a client for a four-route app plus the given
    extra middlewares (URL asserts disabled so raw paths pass through)."""
    def wrapper(extra_middlewares):
        app = web.Application()

        def ok(request):
            return web.Response(text="OK")

        for route in ('/resource1', '/resource2/',
                      '/resource1/a/b', '/resource2/a/b/'):
            app.router.add_route('GET', route, ok)
        app.middlewares.extend(extra_middlewares)
        return test_client(app, server_kwargs={'skip_url_asserts': True})
    return wrapper
class TestNormalizePathMiddleware:
    """Exercise web.normalize_path_middleware slash-handling options
    against the fixed route table built by the ``cli`` fixture."""

    @pytest.mark.parametrize("path, status", [
        ('/resource1', 200),
        ('/resource1/', 404),
        ('/resource2', 200),
        ('/resource2/', 200),
        ('/resource1?p1=1&p2=2', 200),
        ('/resource1/?p1=1&p2=2', 404),
        ('/resource2?p1=1&p2=2', 200),
        ('/resource2/?p1=1&p2=2', 200)
    ])
    async def test_add_trailing_when_necessary(
            self, path, status, cli):
        """append_slash on (default), merge_slashes off."""
        extra_middlewares = [
            web.normalize_path_middleware(merge_slashes=False)]
        client = await cli(extra_middlewares)

        resp = await client.get(path)
        assert resp.status == status

    @pytest.mark.parametrize("path, status", [
        ('/resource1', 200),
        ('/resource1/', 404),
        ('/resource2', 404),
        ('/resource2/', 200),
        ('/resource1?p1=1&p2=2', 200),
        ('/resource1/?p1=1&p2=2', 404),
        ('/resource2?p1=1&p2=2', 404),
        ('/resource2/?p1=1&p2=2', 200)
    ])
    async def test_no_trailing_slash_when_disabled(
            self, path, status, cli):
        """Both options off: paths must match the route table exactly."""
        extra_middlewares = [
            web.normalize_path_middleware(
                append_slash=False, merge_slashes=False)]
        client = await cli(extra_middlewares)

        resp = await client.get(path)
        assert resp.status == status

    @pytest.mark.parametrize("path, status", [
        ('/resource1/a/b', 200),
        ('//resource1//a//b', 200),
        ('//resource1//a//b/', 404),
        ('///resource1//a//b', 200),
        ('/////resource1/a///b', 200),
        ('/////resource1/a//b/', 404),
        ('/resource1/a/b?p=1', 200),
        ('//resource1//a//b?p=1', 200),
        ('//resource1//a//b/?p=1', 404),
        ('///resource1//a//b?p=1', 200),
        ('/////resource1/a///b?p=1', 200),
        ('/////resource1/a//b/?p=1', 404),
    ])
    async def test_merge_slash(self, path, status, cli):
        """merge_slashes on, append_slash off: duplicate slashes collapse."""
        extra_middlewares = [
            web.normalize_path_middleware(append_slash=False)]
        client = await cli(extra_middlewares)

        resp = await client.get(path)
        assert resp.status == status

    @pytest.mark.parametrize("path, status", [
        ('/resource1/a/b', 200),
        ('/resource1/a/b/', 404),
        ('//resource2//a//b', 200),
        ('//resource2//a//b/', 200),
        ('///resource1//a//b', 200),
        ('///resource1//a//b/', 404),
        ('/////resource1/a///b', 200),
        ('/////resource1/a///b/', 404),
        ('/resource2/a/b', 200),
        ('//resource2//a//b', 200),
        ('//resource2//a//b/', 200),
        ('///resource2//a//b', 200),
        ('///resource2//a//b/', 200),
        ('/////resource2/a///b', 200),
        ('/////resource2/a///b/', 200),
        ('/resource1/a/b?p=1', 200),
        ('/resource1/a/b/?p=1', 404),
        ('//resource2//a//b?p=1', 200),
        ('//resource2//a//b/?p=1', 200),
        ('///resource1//a//b?p=1', 200),
        ('///resource1//a//b/?p=1', 404),
        ('/////resource1/a///b?p=1', 200),
        ('/////resource1/a///b/?p=1', 404),
        ('/resource2/a/b?p=1', 200),
        ('//resource2//a//b?p=1', 200),
        ('//resource2//a//b/?p=1', 200),
        ('///resource2//a//b?p=1', 200),
        ('///resource2//a//b/?p=1', 200),
        ('/////resource2/a///b?p=1', 200),
        ('/////resource2/a///b/?p=1', 200)
    ])
    async def test_append_and_merge_slash(self, path, status, cli):
        """Defaults: merge duplicate slashes and append a trailing slash."""
        extra_middlewares = [
            web.normalize_path_middleware()]
        client = await cli(extra_middlewares)

        resp = await client.get(path)
        assert resp.status == status
async def test_old_style_middleware(loop, test_client):
    """Old-style middleware factories still work but emit DeprecationWarning."""
    async def handler(request):
        return web.Response(body=b'OK')

    # Old style: a factory taking (app, handler) instead of @web.middleware.
    async def middleware_factory(app, handler):
        async def middleware(request):
            resp = await handler(request)
            assert 200 == resp.status
            resp.set_status(201)
            resp.text = resp.text + '[old style middleware]'
            return resp
        return middleware

    with pytest.warns(DeprecationWarning) as warning_checker:
        app = web.Application()
        app.middlewares.append(middleware_factory)
        app.router.add_route('GET', '/', handler)
        client = await test_client(app)
        resp = await client.get('/')
        assert 201 == resp.status
        txt = await resp.text()
        assert 'OK[old style middleware]' == txt

    assert len(warning_checker) == 1
    msg = str(warning_checker.list[0].message)
    # The warning embeds the factory's repr (qualname + address), so this
    # regex is tied to the local name ``middleware_factory``.
    assert re.match('^old-style middleware '
                    '"<function test_old_style_middleware.<locals>.'
                    'middleware_factory at 0x[0-9a-fA-F]+>" '
                    'deprecated, see #2252$',
                    msg)
async def test_mixed_middleware(loop, test_client):
    """New- and old-style middlewares can be mixed; each old one warns."""
    async def handler(request):
        return web.Response(body=b'OK')

    async def m_old1(app, handler):
        async def middleware(request):
            resp = await handler(request)
            resp.text += '[old style 1]'
            return resp
        return middleware

    @web.middleware
    async def m_new1(request, handler):
        resp = await handler(request)
        resp.text += '[new style 1]'
        return resp

    async def m_old2(app, handler):
        async def middleware(request):
            resp = await handler(request)
            resp.text += '[old style 2]'
            return resp
        return middleware

    @web.middleware
    async def m_new2(request, handler):
        resp = await handler(request)
        resp.text += '[new style 2]'
        return resp

    middlewares = m_old1, m_new1, m_old2, m_new2

    with pytest.warns(DeprecationWarning) as w:
        app = web.Application(middlewares=middlewares)
        app.router.add_route('GET', '/', handler)
        client = await test_client(app)
        resp = await client.get('/')
        assert 200 == resp.status
        txt = await resp.text()
        # Tags accumulate innermost-first, i.e. reverse registration order.
        assert 'OK[new style 2][old style 2][new style 1][old style 1]' == txt

    assert len(w) == 2
    # Warning text embeds each factory's qualname, so the regexes are tied
    # to the local names ``m_old1`` / ``m_old2``.
    tmpl = ('^old-style middleware '
            '"<function test_mixed_middleware.<locals>.'
            '{} at 0x[0-9a-fA-F]+>" '
            'deprecated, see #2252$')
    p1 = tmpl.format('m_old1')
    p2 = tmpl.format('m_old2')
    assert re.match(p2, str(w.list[0].message))
    assert re.match(p1, str(w.list[1].message))
async def test_old_style_middleware_class(loop, test_client):
    """An old-style middleware implemented as a callable class also warns."""
    async def handler(request):
        return web.Response(body=b'OK')

    class Middleware:
        async def __call__(self, app, handler):
            async def middleware(request):
                resp = await handler(request)
                assert 200 == resp.status
                resp.set_status(201)
                resp.text = resp.text + '[old style middleware]'
                return resp
            return middleware

    with pytest.warns(DeprecationWarning) as warning_checker:
        app = web.Application()
        app.middlewares.append(Middleware())
        app.router.add_route('GET', '/', handler)
        client = await test_client(app)
        resp = await client.get('/')
        assert 201 == resp.status
        txt = await resp.text()
        assert 'OK[old style middleware]' == txt

    assert len(warning_checker) == 1
    msg = str(warning_checker.list[0].message)
    # Regex is tied to the class name and this test's module/qualname.
    assert re.match('^old-style middleware '
                    '"<test_web_middleware.test_old_style_middleware_class.'
                    '<locals>.Middleware object '
                    'at 0x[0-9a-fA-F]+>" deprecated, see #2252$', msg)
async def test_new_style_middleware_class(loop, test_client):
    """A @web.middleware-decorated callable class is accepted with no warnings."""
    async def handler(request):
        return web.Response(body=b'OK')

    @web.middleware
    class Middleware:
        async def __call__(self, request, handler):
            response = await handler(request)
            assert response.status == 200
            response.set_status(201)
            response.text = response.text + '[new style middleware]'
            return response

    with pytest.warns(None) as caught:
        app = web.Application()
        app.middlewares.append(Middleware())
        app.router.add_route('GET', '/', handler)
        client = await test_client(app)

        resp = await client.get('/')
        assert resp.status == 201
        body = await resp.text()
        assert body == 'OK[new style middleware]'

    assert len(caught) == 0
async def test_new_style_middleware_method(loop, test_client):
    """A @web.middleware-decorated bound method is accepted with no warnings."""
    async def handler(request):
        return web.Response(body=b'OK')

    class Middleware:
        @web.middleware
        async def call(self, request, handler):
            response = await handler(request)
            assert response.status == 200
            response.set_status(201)
            response.text = response.text + '[new style middleware]'
            return response

    with pytest.warns(None) as caught:
        app = web.Application()
        app.middlewares.append(Middleware().call)
        app.router.add_route('GET', '/', handler)
        client = await test_client(app)

        resp = await client.get('/')
        assert resp.status == 201
        body = await resp.text()
        assert body == 'OK[new style middleware]'

    assert len(caught) == 0
| |
from symbol.builder import FasterRcnn as Detector
from symbol.builder import add_anchor_to_arg
from models.FPN.builder import MSRAResNet50V1FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRpnHead as RpnHead
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
    """Build the FPN Faster R-CNN configuration for train or test mode.

    :param is_train: True for the training pipeline, False for inference.
    :return: tuple of (General, KvstoreParam, RpnParam, RoiParam, BboxParam,
        DatasetParam, ModelParam, OptimizeParam, TestParam, transform,
        data_name, label_name, metric_list).
    """
    class General:
        log_frequency = 10
        name = __name__.rsplit("/")[-1].rsplit(".")[-1]
        batch_image = 2 if is_train else 1
        fp16 = False
        loader_worker = 8

    class KvstoreParam:
        kvstore = "nccl"
        batch_image = General.batch_image
        gpus = [0, 1, 2, 3, 4, 5, 6, 7]
        fp16 = General.fp16

    class NormalizeParam:
        normalizer = normalizer_factory(type="fixbn")

    class BackboneParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer

    class NeckParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer

    class RpnParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        batch_image = General.batch_image
        nnvm_proposal = True
        nnvm_rpn_target = False

        class anchor_generate:
            scale = (8,)
            ratio = (0.5, 1.0, 2.0)
            stride = (4, 8, 16, 32, 64)
            image_anchor = 256
            max_side = 1400

        class anchor_assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
            image_anchor = 256
            pos_fraction = 0.5

        class head:
            conv_channel = 256
            mean = (0, 0, 0, 0)
            std = (1, 1, 1, 1)

        class proposal:
            pre_nms_top_n = 2000 if is_train else 1000
            post_nms_top_n = 2000 if is_train else 1000
            nms_thr = 0.7
            min_bbox_side = 0

        class subsample_proposal:
            proposal_wo_gt = False
            image_roi = 512
            fg_fraction = 0.25
            fg_thr = 0.5
            bg_thr_hi = 0.5
            bg_thr_lo = 0.0

        class bbox_target:
            num_reg_class = 81
            class_agnostic = False
            weight = (1.0, 1.0, 1.0, 1.0)
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)

    class BboxParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        num_class = 1 + 80
        image_roi = 512
        batch_image = General.batch_image

        class regress_target:
            class_agnostic = False
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)

    class RoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 7
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4

    class DatasetParam:
        if is_train:
            image_set = ("coco_train2017", )
        else:
            image_set = ("coco_val2017", )

    backbone = Backbone(BackboneParam)
    neck = Neck(NeckParam)
    rpn_head = RpnHead(RpnParam)
    roi_extractor = RoiExtractor(RoiParam)
    bbox_head = BboxHead(BboxParam)
    detector = Detector()

    # Only build the symbols actually needed for the selected mode.
    if is_train:
        train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
        rpn_test_sym = None
        test_sym = None
    else:
        train_sym = None
        rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head)
        test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)

    class ModelParam:
        train_symbol = train_sym
        test_symbol = test_sym
        rpn_test_symbol = rpn_test_sym

        from_scratch = False
        random = True
        memonger = False
        memonger_until = "stage3_unit21_plus"

        class pretrain:
            prefix = "pretrain_model/resnet-v1-50"
            epoch = 0
            fixed_param = ["conv0", "stage1", "gamma", "beta"]

        def process_weight(sym, arg, aux):
            # Bake the per-stride anchors into the arg dict for nnvm proposal.
            for stride in RpnParam.anchor_generate.stride:
                add_anchor_to_arg(
                    sym, arg, aux, RpnParam.anchor_generate.max_side,
                    stride, RpnParam.anchor_generate.scale,
                    RpnParam.anchor_generate.ratio)

    class OptimizeParam:
        class optimizer:
            type = "sgd"
            # Linear LR scaling with total batch size (base 0.01 for 8 images).
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
            momentum = 0.9
            wd = 0.0001
            clip_gradient = None

        class schedule:
            begin_epoch = 0
            end_epoch = 6
            lr_iter = [60000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
                       80000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]

        class warmup:
            type = "gradual"
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
            iter = 500

    class TestParam:
        min_det_score = 0.05
        max_det_per_image = 100

        process_roidb = lambda x: x
        process_output = lambda x, y: x

        class model:
            prefix = "experiments/{}/checkpoint".format(General.name)
            epoch = OptimizeParam.schedule.end_epoch

        class nms:
            type = "nms"
            thr = 0.5

        class coco:
            # NOTE(review): dataset is coco_val2017 above but this points at
            # minival2014 annotations -- confirm which evaluation is intended.
            annotation = "data/coco/annotations/instances_minival2014.json"

    # data processing
    class NormParam:
        mean = (122.7717, 115.9465, 102.9801)  # RGB order
        std = (1.0, 1.0, 1.0)

    # data processing
    class ResizeParam:
        short = 800
        long = 1333

    class PadParam:
        short = 800
        long = 1333
        max_num_gt = 100

    class AnchorTarget2DParam:
        def __init__(self):
            self.generate = self._generate()

        class _generate:
            def __init__(self):
                self.stride = (4, 8, 16, 32, 64)
                self.short = (200, 100, 50, 25, 13)
                self.long = (334, 167, 84, 42, 21)
            # BUG FIX: was ``scales = (8)``, which is the int 8 rather than
            # a one-element tuple like RpnParam.anchor_generate.scale.
            scales = (8,)
            aspects = (0.5, 1.0, 2.0)

        class assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0

        class sample:
            image_anchor = 256
            pos_fraction = 0.5

    class RenameParam:
        mapping = dict(image="data")

    from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
        ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
        RenameRecord, Norm2DImage
    from models.FPN.input import PyramidAnchorTarget2D

    if is_train:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            Flip2DImageBbox(),
            Pad2DImageBbox(PadParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data"]
        label_name = ["gt_bbox", "im_info"]
        if not RpnParam.nnvm_rpn_target:
            # Anchor targets are computed on the CPU loader side.
            transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
            label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
    else:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data", "im_info", "im_id", "rec_id"]
        label_name = []

    import core.detection_metric as metric

    rpn_acc_metric = metric.AccWithIgnore(
        "RpnAcc",
        ["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    rpn_l1_metric = metric.L1(
        "RpnL1",
        ["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    # for bbox, the label is generated in network so it is an output
    box_acc_metric = metric.AccWithIgnore(
        "RcnnAcc",
        ["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    box_l1_metric = metric.L1(
        "RcnnL1",
        ["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]

    return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
        ModelParam, OptimizeParam, TestParam, \
        transform, data_name, label_name, metric_list
| |
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh 05/13/2015 Hacked user.py into template.py
#
"""
Template related classes for manipulating Certificate Templates
"""
from __future__ import unicode_literals, print_function, absolute_import
import json
from marklogic.exceptions import UnexpectedManagementAPIResponse
from marklogic.utilities.validators import assert_type
from marklogic.utilities.validators import validate_custom
from marklogic.models.certificate.request import Request
class Template:
    """
    The Template class encapsulates a MarkLogic representation of
    a certificate template.
    """
    def __init__(self, name, description, cert_request,
                 key_type="rsa", key_length=None, pass_phrase=None):
        """
        Create a new certificate template.

        :param name: The template name.
        :param description: The template description.
        :param cert_request: A certificate Request object.
        :param key_type: The key type; MarkLogic only supports "rsa".
        :param key_length: Optional key length, stored under options.
        :param pass_phrase: Optional passphrase, stored under options.
        """
        self._config = {
            "template-name": name,
            "template-description": description,
            "req": assert_type(cert_request, Request)
        }
        if key_type is not None:
            self._config['key-type'] = key_type
        if key_length is not None or pass_phrase is not None:
            options = {}
            if key_length is not None:
                options['key-length'] = key_length
            if pass_phrase is not None:
                # Fixed: was `optoins[...]`, which raised NameError whenever
                # a passphrase was supplied.
                options['pass-phrase'] = pass_phrase
            self._config['options'] = options
        # etag of the last server response; used for conditional deletes.
        self.etag = None

    def template_id(self):
        """
        The template ID, MarkLogic's internal identifier.

        :return: The template ID, or None if the template is unsaved.
        """
        return self._config.get('template-id')

    def template_name(self):
        """
        The template name.

        :return: The current template name.
        """
        return self._config.get('template-name')

    def set_template_name(self, value):
        """
        Set the template name.

        :param value: The new template name.
        :return: The Template object.
        """
        self._config['template-name'] = value
        return self

    def template_description(self):
        """
        The template description.

        :return: The current template description.
        """
        return self._config.get('template-description')

    def set_template_description(self, value):
        """
        Set the template description.

        :param value: The new template description.
        :return: The Template object.
        """
        self._config['template-description'] = value
        return self

    def template_version(self):
        """
        The template version.

        :return: The current template version.
        """
        return self._config.get('template-version')

    def key_type(self):
        """
        The key type.

        :return: The current key type.
        """
        return self._config.get('key-type')

    def set_key_type(self, value):
        """
        Set the key type.

        The key type must be `rsa`.

        :param value: The new key type.
        :return: The Template object.
        """
        # Fixed: `value is not 'rsa'` was an identity comparison that only
        # worked by accident of string interning; use equality.
        if value != 'rsa':
            validate_custom("The key-type must be 'rsa'")
        self._config['key-type'] = value
        return self

    def key_length(self):
        """
        The key length.

        :return: The current key length, or None if unset.
        """
        return self._config.get('options', {}).get('key-length')

    def set_key_length(self, value):
        """
        Set the key length.

        :param value: The new key length.
        :return: The Template object.
        """
        options = self._config.setdefault('options', {})
        options['key-length'] = value
        return self

    def pass_phrase(self):
        """
        The passphrase.

        :return: The current passphrase, or None if unset.
        """
        return self._config.get('options', {}).get('pass-phrase')

    def set_pass_phrase(self, value):
        """
        Set the passphrase.

        :param value: The new passphrase.
        :return: The Template object.
        """
        options = self._config.setdefault('options', {})
        options['pass-phrase'] = value
        return self

    def options(self):
        """
        The template options.

        The options are returned as a Python dictionary. Only the `key-length`
        and `pass-phrase` options are supported by MarkLogic at this time,
        but this method returns the entire dictionary.

        :return: The options dictionary, or None if unset.
        """
        return self._config.get('options')

    def set_options(self, value):
        """
        Set the template options.

        The options are stored in a Python dictionary. Only the `key-length`
        and `pass-phrase` options are supported by MarkLogic at this time,
        but this method allows you to set any dictionary of options you like.

        :param value: A dictionary of options.
        :return: The Template object.
        """
        self._config['options'] = value
        return self

    def req(self):
        """
        The certificate request.

        :return: The current certificate request, or None if unset.
        """
        return self._config.get('req')

    def set_req(self, value):
        """
        Set the certificate request.

        :param value: The certificate request.
        :return: The Template object.
        """
        self._config['req'] = assert_type(value, Request)
        return self

    def marshal(self):
        """
        Return a flat structure suitable for conversion to JSON or XML.

        :return: A hash of the keys in this object and their values, recursively.
        """
        struct = {}
        for key, value in self._config.items():
            # The embedded Request is itself an object; flatten it too.
            struct[key] = value._config if key == "req" else value
        return struct

    @classmethod
    def unmarshal(cls, config):
        """
        Construct a new Template from a flat structure. This method is
        principally used to construct an object from a Management API
        payload. The configuration passed in is largely assumed to be
        valid.

        :param: config: A hash of properties
        :return: A newly constructed Template object with the specified properties.
        """
        # Start from a throwaway valid template, then overwrite its config.
        result = Template("temp", "temp", Request(organizationName="temp"))
        result._config = config
        if 'req' in result._config:
            result._config['req'] = Request.unmarshal(result._config['req'])
        return result

    def create(self, connection):
        """
        Creates the certificate template on the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Template object
        """
        uri = connection.uri("certificate-templates")
        struct = self.marshal()
        response = connection.post(uri, payload=struct)
        # All well and good, but we need to know what ID was assigned,
        # so immediately read back the properties of the new template.
        uri = "{0}://{1}:{2}{3}/properties" \
            .format(connection.protocol, connection.host,
                    connection.management_port,
                    response.headers['location'])
        response = connection.get(uri)
        if response.status_code == 200:
            result = Template.unmarshal(json.loads(response.text))
            self._config = result._config
        else:
            raise UnexpectedManagementAPIResponse(response.text)
        return self

    def read(self, connection):
        """
        Loads the Template from the MarkLogic server. This will refresh
        the properties of the object.

        :param connection: The connection to a MarkLogic server
        :return: The Template object, or None if it no longer exists
        """
        if self.template_id() is None:
            validate_custom("Cannot read an unsaved template")
        temp = Template.lookup(connection, self.template_id())
        # Fixed: previously tested an undefined name `auth`, which raised
        # NameError on every call.
        if temp is None:
            return None
        self._config = temp._config
        return self

    def update(self, connection):
        """
        Updates the certificate template on the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Template object
        """
        uri = connection.uri("certificate-templates", self.template_id())
        struct = self.marshal()
        # The server assigns these; they must not appear in the payload.
        # pop() (rather than del) tolerates templates never read back.
        struct.pop('template-version', None)
        struct.pop('template-id', None)
        response = connection.put(uri, payload=struct)
        return self

    def delete(self, connection):
        """
        Deletes the Template from the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Template object
        """
        uri = connection.uri("certificate-templates", self.template_id(),
                             properties=None)
        response = connection.delete(uri, etag=self.etag)
        return self

    # ============================================================

    def generate_template_certificate_authority(self, connection, valid_for):
        """
        Attempts to generate a template certificate authority.

        :param connection: The connection to a MarkLogic server
        :param valid_for: The number of days that the template should be valid.
        :return: The Template object.
        """
        struct = {
            'operation': 'generate-template-certificate-authority',
            'valid-for': assert_type(valid_for, int)
        }
        uri = connection.uri("certificate-templates", self.template_id(),
                             properties=None)
        response = connection.post(uri, payload=struct)
        if response.status_code != 201:
            raise UnexpectedManagementAPIResponse(response.text)
        return self

    def generate_temporary_certificate(self, connection, valid_for,
                                       common_name, dns_name, ip_addr,
                                       if_necessary=True):
        """
        Attempts to generate a temporary certificate.

        If `if_necessary` is true, the server will only generate a new
        temporary certificate if it does not already have one for the
        specified server.

        :param connection: The connection to a MarkLogic server
        :param valid_for: The number of days that the template should be valid.
        :param common_name: The common name for the certificate ("Example Corp")
        :param dns_name: The DNS name for the cert ("example.com")
        :param ip_addr: The IP address of the server
        :param if_necessary: Only generate the cert if it's necessary
        :return: The Template object.
        """
        struct = {
            'operation': 'generate-temporary-certificate',
            'valid-for': assert_type(valid_for, int),
            'common-name': common_name,
            'dns-name': dns_name,
            'ip-addr': ip_addr,
            'if-necessary': 'true' if if_necessary else 'false'
        }
        uri = connection.uri("certificate-templates", self.template_id(),
                             properties=None)
        response = connection.post(uri, payload=struct)
        if response.status_code != 201:
            raise UnexpectedManagementAPIResponse(response.text)
        return self

    def get_certificate(self, connection,
                        common_name, dns_name=None, ip_addr=None):
        """
        Attempts to get the relevant certificate.

        :param connection: The connection to a MarkLogic server
        :param common_name: The common name for the certificate ("Example Corp")
        :param dns_name: The DNS name for the cert ("example.com")
        :param ip_addr: The IP address of the server
        :return: The certificate or None if it isn't found.
        """
        struct = {
            'operation': 'get-certificate',
            'common-name': common_name
        }
        if dns_name is not None:
            struct['dns-name'] = dns_name
        # Fixed: was `if ip-addr is not None`, a subtraction of two
        # undefined names that raised NameError.
        if ip_addr is not None:
            struct['ip-addr'] = ip_addr
        uri = connection.uri("certificate-templates", self.template_id(),
                             properties=None)
        response = connection.post(uri, payload=struct)
        if response.status_code == 200:
            return json.loads(response.text)
        else:
            return None

    def get_certificates_for_template(self, connection):
        """
        Get a list of the certificates for this template.

        :param connection: The connection to a MarkLogic server
        :return: The certificate list, or None on a non-200 response.
        """
        struct = {
            'operation': 'get-certificates-for-template',
        }
        uri = connection.uri("certificate-templates", self.template_id(),
                             properties=None)
        response = connection.post(uri, payload=struct)
        if response.status_code == 200:
            return json.loads(response.text)
        else:
            return None

    # The following operations are not yet implemented.

    def get_pending_certificate_request(self, connection,
                                        common_name, dns_name=None,
                                        ip_addr=None):
        pass

    def insert_host_certificates(self, connection, certs, pkeys):
        pass

    def need_certificate(self, connection,
                         common_name, dns_name=None, ip_addr=None):
        pass

    def generate_certificate_request(self, connection,
                                     common_name, dns_name=None, ip_addr=None):
        pass

    def get_template_certificate_authority(self, connection):
        pass

    # ============================================================

    @classmethod
    def list(cls, connection, include_names=False):
        """
        List all the certificate templates.

        If `include_names` is `True`, then the values in the list will be
        structured values consisting of the template ID and the template
        name separated by a "|".

        :param connection: The connection to a MarkLogic server
        :param include_names: Indicates if structured names should be returned.
        :return: A list of certificate template IDs.
        """
        uri = connection.uri("certificate-templates")
        response = connection.get(uri)
        if response.status_code != 200:
            raise UnexpectedManagementAPIResponse(response.text)
        results = []
        json_doc = json.loads(response.text)
        for item in \
                json_doc['certificate-templates-default-list']['list-items']['list-item']:
            if include_names:
                results.append("{0}|{1}".format(item['idref'], item['nameref']))
            else:
                results.append(item['idref'])
        return results

    @classmethod
    def lookup(cls, connection, tempid):
        """
        Look up an individual certificate template by template id.

        :param connection: The connection to the MarkLogic database
        :param tempid: The certificate template id
        :return: The Template object, or None if it doesn't exist
        """
        uri = connection.uri("certificate-templates", tempid)
        response = connection.get(uri)
        if response.status_code == 200:
            result = Template.unmarshal(json.loads(response.text))
            return result
        else:
            return None
| |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connects to a Cloud SQL instance."""
import datetime
from apitools.base.protorpclite import util as protorpc_util
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.sql import constants
from googlecloudsdk.api_lib.sql import network
from googlecloudsdk.api_lib.sql import operations
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import retry
def _WhitelistClientIP(instance_ref, sql_client, sql_messages, resources):
  """Add CLIENT_IP to the authorized networks list.

  Makes an API call to add CLIENT_IP to the authorized networks list.
  The server knows to interpret the string CLIENT_IP as the address with which
  the client reaches the server. This IP will be whitelisted for 1 minute.

  Args:
    instance_ref: resources.Resource, The instance we're connecting to.
    sql_client: apitools.BaseApiClient, A working client for the sql version
        to be used.
    sql_messages: module, The module that defines the messages for the sql
        version to be used.
    resources: resources.Registry, The registry that can create resource refs
        for the sql version to be used.

  Returns:
    string, The name of the authorized network rule. Callers can use this name
    to find out the IP the client reached the server with.

  Raises:
    HttpException: An http error response was received while executing api
        request.
    ToolException: Server did not complete the whitelisting operation in time.
  """
  # Timestamp the rule name so repeated connects create distinct entries.
  now = datetime.datetime.now(
      protorpc_util.TimeZoneOffset(datetime.timedelta(0)))
  acl_name = 'sql connect at time {0}'.format(now)
  client_acl = sql_messages.AclEntry(
      name=acl_name,
      expirationTime=now + datetime.timedelta(minutes=1),
      value='CLIENT_IP')

  # Fetch the current instance settings so the new ACL can be appended.
  try:
    instance = sql_client.instances.Get(
        sql_messages.SqlInstancesGetRequest(
            project=instance_ref.project,
            instance=instance_ref.instance))
  except apitools_exceptions.HttpError as error:
    raise exceptions.HttpException(error)

  instance.settings.ipConfiguration.authorizedNetworks.append(client_acl)
  patch_result = sql_client.instances.Patch(
      sql_messages.SqlInstancesPatchRequest(
          databaseInstance=instance,
          project=instance_ref.project,
          instance=instance_ref.instance))

  # Block until the patch operation completes server-side.
  operation_ref = resources.Create(
      'sql.operations',
      operation=patch_result.name,
      project=instance_ref.project,
      instance=instance_ref.instance)
  message = 'Whitelisting your IP for incoming connection for 1 minute'
  operations.OperationsV1Beta4.WaitForOperation(
      sql_client, operation_ref, message)

  return acl_name
def _GetClientIP(instance_ref, sql_client, acl_name):
  """Retrieves given instance and extracts its client ip."""
  request = sql_client.MESSAGES_MODULE.SqlInstancesGetRequest(
      project=instance_ref.project,
      instance=instance_ref.instance)
  instance_info = sql_client.instances.Get(request)
  acl_entries = instance_info.settings.ipConfiguration.authorizedNetworks
  # The rule we created earlier carries the client IP as its value.
  client_ip = next(
      (entry.value for entry in acl_entries if entry.name == acl_name),
      None)
  return instance_info, client_ip
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Connect(base.Command):
  """Connects to a Cloud SQL instance."""

  detailed_help = {
      'DESCRIPTION': '{description}',
      'EXAMPLES': """\
          To connect to a Cloud SQL instance, run:

            $ {command} my-instance --user=root
          """,
  }

  @staticmethod
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use it to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    parser.add_argument(
        'instance',
        completion_resource='sql.instances',
        help='Cloud SQL instance ID.')
    parser.add_argument(
        '--user', '-u',
        required=False,
        help='Cloud SQL instance user to connect as.')

  def Run(self, args):
    """Connects to a Cloud SQL instance.

    Whitelists the client's IP for one minute, discovers which IP the
    server saw, locates a local mysql/psql client binary, and finally
    replaces the current process with the database client.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      If no exception is raised this method does not return. A new process is
      started and the original one is killed.

    Raises:
      HttpException: An http error response was received while executing api
          request.
      ToolException: An error other than http error occured while executing the
          command.
    """
    sql_client = self.context['sql_client']
    sql_messages = self.context['sql_messages']
    resources = self.context['registry']

    validate.ValidateInstanceName(args.instance)
    instance_ref = resources.Parse(args.instance, collection='sql.instances')

    acl_name = _WhitelistClientIP(instance_ref, sql_client, sql_messages,
                                  resources)

    # Get the client IP that the server sees. Sadly we can only do this by
    # checking the name of the authorized network rule.
    # The server may take a moment to reflect the new rule, hence the retries.
    retryer = retry.Retryer(max_retrials=2, exponential_sleep_multiplier=2)
    try:
      instance_info, client_ip = retryer.RetryOnResult(
          _GetClientIP,
          [instance_ref, sql_client, acl_name],
          should_retry_if=lambda x, s: x[1] is None,  # client_ip is None
          sleep_ms=500)
    except retry.RetryException:
      raise exceptions.ToolException('Could not whitelist client IP. Server '
                                     'did not reply with the whitelisted IP.')

    # Check for the mysql or psql executable based on the db version.
    db_type = instance_info.databaseVersion.split('_')[0]
    exe_name = constants.DB_EXE.get(db_type, 'mysql')
    exe = files.FindExecutableOnPath(exe_name)
    if not exe:
      raise exceptions.ToolException(
          '{0} client not found.  Please install a {1} client and make sure '
          'it is in PATH to be able to connect to the database instance.'
          .format(exe_name.title(), exe_name))

    # Check the version of IP and decide if we need to add ipv4 support.
    ip_type = network.GetIpVersion(client_ip)
    if ip_type == network.IP_VERSION_4:
      if instance_info.settings.ipConfiguration.ipv4Enabled:
        ip_address = instance_info.ipAddresses[0].ipAddress
      else:
        # TODO(user): ask user if we should enable ipv4 addressing
        message = ('It seems your client does not have ipv6 connectivity and '
                   'the database instance does not have an ipv4 address. '
                   'Please request an ipv4 address for this database instance.')
        raise exceptions.ToolException(message)
    elif ip_type == network.IP_VERSION_6:
      ip_address = instance_info.ipv6Address
    else:
      raise exceptions.ToolException('Could not connect to SQL server.')

    # We have everything we need, time to party!
    # Exec replaces this process with the database client; the password
    # flag makes the client prompt interactively.
    flags = constants.EXE_FLAGS[exe_name]
    sql_args = [exe_name, flags['hostname'], ip_address]
    if args.user:
      sql_args.extend([flags['user'], args.user])
    sql_args.append(flags['password'])
    execution_utils.Exec(sql_args)
| |
from __future__ import print_function
from ctypes import py_object
class Array():
    """
    Fixed-size array backed by a ctypes py_object buffer.

    The size is either given explicitly via the `size` keyword or taken
    from the number of initial items; unused slots are filled with None.
    """
    def __init__(self, *args, **kwargs):
        # An explicit `size` keyword wins over the item count.
        self.size = kwargs['size'] if 'size' in kwargs else len(args)
        if len(args) > self.size:
            raise OverflowError('Array size is too small')
        self.array = (self.size * py_object)()
        for idx, item in enumerate(args):
            self.array[idx] = item
        # Pad the remainder so every slot is readable.
        for idx in range(len(args), self.size):
            self.array[idx] = None

    def __getitem__(self, index):
        if not 0 <= index < self.size:
            raise IndexError('Array index out of bounds')
        return self.array[index]

    def __setitem__(self, index, value):
        if not 0 <= index < self.size:
            raise IndexError('Array index out of bounds')
        self.array[index] = value
class AVLNode():
    """A single node of an AVL tree."""
    def __init__(self, key, value):
        self.key = key        # search key
        self.value = value    # payload; used as a hit counter by AVLTree.search
        self.left = None      # left child (keys < self.key)
        self.right = None     # right child (keys > self.key)
        self.height = 0       # height of the subtree rooted here (leaf == 0)
class AVLTree():
    """
    Self-balancing binary search tree (AVL).

    Note on naming: `left_rotate` promotes the *left* child (a conventional
    "right rotation") and `right_rotate` promotes the *right* child. The
    names are kept as-is for interface compatibility.
    """
    def __init__(self):
        self.root = None
        self.num_keys = 0

    def insert(self, key, value):
        """
        insert key value pair into avl tree
        """
        self.root = self.__insert(key, value, self.root)

    def remove(self, key):
        """Remove key from the tree; no-op if the key is absent."""
        self.root = self.__remove(key, self.root)

    def search(self, key):
        """Return the node for key (incrementing its value), or None."""
        return self.__search(self.root, key)

    def traverse(self):
        """Print the tree in the http://mshang.ca/syntree/ bracket format."""
        if self.root:
            self.__traverse(self.root)
            print('')
        else:
            print('[]')

    def __insert(self, key, value, node):
        # Recursive BST insert followed by rebalancing on the way up.
        if not node:
            self.num_keys += 1
            return AVLNode(key, value)
        if key < node.key:
            node.left = self.__insert(key, value, node.left)
        elif key > node.key:
            node.right = self.__insert(key, value, node.right)
        else:
            # Duplicate keys are ignored.
            return node
        node.height = max(self.height(node.left), self.height(node.right)) + 1
        # Fixed: the old code chose single vs. double rotation by comparing
        # the new key against node.key, which is always true on this path,
        # so the double-rotation cases never ran and the tree could be left
        # violating the AVL invariant.
        return self.__rebalance(node)

    def __rebalance(self, node):
        # Restore |balance| <= 1 at `node` with at most two rotations.
        bal = self.balance(node)
        if bal == 2:
            # Left-heavy; left-right case needs an inner rotation first.
            if self.balance(node.left) < 0:
                node.left = self.right_rotate(node.left)
            return self.left_rotate(node)
        if bal == -2:
            # Right-heavy; right-left case needs an inner rotation first.
            if self.balance(node.right) > 0:
                node.right = self.left_rotate(node.right)
            return self.right_rotate(node)
        return node

    def balance(self, node):
        """
        get difference in heights of sibling subtrees
        """
        if not node:
            return 0
        return self.height(node.left) - self.height(node.right)

    def height(self, node):
        """Height of the subtree rooted at node; -1 for an empty subtree."""
        if not node:
            return -1
        return node.height

    def left_rotate(self, node):
        # Promote node.left; heights of the two moved nodes are recomputed.
        left = node.left
        node.left = left.right
        left.right = node
        node.height = max(self.height(node.left), self.height(node.right)) + 1
        left.height = max(self.height(left.left), self.height(left.right)) + 1
        return left

    def right_rotate(self, node):
        # Promote node.right; mirror image of left_rotate.
        right = node.right
        node.right = right.left
        right.left = node
        node.height = max(self.height(node.left), self.height(node.right)) + 1
        right.height = max(self.height(right.left), self.height(right.right)) + 1
        return right

    def __remove(self, key, node):
        if not node:
            return None
        if key < node.key:
            node.left = self.__remove(key, node.left)
        elif key > node.key:
            node.right = self.__remove(key, node.right)
        elif node.left and node.right:
            # Two children: copy the in-order successor here, then delete
            # the successor from the right subtree.
            successor = node.right
            while successor.left:
                successor = successor.left
            node.key = successor.key
            node.value = successor.value
            node.right = self.__remove(successor.key, node.right)
        else:
            # Zero or one child: splice the child (possibly None) in place.
            # Fixed: the old code copied the child's payload into `node`
            # but left the child linked in, duplicating it in the tree.
            self.num_keys -= 1
            node = node.left or node.right
        if not node:
            return None
        node.height = max(self.height(node.left), self.height(node.right)) + 1
        # Fixed: the old code rotated when |balance| == 1, which is a legal
        # AVL state; rebalancing is only needed at |balance| == 2.
        return self.__rebalance(node)

    def __search(self, node, key):
        # none
        if not node:
            return None
        # key is at current node
        elif node.key == key:
            node.value += 1
            return node
        # key is in left subtree
        elif key < node.key:
            return self.__search(node.left, key)
        # key is in right subtree
        else:
            return self.__search(node.right, key)

    def __traverse(self, node):
        """
        rendering for http://mshang.ca/syntree/
        """
        print('[', end='')
        print('<{0},{1}>'.format(node.key, node.value), end='')
        if node.left:
            self.__traverse(node.left)
        if node.right:
            self.__traverse(node.right)
        print(']', end='')

    def find_max(self):
        """
        get node with max value
        """
        if self.root:
            return self.__find_max(self.root)
        return None

    def __find_max(self, node):
        if not node:
            return node
        left = self.__find_max(node.left)
        right = self.__find_max(node.right)
        # left is greater
        if left and left.value > node.value:
            return left
        # right is greater
        if right and right.value > node.value:
            return right
        # parent is greater
        return node

    def reset_values(self):
        """Reset every node's value (hit counter) to 0."""
        if self.root:
            self.__reset_values(self.root)

    def __reset_values(self, node):
        if not node:
            return
        node.value = 0
        self.__reset_values(node.left)
        self.__reset_values(node.right)
class AVLHashTable():
    """
    Hash table of 256 buckets where each occupied bucket is an AVLTree.

    Collisions therefore cost O(log k) rather than O(k) per bucket.
    """
    def __init__(self):
        self.num_keys = 0
        self.size = 2 ** 8
        self.array = Array(size=self.size)

    def insert(self, key, value):
        """Insert a key/value pair, creating the bucket tree on demand."""
        slot = self.__hash(key)
        bucket = self.array[slot]
        if not bucket:
            bucket = AVLTree()
            self.array[slot] = bucket
        bucket.insert(key, value)
        self.num_keys += 1

    def remove(self, key):
        """Remove a key; empty bucket trees are discarded."""
        slot = self.__hash(key)
        bucket = self.array[slot]
        if bucket:
            bucket.remove(key)
            self.num_keys -= 1
            if bucket.num_keys == 0:
                self.array[slot] = None

    def search(self, key):
        """Return the node for key, or None if absent."""
        bucket = self.array[self.__hash(key)]
        return bucket.search(key) if bucket else None

    def traverse(self):
        pass

    def __hash(self, key):
        # Simple polynomial rolling hash over the key's characters.
        digest = 0
        for ch in key:
            digest = (digest * 29) + ord(ch)
        return digest % self.size

    def find_max(self):
        """Return the node with the largest value across all buckets."""
        best = None
        first = 0
        # Seed `best` from the first occupied bucket...
        for slot in range(self.size):
            if self.array[slot]:
                first = slot
                best = self.array[slot].find_max()
                break
        # ...then challenge it with the remaining buckets.
        for slot in range(first + 1, self.size):
            bucket = self.array[slot]
            if bucket:
                candidate = bucket.find_max()
                if candidate and candidate.value > best.value:
                    best = candidate
        return best

    def reset_values(self):
        """Reset the value counters in every bucket tree."""
        for slot in range(self.size):
            bucket = self.array[slot]
            if bucket:
                bucket.reset_values()
class HashTable():
    """
    Top-level hash table of 65536 buckets, each an AVLHashTable.

    Two-level hashing (47- and 29-multiplier polynomial hashes) spreads
    keys before they ever reach an AVL tree.
    """
    def __init__(self):
        self.num_keys = 0
        self.size = 2 ** 16
        self.array = Array(size=self.size)

    def insert(self, key, value):
        """Insert a key/value pair, creating the bucket table on demand."""
        slot = self.__hash(key)
        bucket = self.array[slot]
        if not bucket:
            bucket = AVLHashTable()
            self.array[slot] = bucket
        bucket.insert(key, value)
        self.num_keys += 1

    def remove(self, key):
        """Remove a key; empty bucket tables are discarded."""
        slot = self.__hash(key)
        bucket = self.array[slot]
        if bucket:
            bucket.remove(key)
            self.num_keys -= 1
            if bucket.num_keys == 0:
                self.array[slot] = None

    def search(self, key):
        """Return the node for key, or None if absent."""
        bucket = self.array[self.__hash(key)]
        return bucket.search(key) if bucket else None

    def traverse(self):
        pass

    def __hash(self, key):
        # Polynomial rolling hash; multiplier differs from the inner
        # table's so the two levels decorrelate.
        digest = 0
        for ch in key:
            digest = (digest * 47) + ord(ch)
        return digest % self.size

    def find_max(self):
        """Return the node with the largest value across all buckets."""
        best = None
        first = 0
        # Seed `best` from the first occupied bucket...
        for slot in range(self.size):
            if self.array[slot]:
                first = slot
                best = self.array[slot].find_max()
                break
        # ...then challenge it with the remaining buckets.
        for slot in range(first + 1, self.size):
            bucket = self.array[slot]
            if bucket:
                candidate = bucket.find_max()
                if candidate and candidate.value > best.value:
                    best = candidate
        return best

    def reset_values(self):
        """
        reset all values to 0
        """
        for slot in range(self.size):
            bucket = self.array[slot]
            if bucket:
                bucket.reset_values()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from importlib import import_module
import six
from six import moves
import django
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.core import urlresolvers
import horizon
from horizon import base
from horizon import conf
from horizon.test import helpers as test
from horizon.test.test_dashboards.cats.dashboard import Cats
from horizon.test.test_dashboards.cats.kittens.panel import Kittens
from horizon.test.test_dashboards.cats.tigers.panel import Tigers
from horizon.test.test_dashboards.dogs.dashboard import Dogs
from horizon.test.test_dashboards.dogs.puppies.panel import Puppies
class MyDash(horizon.Dashboard):
    """Minimal dashboard fixture used by the registry tests below."""
    name = "My Dashboard"
    slug = "mydash"
    default_panel = "myslug"
class MyPanel(horizon.Panel):
    """Panel fixture; reuses the kittens URLconf for its views."""
    name = "My Panel"
    slug = "myslug"
    urls = 'horizon.test.test_dashboards.cats.kittens.urls'
class AdminPanel(horizon.Panel):
    """Panel fixture gated behind the 'horizon.test' permission."""
    name = "Admin Panel"
    slug = "admin_panel"
    permissions = ("horizon.test",)
    urls = 'horizon.test.test_dashboards.cats.kittens.urls'
class RbacNoAccessPanel(horizon.Panel):
    """Panel fixture whose RBAC check always denies access."""
    name = "RBAC Panel No"
    slug = "rbac_panel_no"

    def allowed(self, context):
        # Unconditionally deny, regardless of the request context.
        return False
class RbacYesAccessPanel(horizon.Panel):
    """Panel fixture that inherits the default (allow) RBAC behavior."""
    name = "RBAC Panel Yes"
    slug = "rbac_panel_yes"
class BaseHorizonTests(test.TestCase):
    """Base test case that swaps the Horizon registry to the test fixtures.

    setUp installs the cats/dogs test dashboards; tearDown rebuilds the
    Horizon singleton and re-registers whatever was discovered originally.
    """

    def setUp(self):
        super(BaseHorizonTests, self).setUp()
        # Adjust our horizon config and register our custom dashboards/panels.
        self.old_default_dash = settings.HORIZON_CONFIG['default_dashboard']
        settings.HORIZON_CONFIG['default_dashboard'] = 'cats'
        self.old_dashboards = settings.HORIZON_CONFIG['dashboards']
        settings.HORIZON_CONFIG['dashboards'] = ('cats', 'dogs')
        base.Horizon.register(Cats)
        base.Horizon.register(Dogs)
        Cats.register(Kittens)
        Cats.register(Tigers)
        Dogs.register(Puppies)
        # Trigger discovery, registration, and URLconf generation if it
        # hasn't happened yet.
        base.Horizon._urls()
        # Store our original dashboards
        self._discovered_dashboards = list(base.Horizon._registry)
        # Gather up and store our original panels for each dashboard
        self._discovered_panels = {}
        for dash in self._discovered_dashboards:
            panels = list(base.Horizon._registry[dash]._registry)
            self._discovered_panels[dash] = panels

    def tearDown(self):
        super(BaseHorizonTests, self).tearDown()
        # Restore our settings
        settings.HORIZON_CONFIG['default_dashboard'] = self.old_default_dash
        settings.HORIZON_CONFIG['dashboards'] = self.old_dashboards
        # Destroy our singleton and re-create it.
        base.HorizonSite._instance = None
        del base.Horizon
        base.Horizon = base.HorizonSite()
        # Reload the convenience references to Horizon stored in __init__
        moves.reload_module(import_module("horizon"))
        # Re-register our original dashboards and panels.
        # This is necessary because autodiscovery only works on the first
        # import, and calling reload introduces innumerable additional
        # problems. Manual re-registration is the only good way for testing.
        self._discovered_dashboards.remove(Cats)
        self._discovered_dashboards.remove(Dogs)
        for dash in self._discovered_dashboards:
            base.Horizon.register(dash)
            for panel in self._discovered_panels[dash]:
                dash.register(panel)

    def _reload_urls(self):
        """Clears out the URL caches, and reloads the root urls module.

        It re-triggers the autodiscovery mechanism for Horizon.
        Allows URLs to be re-calculated after registering new dashboards.
        Useful only for testing and should never be used on a live site.
        """
        urlresolvers.clear_url_caches()
        moves.reload_module(import_module(settings.ROOT_URLCONF))
        base.Horizon._urls()
class HorizonTests(BaseHorizonTests):
    def test_registry(self):
        """Verify registration and autodiscovery work correctly.

        Please note that this implicitly tests that autodiscovery works
        by virtue of the fact that the dashboards listed in
        ``settings.INSTALLED_APPS`` are loaded from the start.
        """
        # Registration: only dashboards may be registered at site level,
        # and only class objects (not instances or strings) are accepted.
        self.assertEqual(2, len(base.Horizon._registry))
        horizon.register(MyDash)
        self.assertEqual(3, len(base.Horizon._registry))
        with self.assertRaises(ValueError):
            horizon.register(MyPanel)
        with self.assertRaises(ValueError):
            horizon.register("MyPanel")

        # Retrieval: lookup by slug and by class must return the same
        # registered instance.
        my_dash_instance_by_name = horizon.get_dashboard("mydash")
        self.assertIsInstance(my_dash_instance_by_name, MyDash)
        my_dash_instance_by_class = horizon.get_dashboard(MyDash)
        self.assertEqual(my_dash_instance_by_name, my_dash_instance_by_class)
        with self.assertRaises(base.NotRegistered):
            horizon.get_dashboard("fake")
        self.assertQuerysetEqual(horizon.get_dashboards(),
                                 ['<Dashboard: cats>',
                                  '<Dashboard: dogs>',
                                  '<Dashboard: mydash>'])

        # Removal: unregistering restores the registry to its prior state.
        self.assertEqual(3, len(base.Horizon._registry))
        horizon.unregister(MyDash)
        self.assertEqual(2, len(base.Horizon._registry))
        with self.assertRaises(base.NotRegistered):
            horizon.get_dashboard(MyDash)
    def test_site(self):
        """Verify the site singleton's text/repr and default-dashboard routing."""
        self.assertEqual("Horizon", six.text_type(base.Horizon))
        self.assertEqual("<Site: horizon>", repr(base.Horizon))
        dash = base.Horizon.get_dashboard('cats')
        self.assertEqual(dash, base.Horizon.get_default_dashboard())
        # A fresh user with no preferences lands on the default dashboard.
        test_user = User()
        self.assertEqual(dash.get_absolute_url(),
                         base.Horizon.get_user_home(test_user))
    def test_dashboard(self):
        """Verify dashboard registration, URLs, and panel-group handling."""
        cats = horizon.get_dashboard("cats")
        self.assertEqual(base.Horizon, cats._registered_with)
        self.assertQuerysetEqual(cats.get_panels(),
                                 ['<Panel: kittens>',
                                  '<Panel: tigers>'])
        self.assertEqual("/cats/", cats.get_absolute_url())
        self.assertEqual("Cats", cats.name)

        # Test registering a module with a dashboard that defines panels
        # as a panel group.
        cats.register(MyPanel)
        self.assertQuerysetEqual(cats.get_panel_groups()['other'],
                                 ['<Panel: myslug>'])

        # Test that panels defined as a tuple still return a PanelGroup
        dogs = horizon.get_dashboard("dogs")
        self.assertQuerysetEqual(dogs.get_panel_groups().values(),
                                 ['<PanelGroup: default>'])

        # Test registering a module with a dashboard that defines panels
        # as a tuple.
        dogs = horizon.get_dashboard("dogs")
        dogs.register(MyPanel)
        self.assertQuerysetEqual(dogs.get_panels(),
                                 ['<Panel: puppies>',
                                  '<Panel: myslug>'])
    def test_panels(self):
        """Verify a registered panel knows its dashboard and URL."""
        cats = horizon.get_dashboard("cats")
        tigers = cats.get_panel("tigers")
        self.assertEqual(cats, tigers._registered_with)
        self.assertEqual("/cats/tigers/", tigers.get_absolute_url())
def test_panel_without_slug_fails(self):
class InvalidPanel(horizon.Panel):
name = 'Invalid'
self.assertRaises(ImproperlyConfigured, InvalidPanel)
def test_registry_without_registerable_class_attr_fails(self):
class InvalidRegistry(base.Registry):
pass
self.assertRaises(ImproperlyConfigured, InvalidRegistry)
    def test_index_url_name(self):
        """``index_url_name`` controls URL resolution: a bogus name raises
        NoReverseMatch; restoring "index" brings back the default URL."""
        cats = horizon.get_dashboard("cats")
        tigers = cats.get_panel("tigers")
        tigers.index_url_name = "does_not_exist"
        with self.assertRaises(urlresolvers.NoReverseMatch):
            tigers.get_absolute_url()
        # Restore the default so later tests see the expected URL name.
        tigers.index_url_name = "index"
        self.assertEqual("/cats/tigers/", tigers.get_absolute_url())
def test_lazy_urls(self):
urlpatterns = horizon.urls[0]
self.assertIsInstance(urlpatterns, base.LazyURLPattern)
# The following two methods simply should not raise any exceptions
iter(urlpatterns)
reversed(urlpatterns)
    def test_horizon_test_isolation_1(self):
        """Isolation Test Part 1: sets a value."""
        # Part 2 (below) asserts this attribute does NOT survive between
        # test methods, proving per-test registry isolation.
        cats = horizon.get_dashboard("cats")
        cats.evil = True
    def test_horizon_test_isolation_2(self):
        """Isolation Test Part 2: The value set in part 1 should be gone."""
        cats = horizon.get_dashboard("cats")
        self.assertFalse(hasattr(cats, "evil"))
    def test_public(self):
        """Anonymous access to a login-only panel redirects to LOGIN_URL;
        an AJAX request instead gets a 401 with an X-Horizon-Location
        header pointing at the same redirect target."""
        dogs = horizon.get_dashboard("dogs")
        # Known to have no restrictions on it other than being logged in.
        puppies = dogs.get_panel("puppies")
        url = puppies.get_absolute_url()
        # Get a clean, logged out client instance.
        self.client.logout()
        resp = self.client.get(url)
        redirect_url = "?".join(['http://testserver' + settings.LOGIN_URL,
                                 "next=%s" % url])
        self.assertRedirects(resp, redirect_url)
        # Simulate ajax call
        resp = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Response should be HTTP 401 with redirect header
        self.assertEqual(401, resp.status_code)
        self.assertEqual(redirect_url,
                         resp["X-Horizon-Location"])
    def test_required_permissions(self):
        """A permission-protected panel returns 403 (plain and AJAX, plus
        the not_authorized template when following redirects) until the
        required permission is granted, after which it returns 200."""
        dash = horizon.get_dashboard("cats")
        panel = dash.get_panel('tigers')
        # Non-admin user
        self.assertQuerysetEqual(self.user.get_all_permissions(), [])
        resp = self.client.get(panel.get_absolute_url())
        self.assertEqual(403, resp.status_code)
        resp = self.client.get(panel.get_absolute_url(),
                               follow=False,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(403, resp.status_code)
        # Test insufficient permissions for logged-in user
        resp = self.client.get(panel.get_absolute_url(), follow=True)
        self.assertEqual(403, resp.status_code)
        self.assertTemplateUsed(resp, "not_authorized.html")
        # Set roles for admin user
        self.set_permissions(permissions=['test'])
        resp = self.client.get(panel.get_absolute_url())
        self.assertEqual(200, resp.status_code)
        # Test modal form
        resp = self.client.get(panel.get_absolute_url(),
                               follow=False,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(200, resp.status_code)
    def test_ssl_redirect_by_proxy(self):
        """With SECURE_PROXY_SSL_HEADER set, a request carrying the proxy
        header is redirected to an https:// login URL; the setting is
        restored afterwards so other tests are unaffected."""
        dogs = horizon.get_dashboard("dogs")
        puppies = dogs.get_panel("puppies")
        url = puppies.get_absolute_url()
        redirect_url = "?".join([settings.LOGIN_URL,
                                 "next=%s" % url])
        self.client.logout()
        resp = self.client.get(url)
        # Django >= 1.9 prefixes assertRedirects targets with the test
        # server host, older versions accept the relative URL.
        if django.VERSION >= (1, 9):
            self.assertRedirects(resp, settings.TESTSERVER + redirect_url)
        else:
            self.assertRedirects(resp, redirect_url)
        # Set SSL settings for test server
        settings.SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL',
                                            'https')
        resp = self.client.get(url, HTTP_X_FORWARDED_PROTOCOL="https")
        self.assertEqual(302, resp.status_code)
        self.assertEqual('https://testserver:80%s' % redirect_url,
                         resp['location'])
        # Restore settings
        settings.SECURE_PROXY_SSL_HEADER = None
class GetUserHomeTests(BaseHorizonTests):
    """Test get_user_home parameters."""
    def setUp(self):
        # Save the setting we mutate so tearDown can restore it.
        self.orig_user_home = settings.HORIZON_CONFIG['user_home']
        # NOTE(review): super(BaseHorizonTests, ...) deliberately skips
        # BaseHorizonTests.setUp and calls the grandparent -- presumably
        # to avoid the dashboard registration machinery; confirm.
        super(BaseHorizonTests, self).setUp()
        self.original_username = "testname"
        self.test_user = User()
        self.test_user.username = self.original_username
    def tearDown(self):
        # Restore the mutated setting and refresh the lazy config.
        # NOTE(review): no super().tearDown() call here -- verify that is
        # intentional (mirrors the setUp short-circuit above).
        settings.HORIZON_CONFIG['user_home'] = self.orig_user_home
        conf.HORIZON_CONFIG._setup()
    def test_using_callable(self):
        # 'user_home' may be a callable taking the user object.
        def themable_user_fnc(user):
            return user.username.upper()
        settings.HORIZON_CONFIG['user_home'] = themable_user_fnc
        conf.HORIZON_CONFIG._setup()
        self.assertEqual(self.test_user.username.upper(),
                         base.Horizon.get_user_home(self.test_user))
    def test_using_module_function(self):
        # 'user_home' may be a dotted path to an importable function,
        # which receives the user object.
        module_func = 'django.utils.encoding.force_text'
        settings.HORIZON_CONFIG['user_home'] = module_func
        conf.HORIZON_CONFIG._setup()
        self.test_user.username = 'testname'
        self.assertEqual(self.original_username,
                         base.Horizon.get_user_home(self.test_user))
    def test_using_url(self):
        # 'user_home' may simply be a fixed URL string.
        fixed_url = "/url"
        settings.HORIZON_CONFIG['user_home'] = fixed_url
        conf.HORIZON_CONFIG._setup()
        self.assertEqual(fixed_url,
                         base.Horizon.get_user_home(self.test_user))
class CustomPanelTests(BaseHorizonTests):
    """Test customization of dashboards and panels.
    This tests customization using 'customization_module' to HORIZON_CONFIG.
    """
    def setUp(self):
        super(CustomPanelTests, self).setUp()
        # Point HORIZON_CONFIG at the customization module under test,
        # then rebuild the config and the URLconf so it takes effect.
        settings.HORIZON_CONFIG['customization_module'] = \
            'horizon.test.customization.cust_test1'
        # refresh config
        conf.HORIZON_CONFIG._setup()
        self._reload_urls()
    def tearDown(self):
        # Restore dash
        # Undo everything cust_test1 changed: dashboard name, the
        # unregistered Dogs dashboard and the removed panels.
        cats = horizon.get_dashboard("cats")
        cats.name = "Cats"
        horizon.register(Dogs)
        self._discovered_dashboards.append(Dogs)
        Dogs.register(Puppies)
        Cats.register(Tigers)
        super(CustomPanelTests, self).tearDown()
        settings.HORIZON_CONFIG.pop('customization_module')
        # refresh config
        conf.HORIZON_CONFIG._setup()
    def test_customize_dashboard(self):
        # cust_test1 renames "cats", strips its panels down to kittens,
        # and unregisters the "dogs" dashboard entirely.
        cats = horizon.get_dashboard("cats")
        self.assertEqual("WildCats", cats.name)
        self.assertQuerysetEqual(cats.get_panels(),
                                 ['<Panel: kittens>'])
        with self.assertRaises(base.NotRegistered):
            horizon.get_dashboard("dogs")
class CustomPermissionsTests(BaseHorizonTests):
    """Test customization of permissions on panels.
    This tests customization using 'customization_module' to HORIZON_CONFIG.
    """
    def setUp(self):
        # Install the customization module (which adds a permission to
        # the puppies panel) before the base setUp builds the URLconf.
        settings.HORIZON_CONFIG['customization_module'] = \
            'horizon.test.customization.cust_test2'
        # refresh config
        conf.HORIZON_CONFIG._setup()
        super(CustomPermissionsTests, self).setUp()
    def tearDown(self):
        # Restore permissions
        dogs = horizon.get_dashboard("dogs")
        puppies = dogs.get_panel("puppies")
        puppies.permissions = tuple([])
        super(CustomPermissionsTests, self).tearDown()
        settings.HORIZON_CONFIG.pop('customization_module')
        # refresh config
        conf.HORIZON_CONFIG._setup()
    def test_customized_permissions(self):
        """The customized panel is 403 until the permission is granted."""
        dogs = horizon.get_dashboard("dogs")
        panel = dogs.get_panel('puppies')
        # Non-admin user
        self.assertQuerysetEqual(self.user.get_all_permissions(), [])
        resp = self.client.get(panel.get_absolute_url())
        self.assertEqual(403, resp.status_code)
        resp = self.client.get(panel.get_absolute_url(),
                               follow=False,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(403, resp.status_code)
        # Test customized permissions for logged-in user
        resp = self.client.get(panel.get_absolute_url(), follow=True)
        self.assertEqual(403, resp.status_code)
        self.assertTemplateUsed(resp, "not_authorized.html")
        # Set roles for admin user
        self.set_permissions(permissions=['test'])
        resp = self.client.get(panel.get_absolute_url())
        self.assertEqual(200, resp.status_code)
        # Test modal form
        resp = self.client.get(panel.get_absolute_url(),
                               follow=False,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # Expected-first argument order for consistency with the rest of
        # the file (was reversed here).
        self.assertEqual(200, resp.status_code)
class RbacHorizonTests(test.TestCase):
    """Tests RBAC-gated access: a dashboard whose only panel denies
    access is itself inaccessible, while one with an allowed panel is not.
    """
    def setUp(self):
        super(RbacHorizonTests, self).setUp()
        # Adjust our horizon config and register our custom dashboards/panels.
        self.old_default_dash = settings.HORIZON_CONFIG['default_dashboard']
        settings.HORIZON_CONFIG['default_dashboard'] = 'cats'
        self.old_dashboards = settings.HORIZON_CONFIG['dashboards']
        settings.HORIZON_CONFIG['dashboards'] = ('cats', 'dogs')
        base.Horizon.register(Cats)
        base.Horizon.register(Dogs)
        Cats.register(RbacNoAccessPanel)
        Cats.default_panel = 'rbac_panel_no'
        Dogs.register(RbacYesAccessPanel)
        Dogs.default_panel = 'rbac_panel_yes'
        # Trigger discovery, registration, and URLconf generation if it
        # hasn't happened yet.
        base.Horizon._urls()
        # Store our original dashboards
        self._discovered_dashboards = list(base.Horizon._registry)
        # Gather up and store our original panels for each dashboard
        self._discovered_panels = {}
        for dash in self._discovered_dashboards:
            panels = list(base.Horizon._registry[dash]._registry)
            self._discovered_panels[dash] = panels
    def tearDown(self):
        super(RbacHorizonTests, self).tearDown()
        # Restore our settings
        settings.HORIZON_CONFIG['default_dashboard'] = self.old_default_dash
        settings.HORIZON_CONFIG['dashboards'] = self.old_dashboards
        # Destroy our singleton and re-create it.
        base.HorizonSite._instance = None
        del base.Horizon
        base.Horizon = base.HorizonSite()
        # Reload the convenience references to Horizon stored in __init__
        moves.reload_module(import_module("horizon"))
        # Reset Cats and Dogs default_panel to default values
        Cats.default_panel = 'kittens'
        Dogs.default_panel = 'puppies'
        # Re-register our original dashboards and panels.
        # This is necessary because autodiscovery only works on the first
        # import, and calling reload introduces innumerable additional
        # problems. Manual re-registration is the only good way for testing.
        self._discovered_dashboards.remove(Cats)
        self._discovered_dashboards.remove(Dogs)
        for dash in self._discovered_dashboards:
            base.Horizon.register(dash)
            for panel in self._discovered_panels[dash]:
                dash.register(panel)
    def test_rbac_panels(self):
        """cats (deny-all panel) is inaccessible; dogs (allow) is not."""
        context = {'request': self.request}
        cats = horizon.get_dashboard("cats")
        self.assertEqual(cats._registered_with, base.Horizon)
        self.assertQuerysetEqual(cats.get_panels(),
                                 ['<Panel: rbac_panel_no>'])
        self.assertFalse(cats.can_access(context))
        dogs = horizon.get_dashboard("dogs")
        self.assertEqual(dogs._registered_with, base.Horizon)
        self.assertQuerysetEqual(dogs.get_panels(),
                                 ['<Panel: rbac_panel_yes>'])
        self.assertTrue(dogs.can_access(context))
| |
# Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Based on test for openvswitch agent(test_ovs_neutron_agent.py).
#
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import copy
import mock
import netaddr
from oslo.config import cfg
import testtools
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.openstack.common import importutils
from neutron.plugins.common import constants as p_const
from neutron.tests.unit.ofagent import ofa_test_base
NOTIFIER = ('neutron.plugins.ml2.rpc.AgentNotifierApi')
def _mock_port(is_neutron=True, normalized_name=None):
p = mock.Mock()
p.is_neutron_port.return_value = is_neutron
if normalized_name:
p.normalized_port_name.return_value = normalized_name
return p
class CreateAgentConfigMap(ofa_test_base.OFAAgentTestBase):
    """Validation tests for create_agent_config_map() tunnel options."""
    def test_create_agent_config_map_succeeds(self):
        self.assertTrue(self.mod_agent.create_agent_config_map(cfg.CONF))
    def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
        # An ip address is required for tunneling but there is no default,
        # verify this for both gre and vxlan tunnels.
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
                              group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
                              group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)
    def test_create_agent_config_map_enable_tunneling(self):
        # Verify setting only enable_tunneling will default tunnel_type to GRE
        cfg.CONF.set_override('tunnel_types', None, group='AGENT')
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE])
    def test_create_agent_config_map_fails_no_local_ip(self):
        # An ip address is required for tunneling but there is no default
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)
    def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
        # Unknown tunnel type names are rejected.
        cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)
    def test_create_agent_config_map_multiple_tunnel_types(self):
        # Several valid tunnel types may be configured at once.
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
                              p_const.TYPE_VXLAN], group='AGENT')
        cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(cfgmap['tunnel_types'],
                         [p_const.TYPE_GRE, p_const.TYPE_VXLAN])
class TestOFANeutronAgentBridge(ofa_test_base.OFAAgentTestBase):
    """Tests for the agent's Bridge wrapper: datapath-id discovery,
    datapath lookup retries and OpenFlow setup."""
    def setUp(self):
        super(TestOFANeutronAgentBridge, self).setUp()
        self.br_name = 'bridge1'
        self.root_helper = 'fake_helper'
        self.ovs = self.mod_agent.Bridge(
            self.br_name, self.root_helper, self.ryuapp)
    def test_find_datapath_id(self):
        # find_datapath_id caches get_datapath_id's result on the bridge.
        with mock.patch.object(self.ovs, 'get_datapath_id',
                               return_value='12345'):
            self.ovs.find_datapath_id()
        self.assertEqual(self.ovs.datapath_id, '12345')
    def _fake_get_datapath(self, app, datapath_id):
        # Stand-in for ryu_api.get_datapath: fails (returns None) until
        # two retries have happened, then yields a mock datapath.
        if self.ovs.retry_count >= 2:
            datapath = mock.Mock()
            datapath.ofproto_parser = mock.Mock()
            return datapath
        self.ovs.retry_count += 1
        return None
    def test_get_datapath_normal(self):
        # Succeeds within retry_max; exactly two retries are consumed.
        self.ovs.retry_count = 0
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               new=self._fake_get_datapath):
            self.ovs.datapath_id = '0x64'
            self.ovs.get_datapath(retry_max=4)
        self.assertEqual(self.ovs.retry_count, 2)
    def test_get_datapath_retry_out_by_default_time(self):
        # Exhausting the configured retries exits the process (SystemExit).
        cfg.CONF.set_override('get_datapath_retry_times', 3, group='AGENT')
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               return_value=None) as mock_get_datapath:
            with testtools.ExpectedException(SystemExit):
                self.ovs.datapath_id = '0x64'
                self.ovs.get_datapath(retry_max=3)
            self.assertEqual(mock_get_datapath.call_count, 3)
    def test_get_datapath_retry_out_by_specified_time(self):
        # Same as above but with retry_max passed explicitly.
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               return_value=None) as mock_get_datapath:
            with testtools.ExpectedException(SystemExit):
                self.ovs.datapath_id = '0x64'
                self.ovs.get_datapath(retry_max=2)
            self.assertEqual(mock_get_datapath.call_count, 2)
    def test_setup_ofp_default_par(self):
        # Default setup_ofp uses OpenFlow13, the local controller and the
        # configured retry count.
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols'),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            self.ovs.setup_ofp()
            mock_set_protocols.assert_called_with('OpenFlow13')
            mock_set_controller.assert_called_with(['tcp:127.0.0.1:6633'])
            mock_get_datapath.assert_called_with(
                cfg.CONF.AGENT.get_datapath_retry_times)
            self.assertEqual(mock_find_datapath_id.call_count, 1)
    def test_setup_ofp_specify_par(self):
        # Explicit controller list, protocol and retry_max are passed
        # through unchanged.
        controller_names = ['tcp:192.168.10.10:1234', 'tcp:172.17.16.20:5555']
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols'),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            self.ovs.setup_ofp(controller_names=controller_names,
                               protocols='OpenFlow133',
                               retry_max=11)
            mock_set_protocols.assert_called_with('OpenFlow133')
            mock_set_controller.assert_called_with(controller_names)
            mock_get_datapath.assert_called_with(11)
            self.assertEqual(mock_find_datapath_id.call_count, 1)
    def test_setup_ofp_with_except(self):
        # A failure while configuring protocols exits the process.
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols',
                              side_effect=RuntimeError),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            with testtools.ExpectedException(SystemExit):
                self.ovs.setup_ofp()
class TestOFANeutronAgent(ofa_test_base.OFAAgentTestBase):
    def setUp(self):
        """Build an OFANeutronAgent with all external collaborators mocked
        (notifier, bridges, MAC lookup, looping call) and seed one fake
        physical network."""
        super(TestOFANeutronAgent, self).setUp()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)
        class MockFixedIntervalLoopingCall(object):
            # Replacement looping call that invokes the callback exactly
            # once, synchronously, instead of scheduling it.
            def __init__(self, f):
                self.f = f
            def start(self, interval=0):
                self.f()
        with contextlib.nested(
            mock.patch.object(self.mod_agent.OFANeutronAgent,
                              'setup_integration_br',
                              return_value=mock.Mock()),
            mock.patch.object(self.mod_agent.Bridge,
                              'get_local_port_mac',
                              return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.utils.get_interface_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.openstack.common.loopingcall.'
                       'FixedIntervalLoopingCall',
                       new=MockFixedIntervalLoopingCall)):
            self.agent = self.mod_agent.OFANeutronAgent(self.ryuapp, **kwargs)
        self.agent.sg_agent = mock.Mock()
        # Fake integration bridge plus one wired physical bridge.
        self.int_dp = self._mk_test_dp('int_br')
        self.agent.int_br = self._mk_test_br('int_br')
        self.agent.int_br.set_dp(self.int_dp)
        self.agent.phys_brs['phys-net1'] = self._mk_test_br('phys_br1')
        self.agent.phys_ofports['phys-net1'] = 777
        self.agent.int_ofports['phys-net1'] = 666
        self.datapath = self._mk_test_dp('phys_br')
    def _create_tunnel_port_name(self, tunnel_ip, tunnel_type):
        """Return '<type>-<ip as 8 hex digits>' for an IPv4 tunnel peer."""
        tunnel_ip_hex = '%08x' % netaddr.IPAddress(tunnel_ip, version=4)
        return '%s-%s' % (tunnel_type, tunnel_ip_hex)
    def mock_scan_ports(self, port_set=None, registered_ports=None,
                        updated_ports=None, port_tags_dict=None):
        """Run agent.scan_ports with ofport names and tags stubbed out."""
        port_tags_dict = port_tags_dict or {}
        with contextlib.nested(
            mock.patch.object(self.agent, '_get_ofport_names',
                              return_value=port_set),
            mock.patch.object(self.agent.int_br, 'get_port_tag_dict',
                              return_value=port_tags_dict)
        ):
            return self.agent.scan_ports(registered_ports, updated_ports)
    def test_scan_ports_returns_current_only_for_unchanged_ports(self):
        # No diff between current and registered ports -> only 'current'.
        vif_port_set = set([1, 3])
        registered_ports = set([1, 3])
        expected = {'current': vif_port_set}
        actual = self.mock_scan_ports(vif_port_set, registered_ports)
        self.assertEqual(expected, actual)
    def test_scan_ports_returns_port_changes(self):
        # New port 3 shows up as 'added', vanished port 2 as 'removed'.
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports)
        self.assertEqual(expected, actual)
    def _test_scan_ports_with_updated_ports(self, updated_ports):
        # Only still-present updated ports (here 4) survive into 'updated'.
        vif_port_set = set([1, 3, 4])
        registered_ports = set([1, 2, 4])
        expected = dict(current=vif_port_set, added=set([3]),
                        removed=set([2]), updated=set([4]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)
    def test_scan_ports_finds_known_updated_ports(self):
        self._test_scan_ports_with_updated_ports(set([4]))
    def test_scan_ports_ignores_unknown_updated_ports(self):
        # the port '5' was not seen on current ports. Hence it has either
        # never been wired or already removed and should be ignored
        self._test_scan_ports_with_updated_ports(set([4, 5]))
    def test_scan_ports_ignores_updated_port_if_removed(self):
        # Port 2 is both updated and removed; only 1 stays in 'updated'.
        vif_port_set = set([1, 3])
        registered_ports = set([1, 2])
        updated_ports = set([1, 2])
        expected = dict(current=vif_port_set, added=set([3]),
                        removed=set([2]), updated=set([1]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)
    def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
        # No add/remove diff: the result carries only current + updated.
        vif_port_set = set([1, 2, 3])
        registered_ports = set([1, 2, 3])
        updated_ports = set([2])
        expected = dict(current=vif_port_set, updated=set([2]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)
    def test_treat_devices_added_returns_true_for_missing_device(self):
        # An RPC failure while fetching details makes the method report
        # True, i.e. "resync needed".
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              side_effect=Exception()),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=[_mock_port(True, 'xxx')])):
            self.assertTrue(self.agent.treat_devices_added_or_updated(['xxx']))
    def _mock_treat_devices_added_updated(self, details, port, all_ports,
                                          func_name):
        """Mock treat devices added or updated.

        :param details: the details to return for the device
        :param port: port name to process
        :param all_ports: the port that _get_ports return
        :param func_name: the function that should be called
        :returns: whether the named function was called
        """
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=details),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=all_ports),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, func_name)
        ) as (get_dev_fn, _get_ports, upd_dev_up, upd_dev_down, func):
            # False return value means "no resync required".
            self.assertFalse(self.agent.treat_devices_added_or_updated([port]))
            _get_ports.assert_called_once_with(self.agent.int_br)
        return func.called
    def test_treat_devices_added_updated_ignores_invalid_ofport(self):
        # ofport -1 marks an invalid port: port_dead must NOT be called.
        port_name = 'hoge'
        p1 = _mock_port(True, port_name)
        p1.ofport = -1
        self.assertFalse(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port_name, [p1], 'port_dead'))
    def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
        # Valid ofport but unknown to the plugin -> port_dead is called.
        port_name = 'hoge'
        p1 = _mock_port(True, port_name)
        p1.ofport = 1
        self.assertTrue(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port_name, [p1], 'port_dead'))
    def test_treat_devices_added_does_not_process_missing_port(self):
        # NOTE(review): nothing inside this context ever invokes the
        # agent, so the assertion below is trivially true. Presumably
        # treat_devices_added_or_updated should be called here -- confirm.
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details'),
            mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
                              return_value=None)
        ) as (get_dev_fn, get_vif_func):
            self.assertFalse(get_dev_fn.called)
    def test_treat_devices_added_updated_updates_known_port(self):
        # A known port with full details goes through treat_vif_port.
        port_name = 'tapd3315981-0b'
        p1 = _mock_port(False)
        p2 = _mock_port(True, port_name)
        ports = [p1, p2]
        details = mock.MagicMock()
        details.__contains__.side_effect = lambda x: True
        self.assertTrue(self._mock_treat_devices_added_updated(
            details, port_name, ports, 'treat_vif_port'))
    def test_treat_devices_added_updated_put_port_down(self):
        # admin_state_up=False: the port is wired but reported down.
        fake_details_dict = {'admin_state_up': False,
                             'port_id': 'xxx',
                             'device': 'xxx',
                             'network_id': 'yyy',
                             'physical_network': 'foo',
                             'segmentation_id': 'bar',
                             'network_type': 'baz'}
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=fake_details_dict),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=[_mock_port(True, 'xxx')]),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, 'treat_vif_port')
        ) as (get_dev_fn, _get_ports, upd_dev_up,
              upd_dev_down, treat_vif_port):
            self.assertFalse(self.agent.treat_devices_added_or_updated(
                ['xxx']))
            self.assertTrue(treat_vif_port.called)
            self.assertTrue(upd_dev_down.called)
            _get_ports.assert_called_once_with(self.agent.int_br)
    def test_treat_devices_removed_returns_true_for_missing_device(self):
        # RPC failure on removal also means "resync needed" (True).
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               side_effect=Exception()):
            self.assertTrue(self.agent.treat_devices_removed([{}]))
    def _mock_treat_devices_removed(self, port_exists):
        # Regardless of whether the plugin still knows the port,
        # treat_devices_removed unbinds it locally.
        details = dict(exists=port_exists)
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               return_value=details):
            with mock.patch.object(self.agent, 'port_unbound') as port_unbound:
                self.assertFalse(self.agent.treat_devices_removed([{}]))
        self.assertTrue(port_unbound.called)
    def test_treat_devices_removed_unbinds_port(self):
        self._mock_treat_devices_removed(True)
    def test_treat_devices_removed_ignores_missing_port(self):
        self._mock_treat_devices_removed(False)
    def _test_process_network_ports(self, port_info):
        """Drive process_network_ports and verify it fans out to the
        security-group agent, the added/updated handler and the removal
        handler with the expected port sets."""
        with contextlib.nested(
            mock.patch.object(self.agent.sg_agent, "setup_port_filters"),
            mock.patch.object(self.agent, "treat_devices_added_or_updated",
                              return_value=False),
            mock.patch.object(self.agent, "treat_devices_removed",
                              return_value=False)
        ) as (setup_port_filters, device_added_updated, device_removed):
            self.assertFalse(self.agent.process_network_ports(port_info))
            setup_port_filters.assert_called_once_with(
                port_info['added'], port_info.get('updated', set()))
            device_added_updated.assert_called_once_with(
                port_info['added'] | port_info.get('updated', set()))
            device_removed.assert_called_once_with(port_info['removed'])
    def test_process_network_ports(self):
        self._test_process_network_ports(
            {'current': set(['tap0']),
             'removed': set(['eth0']),
             'added': set(['eth1'])})
    def test_process_network_port_with_updated_ports(self):
        self._test_process_network_ports(
            {'current': set(['tap0', 'tap1']),
             'updated': set(['tap1', 'eth1']),
             'removed': set(['eth0']),
             'added': set(['eth1'])})
    def test_report_state(self):
        # The device count is reported and start_flag is dropped after
        # the first report.
        with mock.patch.object(self.agent.state_rpc,
                               "report_state") as report_st:
            self.agent.int_br_device_count = 5
            self.agent._report_state()
            report_st.assert_called_with(self.agent.context,
                                         self.agent.agent_state)
            self.assertNotIn("start_flag", self.agent.agent_state)
            self.assertEqual(
                self.agent.agent_state["configurations"]["devices"],
                self.agent.int_br_device_count
            )
    def test_port_update(self):
        # A port_update RPC queues the tap device name (tap + first 11
        # chars of the port id) into updated_ports.
        port = {"id": "b1981919-f516-11e3-a8f4-08606e7f74e7",
                "network_id": "124",
                "admin_state_up": False}
        self.agent.port_update("unused_context",
                               port=port,
                               network_type="vlan",
                               segmentation_id="1",
                               physical_network="physnet")
        self.assertEqual(set(['tapb1981919-f5']), self.agent.updated_ports)
    def test_setup_physical_bridges(self):
        """Wiring a physical bridge: veth pair re-created (delete, udev
        settle, add) in order, and the resulting ofports recorded."""
        with contextlib.nested(
            mock.patch.object(ip_lib, "device_exists"),
            mock.patch.object(utils, "execute"),
            mock.patch.object(self.mod_agent.Bridge, "add_port"),
            mock.patch.object(self.mod_agent.Bridge, "delete_port"),
            mock.patch.object(self.mod_agent.Bridge, "set_protocols"),
            mock.patch.object(self.mod_agent.Bridge, "set_controller"),
            mock.patch.object(self.mod_agent.Bridge, "get_datapath_id",
                              return_value='0xa'),
            mock.patch.object(self.agent.int_br, "add_port"),
            mock.patch.object(self.agent.int_br, "delete_port"),
            mock.patch.object(ip_lib.IPWrapper, "add_veth"),
            mock.patch.object(ip_lib.IpLinkCommand, "delete"),
            mock.patch.object(ip_lib.IpLinkCommand, "set_up"),
            mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),
            mock.patch.object(self.mod_agent.ryu_api, "get_datapath",
                              return_value=self.datapath)
        ) as (devex_fn, utilsexec_fn,
              ovs_addport_fn, ovs_delport_fn, ovs_set_protocols_fn,
              ovs_set_controller_fn, ovs_datapath_id_fn, br_addport_fn,
              br_delport_fn, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn,
              ryu_api_fn):
            devex_fn.return_value = True
            # Parent mock records the relative ordering of the calls.
            parent = mock.MagicMock()
            parent.attach_mock(utilsexec_fn, 'utils_execute')
            parent.attach_mock(linkdel_fn, 'link_delete')
            parent.attach_mock(addveth_fn, 'add_veth')
            addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
                                       ip_lib.IPDevice("phy-br-eth1"))
            ovs_addport_fn.return_value = "25"
            br_addport_fn.return_value = "11"
            self.agent.setup_physical_bridges({"physnet1": "br-eth"})
            expected_calls = [mock.call.link_delete(),
                              mock.call.utils_execute(['/sbin/udevadm',
                                                       'settle',
                                                       '--timeout=10']),
                              mock.call.add_veth('int-br-eth',
                                                 'phy-br-eth')]
            parent.assert_has_calls(expected_calls, any_order=False)
            self.assertEqual(11, self.agent.int_ofports["physnet1"])
            self.assertEqual(25, self.agent.phys_ofports["physnet1"])
    def test_setup_physical_interfaces(self):
        # A physical interface is plugged straight into the int bridge.
        with mock.patch.object(self.agent.int_br, "add_port") as add_port_fn:
            add_port_fn.return_value = "111"
            self.agent.setup_physical_interfaces({"physnet1": "eth1"})
            add_port_fn.assert_called_once_with("eth1")
            self.assertEqual(111, self.agent.int_ofports["physnet1"])
    def test_port_unbound(self):
        # Unbinding the last VIF of a network reclaims its local VLAN.
        with contextlib.nested(
            mock.patch.object(self.agent, "reclaim_local_vlan"),
            mock.patch.object(self.agent, "get_net_uuid",
                              return_value="netuid12345"),
        ) as (reclvl_fn, _):
            self.agent.enable_tunneling = True
            lvm = mock.Mock()
            lvm.network_type = "gre"
            lvm.vif_ports = {"vif1": mock.Mock()}
            self.agent.local_vlan_map["netuid12345"] = lvm
            self.agent.port_unbound("vif1")
            self.assertTrue(reclvl_fn.called)
    def _prepare_l2_pop_ofports(self, network_type=None):
        """Seed the agent with two l2pop networks (net1/net2), their local
        VLAN mappings and gre tunnel ofports to peers 1.1.1.1 / 2.2.2.2."""
        LVM = collections.namedtuple('LVM', 'net, vlan, segid, ip')
        self.lvms = [LVM(net='net1', vlan=11, segid=21, ip='1.1.1.1'),
                     LVM(net='net2', vlan=12, segid=22, ip='2.2.2.2')]
        self.tunnel_type = 'gre'
        self.tun_name1 = self._create_tunnel_port_name(self.lvms[0].ip,
                                                       self.tunnel_type)
        self.tun_name2 = self._create_tunnel_port_name(self.lvms[1].ip,
                                                       self.tunnel_type)
        if network_type is None:
            network_type = self.tunnel_type
        lvm1 = mock.Mock()
        lvm1.network_type = network_type
        lvm1.vlan = self.lvms[0].vlan
        lvm1.segmentation_id = self.lvms[0].segid
        lvm1.tun_ofports = set([1])
        lvm2 = mock.Mock()
        lvm2.network_type = network_type
        lvm2.vlan = self.lvms[1].vlan
        lvm2.segmentation_id = self.lvms[1].segid
        lvm2.tun_ofports = set([1, 2])
        self.agent.tunnel_types = [self.tunnel_type]
        self.agent.local_vlan_map = {self.lvms[0].net: lvm1,
                                     self.lvms[1].net: lvm2}
        self.agent.tun_ofports = {self.tunnel_type:
                                  {self.lvms[0].ip: 1,
                                   self.lvms[1].ip: 2}}
    def test_fdb_ignore_network(self):
        # fdb entries for an unknown network are ignored on add and remove.
        self._prepare_l2_pop_ofports()
        fdb_entry = {'net3': {}}
        with contextlib.nested(
            mock.patch.object(self.agent, '_setup_tunnel_port'),
            mock.patch.object(self.agent, 'cleanup_tunnel_port')
        ) as (add_tun_fn, clean_tun_fn):
            self.agent.fdb_add(None, fdb_entry)
            self.assertFalse(add_tun_fn.called)
            self.agent.fdb_remove(None, fdb_entry)
            self.assertFalse(clean_tun_fn.called)
    def test_fdb_ignore_self(self):
        # Entries from the agent's own IP only touch the local ARP table;
        # no tunnel setup/teardown happens.
        self._prepare_l2_pop_ofports()
        self.agent.local_ip = 'agent_ip'
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports':
                      {'agent_ip':
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with contextlib.nested(
            mock.patch.object(self.agent.ryuapp, "add_arp_table_entry"),
            mock.patch.object(self.agent.ryuapp, "del_arp_table_entry"),
        ) as (add_fn, del_fn):
            self.agent.fdb_add(None, copy.deepcopy(fdb_entry))
            add_fn.assert_called_once_with(12, 'ip', 'mac')
            self.assertFalse(del_fn.called)
            self.agent.fdb_remove(None, fdb_entry)
            add_fn.assert_called_once_with(12, 'ip', 'mac')
            del_fn.assert_called_once_with(12, 'ip')
    def test_fdb_add_flows(self):
        # Adding a remote port installs a unicast output (eth_dst='mac')
        # plus a refreshed flood output including the new tunnel ofport.
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports':
                      {self.lvms[1].ip:
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with contextlib.nested(
            mock.patch.object(self.agent, '_setup_tunnel_port'),
            mock.patch.object(self.agent.int_br, 'install_tunnel_output'),
            mock.patch.object(self.agent.int_br, 'delete_tunnel_output'),
        ) as (add_tun_fn, install_fn, delete_fn):
            add_tun_fn.return_value = 2
            self.agent.fdb_add(None, fdb_entry)
            self.assertEqual(2, install_fn.call_count)
            expected_calls = [
                mock.call(7, 11, 21, set([2]), eth_dst='mac', goto_next=False),
                mock.call(10, 11, 21, set([1, 2]), goto_next=True)
            ]
            install_fn.assert_has_calls(expected_calls)
            self.assertFalse(delete_fn.called)
    def test_fdb_del_flows(self):
        # Removing a remote port deletes its unicast flow and shrinks the
        # flood output to the remaining tunnel ofports.
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports':
                      {self.lvms[1].ip:
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with contextlib.nested(
            mock.patch.object(self.agent.int_br, 'install_tunnel_output'),
            mock.patch.object(self.agent.int_br, 'delete_tunnel_output'),
        ) as (install_fn, delete_fn):
            self.agent.fdb_remove(None, fdb_entry)
            install_fn.assert_called_once_with(10, 12, 22, set([1]),
                                               goto_next=True)
            delete_fn.assert_called_once_with(7, 12, eth_dst='mac')
    def test_fdb_add_port(self):
        """fdb_add creates a tunnel port only for a previously unknown VTEP.

        The first fdb_add uses an already-known remote IP, so no tunnel port
        is set up; adding an entry for a new tunnel_ip triggers
        _setup_tunnel_port with the derived port name.
        """
        self._prepare_l2_pop_ofports()
        tunnel_ip = '10.10.10.10'
        tun_name = self._create_tunnel_port_name(tunnel_ip,
                                                 self.tunnel_type)
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [['mac', 'ip']]}}}
        with mock.patch.object(self.agent, '_setup_tunnel_port') as add_tun_fn:
            self.agent.fdb_add(None, fdb_entry)
            self.assertFalse(add_tun_fn.called)
            fdb_entry[self.lvms[0].net]['ports'][tunnel_ip] = [['mac', 'ip']]
            self.agent.fdb_add(None, fdb_entry)
            add_tun_fn.assert_called_with(
                self.agent.int_br, tun_name, tunnel_ip, self.tunnel_type)
    def test_fdb_del_port(self):
        """Removing the last (flooding) entry deletes the tunnel port itself."""
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports': {self.lvms[1].ip: [n_const.FLOODING_ENTRY]}}}
        with mock.patch.object(self.agent.int_br,
                               'delete_port') as del_port_fn:
            self.agent.fdb_remove(None, fdb_entry)
            del_port_fn.assert_called_once_with(self.tun_name2)
    def test_add_arp_table_entry(self):
        """fdb_add registers ARP entries and sets up the unknown VTEP.

        Each non-flooding mac/ip pair is pushed into the Ryu ARP table for
        the network's local vlan; the unseen remote IP 192.0.2.1 triggers a
        single setup_tunnel_port call.
        """
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [n_const.FLOODING_ENTRY,
                                                  ['mac1', 'ip1']],
                                self.lvms[1].ip: [['mac2', 'ip2']],
                                '192.0.2.1': [n_const.FLOODING_ENTRY,
                                              ['mac3', 'ip3']]}}}
        with mock.patch.object(self.agent,
                               'setup_tunnel_port') as setup_tun_fn:
            self.agent.fdb_add(None, fdb_entry)
            calls = [
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip1', 'mac1'),
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip2', 'mac2')
            ]
            self.ryuapp.add_arp_table_entry.assert_has_calls(calls)
            setup_tun_fn.assert_called_once_with(self.agent.int_br,
                                                 '192.0.2.1', 'gre')
    def _test_add_arp_table_entry_non_tunnel(self, network_type):
        """Common body: ARP entries are added but no tunnel port is created.

        For non-tunnel network types (vlan/flat/local) fdb_add must still
        populate the ARP table yet never call setup_tunnel_port.
        """
        self._prepare_l2_pop_ofports(network_type=network_type)
        fdb_entry = {self.lvms[0].net:
                     {'network_type': network_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [n_const.FLOODING_ENTRY,
                                                  ['mac1', 'ip1']],
                                self.lvms[1].ip: [['mac2', 'ip2']],
                                '192.0.2.1': [n_const.FLOODING_ENTRY,
                                              ['mac3', 'ip3']]}}}
        with mock.patch.object(self.agent,
                               'setup_tunnel_port') as setup_tun_fn:
            self.agent.fdb_add(None, fdb_entry)
            calls = [
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip1', 'mac1'),
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip2', 'mac2')
            ]
            self.ryuapp.add_arp_table_entry.assert_has_calls(calls)
            self.assertFalse(setup_tun_fn.called)
    def test_add_arp_table_entry_vlan(self):
        """ARP entries for a 'vlan' network do not create tunnel ports."""
        self._test_add_arp_table_entry_non_tunnel('vlan')
    def test_add_arp_table_entry_flat(self):
        """ARP entries for a 'flat' network do not create tunnel ports."""
        self._test_add_arp_table_entry_non_tunnel('flat')
    def test_add_arp_table_entry_local(self):
        """ARP entries for a 'local' network do not create tunnel ports."""
        self._test_add_arp_table_entry_non_tunnel('local')
    def test_del_arp_table_entry(self):
        """fdb_remove deletes ARP entries and cleans up the tunnel port.

        Each non-flooding ip is removed from the Ryu ARP table for the
        network's local vlan, and cleanup_tunnel_port is invoked once.
        """
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [n_const.FLOODING_ENTRY,
                                                  ['mac1', 'ip1']],
                                self.lvms[1].ip: [['mac2', 'ip2']],
                                '192.0.2.1': [n_const.FLOODING_ENTRY,
                                              ['mac3', 'ip3']]}}}
        with mock.patch.object(self.agent,
                               'cleanup_tunnel_port') as cleanup_tun_fn:
            self.agent.fdb_remove(None, fdb_entry)
            calls = [
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip1'),
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip2')
            ]
            self.ryuapp.del_arp_table_entry.assert_has_calls(calls)
            cleanup_tun_fn.assert_called_once_with(self.agent.int_br, 1, 'gre')
    def _test_del_arp_table_entry_non_tunnel(self, network_type):
        """Common body: ARP entries are removed but no tunnel cleanup runs.

        For non-tunnel network types (vlan/flat/local) fdb_remove must still
        delete ARP entries yet never call cleanup_tunnel_port.
        """
        self._prepare_l2_pop_ofports(network_type=network_type)
        fdb_entry = {self.lvms[0].net:
                     {'network_type': network_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [n_const.FLOODING_ENTRY,
                                                  ['mac1', 'ip1']],
                                self.lvms[1].ip: [['mac2', 'ip2']],
                                '192.0.2.1': [n_const.FLOODING_ENTRY,
                                              ['mac3', 'ip3']]}}}
        with mock.patch.object(self.agent,
                               'cleanup_tunnel_port') as cleanup_tun_fn:
            self.agent.fdb_remove(None, fdb_entry)
            calls = [
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip1'),
                mock.call(self.agent.local_vlan_map[self.lvms[0].net].vlan,
                          'ip2')
            ]
            self.ryuapp.del_arp_table_entry.assert_has_calls(calls)
            self.assertFalse(cleanup_tun_fn.called)
    def test_del_arp_table_entry_vlan(self):
        """ARP deletion for a 'vlan' network skips tunnel cleanup."""
        self._test_del_arp_table_entry_non_tunnel('vlan')
    def test_del_arp_table_entry_flat(self):
        """ARP deletion for a 'flat' network skips tunnel cleanup."""
        self._test_del_arp_table_entry_non_tunnel('flat')
    def test_del_arp_table_entry_local(self):
        """ARP deletion for a 'local' network skips tunnel cleanup."""
        self._test_del_arp_table_entry_non_tunnel('local')
def test_recl_lv_port_to_preserve(self):
self._prepare_l2_pop_ofports()
self.agent.enable_tunneling = True
with mock.patch.object(
self.agent.int_br, 'delete_port'
) as del_port_fn:
self.agent.reclaim_local_vlan(self.lvms[0].net)
self.assertFalse(del_port_fn.called)
def test_recl_lv_port_to_remove(self):
self._prepare_l2_pop_ofports()
self.agent.enable_tunneling = True
with mock.patch.object(self.agent.int_br,
'delete_port') as del_port_fn:
self.agent.reclaim_local_vlan(self.lvms[1].net)
del_port_fn.assert_called_once_with(self.tun_name2)
    def test__setup_tunnel_port_error_negative(self):
        """A '-1' ofport from add_tunnel_port is treated as a failure.

        _setup_tunnel_port must log an error and return 0.
        """
        with contextlib.nested(
            mock.patch.object(self.agent.int_br, 'add_tunnel_port',
                              return_value='-1'),
            mock.patch.object(self.mod_agent.LOG, 'error')
        ) as (add_tunnel_port_fn, log_error_fn):
            ofport = self.agent._setup_tunnel_port(
                self.agent.int_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
            self.assertEqual(ofport, 0)
    def test__setup_tunnel_port_error_not_int(self):
        """A non-integer ofport (None) from add_tunnel_port is a failure.

        _setup_tunnel_port must log both the exception and an error message,
        then return 0.
        """
        with contextlib.nested(
            mock.patch.object(self.agent.int_br, 'add_tunnel_port',
                              return_value=None),
            mock.patch.object(self.mod_agent.LOG, 'exception'),
            mock.patch.object(self.mod_agent.LOG, 'error')
        ) as (add_tunnel_port_fn, log_exc_fn, log_error_fn):
            ofport = self.agent._setup_tunnel_port(
                self.agent.int_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment)
            log_exc_fn.assert_called_once_with(
                _("ofport should have a value that can be "
                  "interpreted as an integer"))
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
            self.assertEqual(ofport, 0)
def test_tunnel_sync(self):
self.agent.local_ip = 'agent_ip'
self.agent.context = 'fake_context'
self.agent.tunnel_types = ['vxlan']
with mock.patch.object(
self.agent.plugin_rpc, 'tunnel_sync'
) as tunnel_sync_rpc_fn:
self.agent.tunnel_sync()
tunnel_sync_rpc_fn.assert_called_once_with(
self.agent.context,
self.agent.local_ip,
self.agent.tunnel_types[0])
    def test__get_ports(self):
        """_get_ports issues an OFPPortDescStatsRequest and yields ports.

        A faked send_msg returns a single-port stats reply; the generator
        result must expose that port's name and ofport, and the request must
        be sent via ryu_api.send_msg with reply_multi=True.
        """
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        reply = [ofpp.OFPPortDescStatsReply(body=[ofpp.OFPPort(name='hoge',
                                                               port_no=8)])]
        sendmsg = mock.Mock(return_value=reply)
        self.mod_agent.ryu_api.send_msg = sendmsg
        result = self.agent._get_ports(self.agent.int_br)
        result = list(result)  # convert generator to list.
        self.assertEqual(1, len(result))
        self.assertEqual('hoge', result[0].port_name)
        self.assertEqual(8, result[0].ofport)
        expected_msg = ofpp.OFPPortDescStatsRequest(
            datapath=self.agent.int_br.datapath)
        sendmsg.assert_has_calls([mock.call(app=self.agent.ryuapp,
            msg=expected_msg, reply_cls=ofpp.OFPPortDescStatsReply,
            reply_multi=True)])
def test__get_ofport_names(self):
names = ['p111', 'p222', 'p333']
ps = [_mock_port(True, x) for x in names]
with mock.patch.object(self.agent, '_get_ports',
return_value=ps) as _get_ports:
result = self.agent._get_ofport_names('hoge')
_get_ports.assert_called_once_with('hoge')
self.assertEqual(set(names), result)
| |
'''
* TeleStax, Open Source Cloud Communications
* Copyright 2011-2016, Telestax Inc and individual contributors
* by the @authors tag.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
This code was generated by:
    Name: Md Sharique
    Email: nukles1.07@gmail.com
'''
import requests
import json
class client(object):
    """Container for RestComm API credentials and base endpoint URL.

    Instances are passed to the per-operation helper classes
    (AccountDetails, ChangeAccountPassword, ...), which copy these fields.
    """

    def __init__(self, Sid, AuthToken, BaseUrl):
        # Stored verbatim; used for HTTP basic auth and URL construction.
        self.Sid = Sid
        self.AuthToken = AuthToken
        self.BaseUrl = BaseUrl
class AccountDetails(object):
    """Fetch the details of the account identified by the client's Sid."""

    def __init__(self, client):
        # Copy credentials from the shared client object.
        self.Sid = client.Sid
        self.AuthToken = client.AuthToken
        self.BaseUrl = client.BaseUrl

    def Details(self):
        """GET the account resource.

        Returns:
            The parsed JSON response (dict) on success, or a human-readable
            error string on authentication/URL/transport failure.
        """
        try:
            Url = self.BaseUrl + '/Accounts.json/' + self.Sid
            r1 = requests.get(Url, auth=(self.Sid, self.AuthToken))
            if r1.status_code == 401:
                return "Authentication Error! Please Enter Valid Account Sid and Authentication Token"
            elif r1.status_code == 404:
                return "Base Url is Incorrect! Please verify and try again"
            else:
                # Debug print removed: sibling classes return the parsed
                # content without echoing it to stdout.
                content = json.loads(r1.text)
                return content
        except requests.HTTPError:
            return ("HTTP ERROR")
        except requests.ConnectionError:
            return ("CONNECTION ERROR! Please check and try again")
        except requests.Timeout:
            return ("TIMEOUT ERROR")
        except requests.RequestException:
            return ("Invalid Url! Please check and try again")
class ChangeAccountPassword(object):
    """Change the password of the account identified by the client's Sid."""

    def __init__(self, Password, client):
        # Copy credentials from the shared client object.
        self.Sid = client.Sid
        self.AuthToken = client.AuthToken
        self.BaseUrl = client.BaseUrl
        self.Password = Password

    def ChangePassword(self):
        """POST the new password; return parsed JSON or an error string."""
        try:
            endpoint = self.BaseUrl + '/Accounts.json/' + self.Sid
            payload = {'AccountSid': self.Sid, 'Password': self.Password}
            response = requests.post(endpoint, data=payload,
                                     auth=(self.Sid, self.AuthToken))
            # Map known failure status codes to their messages.
            error_by_status = {
                401: "Authentication Error! Please Enter Valid Account Sid and Authentication Token",
                404: "Base Url is Incorrect! Please verify and try again",
                400: "Password is too weak",
            }
            if response.status_code in error_by_status:
                return error_by_status[response.status_code]
            return json.loads(response.text)
        except requests.HTTPError:
            return ("HTTP ERROR")
        except requests.ConnectionError:
            return ("CONNECTION ERROR! Please check and try again")
        except requests.Timeout:
            return ("TIMEOUT ERROR")
        except requests.RequestException:
            return ("Invalid Url! Please check and try again")
class CreateSubAccount(object):
    """Create a sub-account under the authenticated account."""

    def __init__(self, FriendlyName, EmailAddress, Password, client):
        # Copy credentials from the shared client object.
        self.Sid = client.Sid
        self.AuthToken = client.AuthToken
        self.BaseUrl = client.BaseUrl
        self.Password = Password
        self.FriendlyName = FriendlyName
        self.EmailAddress = EmailAddress

    def Create(self):
        """POST the new sub-account; return parsed JSON or an error string."""
        try:
            endpoint = self.BaseUrl + '/Accounts.json/'
            payload = {'FriendlyName': self.FriendlyName,
                       'EmailAddress': self.EmailAddress,
                       'Password': self.Password}
            response = requests.post(endpoint, data=payload,
                                     auth=(self.Sid, self.AuthToken))
            # Map known failure status codes to their messages.
            error_by_status = {
                401: "Authentication Error! Please Enter Valid Account Sid and Authentication Token",
                404: "Base Url is Incorrect! Please verify and try again",
                409: "Duplicate Name not allowed",
                400: "Password is too weak!",
            }
            if response.status_code in error_by_status:
                return error_by_status[response.status_code]
            return json.loads(response.text)
        except requests.HTTPError:
            return ("HTTP ERROR")
        except requests.ConnectionError:
            return ("CONNECTION ERROR! Please check and try again")
        except requests.Timeout:
            return ("TIMEOUT ERROR")
        except requests.RequestException:
            return ("Invalid Url! Please check and try again")
class CloseSubAccount(object):
    """Close (deactivate) a sub-account identified by SubSid."""

    def __init__(self, SubSid, client):
        # Copy credentials from the shared client object.
        self.Sid = client.Sid
        self.AuthToken = client.AuthToken
        self.BaseUrl = client.BaseUrl
        self.SubSid = SubSid

    def Close(self):
        """POST Status=closed for the sub-account; return JSON or an error string."""
        try:
            endpoint = self.BaseUrl + '/Accounts.json/' + self.SubSid
            payload = {'Status': 'closed'}
            response = requests.post(endpoint, data=payload,
                                     auth=(self.Sid, self.AuthToken))
            if response.status_code == 401:
                return ("Authentication Error! Please Enter Valid Account Sid and Authentication Token")
            if response.status_code == 404:
                return "Base Url is Incorrect or Invalid SubSid! Please verify and try again"
            return json.loads(response.text)
        except requests.HTTPError:
            return ("HTTP ERROR")
        except requests.ConnectionError:
            return ("CONNECTION ERROR! Please check and try again")
        except requests.Timeout:
            return ("TIMEOUT ERROR")
        except requests.RequestException:
            return ("Invalid Url! Please check and try again")
class SubAccountDetails(object):
    """List the accounts (including sub-accounts) visible to this Sid."""

    def __init__(self, client):
        # Copy credentials from the shared client object.
        self.Sid = client.Sid
        self.AuthToken = client.AuthToken
        self.BaseUrl = client.BaseUrl

    def Details(self):
        """GET the accounts collection; return parsed JSON or an error string."""
        try:
            endpoint = self.BaseUrl + '/Accounts.json/'
            response = requests.get(endpoint, auth=(self.Sid, self.AuthToken))
            if response.status_code == 401:
                return ("Authentication Error! Please Enter Valid Account Sid and Authentication Token")
            if response.status_code == 404:
                return "Base Url is Incorrect! Please verify and try again"
            return json.loads(response.text)
        except requests.HTTPError:
            return ("HTTP ERROR")
        except requests.ConnectionError:
            return ("CONNECTION ERROR! Please check and try again")
        except requests.Timeout:
            return ("TIMEOUT ERROR")
        except requests.RequestException:
            return ("Invalid Url! Please check and try again")
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements update checking and notification to the user.
It provides a context manager around the cache file that stores information
about the last update check. The general process is as follows:
1) This stores the last time an update check occurred, so the check will only
be done if the update check frequency has expired.
2) When an update check is done, all notifications in the latest snapshot are
queried to see if their condition matches the current state of the SDK. Any
notifications that match are "activated" and cached.
3) Every time a command is run, Notify() is called to notify the user of
available updates. It loops over the activated notifications and determines
if any of the triggers match the current command invocation. If there is a
match, the notification is printed and the last nag time is recorded for that
particular notification. At most one notification is printed per command.
The priority is determined by the order the notifications are registered
in the component snapshot.
"""
import json
import os
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import schemas
class UpdateCheckData(object):
  """A class to hold update checking data and to perform notifications."""

  UPDATE_CHECK_FREQUENCY_IN_SECONDS = 86400  # Once a day.

  def __init__(self):
    self._last_update_check_file = config.Paths().update_check_cache_path
    # _dirty tracks whether _data changed since load; _SaveData is a no-op
    # unless it is True.
    self._dirty = False
    self._data = self._LoadData()

  def _LoadData(self):
    """Deserializes data from the json file."""
    if not os.path.isfile(self._last_update_check_file):
      return schemas.LastUpdateCheck.FromDictionary({})
    with open(self._last_update_check_file) as fp:
      try:
        data = json.loads(fp.read())
        return schemas.LastUpdateCheck.FromDictionary(data)
      except ValueError:
        # A corrupt cache is not fatal; start over with an empty one.
        log.debug('Failed to parse update check cache file. Using empty '
                  'cache instead.')
        return schemas.LastUpdateCheck.FromDictionary({})

  def _SaveData(self):
    """Serializes data to the json file."""
    if not self._dirty:
      return
    with open(self._last_update_check_file, 'w') as fp:
      fp.write(json.dumps(self._data.ToDictionary()))
    self._dirty = False

  def __enter__(self):
    return self

  def __exit__(self, *args):
    # Persist any changes made while this object was used as a context
    # manager.
    self._SaveData()

  def LastUpdateCheckRevision(self):
    """Gets the revision of the snapshot from the last update check.

    Returns:
      long, The revision of the last checked snapshot.  This is a long int but
      formatted as an actual date in seconds (i.e 20151009132504).  It is *NOT*
      seconds since the epoch.
    """
    return self._data.last_update_check_revision

  def LastUpdateCheckTime(self):
    """Gets the time of the last update check as seconds since the epoch.

    Returns:
      int, The time of the last update check in seconds since the epoch.
    """
    return self._data.last_update_check_time

  def SecondsSinceLastUpdateCheck(self):
    """Gets the number of seconds since we last did an update check.

    Returns:
      int, The amount of time in seconds.
    """
    return time.time() - self._data.last_update_check_time

  def ShouldDoUpdateCheck(self):
    """Checks if it is time to do an update check.

    Returns:
      True, if enough time has elapsed and we should perform another update
      check.  False otherwise.
    """
    return (self.SecondsSinceLastUpdateCheck() >=
            UpdateCheckData.UPDATE_CHECK_FREQUENCY_IN_SECONDS)

  def UpdatesAvailable(self):
    """Returns whether we already know about updates that are available.

    Returns:
      bool, True if we know about updates, False otherwise.
    """
    return bool(self._data.notifications)

  def SetFromSnapshot(self, snapshot, component_updates_available, force=False):
    """Sets that we just did an update check and found the given snapshot.

    If the given snapshot is different than the last one we saw, refresh the
    set of activated notifications for available updates for any notifications
    with matching conditions.

    You must call Save() to persist these changes or use this as a context
    manager.

    Args:
      snapshot: snapshots.ComponentSnapshot, The latest snapshot available.
      component_updates_available: bool, True if there are updates to
        components we have installed.  False otherwise.
      force: bool, True to force a recalculation of whether there are
        available updates, even if the snapshot revision has not changed.

    Returns:
      bool, True if there are now components to update, False otherwise.
    """
    if force or self.LastUpdateCheckRevision() != snapshot.revision:
      log.debug('Updating notification cache...')
      current_version = config.INSTALLATION_CONFIG.version
      current_revision = config.INSTALLATION_CONFIG.revision
      # Activate every notification whose condition matches the current SDK
      # state; order in the snapshot determines display priority.
      activated = []
      possible_notifications = snapshot.sdk_definition.notifications
      for notification in possible_notifications:
        if notification.condition.Matches(
            current_version, current_revision, component_updates_available):
          log.debug('Activating notification: [%s]', notification.id)
          activated.append(notification)
      self._data.notifications = activated
      self._CleanUpLastNagTimes()

    self._data.last_update_check_time = time.time()
    self._data.last_update_check_revision = snapshot.revision
    self._dirty = True
    return self.UpdatesAvailable()

  def SetFromIncompatibleSchema(self):
    """Sets that we just did an update check and found a new schema version.

    An incompatible schema version means there are definitely updates
    available but we can't read the notifications to correctly notify the
    user.  This will install a default notification for the incompatible
    schema.

    You must call Save() to persist these changes or use this as a context
    manager.
    """
    log.debug('Incompatible schema found. Activating default notification.')

    # Nag once a week to update if the schema changed and we don't know what's
    # going on anymore.
    notification_spec = schemas.NotificationSpec(
        id='incompatible',
        condition=schemas.Condition(None, None, None, None, False),
        trigger=schemas.Trigger(frequency=604800, command_regex=None),
        notification=schemas.Notification(None, None, None)
    )
    self._data.notifications = [notification_spec]
    self._CleanUpLastNagTimes()

    self._data.last_update_check_time = time.time()
    self._data.last_update_check_revision = 0  # Doesn't matter
    self._dirty = True

  def _CleanUpLastNagTimes(self):
    """Clean the map holding the last nag times for each notification.

    If a notification is no longer active, it is removed from the map.  Any
    notifications that are still activated have their last nag times
    preserved.
    """
    activated_ids = set(n.id for n in self._data.notifications)
    # .items() (not Python-2-only .iteritems()) keeps this working on both
    # Python 2 and 3.
    self._data.last_nag_times = dict(
        (name, value)
        for name, value in self._data.last_nag_times.items()
        if name in activated_ids)

  def Notify(self, command_path):
    """Notify the user of any available updates.

    This should be called for every command that is run.  It does not actually
    do an update check, and does not necessarily notify the user each time.
    The user will only be notified if there are activated notifications and if
    the trigger for one of the activated notifications matches.  At most one
    notification will be printed per command.  Order or priority is determined
    by the order in which the notifications are registered in the component
    snapshot file.

    Args:
      command_path: str, The '.' separated path of the command that is
        currently being run (i.e. gcloud.foo.bar).
    """
    # Only nag if we are running in an interactive terminal.
    if not log.out.isatty() or not log.status.isatty():
      return

    for notification in self._data.notifications:
      name = notification.id
      last_nag_time = self._data.last_nag_times.get(name, 0)

      # Only notify if the trigger matches.  Exit as soon as one notification
      # is printed.
      if notification.trigger.Matches(last_nag_time, command_path):
        log.status.write(notification.notification.NotificationMessage())
        self._data.last_nag_times[name] = time.time()
        self._dirty = True
        break
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#==============================================================================
# main file for creating a database of all the experiments' runs
#==============================================================================
import os
import numpy as np
import pandas
from pandas.io import sql as psql
from time import strptime
from math import floor
import sqlite3 as lite
from apsimRegions.preprocess import fileio
def create_tables(masterDbConn, gridLut):
    '''
    Creates each of the tables in the master run database.

    Parameters
    ----------
    masterDbConn : sqlite connection object
        master database to connect to
    gridLut : pandas dataframe
        contains the grid information (point_id, lat, lon, county, etc.)

    Returns
    -------
    Nothing.
    '''
    with masterDbConn:
        # create runParameters table
        sql = "CREATE TABLE runParameters (run_id INTEGER PRIMARY KEY, met TEXT, crop TEXT, resolution REAL, clock_start TEXT, clock_end TEXT, crit_fr_asw REAL, sow_start TEXT, sow_end TEXT, harvest_date TEXT, soil_name TEXT)"
        masterDbConn.execute(sql)

        # create apsimOutput table
        # handled in update_apsim_output_table()

        # create outputFields table
        # handled in update_output_fields_table()

        # create gridPoints table -- written directly from the dataframe.
        psql.write_frame(gridLut, 'gridPoints', masterDbConn)
def update_run_parameters_table(masterDbConn, configPath):
'''
Updates the runParameters table in the master run database. If a run
is already there it is updated, otherwise it is added.
Parameters
----------
masterDbConn : sqlite connection object
master database to connect to
configPath : string
path to configuration file which contains all the configuration
details of the run
Returns
-------
If the database is being updated (True) or not (False).
'''
# set if the database is being updated or not
update = False
# read configuration file
if os.path.isfile(configPath):
config = fileio.Config(configPath)
else:
print '*** Warning: {0} does not exist.'.format(configPath)
# set the variables to save to the runParameters table
row = config.toDict()
row['runId'] = int(os.path.split(os.path.split(configPath)[0])[1])
with masterDbConn:
try:
sql = "INSERT INTO runParameters VALUES (:runId, :met, :crop, :resolution, :clockStart, :clockEnd, :critFrAsw, :sowStart, :sowEnd, :harvestDate, :soilName)"
masterDbConn.execute(sql, row)
except lite.IntegrityError: # for when the row already exists
sql = "UPDATE runParameters SET run_id=:runId, met=:met, crop=:crop, resolution=:resolution, clock_start=:clockStart, clock_end=:clockEnd, crit_fr_asw=:critFrAsw, sow_start=:sowStart, sow_end=:sowEnd, harvest_date=:harvestDate, soil_name=:soilName WHERE run_id=:runId"
masterDbConn.execute(sql, row)
update = True
return update
def _get_yearly_yield(pointDailyData):
    '''
    Determines yield values by year when given daily point data.

    Parameters
    ----------
    pointDailyData : pandas TimeSeries
        point daily yield values, indexed by date

    Returns
    -------
    Dataframe of yearly yield data (yield and harvest_date).
    '''
    # get unique years from data
    years = np.unique(pointDailyData.index.year)
    yearlyYield = {}
    harvestDates = {}
    startDay = pandas.datetime(years[0],1,1)
    yearOverlap = False
    for year in years:
        # check to see if previous year overlaps with this one
        # if it does use the harvest day as start
        if yearOverlap:
            rng = pandas.date_range(startDay, '12/31/{}'.format(year))
        else:
            rng = pandas.date_range('1/1/{}'.format(year), '12/31/{}'.format(year))

        # get the max yield for the year and the value on 12/31/{year}
        yearMax = float(pointDailyData.ix[rng].max())
        try:
            yearLastValue = float(pointDailyData.ix[rng[-1]])
        except KeyError as e:
            # special case where previous year's crop is still in ground
            # set current year as NaN and break loop (no more year data)
            print '*** Warning: {e} out of simulation range. Setting year {year} as NaN. Likely due to previous year crop still in ground.'.format(e=e, year=year)
            print '***** Work around: use management rule "end_crop_on_fixed_date_rule" in each APSIM simulation to end the crop at least 2 days before sowing.'
            yearlyYield[year] = np.nan
            harvestDates[year] = np.nan
            # NOTE(review): the comment above says "break loop", but this
            # re-raises the KeyError instead, so the NaN assignments are
            # discarded and the `break` below is unreachable -- confirm
            # whether the raise is intentional (see bug #127 below).
            raise
            # TODO: BUG (#127)
            # If the chunksize is set to be a static value from the first data
            # point, there is a chance that if there are points where the
            # simulation abruptly ended, then the chunksizes would be
            # inaccurate from that point forward. Fix this.
            #
            # In cases where the apsim simulation failed to complete, this
            # will cause problems; as when the crop cannot be planted again
            # due to it already existing in the ground.
            #
            # Workaround: delete offending point from the database after
            # the fact.
            break

        # if all values are 0 for a given year
        if yearMax == 0:
            yearlyYield[year] = 0
            harvestDates[year] = np.nan
            yearOverlap = False
        # if the last day of the year is 0
        elif yearLastValue == 0:
            yearlyYield[year] = yearMax
            # get timestamp of last possible day that matches yieldMax
            harvestDates[year] = pointDailyData.ix[rng][pointDailyData.ix[rng] == yearMax].tail(1).index[0].strftime('%Y-%m-%d')
            yearOverlap = False
        # if the yield is > 0 on the last day of the year
        elif yearLastValue > 0:
            check = True
            yearOverlap = True
            yearlyYield[year] = yearLastValue
            day = rng[-1]
            # walk forward day by day until the yield stops increasing;
            # that day is taken as the harvest date.
            while check:
                # get the crop yield for the next day
                day += pandas.DateOffset(1)

                # if the date is out of the simulation period
                # (simulation never completed) then set as NaN
                try:
                    cropYieldNew = float(pointDailyData.ix[day])
                except KeyError as e:
                    # will always happen for the last year of the simulation
                    #print '*** Warning: {e} out of simulation range. Setting year {year} as NaN.'.format(e=e, year=year)
                    yearlyYield[year] = np.nan
                    harvestDates[year] = np.nan
                    cropYieldNew = 0.0
                    check = False

                # check to see if new value is >= preveous day's value
                # if it is, set it as the new value
                # keep checking until the new value is < the old one
                if cropYieldNew >= yearlyYield[year]:
                    yearlyYield[year] = cropYieldNew
                    harvestDates[year] = day.strftime('%Y-%m-%d')
                else:
                    check = False
                    startDay = day
        else:
            print '*** Warning: no case for daily data for year {}'.format(year)

    # assemble the per-year dicts into a single dataframe indexed by year.
    yearlyYield = pandas.Series(yearlyYield)
    harvestDates = pandas.Series(harvestDates)
    yearlyYieldData = pandas.DataFrame({'yield':yearlyYield,
                                        'harvest_date':harvestDates})
    return yearlyYieldData
def _get_avg_data(apsimDbConn, pointDailyData, harvestDates, sowDate):
    '''
    Determines seasonal averages for data.

    Parameters
    ----------
    apsimDbConn : sqlite connection object
        connection to database
    dailyData : pandas dataframe
        daily data values, indexed by date
    harvestDates : pandas dataframe
        string date of harvesting, indexed by year
    sowDate : string
        date of sowing (dd-mmm)

    Returns
    -------
    Dataframe of yearly average data (rain, mint, maxt, radn, and irr_fasw).
    '''
    # get unique years from data
    years = np.unique(pointDailyData.index.year)

    # convert sowDate to correct format (struct_time; month/day used below)
    sowDate = strptime(sowDate,'%d-%b')

    # read data from the outputFields table; 'date' and 'yield' are not
    # averaged, so drop them from the field list.
    with apsimDbConn:
        outputFields = psql.read_frame("SELECT * FROM outputFields;", apsimDbConn)
    outputFields = list(outputFields['name'])
    outputFields.remove('date')
    outputFields.remove('yield')

    yearlyAvgData = pandas.DataFrame({})
    for field in outputFields:
        dataAvgs = {}
        for year in years:
            harvestDate = harvestDates[year]
            # check if harvestDate is a string
            # (NaN harvest dates are floats, meaning no harvest that year)
            if type(harvestDate) == type(''):
                rng = pandas.date_range('{0}/{1}/{2}'.format(sowDate.tm_mon, sowDate.tm_mday, year), harvestDate)

                # get the avg values and add to dataAvgs dictionary
                pointDailyDataMean = pointDailyData[field].ix[rng].mean()
                dataAvgs[year] = pointDailyDataMean
            else: # if harvestDate is not a string, set as NaN
                dataAvgs[year] = np.nan
        #print dataAvgs
        yearlyAvgData[field] = pandas.Series(dataAvgs)
        #print yearlyAvgData[field].head()
    return yearlyAvgData
def _get_db_info(apsimDbConn, maxChunksize=1500000):
    '''
    Gathers information from the database.

    Parameters
    ----------
    apsimDbConn : sqlite connection object
        connection to database
    maxChunksize : int
        (optional) maximum size of the chunks returned fromt the database.
        Use to limit the number of rows returned when experiencing out of
        memory errors.

    Returns
    -------
    Point Ids, optimal chunksize based on maxChunksize, and the number of
    points returned in each query from the database.
    '''
    with apsimDbConn:
        # determine chunksize to read at a time
        pointIds = pandas.io.sql.read_frame("SELECT DISTINCT point_id FROM apsimOutput", apsimDbConn)

    # convert to numpy array
    pointIds = np.array(pointIds['point_id'])

    with apsimDbConn:
        # assumes that all apsim simulation points have the same number of
        # data points
        # in cases where the apsim simulation failed to complete, this will
        # cause problems.
        pointDataSize = len(pandas.io.sql.read_frame("SELECT point_id FROM apsimOutput WHERE point_id={}".format(pointIds[0]), apsimDbConn))

    # chunksize is rounded down to a whole number of points so a chunk never
    # splits one point's rows.
    numPoints = int(floor(maxChunksize / pointDataSize))
    chunksize = pointDataSize * numPoints
    #print 'pointDataSize:', pointDataSize
    return pointIds, chunksize, numPoints
def _read_apsim_db(apsimDbConn, start, chunksize):
    '''
    Read apsimData.sqlite database.

    Parameters
    ----------
    apsimDbConn : sqlite connection object
        connection to database
    start : int
        where to start limiting the data returned
    chunksize : int
        size of chunks to read from the database

    Returns
    -------
    A dataframe of daily data.
    '''
    with apsimDbConn:
        # Look up the list of output column names, then select a chunk of
        # those columns from apsimOutput.
        fieldFrame = psql.read_frame("SELECT * FROM outputFields;", apsimDbConn)
        columnList = list(fieldFrame['name'])
        columnCsv = ', '.join(columnList)
        query = "SELECT point_id, {outputFields} FROM apsimOutput LIMIT {start}, {chunksize}".format(outputFields=columnCsv, start=start, chunksize=chunksize)
        dailyData = pandas.io.sql.read_frame(query, apsimDbConn)
    return dailyData
def _apsim_output(apsimDbPath, sowDates):
    '''
    Reads aspim data from the apsim run database.

    Parameters
    ----------
    apsimDbPath : string
        path to apsim database
    sowDates : pandas Series
        dates of sowing for each location in the apsim simulation
        (dd-mmm format)

    Returns
    -------
    Pandas dataframe of yearly apsim output. Variables that have more than one
    value per year (rain, mint, maxt, radn, etc.) are averaged over the growing
    season.
    '''
    # open database
    apsimDbConn = lite.connect(apsimDbPath)

    # get pointIds, numPoints, and chunksize
    print 'Getting database info...'
    pointIds, chunksize, numPoints = _get_db_info(apsimDbConn)
    print 'Number of points per chunk :', numPoints

    # read main data
    start = 0
    apsimData = pandas.DataFrame({})
    print 'point num : point_id'
    for p, pointId in enumerate(pointIds):
        print p+1, ':', pointId
        # set sow date
        sowDate = sowDates.ix[pointId][0]

        # read data in chunks so there will be enough memory
        # (a new chunk is fetched every numPoints points)
        if p % numPoints == 0:
            print 'Reading from database...'
            dailyData = _read_apsim_db(apsimDbConn, start, chunksize)
            #print dailyData.tail()
            start += chunksize

        # set index to date column; keep only this point's rows
        pointDailyData = dailyData[dailyData['point_id'] == pointId]
        pointDailyData = pointDailyData.drop(['point_id'], axis=1)
        pointDailyData = pointDailyData.set_index('date')

        # convert to datetime index
        pointDailyData.index = pandas.to_datetime(pointDailyData.index)

        # get yearly data
        yearlyYieldData = _get_yearly_yield(pointDailyData['yield'])

        # get yearly average data over the growing season
        harvestDates = yearlyYieldData['harvest_date']
        yearlyAvgData = _get_avg_data(apsimDbConn, pointDailyData, harvestDates, sowDate)

        # join yield and avg data, and make pretty
        yearlyData = yearlyYieldData.join(yearlyAvgData)
        yearlyData = yearlyData.reset_index()
        yearlyData = yearlyData.rename(columns={'index':'sow_year'})

        # add pointId column to data
        pointIdSeries = pandas.Series([pointId] * len(yearlyData))
        yearlyData['point_id'] = pointIdSeries
        apsimData = apsimData.append(yearlyData, ignore_index=True)
    return apsimData
def update_apsim_output_table(masterDbConn, runPath, update):
    '''
    Updates the apsimOutput table in the master run database. A run whose
    data may already exist (update == True) is skipped with a warning;
    otherwise the run's yearly data is appended.

    Parameters
    ----------
    masterDbConn : sqlite connection object
        master database to connect to
    runPath : string
        path to the run folder for the apsimData.sqlite database for a
        particular run
    update : bool
        if the database needs to be updated or if it is the first commit for a
        particular run

    Returns
    -------
    Nothing.
    '''
    # get the runId (the run folder name is the numeric id)
    runId = int(os.path.split(runPath)[1])
    # don't do anything if the database is being updated
    if update == True:
        print "*** Warning: Run {} data may already exist. Skipping write.".format(runId)
        return
    # get sow start from parameters table
    sql = "SELECT sow_start FROM runParameters WHERE run_id = {}".format(runId)
    sowStart = psql.read_frame(sql, masterDbConn).ix[0][0]
    # check to see if sow date is auto (determined from lookup table)
    if sowStart == 'auto':
        # read sow start for each location
        sql = "SELECT point_id, sow_start FROM gridPoints"
        sowDates = psql.read_frame(sql, masterDbConn, index_col='point_id')
    else:
        # set sow start the same for each location
        sql = "SELECT point_id FROM gridPoints"
        gridPoints = psql.read_frame(sql, masterDbConn)
        sowDates = pandas.DataFrame([sowStart] * len(gridPoints), index=gridPoints['point_id'])
    # get the run database path
    apsimDbPath = os.path.join(runPath, 'data', 'apsimData.sqlite')
    # read and convert to yearly formatted data
    apsimData = _apsim_output(apsimDbPath, sowDates)
    # add column with runId
    runIdSeries = pandas.Series([runId] * len(apsimData))
    apsimData['run_id'] = runIdSeries
    # write runData to master database
    psql.write_frame(apsimData, 'apsimOutput', masterDbConn, if_exists='append')
def update_output_fields_table(masterDbConn, runPath):
    '''
    Updates the outputFields table in the master run database.

    The table is copied from the run's apsimData.sqlite database; when the
    table already exists in the master database the write is skipped.

    Parameters
    ----------
    masterDbConn : sqlite connection object
        master database to connect to
    runPath : string
        path to the run folder for the apsimData.sqlite database for a
        particular run

    Returns
    -------
    Nothing.
    '''
    # open the per-run database and read its outputFields table
    runDbPath = os.path.join(runPath, 'data', 'apsimData.sqlite')
    runDbConn = lite.connect(runDbPath)
    with runDbConn:
        runOutputFields = psql.read_frame("SELECT * FROM outputFields;", runDbConn)
    with masterDbConn:
        try:
            # copy the fields into the master database
            psql.write_frame(runOutputFields, 'outputFields', masterDbConn)
        except ValueError:
            # table already exists, so there is nothing to do
            pass
def update_masterDb(masterDbPath, gridLutPath, startRun, endRun):
    '''
    Convenience function for updating everything in the master run
    database.

    Parameters
    ----------
    masterDbPath : string
        path to the master run database (../myDocs/runDatabase.sqlite)
    gridLutPath : string
        path to grid lookup table
    startRun : int
        run number to start processing on
    endRun : int
        (optional) run number to stop processing on; inclusive. When None,
        only startRun is processed.

    Returns
    -------
    Nothing.
    '''
    print '---------------------- masterRunDb.py ----------------------'
    print 'A processing script for apsimRegions output from the APSIM'
    print 'crop model. Data is saved to a master run database.'
    print '------------------------------------------------------------'
    # set runs to process
    if endRun == None:
        endRun = startRun # inclusive
    runs = range(startRun, endRun+1)
    # read grid lookup table
    gridLut = pandas.read_csv(gridLutPath)
    # open run database
    # check to see if the file exists. If it doesn't create gridPoints table
    if os.path.isfile(masterDbPath):
        masterDbConn = lite.connect(masterDbPath)
    else:
        # first time opening it
        masterDbConn = lite.connect(masterDbPath)
        # create tables
        create_tables(masterDbConn, gridLut)
    # update database with data from each run
    numRuns = len(runs)
    with masterDbConn:
        for r, run in enumerate(runs):
            # print progress
            print 'Saving run: {0} ({1}/{2})...'.format(run, r+1, numRuns)
            # get paths (run folders live next to the master database file)
            runPath = os.path.join(os.path.split(masterDbPath)[0], str(run))
            configPath = os.path.join(runPath, 'config.ini')
            # update runParameters table
            update = update_run_parameters_table(masterDbConn, configPath)
            # update apsimOutput table
            update_apsim_output_table(masterDbConn, runPath, update)
            # update outputFields table
            update_output_fields_table(masterDbConn, runPath)
    print '\n***** Done! *****'
# Run if module is run as a program
if __name__ == '__main__':
    # example configuration: process only run 1 of the 'example' experiment
    experiment = 'example'
    masterDbPath = 'C:/ExampleProject/output/{exp}/{exp}.sqlite'.format(exp=experiment)
    gridLutPath = 'C:/ExampleProject/lookupTables/exampleLookupTable.csv'
    startRun = 1
    endRun = 1  # inclusive
    update_masterDb(masterDbPath, gridLutPath, startRun, endRun)
| |
# -*- coding:utf-8 -*-
import re, time
from gjqyxyxxcxxt.util._time import get_current_date
def get_url(key, param):
    """Return the tianyancha API url named by *key* with *param* substituted in."""
    templates = {
        'list_url': 'http://www.tianyancha.com/v2/search/%s.json?',
        'czxx_url': 'http://www.tianyancha.com/expanse/holder.json?id=%s&ps=100&pn=1',
        'ryxx_url': 'http://www.tianyancha.com/expanse/staff.json?id=%s&ps=100&pn=1',
        'xzcf_url': 'http://www.tianyancha.com/expanse/punishment.json?name=%s&ps=5&pn=1',
        'bgxx_url': 'http://www.tianyancha.com/expanse/changeinfo.json?id=%s&ps=100&pn=1',
        'ycxx_url': 'http://www.tianyancha.com/expanse/abnormal.json?id=%s&ps=10&pn=1',
        'fzjg_url': 'http://www.tianyancha.com/expanse/branch.json?id=%s&ps=10&pn=1'
    }
    template = templates[key]
    return template % param
def get_conf(conf_name):
    """
    Return a named piece of crawler configuration.

    Parameters
    ----------
    conf_name : str
        one of 'referer_url', 'sgArr', 'public_headers', 'my_headers'

    Returns
    -------
    The configuration value registered under *conf_name*.

    Raises
    ------
    NameError
        for an unknown name (matching the previous eval()-based lookup).
    """
    referer_url = 'http://www.tianyancha.com/company/2313797533'
    # substitution table used to decode the site's obfuscated "utm" value
    sgArr = ["6","b","t","f","l", "5","w","h","q","i","s","e","c","p","m","u","9","8","y","2","z","k","j","r","x","n","-","0","3","4","d","1","a","o","7","v","g"]
    public_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36",
    }
    my_headers = {
        "Tyc-From": "normal",
        "Accept": "application/json, text/plain, */*",
        "Referer": referer_url,
        "CheckError": "check",
        # "Accept-Encoding": "gzip, deflate, sdch",
        # "Accept-Language": "zh-CN,zh;q=0.8",
        # "Connection": "keep-alive",
        # "Host": "www.tianyancha.com",
    }
    # explicit lookup instead of eval(): same results, no code-execution risk
    confs = {
        'referer_url': referer_url,
        'sgArr': sgArr,
        'public_headers': public_headers,
        'my_headers': my_headers,
    }
    try:
        return confs[conf_name]
    except KeyError:
        raise NameError("name '%s' is not defined" % conf_name)
def get_token_and_utm(tongji_page):
    """
    Extract the anti-crawl ``token`` and decoded ``utm`` values from the
    site's statistics response.

    The response carries obfuscated JavaScript as a comma-separated list of
    character codes; it is rebuilt here and mined with regexes, and the utm
    digits are mapped through the ``sgArr`` substitution table.
    """
    # rebuild the JS source from its character codes
    js_code = "".join([chr(int(code)) for code in tongji_page.json()["data"]["v"].split(",")])
    token = re.findall(r"token=(\w+);", js_code)[0]
    # digit string that encodes the utm value
    fxck_chars = re.findall(r"\'([\d\,]+)\'", js_code)[0]
    # number of '4' characters in the raw digit string
    four_count = fxck_chars.count('4')
    fxck_chars = fxck_chars.split(",")
    utm = ''
    for i in fxck_chars:
        # NOTE(review): i is a str after split(','), so `i == 4` can never be
        # true and this branch is dead code — presumably `i == '4'` was meant;
        # confirm against the site's current obfuscation before changing
        if i == 4:
            if four_count > 1:
                utm += '2'
                continue
            else:
                utm += '1'
                continue
        # codes 6..19 are shifted down by 2 before indexing the table
        if 5 < int(i) < 20:
            utm += get_conf('sgArr')[int(i)-2]
        else:
            utm += get_conf('sgArr')[int(i)]
    return token, utm
def html_clear(string):
    # Strip html tags from a string; non-string values pass through unchanged.
    # Python 2 only: relies on the builtin `unicode` type.
    if not isinstance(string, str) and not isinstance(string, unicode):
        return string
    res, count = re.subn('<[^<>]+>', '', string)
    return res
def time_format(dic, key):
    """Normalize dic[key] in place to the first 'YYYY?M?D'-shaped date found in it."""
    value = dic[key]
    if not value:
        return
    match = re.search(r'\d{4}\D\d{1,2}\D\d{1,2}', value)
    if match:
        dic[key] = match.group(0)
def base_dic_init():
    """Return a fresh result record with every expected field pre-initialised."""
    scalar_keys = ('ID', 'UNISCID', 'ENTTYPE', 'OPTO', 'REGORG', 'APPRDATE',
                   'REGNO', 'ORGAN_CODE', 'IXINNUOBM', 'industry')
    list_keys = ('BRANINFO', 'PARTNERCHAGEINFO', 'MORTINFO', 'EQUINFO',
                 'FREEZEINFO', 'LAWINFO', 'EXCEINFO', 'PUNINFO', 'LEGINFO',
                 'PERINFO', 'CHGINFO')
    base_info = {}
    for key in scalar_keys:
        base_info[key] = ''
    for key in list_keys:
        # a distinct list per key, so records never share storage
        base_info[key] = []
    return base_info
def base_info_parse(json_obj):
    """Map the company-detail json onto the flat base_info record."""
    # (record key, json key) pairs, in the original assignment order
    pre_fields = [
        ('company_id', 'id'),
        ('websites', 'websites'),
        ('emails', 'emails'),
        ('province', 'base'),
        ('city', 'city'),
    ]
    post_fields = [
        ('OPSCOPE', 'businessScope'),
        ('DOM', 'regLocation'),
        ('OPFROM', 'estiblishTime'),
        ('ESTDATE', 'estiblishTime'),
        ('ENTNAME', 'name'),
        ('LEREP', 'legalPersonName'),
        ('REGCAP', 'regCapital'),
        ('REGSTATE', 'regStatus'),
    ]
    base_info = base_dic_init()
    for record_key, json_key in pre_fields:
        base_info[record_key] = json_obj.get(json_key, None)
    base_info['crawl_time'] = get_current_date()
    for record_key, json_key in post_fields:
        base_info[record_key] = json_obj.get(json_key, None)
    # strip html markup from every scraped field (crawl_time is generated, not scraped)
    for record_key, _ in pre_fields + post_fields:
        base_info[record_key] = html_clear(base_info[record_key])
    # normalise the date-like fields to YYYY?M?D
    for date_key in ('ESTDATE', 'OPTO', 'OPFROM', 'APPRDATE'):
        time_format(base_info, date_key)
    return base_info
def czxx_parse(json_obj):
    """
    Parse shareholder (holder.json) records into ('LEGINFO', list-of-dicts).

    Each entry carries the investor name/id plus subscribed (PAYINFO) and
    paid-in (REALPAYINFO) capital detail lists.
    """
    LEGINFO = []
    for obj in json_obj['data']['result']:
        czxx_dic = {}
        # fields not provided by this endpoint are left blank
        czxx_dic['BLICTYPE'] = ''
        czxx_dic['FLAG'] = ''
        czxx_dic['INVTYPE'] = ''
        czxx_dic['BLICNO'] = ''
        czxx_dic['INV'] = obj.get('name', None)
        czxx_dic['ID'] = obj.get('id', None)
        czxx_dic['INV'] = html_clear(czxx_dic['INV'])
        czxx_dic['ID'] = html_clear(czxx_dic['ID'])
        my_dic = {}
        my_dic['INV'] = obj.get('name', None)
        my_dic['LISUBCONAM'] = None
        my_dic['LIACCONAM'] = None
        my_dic['PAYINFO'] = []
        my_dic['REALPAYINFO'] = []
        # NOTE(review): debug print with direct indexing — raises KeyError when
        # the item lacks 'capitalActl', unlike the guarded access below;
        # confirm the endpoint always returns that key
        print '====>>>: ', obj['capitalActl']
        if 'capital' in obj and obj['capital']:
            # subscribed-capital entries
            for payinfo in obj['capital']:
                pay_dic = {}
                pay_dic['CONDATE'] = payinfo.get('time', None)
                pay_dic['CONFORM'] = payinfo.get('paymet', None)
                pay_dic['SUBCONAM'] = payinfo.get('amomon', None)
                pay_dic['CONFORM'] = html_clear(pay_dic['CONFORM'])
                pay_dic['SUBCONAM'] = html_clear(pay_dic['SUBCONAM'])
                pay_dic['CONDATE'] = html_clear(pay_dic['CONDATE'])
                my_dic['PAYINFO'].append(pay_dic)
        if 'capitalActl' in obj and obj['capitalActl']:
            # paid-in (actual) capital entries
            for readpayinfo in obj['capitalActl']:
                real_pay_dic = {}
                real_pay_dic['CONDATE'] = readpayinfo.get('time', None)
                real_pay_dic['CONFORM'] = readpayinfo.get('paymet', None)
                real_pay_dic['SUBCONAM'] = readpayinfo.get('amomon', None)
                real_pay_dic['CONFORM'] = html_clear(real_pay_dic['CONFORM'])
                real_pay_dic['SUBCONAM'] = html_clear(real_pay_dic['SUBCONAM'])
                real_pay_dic['CONDATE'] = html_clear(real_pay_dic['CONDATE'])
                my_dic['REALPAYINFO'].append(real_pay_dic)
        czxx_dic['INVDETAIL'] = my_dic
        LEGINFO.append(czxx_dic)
    return 'LEGINFO', LEGINFO
def ryxx_parse(json_obj):
    """Parse key-personnel (staff.json) records into ('PERINFO', list-of-dicts)."""
    PERINFO = []
    for person in json_obj['data']['result']:
        titles = person.get('typeJoin', None)
        # typeJoin may arrive as a list of titles; flatten it to one string
        if isinstance(titles, list):
            titles = ', '.join(titles)
        entry = {
            'NAME': html_clear(person.get('name', None)),
            'POSITION': html_clear(titles),
        }
        PERINFO.append(entry)
    return 'PERINFO', PERINFO
def xzcf_parse(json_obj):
    """Parse administrative-punishment records into ('PUNINFO', list-of-dicts)."""
    # (output key, source key) for each punishment item
    key_map = (
        ('PENDECNO', 'punishNumber'),
        ('ILLEGACTTYPE', 'type'),
        ('PENTYPE', 'PENTYPE'),
        ('PENAM', 'PENAM'),
        ('FORFAM', 'FORFAM'),
        ('content', 'content'),
        ('decisionDate', 'decisionDate'),
        ('PENAUTH', 'departmentName'),
        ('PENDECISSDATE', 'publishDate'),
    )
    PUNINFO = []
    for item in json_obj['data']['items']:
        record = {}
        for out_key, src_key in key_map:
            record[out_key] = item.get(src_key, None)
        PUNINFO.append(record)
    return 'PUNINFO', PUNINFO
def bgxx_parse(json_obj):
    """
    Parse change-record (changeinfo.json) entries into ('CHGINFO', list).

    Strips html, normalises the change date, and removes latin letters and
    line breaks from the before/after content fields.
    """
    CHGINFO = []
    for obj in json_obj['data']['result']:
        bgxx_dic = {}
        bgxx_dic['ALTITEM'] = obj.get('changeItem', None)
        bgxx_dic['ALTITEM'] = html_clear(bgxx_dic['ALTITEM'])
        bgxx_dic['ALTDATE'] = obj.get('changeTime', None)
        bgxx_dic['ALTDATE'] = html_clear(bgxx_dic['ALTDATE'])
        time_format(bgxx_dic, 'ALTDATE')
        bgxx_dic['ALTBE'] = obj.get('contentBefore', None)
        bgxx_dic['ALTBE'] = html_clear(bgxx_dic['ALTBE'])
        if bgxx_dic['ALTBE']:
            # drop latin letters and newlines (ur'' literal: Python 2 only)
            bgxx_dic['ALTBE'], c = re.subn(ur'[A-Za-z\r\n]+', '', bgxx_dic['ALTBE'])
        bgxx_dic['ALTAF'] = obj.get('contentAfter', None)
        bgxx_dic['ALTAF'] = html_clear(bgxx_dic['ALTAF'])
        if bgxx_dic['ALTAF']:
            bgxx_dic['ALTAF'], c = re.subn(ur'[A-Za-z\r\n]+', '', bgxx_dic['ALTAF'])
        CHGINFO.append(bgxx_dic)
    return 'CHGINFO', CHGINFO
def ycxx_parse(json_obj):
    """Parse operating-abnormality (abnormal.json) records into ('EXCEINFO', list)."""
    EXCEINFO = []
    for item in json_obj['data']['result']:
        EXCEINFO.append({
            'SPECAUSE': item.get('putReason', None),
            'ABNTIME': item.get('putDate', None),
            'DECORG': item.get('putDepartment', None),
        })
    return 'EXCEINFO', EXCEINFO
def fzjg_parse(json_obj):
    """Parse branch-office (branch.json) records into ('BRANINFO', list-of-dicts)."""
    passthrough_keys = ('REGNO', 'DOM', 'REGORG', 'UNISCID', 'TEL', 'ADDR',
                        'regCapital', 'category')
    BRANINFO = []
    for branch in json_obj['data']['result']:
        record = {
            'BRNAME': branch.get('name', None),
            'LEREP': branch.get('legalPersonName', None),
            'REGSTATE': branch.get('regStatus', None),
            'ESTDATE': branch.get('estiblishTime', None),
        }
        if record['ESTDATE']:
            # estiblishTime is a millisecond epoch; keep the seconds part and
            # render it as a local-time string
            epoch_seconds = int(str(record['ESTDATE'])[:10])
            record['ESTDATE'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(epoch_seconds))
        for key in passthrough_keys:
            record[key] = branch.get(key, None)
        BRANINFO.append(record)
    return 'BRANINFO', BRANINFO
| |
import re
class Pattern(object):
    """A named token pattern backed by a compiled regular expression."""

    def __init__(self, name, regex):
        self.name = name
        self.pattern = re.compile(regex)

    def match(self, input, index):
        """Try to match this pattern in *input* at *index*; returns a match object or None."""
        return self.pattern.match(input, index)
class Terminal(object):
    """A scanned token: a terminal *name* plus the matched text *value*."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        # keyword-like tokens (name is just the uppercased value) print bare
        if self.name.lower() == self.value:
            return self.name
        return '{0}({1})'.format(self.name, self.value)
class Tree(object):
    """A simple n-ary syntax tree: each node holds a value and child trees."""

    def __init__(self, *values):
        # first positional argument (if any) is the node value, the rest are children
        self.value = values[0] if values else None
        self.children = list(values[1:])

    def add(self, value):
        """Append *value* as a child, wrapping non-Tree values; returns self."""
        child = value if isinstance(value, Tree) else Tree(value)
        self.children.append(child)
        return self

    def __len__(self):
        return len(self.children)

    def isLeaf(self):
        return not self.children

    def toDot(self):
        """Render the tree as a graphviz digraph string."""
        self.label(1)
        return 'digraph ast {%s\n}' % self.doToDot('')

    def label(self, id):
        """Assign pre-order integer ids starting at *id*; returns the next free id."""
        self.id = id
        next_id = id + 1
        for child in self.children:
            next_id = child.label(next_id)
        return next_id

    def doToDot(self, dot):
        # emit this node, then each subtree followed by its connecting edge
        dot = '%s\n%d [label="%s"];' % (dot, self.id, self.value.value)
        for child in self.children:
            dot = child.doToDot(dot)
            dot = '%s\n%d -> %d;' % (dot, self.id, child.id)
        return dot

    def __str__(self):
        if self.isLeaf():
            return self.value.__str__()
        parts = ['(%s)' % self.value]
        parts.extend(str(child) for child in self.children)
        return '%s)' % ' '.join(parts)
class Scanner(object):
    """Tokenizer: produces Terminals from *input* using an ordered Pattern list."""

    def __init__(self, input, patterns):
        self.input = input
        self.index = 0
        self.patterns = patterns
        self.terminal = None          # most recently consumed terminal
        self.lookAhead = self.next()  # single token of lookahead

    def next(self):
        """Scan and return the next Terminal, or None at end of input."""
        # skip whitespace
        while self.index < len(self.input) and self.input[self.index].isspace():
            self.index += 1
        if self.index >= len(self.input):
            return None
        # first pattern that matches at the current position wins
        for pattern in self.patterns:
            m = pattern.match(self.input, self.index)
            if m:
                self.index = m.end()
                return Terminal(pattern.name, m.group())
        raise Exception('Unrecognized input: %s' % (self.input[self.index]))

    def matches(self, *types):
        """If the lookahead is one of *types*, consume it and return True."""
        if self.lookAhead is None:
            return False
        if self.lookAhead.name in types:
            self.terminal = self.lookAhead
            self.lookAhead = self.next()
            return True
        return False

    def expect(self, *types):
        """Consume and return a terminal of one of *types*, or raise."""
        if self.matches(*types):
            return self.terminal
        raise Exception('Expected %s, found %s' % (','.join(types), self.lookAhead))

    def atEnd(self):
        return self.lookAhead is None
class Parser(object):
    """Recursive-descent parser producing a Tree, with table-driven operator precedence."""

    def __init__(self, scanner):
        self.sc = scanner
        # binary operators, lowest precedence level first
        self.prec = [('&&', '||'),
                     ('==', '!=', '>', '<', '>=', '<='),
                     ('+', '-'),
                     ('*', '/', '%')]

    def parse(self):
        """Parse one statement and require that all input is consumed."""
        tree = self.parseStatement()
        if not self.sc.atEnd():
            raise Exception('Unexpected input: %s' % self.sc.terminal)
        return tree

    def parseStatement(self):
        # block: '{' statement* '}'
        if self.sc.matches('{'):
            block = Tree(self.sc.terminal)
            while not self.sc.matches('}'):
                block.add(self.parseStatement())
            return block
        # while: condition then body
        if self.sc.matches('WHILE'):
            return Tree(self.sc.terminal, self.parseExp(), self.parseStatement())
        # break: bare keyword terminated by ';'
        if self.sc.matches('BREAK'):
            node = Tree(self.sc.terminal)
            self.sc.expect(';')
            return node
        # if with optional else
        if self.sc.matches('IF'):
            node = Tree(self.sc.terminal, self.parseExp(), self.parseStatement())
            if self.sc.matches('ELSE'):
                node.add(self.parseStatement())
            return node
        # assignment: ID '=' exp ';'  (a bare ID falls through to consume ';')
        if self.sc.matches('ID'):
            target = self.sc.terminal
            if self.sc.matches('='):
                node = Tree(self.sc.terminal, Tree(target), self.parseExp())
                self.sc.expect(';')
                return node
        self.sc.expect(';')

    def parseExp(self):
        return self.parseHead(0)

    def parseHead(self, level):
        """Parse a left-associative chain of binary operators at *level*."""
        node = self.parseTail(level)
        while self.sc.matches(*self.prec[level]):
            node = Tree(self.sc.terminal, node, self.parseTail(level))
        return node

    def parseTail(self, level):
        # operand of *level* is the next tighter level, or a primary at the bottom
        if level >= len(self.prec) - 1:
            return self.parsePrim()
        return self.parseHead(level + 1)

    def parsePrim(self):
        # parenthesised expression
        if self.sc.matches('('):
            inner = self.parseExp()
            self.sc.expect(')')
            return inner
        # unary minus
        if self.sc.matches('-'):
            return Tree(self.sc.terminal, self.parsePrim())
        return Tree(self.sc.expect('INT', 'ID'))
# demo: build the token patterns, parse a sample program, emit graphviz dot
if __name__ == '__main__':
    patterns = []
    patterns.append(Pattern('INT', r'[0-9]+'))
    patterns.append(Pattern('IF', r'if'))
    patterns.append(Pattern('ELSE', r'else'))
    patterns.append(Pattern('WHILE', r'while'))
    patterns.append(Pattern('BREAK', r'break'))
    patterns.append(Pattern('ID', r'[a-zA-Z][a-zA-Z0-9_]*'))
    patterns.append(Pattern(';', r'\;'))
    patterns.append(Pattern('{', r'\{'))
    patterns.append(Pattern('}', r'\}'))
    patterns.append(Pattern('[', r'\['))
    patterns.append(Pattern(']', r'\]'))
    patterns.append(Pattern('(', r'\('))
    patterns.append(Pattern(')', r'\)'))
    patterns.append(Pattern('+', r'\+'))
    patterns.append(Pattern('-', r'\-'))
    patterns.append(Pattern('*', r'\*'))
    patterns.append(Pattern('/', r'\/'))
    # two-character operators are registered before their one-character prefixes
    patterns.append(Pattern('<=', r'\<\='))
    patterns.append(Pattern('>=', r'\>\='))
    patterns.append(Pattern('==', r'\=\='))
    patterns.append(Pattern('!=', r'\!\='))
    patterns.append(Pattern('&&', r'\&\&'))
    patterns.append(Pattern('||', r'\|\|'))
    patterns.append(Pattern('=', r'\='))
    patterns.append(Pattern('<', r'\<'))
    patterns.append(Pattern('>', r'\>'))
    patterns.append(Pattern('%', r'\%'))
    input = '''
    {
        i = 0;
        while i<10 {
            a = 2*3;
            if i % 1 == 0 {
                a = a + 1;
            } else {
                a = a + 2;
            }
            i = i+1;
        }
    }
    '''
    p = Parser(Scanner(input, patterns))
    dot = p.parse().toDot()
    print(dot)
| |
import copy
import random
from six import iteritems
try:
long()
except Exception:
long = int
from .JediTaskSpec import JediTaskSpec
from . import JediCoreUtils
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# class for input: one chunk of task input plus its site candidates
class InputChunk:
    # default output size: 2 GB plus a 500 MB safety margin for merging, in bytes
    defaultOutputSize = 2500 * 1024 * 1024
def __str__(self):
sb = []
for key in self.__dict__:
sb.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
return ', '.join(sb)
def __repr__(self):
return self.__str__()
# constructor
def __init__(self,taskSpec,masterDataset=None,secondaryDatasetList=[], ramCount=0):
# task spec
self.taskSpec = taskSpec
# the list of secondary datasets
if secondaryDatasetList is None:
self.secondaryDatasetList = []
else:
self.secondaryDatasetList = secondaryDatasetList
# the list of site candidates
self.siteCandidates = {}
# the list of site candidates for jumbo jobs
self.siteCandidatesJumbo = {}
# the name of master index
self.masterIndexName = None
# dataset mapping including indexes of files/events
self.datasetMap = {}
# the master dataset
self.masterDataset = None
self.addMasterDS(masterDataset)
# the list of secondary datasets
self.secondaryDatasetList = []
for secondaryDS in secondaryDatasetList:
self.addSecondaryDS(secondaryDS)
# read in a block
self.readBlock = None
# merging
self.isMerging = False
# use scout
self.useScoutFlag = None
# memory requirements for the inputChunk
self.ramCount = ramCount
# flag to set if inputchunk is empty
self.isEmpty = False
# flag to use jumbo jobs
self.useJumbo = None
# checkpoint of used counters
self.file_checkpoints = {}
# list of bootstrapped sites
self.bootstrapped = set()
# add master dataset
def addMasterDS(self,masterDataset):
if masterDataset is not None:
self.masterDataset = masterDataset
self.masterIndexName = self.masterDataset.datasetID
self.datasetMap[self.masterDataset.datasetID] = {'used':0,'datasetSpec':masterDataset}
# add secondary dataset
def addSecondaryDS(self,secondaryDataset):
if secondaryDataset not in self.secondaryDatasetList:
self.secondaryDatasetList.append(secondaryDataset)
self.datasetMap[secondaryDataset.datasetID] = {'used':0,'datasetSpec':secondaryDataset}
# return list of datasets
def getDatasets(self,includePseudo=False):
dataList = []
if self.masterDataset is not None:
dataList.append(self.masterDataset)
dataList += self.secondaryDatasetList
# ignore pseudo datasets
if not includePseudo:
newDataList = []
for datasetSpec in dataList:
if not datasetSpec.isPseudo():
newDataList.append(datasetSpec)
dataList = newDataList
return dataList
# return dataset with datasetID
def getDatasetWithID(self,datasetID):
if datasetID in self.datasetMap:
return self.datasetMap[datasetID]['datasetSpec']
return None
# return dataset with datasetName
def getDatasetWithName(self,datasetName):
for tmpDatasetID,tmpDatasetVal in iteritems(self.datasetMap):
if tmpDatasetVal['datasetSpec'].datasetName == datasetName:
return tmpDatasetVal['datasetSpec']
return None
# reset used counters
def resetUsedCounters(self):
for tmpKey,tmpVal in iteritems(self.datasetMap):
tmpVal['used'] = 0
# checkpoint file usage
def checkpoint_file_usage(self):
for tmpKey, tmpVal in iteritems(self.datasetMap):
self.file_checkpoints[tmpKey] = tmpVal['used']
# rollback file usage
def rollback_file_usage(self):
for tmpKey, tmpVal in iteritems(self.datasetMap):
tmpVal['used'] = self.file_checkpoints[tmpKey]
# add site candidates
def addSiteCandidate(self,siteCandidateSpec):
self.siteCandidates[siteCandidateSpec.siteName] = siteCandidateSpec
return
# add site candidates
def addSiteCandidateForJumbo(self,siteCandidateSpec):
self.siteCandidatesJumbo[siteCandidateSpec.siteName] = siteCandidateSpec
return
# has candidate for jumbo jobs
def hasCandidatesForJumbo(self):
return len(self.siteCandidatesJumbo) > 0
# get one site candidate randomly
    def getOneSiteCandidate(self, nSubChunks=0, ngSites=None, get_msg=False):
        """
        Randomly pick one site candidate, weighted by site weight.

        Zero-weight sites that were never picked before are "bootstrapped"
        first (uniform random choice among them); otherwise a weighted draw
        is made over the usable sites, skipping excluded, already-bootstrapped
        and full sites.

        :param nSubChunks: number of sub-chunks about to be assigned; added to
            the chosen site's queued/assigned counters
        :param ngSites: site names to exclude (no-good sites)
        :param get_msg: when True, also return a human-readable summary string
        :return: the chosen SiteCandidate or None; a (candidate, msg) pair
            when get_msg is True
        """
        retSiteCandidate = None
        if ngSites is None:
            ngSites = []
        # work on a copy so the caller's list is not modified
        ngSites = copy.copy(ngSites)
        # skip sites for distributed datasets
        """
        for tmpDatasetSpec in self.getDatasets():
            if tmpDatasetSpec.isDistributed():
                datasetUsage = self.datasetMap[tmpDatasetSpec.datasetID]
                if len(tmpDatasetSpec.Files) > datasetUsage['used']:
                    tmpFileSpec = tmpDatasetSpec.Files[datasetUsage['used']]
                    for siteCandidate in self.siteCandidates.values():
                        # skip if the first file is unavailable at the site
                        if not siteCandidate.isAvailableFile(tmpFileSpec):
                            ngSites.append(siteCandidate.siteName)
        """
        # check if to be bootstrapped
        siteCandidateList = list(self.siteCandidates.values())
        newSiteCandidateList = []
        for siteCandidate in siteCandidateList:
            if siteCandidate.weight == 0 and siteCandidate.siteName not in self.bootstrapped:
                newSiteCandidateList.append(siteCandidate)
        if newSiteCandidateList:
            # uniform pick among never-bootstrapped zero-weight sites
            retSiteCandidate = random.choice(newSiteCandidateList)
            self.bootstrapped.add(retSiteCandidate.siteName)
            retMsg = 'toBootstrapped={}'.format(len(newSiteCandidateList))
        else:
            # get total weight
            totalWeight = 0
            nNG = 0
            nOK = 0
            nBoosted = 0
            nFull = 0
            fullStr = ''
            for siteCandidate in siteCandidateList:
                # remove NG sites
                if siteCandidate.siteName in ngSites:
                    nNG += 1
                    continue
                # already bootstrapped
                if siteCandidate.weight == 0 and siteCandidate.siteName in self.bootstrapped:
                    nBoosted += 1
                    continue
                # skip incapable
                if not siteCandidate.can_accept_jobs():
                    nFull += 1
                    fullStr += '{}:{}/{} '.format(siteCandidate.siteName,
                                                  siteCandidate.nQueuedJobs,
                                                  siteCandidate.nRunningJobsCap)
                    continue
                totalWeight += siteCandidate.weight
                newSiteCandidateList.append(siteCandidate)
                nOK += 1
            siteCandidateList = newSiteCandidateList
            if fullStr:
                fullStr = " (skipped {})".format(fullStr[:-1])
            retMsg = 'OK={} NG={} bootstrapped={} Full={}{}'.format(nOK, nNG, nBoosted, nFull,
                                                                   fullStr)
            # empty
            if not siteCandidateList:
                if get_msg:
                    return None, retMsg
                return None
            # weighted random draw
            rNumber = random.random() * totalWeight
            for siteCandidate in siteCandidateList:
                rNumber -= siteCandidate.weight
                if rNumber <= 0:
                    retSiteCandidate = siteCandidate
                    break
            # return something as a protection against precision of float
            if retSiteCandidate is None:
                retSiteCandidate = random.choice(siteCandidateList)
            # modify weight
            try:
                if retSiteCandidate.nQueuedJobs is not None and retSiteCandidate.nAssignedJobs is not None:
                    oldNumQueued = retSiteCandidate.nQueuedJobs
                    retSiteCandidate.nQueuedJobs += nSubChunks
                    newNumQueued = retSiteCandidate.nQueuedJobs
                    retSiteCandidate.nAssignedJobs += nSubChunks
                    # NOTE(review): this rescales `siteCandidate` (the loop
                    # leftover), not `retSiteCandidate` whose counters were
                    # just bumped — looks like a bug; confirm intended target
                    siteCandidate.weight = siteCandidate.weight * float(oldNumQueued+1) / float(newNumQueued+1)
            except Exception:
                pass
        if get_msg:
            return retSiteCandidate, retMsg
        return retSiteCandidate
# get sites for parallel execution
def getParallelSites(self,nSites,nSubChunks,usedSites):
newSiteCandidate = self.getOneSiteCandidate(nSubChunks,usedSites)
if newSiteCandidate is not None:
usedSites.append(newSiteCandidate.siteName)
if nSites > len(usedSites):
return self.getParallelSites(nSites,nSubChunks,usedSites)
return ','.join(usedSites)
# get one site for jumbo jobs
def getOneSiteCandidateForJumbo(self,ngSites):
# get total weight
totalWeight = 0
weightList = []
siteCandidateList = list(self.siteCandidatesJumbo.values())
newSiteCandidateList = []
for siteCandidate in siteCandidateList:
# remove NG sites
if siteCandidate.siteName in ngSites:
continue
totalWeight += siteCandidate.weight
newSiteCandidateList.append(siteCandidate)
siteCandidateList = newSiteCandidateList
# empty
if siteCandidateList == []:
return None
# get random number
rNumber = random.random() * totalWeight
for siteCandidate in siteCandidateList:
rNumber -= siteCandidate.weight
if rNumber <= 0:
retSiteCandidate = siteCandidate
break
# return something as a protection against precision of float
if retSiteCandidate is None:
retSiteCandidate = random.choice(siteCandidateList)
return retSiteCandidate
# check if unused files/events remain
def checkUnused(self):
# master is undefined
if self.masterIndexName is None:
return False
indexVal = self.datasetMap[self.masterIndexName]
return indexVal['used'] < len(indexVal['datasetSpec'].Files)
# get master used index
def getMasterUsedIndex(self):
# master is undefined
if self.masterIndexName is None:
return 0
indexVal = self.datasetMap[self.masterIndexName]
return indexVal['used']
# get num of files in master
def getNumFilesInMaster(self):
# master is undefined
if self.masterIndexName is None:
return 0
indexVal = self.datasetMap[self.masterIndexName]
return len(indexVal['datasetSpec'].Files)
# check if secondary datasets use event ratios
def useEventRatioForSec(self):
for datasetSpec in self.secondaryDatasetList:
if datasetSpec.getEventRatio() is not None:
return True
return False
# get maximum size of atomic subchunk
    def getMaxAtomSize(self,effectiveSize=False,getNumEvents=False):
        """
        Return the size of the largest atomic subchunk this chunk would produce.

        Repeatedly draws subchunks (site-independent) and takes the maximum of
        the per-subchunk total, measured as effective file size, effective
        number of events, or raw file size depending on the flags. All 'used'
        counters are reset to zero afterwards.

        :param effectiveSize: sum effective file sizes of master files
        :param getNumEvents: sum effective numbers of events of master files
        :return: the maximum atomic size found (0 when no subchunk is produced)
        """
        # number of files per job if defined
        if not self.isMerging:
            nFilesPerJob = self.taskSpec.getNumFilesPerJob()
        else:
            nFilesPerJob = self.taskSpec.getNumFilesPerMergeJob()
        nEventsPerJob = None
        if nFilesPerJob is None:
            # number of events per job
            if not self.isMerging:
                nEventsPerJob = self.taskSpec.getNumEventsPerJob()
            else:
                nEventsPerJob = self.taskSpec.getNumEventsPerMergeJob()
            # fall back to one file per job when neither limit is defined
            if nEventsPerJob is None:
                nFilesPerJob = 1
        # grouping with boundaryID
        useBoundary = self.taskSpec.useGroupWithBoundaryID()
        # LB
        respectLB = self.taskSpec.respectLumiblock()
        maxAtomSize = 0
        while True:
            if not self.isMerging:
                maxNumFiles = self.taskSpec.getMaxNumFilesPerJob()
            else:
                maxNumFiles = self.taskSpec.getMaxNumFilesPerMergeJob()
            # get one subchunk (None site: no site-specific selection)
            subChunk = self.getSubChunk(None,nFilesPerJob=nFilesPerJob,
                                        nEventsPerJob=nEventsPerJob,
                                        useBoundary=useBoundary,
                                        respectLB=respectLB,
                                        maxNumFiles=maxNumFiles)
            if subChunk is None:
                break
            # get size
            tmpAtomSize = 0
            for tmpDatasetSpec,tmpFileSpecList in subChunk:
                # only master files count toward effective size / event totals
                if (effectiveSize or getNumEvents) and not tmpDatasetSpec.isMaster():
                    continue
                for tmpFileSpec in tmpFileSpecList:
                    if effectiveSize:
                        tmpAtomSize += JediCoreUtils.getEffectiveFileSize(tmpFileSpec.fsize,tmpFileSpec.startEvent,
                                                                          tmpFileSpec.endEvent,tmpFileSpec.nEvents)
                    elif getNumEvents:
                        tmpAtomSize += tmpFileSpec.getEffectiveNumEvents()
                    else:
                        tmpAtomSize += tmpFileSpec.fsize
            if maxAtomSize < tmpAtomSize:
                maxAtomSize = tmpAtomSize
        # reset counters consumed by the draws above
        self.resetUsedCounters()
        # return
        return maxAtomSize
# use scout
def useScout(self):
if self.masterDataset is not None and self.useScoutFlag is not None:
return self.useScoutFlag
if self.masterDataset is not None and \
self.masterDataset.nFiles > self.masterDataset.nFilesToBeUsed:
return True
return False
# set use scout
def setUseScout(self,useScoutFlag):
self.useScoutFlag = useScoutFlag
# get preassigned site
def getPreassignedSite(self):
if self.masterDataset is not None:
return self.masterDataset.site
return None
# get max output size
def getOutSize(self,outSizeMap):
values = list(outSizeMap.values())
values.sort()
try:
return values[-1]
except Exception:
return 0
# get subchunk with a selection criteria
    def getSubChunk(self,siteName,maxNumFiles=None,maxSize=None,
                    sizeGradients=0,sizeIntercepts=0,
                    nFilesPerJob=None,multiplicand=1,
                    walltimeGradient=0,maxWalltime=0,
                    nEventsPerJob=None,useBoundary=None,
                    sizeGradientsPerInSize=None,
                    maxOutSize=None,
                    coreCount=1,
                    respectLB=False,
                    corePower=None,
                    dynNumEvents=False,
                    maxNumEventRanges=None,
                    multiplicity=None,
                    splitByFields=None,
                    tmpLog=None,
                    useDirectIO=False,
                    maxDiskSize=None):
        """Carve one job-sized subchunk of input files out of this chunk.

        Files are consumed from the master dataset (plus the associated files
        of each secondary dataset) until adding the next bunch of master files
        would exceed one of the per-job limits: number of files / event
        ranges, input size, expected walltime, number of events, output size,
        or disk size.  Splitting also stops when the boundaryID, lumiblock
        number, or split-by-field value changes (when those modes are on), or
        when event continuity would be broken.

        Returns a list of (datasetSpec, [fileSpec, ...]) pairs, or None when
        no unused files/events remain.  When siteName is given, the returned
        file specs are shallow copies with per-site locality set.

        NOTE(review): uses py2-only long(); self.datasetMap 'used' cursors are
        advanced as a side effect.
        """
        # check if there are unused files/events
        if not self.checkUnused():
            return None
        # protection against unreasonable values
        if nFilesPerJob == 0:
            nFilesPerJob = None
        if nEventsPerJob == 0:
            nEventsPerJob = None
        # set default max number of files
        if maxNumFiles is None:
            maxNumFiles = 200
        # set default max number of event ranges
        if maxNumEventRanges is None:
            maxNumEventRanges = 20
        # set default max size
        if maxSize is None and nFilesPerJob is None and nEventsPerJob is None:
            # 20 GB at most by default
            maxSize = 20 * 1024 * 1024 * 1024
        # set default output size
        minOutSize = self.defaultOutputSize
        # set default max number of events
        maxNumEvents = None
        # ignore negative walltime gradient
        if walltimeGradient is None or walltimeGradient < 0:
            walltimeGradient = 0
        # overwrite parameters when nFiles/EventsPerJob is used
        if nFilesPerJob is not None and not dynNumEvents:
            maxNumFiles = nFilesPerJob
            if not respectLB:
                multiplicand = nFilesPerJob
        if nEventsPerJob is not None:
            maxNumEvents = nEventsPerJob
        # split with boundaryID
        splitWithBoundaryID = False
        if useBoundary is not None:
            splitWithBoundaryID = True
            if useBoundary['inSplit'] == 2:
                # unset max values to split only with boundaryID
                maxNumFiles = None
                maxSize = None
                maxWalltime = 0
                maxNumEvents = None
                multiplicand = 1
        # get site when splitting per site
        if siteName is not None:
            siteCandidate = self.siteCandidates[siteName]
        # use event ratios
        useEventRatio = self.useEventRatioForSec()
        # start splitting
        inputNumFiles = 0
        inputNumEvents = 0
        fileSize = 0
        firstLoop = True
        firstMaster = True
        inputFileMap = {}
        expWalltime = 0
        nextStartEvent = None
        boundaryID = None
        newBoundaryID = False
        eventJump = False
        nSecFilesMap = {}
        nSecEventsMap = {}
        numMaster = 0
        outSizeMap = {}
        lumiBlockNr = None
        newLumiBlockNr = False
        siteAvailable = True
        inputFileSet = set()
        fieldStr = None
        diskSize = 0
        # main accumulation loop: each pass adds up to `multiplicand` master
        # files plus their secondary files, then looks ahead to decide whether
        # one more pass would overflow any limit
        while (maxNumFiles is None or (not dynNumEvents and inputNumFiles <= maxNumFiles) or \
               (dynNumEvents and len(inputFileSet) <= maxNumFiles and inputNumFiles <= maxNumEventRanges)) \
              and (maxSize is None or (maxSize is not None and fileSize <= maxSize)) \
              and (maxWalltime is None or maxWalltime <= 0 or expWalltime <= maxWalltime) \
              and (maxNumEvents is None or (maxNumEvents is not None and inputNumEvents <= maxNumEvents)) \
              and (maxOutSize is None or self.getOutSize(outSizeMap) <= maxOutSize) \
              and (maxDiskSize is None or diskSize <= maxDiskSize):
            # get one file (or one file group for MP) from master
            datasetUsage = self.datasetMap[self.masterDataset.datasetID]
            if self.masterDataset.datasetID not in outSizeMap:
                outSizeMap[self.masterDataset.datasetID] = 0
            boundaryIDs = set()
            primaryHasEvents = False
            for tmpFileSpec in self.masterDataset.Files[datasetUsage['used']:datasetUsage['used']+multiplicand]:
                # check start event to keep continuity
                if (maxNumEvents is not None or dynNumEvents) and tmpFileSpec.startEvent is not None:
                    if nextStartEvent is not None and nextStartEvent != tmpFileSpec.startEvent:
                        eventJump = True
                        break
                # check boundaryID
                if splitWithBoundaryID and boundaryID is not None and boundaryID != tmpFileSpec.boundaryID \
                    and useBoundary['inSplit'] != 3:
                    newBoundaryID = True
                    break
                # check LB
                if respectLB and lumiBlockNr is not None and lumiBlockNr != tmpFileSpec.lumiBlockNr:
                    newLumiBlockNr = True
                    break
                # check field
                if splitByFields is not None:
                    tmpFieldStr = tmpFileSpec.extractFieldsStr(splitByFields)
                    if fieldStr is None:
                        fieldStr = tmpFieldStr
                    elif tmpFieldStr != fieldStr:
                        newBoundaryID = True
                        break
                # check for distributed datasets
                #if self.masterDataset.isDistributed() and siteName is not None and \
                #        not siteCandidate.isAvailableFile(tmpFileSpec):
                #    siteAvailable = False
                #    break
                if self.masterDataset.datasetID not in inputFileMap:
                    inputFileMap[self.masterDataset.datasetID] = []
                inputFileMap[self.masterDataset.datasetID].append(tmpFileSpec)
                inputFileSet.add(tmpFileSpec.lfn)
                datasetUsage['used'] += 1
                numMaster += 1
                # get effective file size
                effectiveFsize = JediCoreUtils.getEffectiveFileSize(tmpFileSpec.fsize,tmpFileSpec.startEvent,
                                                                    tmpFileSpec.endEvent,tmpFileSpec.nEvents)
                # get num of events
                effectiveNumEvents = tmpFileSpec.getEffectiveNumEvents()
                # sum
                inputNumFiles += 1
                if self.taskSpec.outputScaleWithEvents():
                    # output size scales with events
                    tmpOutSize = long(sizeGradients * effectiveNumEvents)
                    fileSize += tmpOutSize
                    diskSize += tmpOutSize
                    if not dynNumEvents or tmpFileSpec.lfn not in inputFileSet:
                        fileSize += long(tmpFileSpec.fsize)
                        if not useDirectIO:
                            diskSize += long(tmpFileSpec.fsize)
                    outSizeMap[self.masterDataset.datasetID] += long(sizeGradients * effectiveNumEvents)
                else:
                    # output size scales with input size
                    tmpOutSize = long(sizeGradients * effectiveFsize)
                    fileSize += tmpOutSize
                    diskSize += tmpOutSize
                    if not dynNumEvents or tmpFileSpec.lfn not in inputFileSet:
                        fileSize += long(tmpFileSpec.fsize)
                        if not useDirectIO:
                            diskSize += long(tmpFileSpec.fsize)
                    outSizeMap[self.masterDataset.datasetID] += long(sizeGradients * effectiveFsize)
                if sizeGradientsPerInSize is not None:
                    tmpOutSize = long(effectiveFsize * sizeGradientsPerInSize)
                    fileSize += tmpOutSize
                    diskSize += tmpOutSize
                    outSizeMap[self.masterDataset.datasetID] += long(effectiveFsize * sizeGradientsPerInSize)
                # sum offset only for the first master
                if firstMaster:
                    fileSize += sizeIntercepts
                # walltime
                if self.taskSpec.useHS06():
                    if firstMaster:
                        expWalltime += self.taskSpec.baseWalltime
                    tmpExpWalltime = walltimeGradient * effectiveNumEvents / float(coreCount)
                    if corePower not in [None,0]:
                        tmpExpWalltime /= corePower
                    if self.taskSpec.cpuEfficiency == 0:
                        tmpExpWalltime = 0
                    else:
                        tmpExpWalltime /= float(self.taskSpec.cpuEfficiency)/100.0
                    if multiplicity is not None:
                        tmpExpWalltime /= float(multiplicity)
                    expWalltime += long(tmpExpWalltime)
                else:
                    tmpExpWalltime = walltimeGradient * effectiveFsize / float(coreCount)
                    if multiplicity is not None:
                        tmpExpWalltime /= float(multiplicity)
                    expWalltime += long(tmpExpWalltime)
                # the number of events
                if (maxNumEvents is not None or useEventRatio) and tmpFileSpec.startEvent is not None and tmpFileSpec.endEvent is not None:
                    primaryHasEvents = True
                    inputNumEvents += (tmpFileSpec.endEvent - tmpFileSpec.startEvent + 1)
                    # set next start event
                    nextStartEvent = tmpFileSpec.endEvent + 1
                    if nextStartEvent == tmpFileSpec.nEvents:
                        nextStartEvent = 0
                # boundaryID
                if splitWithBoundaryID:
                    boundaryID = tmpFileSpec.boundaryID
                    if boundaryID not in boundaryIDs:
                        boundaryIDs.add(boundaryID)
                # LB
                if respectLB:
                    lumiBlockNr = tmpFileSpec.lumiBlockNr
                firstMaster = False
            # get files from secondaries
            firstSecondary = True
            for datasetSpec in self.secondaryDatasetList:
                if datasetSpec.datasetID not in outSizeMap:
                    outSizeMap[datasetSpec.datasetID] = 0
                if datasetSpec.isNoSplit():
                    # every job uses dataset without splitting
                    if firstLoop:
                        datasetUsage = self.datasetMap[datasetSpec.datasetID]
                        for tmpFileSpec in datasetSpec.Files:
                            if datasetSpec.datasetID not in inputFileMap:
                                inputFileMap[datasetSpec.datasetID] = []
                            inputFileMap[datasetSpec.datasetID].append(tmpFileSpec)
                            # sum
                            fileSize += tmpFileSpec.fsize
                            if not useDirectIO:
                                diskSize += tmpFileSpec.fsize
                            if sizeGradientsPerInSize is not None:
                                tmpOutSize = (tmpFileSpec.fsize * sizeGradientsPerInSize)
                                fileSize += tmpOutSize
                                diskSize += tmpOutSize
                                outSizeMap[datasetSpec.datasetID] += (tmpFileSpec.fsize * sizeGradientsPerInSize)
                            datasetUsage['used'] += 1
                else:
                    if datasetSpec.datasetID not in nSecFilesMap:
                        nSecFilesMap[datasetSpec.datasetID] = 0
                    # get number of files to be used for the secondary
                    nSecondary = datasetSpec.getNumFilesPerJob()
                    if nSecondary is not None and firstLoop is False:
                        # read files only in the first bunch when number of files per job is specified
                        continue
                    if nSecondary is None:
                        nSecondary = datasetSpec.getNumMultByRatio(numMaster) - nSecFilesMap[datasetSpec.datasetID]
                        if (datasetSpec.getEventRatio() is not None and inputNumEvents > 0) or (splitWithBoundaryID and useBoundary['inSplit'] != 3):
                            # set large number to get all associated secondary files
                            nSecondary = 10000
                    datasetUsage = self.datasetMap[datasetSpec.datasetID]
                    # reset nUsed
                    if datasetSpec.isReusable() and datasetUsage['used']+nSecondary > len(datasetSpec.Files):
                        datasetUsage['used'] = 0
                    for tmpFileSpec in datasetSpec.Files[datasetUsage['used']:datasetUsage['used']+nSecondary]:
                        # check boundaryID
                        if (splitWithBoundaryID or (useBoundary is not None and useBoundary['inSplit'] == 3 and datasetSpec.getRatioToMaster() > 1)) \
                            and boundaryID is not None and \
                            not (boundaryID == tmpFileSpec.boundaryID or tmpFileSpec.boundaryID in boundaryIDs):
                            break
                        # check for distributed datasets
                        #if datasetSpec.isDistributed() and siteName is not None and \
                        #        not siteCandidate.isAvailableFile(tmpFileSpec):
                        #    break
                        # check ratio
                        if datasetSpec.datasetID not in nSecEventsMap:
                            nSecEventsMap[datasetSpec.datasetID] = 0
                        if datasetSpec.getEventRatio() is not None and inputNumEvents > 0:
                            if float(nSecEventsMap[datasetSpec.datasetID]) / float(inputNumEvents) >= datasetSpec.getEventRatio():
                                break
                        if datasetSpec.datasetID not in inputFileMap:
                            inputFileMap[datasetSpec.datasetID] = []
                        inputFileMap[datasetSpec.datasetID].append(tmpFileSpec)
                        # sum
                        fileSize += tmpFileSpec.fsize
                        if not useDirectIO:
                            diskSize += tmpFileSpec.fsize
                        if sizeGradientsPerInSize is not None:
                            tmpOutSize = (tmpFileSpec.fsize * sizeGradientsPerInSize)
                            fileSize += tmpOutSize
                            diskSize += tmpOutSize
                            outSizeMap[datasetSpec.datasetID] += (tmpFileSpec.fsize * sizeGradientsPerInSize)
                        datasetUsage['used'] += 1
                        nSecFilesMap[datasetSpec.datasetID] += 1
                        # the number of events
                        if firstSecondary and maxNumEvents is not None and not primaryHasEvents:
                            if tmpFileSpec.startEvent is not None and tmpFileSpec.endEvent is not None:
                                inputNumEvents += (tmpFileSpec.endEvent - tmpFileSpec.startEvent + 1)
                            elif tmpFileSpec.nEvents is not None:
                                inputNumEvents += tmpFileSpec.nEvents
                        if tmpFileSpec.nEvents is not None:
                            nSecEventsMap[datasetSpec.datasetID] += tmpFileSpec.nEvents
                    # use only the first secondary
                    firstSecondary = False
            # unset first loop flag
            firstLoop = False
            # check if there are unused files/events
            if not self.checkUnused():
                break
            # break if nFilesPerJob is used as multiplicand
            if nFilesPerJob is not None and not respectLB:
                break
            # boundaryID is changed
            if newBoundaryID:
                break
            # LB is changed
            if newLumiBlockNr:
                break
            # event jump
            if eventJump:
                break
            # distributed files are unavailable
            if not siteAvailable:
                break
            primaryHasEvents = False
            # check master in the next loop: simulate one more pass on "new*"
            # shadow counters without consuming files, to decide termination
            datasetUsage = self.datasetMap[self.masterDataset.datasetID]
            newInputNumFiles = inputNumFiles
            newInputNumEvents = inputNumEvents
            newFileSize = fileSize
            newExpWalltime = expWalltime
            newNextStartEvent = nextStartEvent
            newNumMaster = numMaster
            terminateFlag = False
            newOutSizeMap = copy.copy(outSizeMap)
            newBoundaryIDs = set()
            newInputFileSet = copy.copy(inputFileSet)
            newDiskSize = diskSize
            if self.masterDataset.datasetID not in newOutSizeMap:
                newOutSizeMap[self.masterDataset.datasetID] = 0
            for tmpFileSpec in self.masterDataset.Files[datasetUsage['used']:datasetUsage['used']+multiplicand]:
                # check continuity of event
                if maxNumEvents is not None and tmpFileSpec.startEvent is not None and tmpFileSpec.endEvent is not None:
                    primaryHasEvents = True
                    newInputNumEvents += (tmpFileSpec.endEvent - tmpFileSpec.startEvent + 1)
                    # continuity of event is broken
                    if newNextStartEvent is not None and newNextStartEvent != tmpFileSpec.startEvent:
                        # no files in the next loop
                        if newInputNumFiles == 0:
                            terminateFlag = True
                        break
                    newNextStartEvent = tmpFileSpec.endEvent + 1
                # check boundary
                if splitWithBoundaryID and boundaryID is not None and boundaryID != tmpFileSpec.boundaryID \
                    and useBoundary['inSplit'] != 3:
                    # no files in the next loop
                    if newInputNumFiles == 0:
                        terminateFlag = True
                    break
                # check LB
                if respectLB and lumiBlockNr is not None and lumiBlockNr != tmpFileSpec.lumiBlockNr:
                    # no files in the next loop
                    if newInputNumFiles == 0:
                        terminateFlag = True
                    break
                # check field
                if splitByFields is not None:
                    tmpFieldStr = tmpFileSpec.extractFieldsStr(splitByFields)
                    if tmpFieldStr != fieldStr:
                        # no files in the next loop
                        if newInputNumFiles == 0:
                            terminateFlag = True
                        break
                # check for distributed datasets
                #if self.masterDataset.isDistributed() and siteName is not None and \
                #        not siteCandidate.isAvailableFile(tmpFileSpec):
                #    # no files in the next loop
                #    if newInputNumFiles == 0:
                #        terminateFlag = True
                #    break
                # get effective file size
                effectiveFsize = JediCoreUtils.getEffectiveFileSize(tmpFileSpec.fsize,tmpFileSpec.startEvent,
                                                                    tmpFileSpec.endEvent,tmpFileSpec.nEvents)
                # get num of events
                effectiveNumEvents = tmpFileSpec.getEffectiveNumEvents()
                newInputNumFiles += 1
                newNumMaster += 1
                newInputFileSet.add(tmpFileSpec.lfn)
                if self.taskSpec.outputScaleWithEvents():
                    tmpOutSize = long(sizeGradients * effectiveNumEvents)
                    newFileSize += tmpOutSize
                    newDiskSize += tmpOutSize
                    if not dynNumEvents or tmpFileSpec.lfn not in inputFileSet:
                        newFileSize += long(tmpFileSpec.fsize)
                        if not useDirectIO:
                            newDiskSize += long(tmpFileSpec.fsize)
                    newOutSizeMap[self.masterDataset.datasetID] += long(sizeGradients * effectiveNumEvents)
                else:
                    tmpOutSize = long(sizeGradients * effectiveFsize)
                    newFileSize += tmpOutSize
                    newDiskSize += tmpOutSize
                    if not dynNumEvents or tmpFileSpec.lfn not in inputFileSet:
                        newFileSize += long(tmpFileSpec.fsize)
                        if not useDirectIO:
                            newDiskSize += long(tmpFileSpec.fsize)
                    newOutSizeMap[self.masterDataset.datasetID] += long(sizeGradients * effectiveFsize)
                if sizeGradientsPerInSize is not None:
                    tmpOutSize = long(effectiveFsize * sizeGradientsPerInSize)
                    newFileSize += tmpOutSize
                    newDiskSize += tmpOutSize
                    newOutSizeMap[self.masterDataset.datasetID] += long(effectiveFsize * sizeGradientsPerInSize)
                if self.taskSpec.useHS06():
                    tmpExpWalltime = walltimeGradient * effectiveNumEvents / float(coreCount)
                    if corePower not in [None,0]:
                        tmpExpWalltime /= corePower
                    if self.taskSpec.cpuEfficiency == 0:
                        tmpExpWalltime = 0
                    else:
                        tmpExpWalltime /= float(self.taskSpec.cpuEfficiency)/100.0
                    if multiplicity is not None:
                        tmpExpWalltime /= float(multiplicity)
                    newExpWalltime += long(tmpExpWalltime)
                else:
                    tmpExpWalltime = walltimeGradient * effectiveFsize / float(coreCount)
                    if multiplicity is not None:
                        tmpExpWalltime /= float(multiplicity)
                    newExpWalltime += long(tmpExpWalltime)
                # boundaryID
                if splitWithBoundaryID:
                    newBoundaryIDs.add(tmpFileSpec.boundaryID)
            # check secondaries
            firstSecondary = True
            for datasetSpec in self.secondaryDatasetList:
                if datasetSpec.datasetID not in newOutSizeMap:
                    newOutSizeMap[datasetSpec.datasetID] = 0
                if not datasetSpec.isNoSplit() and datasetSpec.getNumFilesPerJob() is None:
                    # check boundaryID
                    if splitWithBoundaryID and boundaryID is not None and boundaryID != tmpFileSpec.boundaryID \
                        and useBoundary['inSplit'] != 3:
                        break
                    # NOTE(review): newNumSecondary is computed here but the slice
                    # below still uses nSecondary from the main loop — confirm intentional
                    newNumSecondary = datasetSpec.getNumMultByRatio(newNumMaster) - nSecFilesMap[datasetSpec.datasetID]
                    datasetUsage = self.datasetMap[datasetSpec.datasetID]
                    for tmpFileSpec in datasetSpec.Files[datasetUsage['used']:datasetUsage['used']+nSecondary]:
                        # check boundaryID
                        if splitWithBoundaryID and boundaryID is not None and boundaryID != tmpFileSpec.boundaryID \
                            and tmpFileSpec.boundaryID not in boundaryIDs and tmpFileSpec.boundaryID not in newBoundaryIDs:
                            break
                        newFileSize += tmpFileSpec.fsize
                        if not useDirectIO:
                            newDiskSize += tmpFileSpec.fsize
                        if sizeGradientsPerInSize is not None:
                            tmpOutSize = (tmpFileSpec.fsize * sizeGradientsPerInSize)
                            newFileSize += tmpOutSize
                            newDiskSize += tmpOutSize
                            newOutSizeMap[datasetSpec.datasetID] += (tmpFileSpec.fsize * sizeGradientsPerInSize)
                        # the number of events
                        if firstSecondary and maxNumEvents is not None and not primaryHasEvents:
                            if tmpFileSpec.startEvent is not None and tmpFileSpec.endEvent is not None:
                                newInputNumEvents += (tmpFileSpec.endEvent - tmpFileSpec.startEvent + 1)
                            elif tmpFileSpec.nEvents is not None:
                                newInputNumEvents += tmpFileSpec.nEvents
                    firstSecondary = False
            # termination
            if terminateFlag:
                break
            # check whether one more pass would exceed any limit
            newOutSize = self.getOutSize(newOutSizeMap)
            if (maxNumFiles is not None and ((not dynNumEvents and newInputNumFiles > maxNumFiles) \
                or (dynNumEvents and (len(newInputFileSet) > maxNumFiles or newInputNumFiles > maxNumEventRanges)))) \
                or (maxSize is not None and newFileSize > maxSize) \
                or (maxSize is not None and newOutSize < minOutSize and maxSize-minOutSize < newFileSize-newOutSize) \
                or (maxWalltime > 0 and newExpWalltime > maxWalltime) \
                or (maxNumEvents is not None and newInputNumEvents > maxNumEvents) \
                or (maxOutSize is not None and self.getOutSize(newOutSizeMap) > maxOutSize) \
                or (maxDiskSize is not None and newDiskSize > maxDiskSize):
                break
        # reset nUsed for repeated datasets
        for tmpDatasetID,datasetUsage in iteritems(self.datasetMap):
            tmpDatasetSpec = datasetUsage['datasetSpec']
            if tmpDatasetSpec.isRepeated():
                if len(tmpDatasetSpec.Files) > 0:
                    datasetUsage['used'] %= len(tmpDatasetSpec.Files)
        # make copy to return
        returnList = []
        for tmpDatasetID,inputFileList in iteritems(inputFileMap):
            tmpRetList = []
            for tmpFileSpec in inputFileList:
                # split per site or get atomic subchunk
                if siteName is not None:
                    # make copy to individually set locality
                    newFileSpec = copy.copy(tmpFileSpec)
                    # set locality
                    newFileSpec.locality = siteCandidate.getFileLocality(tmpFileSpec)
                    if newFileSpec.locality == 'remote':
                        newFileSpec.sourceName = siteCandidate.remoteSource
                    # append
                    tmpRetList.append(newFileSpec)
                else:
                    # getting atomic subchunk
                    tmpRetList.append(tmpFileSpec)
            # add to return map
            tmpDatasetSpec = self.getDatasetWithID(tmpDatasetID)
            returnList.append((tmpDatasetSpec,tmpRetList))
        # return
        return returnList
# check if master is mutable
def isMutableMaster(self):
if self.masterDataset is not None and self.masterDataset.state == 'mutable':
return True
return False
# figure out if output will go through express stream
def isExpress(self):
if self.taskSpec.processingType == 'urgent' or self.taskSpec.currentPriority > 1000:
return True
return False
# skip unavailable files in distributed datasets
def skipUnavailableFiles(self):
# skip files if no candiate have them
nSkip = 0
for tmpDatasetSpec in self.getDatasets():
if tmpDatasetSpec.isDistributed():
datasetUsage = self.datasetMap[tmpDatasetSpec.datasetID]
while len(tmpDatasetSpec.Files) > datasetUsage['used']:
tmpFileSpec = tmpDatasetSpec.Files[datasetUsage['used']]
isOK = False
for siteCandidate in self.siteCandidates.values():
if siteCandidate.isAvailableFile(tmpFileSpec):
isOK = True
break
if isOK:
break
# skip and check the next
datasetUsage['used'] += 1
nSkip += 1
return nSkip
# get max ramCount
def getMaxRamCount(self):
if self.isMerging:
return max(self.taskSpec.mergeRamCount, self.ramCount) if self.taskSpec.mergeRamCount else self.ramCount
else:
return max(self.taskSpec.ramCount, self.ramCount) if self.taskSpec.ramCount else self.ramCount
# get site candidate
def getSiteCandidate(self, name):
if name in self.siteCandidates:
return self.siteCandidates[name]
return None
# get list of candidate names
def get_candidate_names(self):
return list(self.siteCandidates.keys())
# update number of queued jobs
def update_n_queue(self, live_counter):
sites = []
for siteCandidate in self.siteCandidates.values():
if live_counter is not None:
n = live_counter.get(siteCandidate.siteName)
if n > 0:
siteCandidate.nQueuedJobs += n
sites.append(siteCandidate.siteName)
return ','.join(sites)
| |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from openmdao.api import Problem
from wakeexchange.OptimizationGroups import OptAEP
from wakeexchange.gauss import gauss_wrapper, add_gauss_params_IndepVarComps
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
# Compare the Gaussian and FLORIS wake models on a two-turbine layout against
# SOWFA reference data (yaw sweep, cross-wind position sweep, and downstream /
# cross-stream velocity profiles), then plot the four comparisons.
# NOTE(review): Python 2 script (print statements); needs the wakeexchange and
# openmdao packages and ../data/YawPosResults.mat.
if __name__ == "__main__":
    # turbine and rotor constants (NREL 5MW-like rotor)
    nTurbines = 2
    nDirections = 1
    rotorDiameter = 126.4
    rotorArea = np.pi*rotorDiameter*rotorDiameter/4.0
    axialInduction = 1.0/3.0
    CP = 0.7737/0.944 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    # CP =0.768 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    CT = 4.0*axialInduction*(1.0-axialInduction)
    generator_efficiency = 0.944
    # Define turbine characteristics (per-turbine arrays)
    axialInduction = np.array([axialInduction, axialInduction])
    rotorDiameter = np.array([rotorDiameter, rotorDiameter])
    generatorEfficiency = np.array([generator_efficiency, generator_efficiency])
    yaw = np.array([0., 0.])
    # Define site measurements
    wind_direction = 270.-0.523599*180./np.pi
    wind_speed = 8.    # m/s
    air_density = 1.1716
    Ct = np.array([CT, CT])
    Cp = np.array([CP, CP])
    # build one AEP problem per wake model so the same inputs can be replayed
    gauss_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                     wake_model=gauss_wrapper, wake_model_options={'nSamples': 0}, datasize=0,
                                     params_IdepVar_func=add_gauss_params_IndepVarComps,
                                     params_IndepVar_args={}))
    floris_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                      wake_model=floris_wrapper, wake_model_options=None, datasize=0,
                                      params_IdepVar_func=add_floris_params_IndepVarComps,
                                      params_IndepVar_args={}))
    probs = [gauss_prob, floris_prob]
    # common setup: identical layout and flow conditions for both models
    for prob in probs:
        prob.setup()
        turbineX = np.array([1118.1, 1881.9])
        turbineY = np.array([1279.5, 1720.5])
        prob['turbineX'] = turbineX
        prob['turbineY'] = turbineY
        prob['rotorDiameter'] = rotorDiameter
        prob['axialInduction'] = axialInduction
        prob['generatorEfficiency'] = generatorEfficiency
        prob['air_density'] = air_density
        prob['Cp_in'] = Cp
        prob['Ct_in'] = Ct
        prob['windSpeeds'] = np.array([wind_speed])
        prob['windDirections'] = np.array([wind_direction])
    # alternative Gaussian-model tuning sets kept for reference
    # gauss_prob['model_params:ke'] = 0.052
    # gauss_prob['model_params:spread_angle'] = 6.
    # gauss_prob['model_params:rotation_offset_angle'] = 2.0
    # for axialInd calc only
    # gauss_prob['model_params:ke'] = 0.050688
    # gauss_prob['model_params:spread_angle'] = 7.562716
    # gauss_prob['model_params:rotation_offset_angle'] = 3.336568
    # for axialInd and inflow adjust
    # gauss_prob['model_params:ke'] = 0.052333
    # gauss_prob['model_params:spread_angle'] = 8.111330
    # gauss_prob['model_params:rotation_offset_angle'] = 2.770265
    # for inflow adjust only
    # gauss_prob['model_params:ke'] = 0.052230
    # gauss_prob['model_params:spread_angle'] = 6.368191
    # gauss_prob['model_params:rotation_offset_angle'] = 1.855112
    # for added n_st_dev param #1
    # gauss_prob['model_params:ke'] = 0.050755
    # gauss_prob['model_params:spread_angle'] = 11.205766#*0.97
    # gauss_prob['model_params:rotation_offset_angle'] = 3.651790
    # gauss_prob['model_params:n_std_dev'] = 9.304371
    # for added n_st_dev param #2
    # gauss_prob['model_params:ke'] = 0.051010
    # gauss_prob['model_params:spread_angle'] = 11.779591
    # gauss_prob['model_params:rotation_offset_angle'] = 3.564547
    # gauss_prob['model_params:n_std_dev'] = 9.575505
    # for decoupled ky with n_std_dev = 4
    # gauss_prob['model_params:ke'] = 0.051145
    # gauss_prob['model_params:spread_angle'] = 2.617982
    # gauss_prob['model_params:rotation_offset_angle'] = 3.616082
    # gauss_prob['model_params:ky'] = 0.211496
    # for integrating for decoupled ky with n_std_dev = 4, linear, integrating
    # gauss_prob['model_params:ke'] = 0.016969
    # gauss_prob['model_params:spread_angle'] = 0.655430
    # gauss_prob['model_params:rotation_offset_angle'] = 3.615754
    # gauss_prob['model_params:ky'] = 0.195392
    # for integrating for decoupled ky with n_std_dev = 4, linear, integrating
    # gauss_prob['model_params:ke'] = 0.008858
    # gauss_prob['model_params:spread_angle'] = 0.000000
    # gauss_prob['model_params:rotation_offset_angle'] = 4.035276
    # gauss_prob['model_params:ky'] = 0.199385
    # for decoupled ke with n_std_dev=4, linear, not integrating
    # gauss_prob['model_params:ke'] = 0.051190
    # gauss_prob['model_params:spread_angle'] = 2.619202
    # gauss_prob['model_params:rotation_offset_angle'] = 3.629337
    # gauss_prob['model_params:ky'] = 0.211567
    # active tuning: decoupled ky with n_std_dev = 4, error = 1332.49,
    # not integrating, power law
    gauss_prob['model_params:ke'] = 0.051360
    gauss_prob['model_params:rotation_offset_angle'] = 3.197348
    gauss_prob['model_params:Dw0'] = 1.804024
    gauss_prob['model_params:m'] = 0.0
    # for decoupled ky with n_std_dev = 4, error = 1630.8, with integrating, power law
    # gauss_prob['model_params:ke'] = 0.033165
    # gauss_prob['model_params:rotation_offset_angle'] = 3.328051
    # gauss_prob['model_params:Dw0'] = 1.708328
    # gauss_prob['model_params:m'] = 0.0
    # for decoupled ky with n_std_dev = 4, error = 1140.59, not integrating, power law for expansion,
    # linear for yaw
    # gauss_prob['model_params:ke'] = 0.050741
    # gauss_prob['model_params:rotation_offset_angle'] = 3.628737
    # gauss_prob['model_params:Dw0'] = 0.846582
    # gauss_prob['model_params:ky'] = 0.207734
    # for decoupled ky with n_std_dev = 4, error = 1058.73, integrating, power law for expansion,
    # linear for yaw
    # gauss_prob['model_params:ke'] = 0.016129
    # gauss_prob['model_params:rotation_offset_angle'] = 3.644356
    # gauss_prob['model_params:Dw0'] = 0.602132
    # gauss_prob['model_params:ky'] = 0.191178
    gauss_prob['model_params:integrate'] = False
    gauss_prob['model_params:spread_mode'] = 'power'
    gauss_prob['model_params:n_std_dev'] = 4
    # SOWFA reference data for the yaw/position sweeps
    ICOWESdata = loadmat('../data/YawPosResults.mat')
    yawrange = ICOWESdata['yaw'][0]
    GaussianPower = list()
    FlorisPower = list()
    # quick timing comparison of the two models (100 evaluations each)
    import time
    t1 = time.time()
    for i in range(0, 100):
        gauss_prob.run()
    t2 = time.time()
    for i in range(0, 100):
        floris_prob.run()
    t3 = time.time()
    # gauss time: 0.0580031871796
    # floris time: 0.10697388649
    print 'gauss time: ', t2-t1
    print 'floris time: ', t3-t2
    # quit()
    # sweep the upstream turbine's yaw angle and record per-turbine power
    for yaw1 in yawrange:
        for prob in probs:
            prob['yaw0'] = np.array([yaw1, 0.0])
            prob.run()
        GaussianPower.append(list(gauss_prob['wtPower0']))
        FlorisPower.append(list(floris_prob['wtPower0']))
    GaussianPower = np.array(GaussianPower)
    FlorisPower = np.array(FlorisPower)
    # print FlorisPower
    SOWFApower = np.array([ICOWESdata['yawPowerT1'][0], ICOWESdata['yawPowerT2'][0]]).transpose()/1000.
    fig, axes = plt.subplots(ncols=2, nrows=2, sharey=False)
    power_scalar = 1E-3
    # top-left: power vs yaw for both models against SOWFA
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 0]*power_scalar, 'b', yawrange.transpose(), SOWFApower[:, 0]*power_scalar, 'o', mec='b', mfc='none')
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 1]*power_scalar, 'b', yawrange.transpose(), SOWFApower[:, 1]*power_scalar, '^', mec='b', mfc='none')
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 0]*power_scalar+FlorisPower[:, 1]*power_scalar, '-k', yawrange.transpose(), SOWFApower[:, 0]*power_scalar
                                                                                                             + SOWFApower[:, 1]*power_scalar, 'ko')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 0]*power_scalar, '--r')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 1]*power_scalar, '--r')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 0]*power_scalar+GaussianPower[:, 1]*power_scalar, '--k')
    axes[0, 0].set_xlabel('yaw angle (deg.)')
    axes[0, 0].set_ylabel('Power (MW)')
    # error_turbine2 = np.sum(np.abs(FLORISpower[:, 1] - SOWFApower[:, 1]))
    # sweep the downstream turbine's cross-wind position (zero yaw)
    posrange = ICOWESdata['pos'][0]
    for prob in probs:
        prob['yaw0'] = np.array([0.0, 0.0])
    GaussianPower = list()
    FlorisPower = list()
    for pos2 in posrange:
        # Define turbine locations and orientation: rotate the offset into
        # the inflow frame before applying it to the base layout
        effUdXY = 0.523599
        Xinit = np.array([1118.1, 1881.9])
        Yinit = np.array([1279.5, 1720.5])
        XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
                                                         [np.sin(effUdXY), np.cos(effUdXY)]]),
                                               np.array([[0., 0], [0, pos2]]))
        for prob in probs:
            prob['turbineX'] = XY[0, :]
            prob['turbineY'] = XY[1, :]
            prob.run()
        GaussianPower.append(list(gauss_prob['wtPower0']))
        FlorisPower.append(list(floris_prob['wtPower0']))
    GaussianPower = np.array(GaussianPower)
    FlorisPower = np.array(FlorisPower)
    SOWFApower = np.array([ICOWESdata['posPowerT1'][0], ICOWESdata['posPowerT2'][0]]).transpose()/1000.
    # print error_turbine2
    # top-right: power vs cross-wind offset (normalized by rotor diameter)
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 0]*power_scalar, 'b', posrange/rotorDiameter[0], SOWFApower[:, 0]*power_scalar, 'o', mec='b', mfc='none')
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 1]*power_scalar, 'b', posrange/rotorDiameter[0], SOWFApower[:, 1]*power_scalar, '^', mec='b', mfc='none')
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 0]*power_scalar+FlorisPower[:, 1]*power_scalar, 'k-', posrange/rotorDiameter[0], SOWFApower[:, 0]*power_scalar+SOWFApower[:, 1]*power_scalar, 'ko')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 0]*power_scalar, '--r')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 1]*power_scalar, '--r')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 0]*power_scalar+GaussianPower[:, 1]*power_scalar, '--k')
    axes[0, 1].set_xlabel('y/D')
    axes[0, 1].set_ylabel('Power (MW)')
    # cross-stream velocity profile 7D downstream of the upstream turbine
    posrange = np.linspace(-3.*rotorDiameter[0], 3.*rotorDiameter[0], num=1000)
    for prob in probs:
        prob['yaw0'] = np.array([0.0, 0.0])
        prob['windDirections'] = np.array([270.])
        prob['turbineX'] = np.array([0, 7.*rotorDiameter[0]])
    GaussianVelocity = list()
    FlorisVelocity = list()
    for pos2 in posrange:
        for prob in probs:
            prob['turbineY'] = np.array([0, pos2])
            prob.run()
        GaussianVelocity.append(list(gauss_prob['wtVelocity0']))
        FlorisVelocity.append(list(floris_prob['wtVelocity0']))
    FlorisVelocity = np.array(FlorisVelocity)
    GaussianVelocity = np.array(GaussianVelocity)
    axes[1, 0].plot(posrange/rotorDiameter[0], FlorisVelocity[:, 1], 'b', label='Floris')
    axes[1, 0].plot(posrange/rotorDiameter[0], GaussianVelocity[:, 1], '--r', label='Gaussian')
    axes[1, 0].set_ylim([6.0, 8.5])
    axes[1, 0].set_xlim([-3.0, 3.0])
    axes[1, 0].set_xlabel('y/D')
    axes[1, 0].set_ylabel('Velocity (m/s)')
    # plt.legend()
    # plt.show()
    # downstream velocity recovery along the wake centerline
    posrange = np.linspace(-1.*rotorDiameter[0], 30.*rotorDiameter[0], num=2000)
    for prob in probs:
        prob['turbineY'] = np.array([0, 0])
    GaussianVelocity = list()
    FlorisVelocity = list()
    for pos2 in posrange:
        for prob in probs:
            prob['turbineX'] = np.array([0, pos2])
            prob.run()
        GaussianVelocity.append(list(gauss_prob['wtVelocity0']))
        FlorisVelocity.append(list(floris_prob['wtVelocity0']))
    FlorisVelocity = np.array(FlorisVelocity)
    GaussianVelocity = np.array(GaussianVelocity)
    axes[1, 1].plot(posrange/rotorDiameter[0], FlorisVelocity[:, 1], 'b', label='Floris')
    axes[1, 1].plot(posrange/rotorDiameter[0], GaussianVelocity[:, 1], '--r', label='Gaussian')
    axes[1, 1].plot(np.array([7.0, 7.0]), np.array([0.0, 9.0]), ':k', label='Tuning Point')
    plt.xlabel('x/D')
    plt.ylabel('Velocity (m/s)')
    plt.legend(loc=4)
    plt.show()
| |
# -*- coding: utf-8 -*-
"""
weppy.dal.models
----------------
Provides model layer for weppy's dal.
:copyright: (c) 2015 by Giovanni Barillari
:license: BSD, see LICENSE for more details.
"""
from collections import OrderedDict
from .._compat import iteritems, with_metaclass
from .apis import computation, virtualfield, fieldmethod
from .base import Field, _Field, sdict
from .helpers import HasOneWrap, HasManyWrap, HasManyViaWrap, \
VirtualWrap, Callback, make_tablename
class MetaModel(type):
    """Metaclass for Model subclasses.

    Collects the fields, virtual fields, computations and callbacks declared
    in the class body and in every base class (walking the MRO) into the
    ``_declared_*_`` class attributes, and drains the relation registries of
    ``belongs_to`` / ``has_one`` / ``has_many`` into ``_belongs_ref_`` /
    ``_hasone_ref_`` / ``_hasmany_ref_``.
    """
    def __new__(cls, name, bases, attrs):
        new_class = type.__new__(cls, name, bases, attrs)
        # the root Model class itself gets no processing
        if bases == (object,):
            return new_class
        #: collect declared attributes
        current_fields = []
        current_vfields = []
        computations = {}
        callbacks = {}
        for key, value in list(attrs.items()):
            if isinstance(value, Field):
                current_fields.append((key, value))
            elif isinstance(value, virtualfield):
                current_vfields.append((key, value))
            elif isinstance(value, computation):
                computations[key] = value
            elif isinstance(value, Callback):
                callbacks[key] = value
        #: get super declared attributes
        declared_fields = OrderedDict()
        declared_vfields = OrderedDict()
        super_relations = sdict(
            _belongs_ref_=[], _hasone_ref_=[], _hasmany_ref_=[]
        )
        declared_computations = {}
        declared_callbacks = {}
        # walk the MRO from the most distant base down so that subclass
        # declarations override those of their bases
        for base in reversed(new_class.__mro__[1:]):
            #: collect fields from base class
            if hasattr(base, '_declared_fields_'):
                declared_fields.update(base._declared_fields_)
            #: collect relations from base class
            for key in list(super_relations):
                if hasattr(base, key):
                    super_relations[key] += getattr(base, key)
            #: collect virtuals from base class
            if hasattr(base, '_declared_virtuals_'):
                declared_vfields.update(base._declared_virtuals_)
            #: collect computations from base class
            if hasattr(base, '_declared_computations_'):
                declared_computations.update(base._declared_computations_)
            #: collect callbacks from base class
            if hasattr(base, '_declared_callbacks_'):
                declared_callbacks.update(base._declared_callbacks_)
        #: set fields with correct order (by declaration order counter)
        current_fields.sort(key=lambda x: x[1]._inst_count_)
        declared_fields.update(current_fields)
        new_class._declared_fields_ = declared_fields
        #: set relations references binding
        # NOTE(review): belongs_to/has_one/has_many accumulate references in
        # module-level registries that are drained (and reset) here per class
        from .apis import belongs_to, has_one, has_many
        items = []
        for item in belongs_to._references_.values():
            items += item.reference
        new_class._belongs_ref_ = super_relations._belongs_ref_ + items
        belongs_to._references_ = {}
        items = []
        for item in has_one._references_.values():
            items += item.reference
        new_class._hasone_ref_ = super_relations._hasone_ref_ + items
        has_one._references_ = {}
        items = []
        for item in has_many._references_.values():
            items += item.reference
        new_class._hasmany_ref_ = super_relations._hasmany_ref_ + items
        has_many._references_ = {}
        #: set virtual fields with correct order
        current_vfields.sort(key=lambda x: x[1]._inst_count_)
        declared_vfields.update(current_vfields)
        new_class._declared_virtuals_ = declared_vfields
        #: set computations
        declared_computations.update(computations)
        new_class._declared_computations_ = declared_computations
        #: set callbacks
        declared_callbacks.update(callbacks)
        new_class._declared_callbacks_ = declared_callbacks
        return new_class
class Model(with_metaclass(MetaModel)):
    """Base class for database models.

    Subclasses declare fields, relations, virtual attributes,
    computations and callbacks at class level; ``MetaModel`` collects
    the declarations and the ``_define_*`` methods below translate them
    into pydal table artifacts when the model is bound to a database.
    """
    #: injected when the model is registered on a database instance
    db = None
    table = None
    #sign_table = False
    #: per-field configuration dicts, merged with the parent models'
    #  dicts by __getsuperprops at instantiation time
    auto_validation = True
    validation = {}
    default_values = {}
    update_values = {}
    repr_values = {}
    form_labels = {}
    form_info = {}
    form_rw = {}
    form_widgets = {}
    @property
    def config(self):
        """Shortcut to the application configuration of the bound db."""
        return self.db.config
    @classmethod
    def __getsuperprops(cls):
        """Merge parent-model per-field configuration into this class.

        Runs once per class (guarded by the ``_supermodel<Name>`` marker
        attribute) and recurses up the base chain so that the nearest
        subclass wins on key conflicts.
        """
        superattr = "_supermodel" + cls.__name__
        if hasattr(cls, superattr):
            return
        supermodel = cls.__base__
        try:
            supermodel.__getsuperprops()
            setattr(cls, superattr, supermodel)
        except:
            # NOTE(review): bare except presumably guards the
            # AttributeError raised when the base is not a Model --
            # TODO confirm and narrow.
            setattr(cls, superattr, None)
        sup = getattr(cls, superattr)
        if not sup:
            return
        if cls.tablename == getattr(sup, 'tablename', None):
            # Do not share the parent model's table name.
            cls.tablename = make_tablename(cls.__name__)
        #: get super model fields' properties
        proplist = ['validation', 'default_values', 'update_values',
                    'repr_values', 'form_labels', 'form_info', 'form_rw',
                    'form_widgets']
        for prop in proplist:
            superprops = getattr(sup, prop)
            props = {}
            for k, v in superprops.items():
                props[k] = v
            # subclass entries override the parent's
            for k, v in getattr(cls, prop).items():
                props[k] = v
            setattr(cls, prop, props)
    def __new__(cls):
        # Derive a table name from the class name when not given.
        if not getattr(cls, 'tablename', None):
            cls.tablename = make_tablename(cls.__name__)
        cls.__getsuperprops()
        return super(Model, cls).__new__(cls)
    def __init__(self):
        # Fall back to the db-wide migration setting unless the
        # subclass set its own `migrate` attribute.
        if not hasattr(self, 'migrate'):
            self.migrate = self.config.get('migrate', self.db._migrate)
        if not hasattr(self, 'format'):
            self.format = None
    def __parse_relation(self, item, singular=False):
        """Split a relation declaration into (reference, refname).

        *item* is either a string (attribute name; capitalized to get
        the referenced model name) or a one-entry dict mapping the
        attribute name to the target. With ``singular``, the trailing
        character (an 's' in has_many-style plurals) is dropped from
        string-derived references.
        """
        if isinstance(item, dict):
            refname = list(item)[0]
            reference = item[refname]
        else:
            reference = item.capitalize()
            refname = item
            if singular:
                reference = reference[:-1]
        return reference, refname
    def _define_props_(self):
        """Build this model's pydal Field objects from declarations."""
        #: create pydal's Field elements
        self.fields = []
        for name, obj in iteritems(self._declared_fields_):
            if obj.modelname is not None:
                # The declared Field already belongs to another model:
                # clone it so each model owns its own instance.
                obj = Field(*obj._args, **obj._kwargs)
                setattr(self.__class__, name, obj)
            self.fields.append(obj._make_field(name, self))
    def _define_relations_(self):
        """Translate belongs_to/has_one/has_many declarations into
        reference fields and virtual-field wrappers."""
        self._virtual_relations_ = OrderedDict()
        bad_args_error = "belongs_to, has_one and has_many only accept " + \
            "strings or dicts as arguments"
        #: belongs_to are mapped with 'reference' type Field
        belongs_references = {}
        if hasattr(self, '_belongs_ref_'):
            for item in getattr(self, '_belongs_ref_'):
                if not isinstance(item, (str, dict)):
                    raise RuntimeError(bad_args_error)
                reference, refname = self.__parse_relation(item)
                tablename = self.db[reference]._tablename
                setattr(self.__class__, refname, Field('reference '+tablename))
                self.fields.append(
                    getattr(self, refname)._make_field(refname, self)
                )
                belongs_references[reference] = refname
        setattr(self.__class__, '_belongs_ref_', belongs_references)
        #: has_one are mapped with virtualfield()
        if hasattr(self, '_hasone_ref_'):
            for item in getattr(self, '_hasone_ref_'):
                if not isinstance(item, (str, dict)):
                    raise RuntimeError(bad_args_error)
                reference, refname = self.__parse_relation(item)
                self._virtual_relations_[refname] = \
                    virtualfield(refname)(HasOneWrap(reference))
            delattr(self.__class__, '_hasone_ref_')
        #: has_many are mapped with virtualfield()
        hasmany_references = {}
        if hasattr(self, '_hasmany_ref_'):
            for item in getattr(self, '_hasmany_ref_'):
                if not isinstance(item, (str, dict)):
                    raise RuntimeError(bad_args_error)
                reference, refname = self.__parse_relation(item, True)
                rclass = via = None
                if isinstance(reference, dict):
                    rclass = reference.get('class')
                    via = reference.get('via')
                if via is not None:
                    #: maps has_many({'things': {'via': 'otherthings'}})
                    self._virtual_relations_[refname] = virtualfield(refname)(
                        HasManyViaWrap(refname, via)
                    )
                else:
                    #: maps has_many('things'),
                    # has_many({'things': 'othername'})
                    # has_many({'things': {'class': 'Model'}})
                    if rclass is not None:
                        reference = rclass
                    self._virtual_relations_[refname] = virtualfield(refname)(
                        HasManyWrap(reference)
                    )
                hasmany_references[refname] = reference
        setattr(self.__class__, '_hasmany_ref_', hasmany_references)
        return
    def _define_virtuals_(self):
        """Attach virtual fields and field methods to the field list."""
        err = 'virtualfield or fieldmethod cannot have same name as an' + \
            'existent field!'
        field_names = [field.name for field in self.fields]
        for attr in ['_virtual_relations_', '_declared_virtuals_']:
            for name, obj in iteritems(getattr(self, attr, {})):
                if obj.field_name in field_names:
                    raise RuntimeError(err)
                if isinstance(obj, fieldmethod):
                    f = _Field.Method(obj.field_name, VirtualWrap(self, obj))
                else:
                    f = _Field.Virtual(obj.field_name, VirtualWrap(self, obj))
                self.fields.append(f)
    def _define_(self):
        """Run all definition steps against the bound pydal table."""
        #if self.sign_table:
        #    from .tools import Auth
        #    fakeauth = Auth(DAL(None))
        #    self.fields.extend([fakeauth.signature])
        self.__define_validation()
        self.__define_defaults()
        self.__define_updates()
        self.__define_representation()
        self.__define_computations()
        self.__define_actions()
        self.__define_form_utils()
        self.setup()
    def __define_validation(self):
        """Apply the class-level `validation` mapping to each field."""
        for field in self.fields:
            if isinstance(field, (_Field.Method, _Field.Virtual)):
                continue
            validation = self.validation.get(field.name, {})
            if isinstance(validation, dict):
                for key in list(validation):
                    field._requires[key] = validation[key]
            elif isinstance(validation, list):
                field._custom_requires += validation
            else:
                field._custom_requires.append(validation)
            field._parse_validation()
    def __define_defaults(self):
        """Copy `default_values` onto the table's field defaults."""
        for field, value in self.default_values.items():
            self.table[field].default = value
    def __define_updates(self):
        """Copy `update_values` onto the table's field update values."""
        for field, value in self.update_values.items():
            self.table[field].update = value
    def __define_representation(self):
        """Copy `repr_values` onto the table's field representers."""
        for field, value in self.repr_values.items():
            self.table[field].represent = value
    def __define_computations(self):
        """Bind declared computations to their target fields."""
        err = 'computations should have the name of an existing field to ' +\
            'compute!'
        field_names = [field.name for field in self.fields]
        for name, obj in iteritems(self._declared_computations_):
            if obj.field_name not in field_names:
                raise RuntimeError(err)
            # TODO add check virtuals
            # obj/self are bound as lambda defaults so every field keeps
            # its own computation (late-binding closure pitfall).
            self.table[obj.field_name].compute = \
                lambda row, obj=obj, self=self: obj.f(self, row)
    def __define_actions(self):
        """Register declared callbacks on the pydal table hooks."""
        for name, obj in iteritems(self._declared_callbacks_):
            for t in obj.t:
                if t in ["_before_insert", "_before_delete", "_after_delete"]:
                    # hooks that receive a single argument
                    getattr(self.table, t).append(
                        lambda a, obj=obj, self=self: obj.f(self, a)
                    )
                else:
                    # hooks that receive two arguments
                    getattr(self.table, t).append(
                        lambda a, b, obj=obj, self=self: obj.f(self, a, b))
    def __define_form_utils(self):
        """Apply form labels, comments, read/write flags and widgets."""
        #: labels
        for field, value in self.form_labels.items():
            self.table[field].label = value
        #: info
        for field, value in self.form_info.items():
            self.table[field].comment = value
        #: rw
        try:
            self.table.is_active.writable = self.table.is_active.readable = \
                False
        except:
            # NOTE(review): tables without an `is_active` field land
            # here -- presumably an AttributeError; TODO narrow.
            pass
        for field, value in self.form_rw.items():
            # A scalar applies to both flags; a pair is (writable, readable).
            if isinstance(value, (tuple, list)):
                writable, readable = value
            else:
                writable = value
                readable = value
            self.table[field].writable = writable
            self.table[field].readable = readable
        #: widgets
        for field, value in self.form_widgets.items():
            self.table[field].widget = value
    def setup(self):
        """Hook for subclasses; called after the table is defined."""
        pass
    #@classmethod
    #def new(cls, **kwargs):
    #    return Row(**kwargs)
    @classmethod
    def create(cls, *args, **kwargs):
        """Validate and insert a new record.

        Accepts keyword values, or a single dict-like positional
        argument whose entries are merged into the keywords.
        """
        #rv = sdict(id=None)
        #vals = sdict()
        #errors = sdict()
        if args:
            if isinstance(args[0], (dict, sdict)):
                for key in list(args[0]):
                    kwargs[key] = args[0][key]
        #for field in cls.table.fields:
        #    value = kwargs.get(field)
        #    vals[field], error = cls.table[field].validate(value)
        #    if error:
        #        errors[field] = error
        #if not errors:
        #    rv.id = cls.table.insert(**vals)
        #rv.errors = errors
        #return rv
        return cls.table.validate_and_insert(**kwargs)
    @classmethod
    def validate(cls, row):
        """Return a dict of per-field validation errors for *row*."""
        row = sdict(row)
        errors = sdict()
        for field in cls.table.fields:
            value = row.get(field)
            rv, error = cls.table[field].validate(value)
            if error:
                errors[field] = error
        return errors
    @classmethod
    def form(cls, record=None, **kwargs):
        """Build a DALForm bound to this model's table."""
        from ..forms import DALForm
        return DALForm(cls.table, record, **kwargs)
| |
# -*- coding: utf-8 -*-
__author__ = 'anewbigging'
from rwslib.extras.rwscmd import data_scrambler
from rwslib.extras.rwscmd.odmutils import E_ODM, A_ODM
import unittest
import datetime
from lxml import etree
from six import string_types
class TestDuckTyping(unittest.TestCase):
    """Verify typeof_rave_data classifies raw values into Rave types."""
    def setUp(self):
        # Expected classification for each sample value, grouped by
        # target type for readability.
        int_samples = {
            0: 'int', 1: 'int', -1: 'int', '1': 'int',
            '9999': 'int', '16': 'int',
        }
        float_samples = {'1.0': 'float', 1.1: 'float'}
        string_samples = {
            'a': 'string',
            '10 31 2016': 'string',  # TODO: check if this is valid Rave date format
            'MAR': 'string',
        }
        date_samples = {
            '10 MAR 2016': 'date', 'MAR 2016': 'date', '2016': 'date',
            '10 03 2016': 'date', '03 2016': 'date',
            '10/MAR/2016': 'date', 'MAR/2016': 'date',
            '10/03/2016': 'date', '03/2016': 'date',
        }
        time_samples = {
            '20:45:23': 'time', '20:45': 'time',
            '10:45:23': 'time', '10:45': 'time',
            '10:45:23 AM': 'time', '10:45 PM': 'time',
        }
        self.values = {}
        for group in (int_samples, float_samples, string_samples,
                      date_samples, time_samples):
            self.values.update(group)
    def test_ducktype(self):
        """Test duck typing integers"""
        for sample, expected in self.values.items():
            detected, _ = data_scrambler.typeof_rave_data(sample)
            self.assertEqual(expected, detected,
                             msg='{0} should be of type {1} not {2}'.format(sample, expected, detected))
class TestBasicScrambling(unittest.TestCase):
    """Exercise the individual Scramble helpers for each data type."""
    def setUp(self):
        self.scr = data_scrambler.Scramble()
    def test_scramble_int(self):
        """Test scrambling integers"""
        i = self.scr.scramble_int(5)
        # Round-tripping through int() proves it is a plain digit string.
        self.assertEqual(i, str(int(i)))
    def test_scramble_float(self):
        """Test scrambling floats"""
        i = self.scr.scramble_float(5, 2)
        self.assertIsInstance(float(i), float)
        i = self.scr.scramble_float(5, 0)
        self.assertIsInstance(float(i), float)
    def test_scramble_strings(self):
        """Test scrambling strings"""
        i = self.scr.scramble_string(4)
        self.assertEqual(len(i), 4)
        i = self.scr.scramble_string(200)
        self.assertIsInstance(i, string_types)
    def test_scramble_date(self):
        """Test scrambling dates"""
        dt = self.scr.scramble_date('10 MAR 2016')
        self.assertTrue(datetime.datetime.strptime(dt, '%d %b %Y'))
        dt = self.scr.scramble_date('MAR 2016', '%b %Y')
        self.assertTrue(datetime.datetime.strptime(dt, '%b %Y'))
    def test_scramble_time(self):
        """Test scrambling times"""
        dt = self.scr.scramble_time('18:12:14')
        self.assertTrue(datetime.datetime.strptime(dt, '%H:%M:%S'))
        # BUG FIX: the original call was scramble_time('%H:%M'), passing
        # the *format string* as the value to scramble and no format at
        # all. Mirror test_scramble_date above: pass a real time value
        # together with its non-default format.
        dt = self.scr.scramble_time('18:12', '%H:%M')
        self.assertTrue(datetime.datetime.strptime(dt, '%H:%M'))
class TestDuckTypeScrambling(unittest.TestCase):
    """Scramble raw values relying on duck-typed type detection."""
    def setUp(self):
        self.scr = data_scrambler.Scramble()
    def test_scramble_int(self):
        """Test scrambling integers"""
        scrambled = self.scr.scramble_value('12345')
        # A clean digit string survives an int() round trip unchanged.
        self.assertEqual(scrambled, str(int(scrambled)))
    def test_scramble_float(self):
        """Test scrambling floats"""
        for raw in ('123.45', '12345'):
            scrambled = self.scr.scramble_value(raw)
            self.assertIsInstance(float(scrambled), float)
    def test_scramble_strings(self):
        """Test scrambling strings"""
        short = 'asdf'
        scrambled = self.scr.scramble_value(short)
        self.assertEqual(len(short), len(scrambled))
        self.assertNotEqual(short, scrambled)
        long_text = 'This is a large string to test scrambling of large strings'
        self.assertNotEqual(long_text, self.scr.scramble_value(long_text))
    def test_scramble_date(self):
        """Test scrambling dates"""
        for raw, fmt in (('10 MAR 2016', '%d %b %Y'), ('MAR 2016', '%b %Y')):
            self.assertTrue(
                datetime.datetime.strptime(self.scr.scramble_value(raw), fmt))
    def test_scramble_time(self):
        """Test scrambling times"""
        for raw, fmt in (('18:12:14', '%H:%M:%S'), ('18:12', '%H:%M')):
            self.assertTrue(
                datetime.datetime.strptime(self.scr.scramble_value(raw), fmt))
class TestScramblingWithMetadata(unittest.TestCase):
    """Scramble values driven by an ODM metadata definition (date, time,
    integer, long-text and codelist items) instead of duck typing."""
    def setUp(self):
        # Minimal study metadata covering each supported control type.
        metadata = """
    <ODM FileType="Snapshot" Granularity="Metadata" CreationDateTime="2016-02-29T13:47:23.654-00:00" FileOID="d460fc96-4f08-445f-89b1-0182e8e810c1" ODMVersion="1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" xmlns="http://www.cdisc.org/ns/odm/v1.3">
        <Study OID="Test">
            <GlobalVariables>
                <StudyName>Test</StudyName>
                <StudyDescription></StudyDescription>
                <ProtocolName>Test</ProtocolName>
            </GlobalVariables>
            <BasicDefinitions/>
            <MetaDataVersion OID="1" Name="Metadata version 1">
                <ItemDef OID="VSDT" Name="VSDT" DataType="date" mdsol:DateTimeFormat="dd MMM yyyy" mdsol:VariableOID="VSDT" mdsol:Active="Yes" mdsol:ControlType="DateTime" mdsol:SourceDocument="Yes" mdsol:SASLabel="Visit Date" mdsol:QueryFutureDate="Yes" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" mdsol:CanSetStudyEventDate="Yes" />
                <ItemDef OID="TIME" Name="TIME" DataType="time" mdsol:DateTimeFormat="HH:nn:ss" mdsol:VariableOID="TIME" mdsol:Active="Yes" mdsol:ControlType="DateTime" mdsol:SourceDocument="Yes" mdsol:SASLabel="Time" mdsol:QueryFutureDate="Yes" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" mdsol:CanSetStudyEventDate="No" />
                <ItemDef OID="VSNUM" Name="VSNUM" DataType="integer" mdsol:VariableOID="VSNUM" Length="2" mdsol:Active="Yes" mdsol:ControlType="DropDownList" mdsol:SourceDocument="Yes" mdsol:SASLabel="Follow Up Visit" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" />
                <ItemDef OID="SAE" Name="SAE" DataType="text" mdsol:VariableOID="SAE" Length="200" mdsol:Active="Yes" mdsol:ControlType="LongText" mdsol:SourceDocument="Yes" mdsol:SASLabel="eSAE Desription" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes" />
                <ItemDef OID="YN" Name="YN" DataType="integer" mdsol:VariableOID="YN" Length="1" mdsol:Active="Yes" mdsol:ControlType="DropDownList" mdsol:SourceDocument="Yes" mdsol:SASLabel="Subject Received Dose" mdsol:Visible="Yes" mdsol:QueryNonConformance="Yes">
                    <CodeListRef CodeListOID="YES_NO_UNKNOWN" />
                </ItemDef>
                <CodeList OID="YES_NO_UNKNOWN" Name="YES_NO_UNKNOWN" DataType="integer">
                    <CodeListItem CodedValue="0" mdsol:OrderNumber="1" />
                    <CodeListItem CodedValue="1" mdsol:OrderNumber="2" />
                    <CodeListItem CodedValue="97" mdsol:OrderNumber="3" />
                </CodeList>
            </MetaDataVersion>
        </Study>
    </ODM>
        """
        self.scr = data_scrambler.Scramble(metadata=metadata)
    def test_scramble_item_data(self):
        """Test scrambling using metadata"""
        dt = self.scr.scramble_itemdata('VSDT', '')
        self.assertTrue(datetime.datetime.strptime(dt, '%d %b %Y'))
        tm = self.scr.scramble_itemdata('TIME', '')
        self.assertTrue(datetime.datetime.strptime(tm, '%H:%M:%S'))
        st = self.scr.scramble_itemdata('SAE', '')
        self.assertNotEqual(str, st)
        # Codelist items must come back as one of the coded values.
        cd = self.scr.scramble_itemdata('YN', '')
        self.assertIn(cd, ['0','1','97'])
    def test_fill_empty(self):
        """Test filling empty values in ODM document"""
        odm = """
    <ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="c3f15f2d-eb69-42e6-bed4-811bff27ebf9" CreationDateTime="2016-03-02T09:27:14.000-00:00">
        <ClinicalData StudyOID="Test(Prod)" MetaDataVersionOID="1">
            <SubjectData SubjectKey="9e15f698-327e-4e9c-8ed5-8be9b27b67b0" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="0100-90005">
                <SiteRef LocationOID="0100"/>
                <StudyEventData StudyEventOID="SCRN">
                    <FormData FormOID="FORM1" FormRepeatKey="1">
                        <ItemGroupData ItemGroupOID="FORM1">
                            <ItemData ItemOID="YN" Value=""/>
                        </ItemGroupData>
                    </FormData>
                </StudyEventData>
            </SubjectData>
        </ClinicalData>
    </ODM>
        """
        output = etree.fromstring(self.scr.fill_empty(None, odm))
        # The empty YN item must be filled with one of its coded values.
        path = ".//{0}[@{1}='{2}']".format(E_ODM.ITEM_DATA.value, A_ODM.ITEM_OID.value, 'YN')
        elem = output.find(path)
        self.assertIn(elem.get(A_ODM.VALUE.value), ['0', '1', '97'])
    def test_fill_empty_remove_values(self):
        """Test that already-populated items (and their now-empty
        ancestors) are stripped from the ODM document"""
        odm = """
    <ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="c3f15f2d-eb69-42e6-bed4-811bff27ebf9" CreationDateTime="2016-03-02T09:27:14.000-00:00">
        <ClinicalData StudyOID="Test(Prod)" MetaDataVersionOID="1">
            <SubjectData SubjectKey="9e15f698-327e-4e9c-8ed5-8be9b27b67b0" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="0100-90005">
                <SiteRef LocationOID="0100"/>
                <StudyEventData StudyEventOID="SCRN">
                    <FormData FormOID="FORM1" FormRepeatKey="1">
                        <ItemGroupData ItemGroupOID="FORM1">
                            <ItemData ItemOID="YN" Value="1"/>
                        </ItemGroupData>
                    </FormData>
                </StudyEventData>
            </SubjectData>
        </ClinicalData>
    </ODM>
        """
        output = etree.fromstring(self.scr.fill_empty(None, odm))
        for el in [E_ODM.ITEM_DATA, E_ODM.ITEM_GROUP_DATA, E_ODM.FORM_DATA, E_ODM.STUDY_EVENT_DATA]:
            path = ".//{0}".format(el.value)
            self.assertIsNone(output.find(path))
    def test_fill_empty_remove_values_ny(self):
        """Test filling empty values with caller-supplied fixed values
        keyed by item OID"""
        odm = """
    <ODM xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata" ODMVersion="1.3" FileType="Transactional" FileOID="c3f15f2d-eb69-42e6-bed4-811bff27ebf9" CreationDateTime="2016-03-02T09:27:14.000-00:00">
        <ClinicalData StudyOID="Test(Prod)" MetaDataVersionOID="1">
            <SubjectData SubjectKey="9e15f698-327e-4e9c-8ed5-8be9b27b67b0" mdsol:SubjectKeyType="SubjectUUID" mdsol:SubjectName="0100-90005">
                <SiteRef LocationOID="0100"/>
                <StudyEventData StudyEventOID="SCRN">
                    <FormData FormOID="FORM1" FormRepeatKey="1">
                        <ItemGroupData ItemGroupOID="FORM1">
                            <ItemData ItemOID="YN" Value=""/>
                        </ItemGroupData>
                    </FormData>
                </StudyEventData>
            </SubjectData>
        </ClinicalData>
    </ODM>
        """
        fixed_values = {}
        fixed_values['YN'] = '3'
        output = etree.fromstring(self.scr.fill_empty(fixed_values, odm))
        # The fixed value wins over the codelist-driven scrambling.
        path = ".//{0}[@{1}='{2}']".format(E_ODM.ITEM_DATA.value, A_ODM.ITEM_OID.value, 'YN')
        elem = output.find(path)
        self.assertEqual(elem.get(A_ODM.VALUE.value), '3')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
import re
import datetime
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.questioner import MigrationQuestioner
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None):
"""
Main entry point to produce a list of appliable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes()
changes = self.arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
    def _detect_changes(self):
        """
        Returns a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.
        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.
        """
        # We'll store migrations as lists by app names for now
        self.migrations = {}
        old_apps = self.from_state.render()
        new_apps = self.to_state.render()
        # Prepare lists of old/new model keys that we care about
        # (i.e. ignoring proxy ones)
        old_model_keys = []
        for al, mn in self.from_state.models.keys():
            model = old_apps.get_model(al, mn)
            if not model._meta.proxy and model._meta.managed:
                old_model_keys.append((al, mn))
        new_model_keys = []
        for al, mn in self.to_state.models.keys():
            model = new_apps.get_model(al, mn)
            if not model._meta.proxy and model._meta.managed:
                new_model_keys.append((al, mn))
        def _rel_agnostic_fields_def(fields):
            """
            Return a definition of the fields that ignores field names and
            what related fields actually relate to.
            """
            fields_def = []
            for name, field in fields:
                deconstruction = field.deconstruct()[1:]
                if field.rel and field.rel.to:
                    del deconstruction[2]['to']
                fields_def.append(deconstruction)
            return fields_def
        # Find any renamed models: an added model whose (relation-agnostic)
        # field definitions exactly match a removed model in the same app
        # is treated as a rename, subject to questioner confirmation.
        renamed_models = {}
        renamed_models_rel = {}
        added_models = set(new_model_keys) - set(old_model_keys)
        for app_label, model_name in added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = _rel_agnostic_fields_def(model_state.fields)
            removed_models = set(old_model_keys) - set(new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = _rel_agnostic_fields_def(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_to_migration(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel['%s.%s' % (rem_model_state.app_label, rem_model_state.name)] = '%s.%s' % (model_state.app_label, model_state.name)
                            # Treat the renamed model as kept from here on.
                            old_model_keys.remove((rem_app_label, rem_model_name))
                            old_model_keys.append((app_label, model_name))
                            break
        # Adding models. Phase 1 is adding models with no outward relationships.
        added_models = set(new_model_keys) - set(old_model_keys)
        pending_add = {}
        for app_label, model_name in added_models:
            model_state = self.to_state.models[app_label, model_name]
            # Are there any relationships out from this model? if so, punt it to the next phase.
            related_fields = []
            for field in new_apps.get_model(app_label, model_name)._meta.local_fields:
                if field.rel:
                    if field.rel.to:
                        related_fields.append((field.name, field.rel.to._meta.app_label, field.rel.to._meta.model_name))
                    if hasattr(field.rel, "through") and not field.rel.through._meta.auto_created:
                        related_fields.append((field.name, field.rel.through._meta.app_label, field.rel.through._meta.model_name))
            for field in new_apps.get_model(app_label, model_name)._meta.local_many_to_many:
                if field.rel.to:
                    related_fields.append((field.name, field.rel.to._meta.app_label, field.rel.to._meta.model_name))
                if hasattr(field.rel, "through") and not field.rel.through._meta.auto_created:
                    related_fields.append((field.name, field.rel.through._meta.app_label, field.rel.through._meta.model_name))
            if related_fields:
                pending_add[app_label, model_name] = related_fields
            else:
                self.add_to_migration(
                    app_label,
                    operations.CreateModel(
                        name=model_state.name,
                        fields=model_state.fields,
                        options=model_state.options,
                        bases=model_state.bases,
                    )
                )
        # Phase 2 is progressively adding pending models, splitting up into two
        # migrations if required.
        pending_new_fks = []
        pending_unique_together = []
        added_phase_2 = set()
        while pending_add:
            # Is there one we can add that has all dependencies satisfied?
            satisfied = [
                (m, rf)
                for m, rf in pending_add.items()
                if all((al, mn) not in pending_add for f, al, mn in rf)
            ]
            if satisfied:
                (app_label, model_name), related_fields = sorted(satisfied)[0]
                model_state = self.to_state.models[app_label, model_name]
                self.add_to_migration(
                    app_label,
                    operations.CreateModel(
                        name=model_state.name,
                        fields=model_state.fields,
                        options=model_state.options,
                        bases=model_state.bases,
                    ),
                    # If it's already been added in phase 2 put it in a new
                    # migration for safety.
                    new=any((al, mn) in added_phase_2 for f, al, mn in related_fields),
                )
                added_phase_2.add((app_label, model_name))
            # Ah well, we'll need to split one. Pick deterministically.
            else:
                (app_label, model_name), related_fields = sorted(pending_add.items())[0]
                model_state = self.to_state.models[app_label, model_name]
                # Defer unique together constraints creation, see ticket #22275
                unique_together_constraints = model_state.options.pop('unique_together', None)
                if unique_together_constraints:
                    pending_unique_together.append((app_label, model_name,
                                                    unique_together_constraints))
                # Work out the fields that need splitting out
                bad_fields = dict((f, (al, mn)) for f, al, mn in related_fields if (al, mn) in pending_add)
                # Create the model, without those
                self.add_to_migration(
                    app_label,
                    operations.CreateModel(
                        name=model_state.name,
                        fields=[(n, f) for n, f in model_state.fields if n not in bad_fields],
                        options=model_state.options,
                        bases=model_state.bases,
                    )
                )
                # Add the bad fields to be made in a phase 3
                for field_name, (other_app_label, other_model_name) in bad_fields.items():
                    pending_new_fks.append((app_label, model_name, field_name, other_app_label))
            for field_name, other_app_label, other_model_name in related_fields:
                # If it depends on a swappable something, add a dynamic depend'cy
                swappable_setting = new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0].swappable_setting
                if swappable_setting is not None:
                    self.add_swappable_dependency(app_label, swappable_setting)
                elif app_label != other_app_label:
                    self.add_dependency(app_label, other_app_label)
            del pending_add[app_label, model_name]
        # Phase 3 is adding the final set of FKs as separate new migrations.
        for app_label, model_name, field_name, other_app_label in pending_new_fks:
            model_state = self.to_state.models[app_label, model_name]
            self.add_to_migration(
                app_label,
                operations.AddField(
                    model_name=model_name,
                    name=field_name,
                    field=model_state.get_field_by_name(field_name),
                ),
                new=True,
            )
            # If it depends on a swappable something, add a dynamic depend'cy
            swappable_setting = new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0].swappable_setting
            if swappable_setting is not None:
                self.add_swappable_dependency(app_label, swappable_setting)
            elif app_label != other_app_label:
                self.add_dependency(app_label, other_app_label)
        # Phase 3.1 - unique together constraints
        for app_label, model_name, unique_together in pending_unique_together:
            self.add_to_migration(
                app_label,
                operations.AlterUniqueTogether(
                    name=model_name,
                    unique_together=unique_together
                )
            )
        # Changes within models
        kept_models = set(old_model_keys).intersection(new_model_keys)
        old_fields = set()
        new_fields = set()
        unique_together_operations = []
        for app_label, model_name in kept_models:
            old_model_name = renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            # Collect field changes for later global dealing with (so AddFields
            # always come before AlterFields even on separate models)
            old_fields.update((app_label, model_name, x) for x, y in old_model_state.fields)
            new_fields.update((app_label, model_name, x) for x, y in new_model_state.fields)
            # Unique_together changes. Operations will be added to migration a
            # bit later, after fields creation. See ticket #22035.
            if old_model_state.options.get("unique_together", set()) != new_model_state.options.get("unique_together", set()):
                unique_together_operations.append((
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=new_model_state.options.get("unique_together", set()),
                    )
                ))
        # New fields
        renamed_fields = {}
        for app_label, model_name, field_name in new_fields - old_fields:
            old_model_name = renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            field = new_model_state.get_field_by_name(field_name)
            # Scan to see if this is actually a rename!
            field_dec = field.deconstruct()[1:]
            found_rename = False
            for rem_app_label, rem_model_name, rem_field_name in (old_fields - new_fields):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field_dec = old_model_state.get_field_by_name(rem_field_name).deconstruct()[1:]
                    # Account for the target model having been renamed when
                    # comparing relation targets.
                    if field.rel and field.rel.to and 'to' in old_field_dec[2]:
                        old_rel_to = old_field_dec[2]['to']
                        if old_rel_to in renamed_models_rel:
                            old_field_dec[2]['to'] = renamed_models_rel[old_rel_to]
                    if old_field_dec == field_dec:
                        if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                            self.add_to_migration(
                                app_label,
                                operations.RenameField(
                                    model_name=model_name,
                                    old_name=rem_field_name,
                                    new_name=field_name,
                                )
                            )
                            old_fields.remove((rem_app_label, rem_model_name, rem_field_name))
                            old_fields.add((app_label, model_name, field_name))
                            renamed_fields[app_label, model_name, field_name] = rem_field_name
                            found_rename = True
                            break
            if found_rename:
                continue
            # You can't just add NOT NULL fields with no default
            if not field.null and not field.has_default():
                field = field.clone()
                field.default = self.questioner.ask_not_null_addition(field_name, model_name)
                self.add_to_migration(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=field_name,
                        field=field,
                        preserve_default=False,
                    )
                )
            else:
                self.add_to_migration(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=field_name,
                        field=field,
                    )
                )
            new_field = new_apps.get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
            swappable_setting = getattr(new_field, 'swappable_setting', None)
            if swappable_setting is not None:
                self.add_swappable_dependency(app_label, swappable_setting)
        # Old fields
        for app_label, model_name, field_name in old_fields - new_fields:
            old_model_name = renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            self.add_to_migration(
                app_label,
                operations.RemoveField(
                    model_name=model_name,
                    name=field_name,
                )
            )
        # The same fields
        for app_label, model_name, field_name in old_fields.intersection(new_fields):
            # Did the field change?
            old_model_name = renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            old_field_name = renamed_fields.get((app_label, model_name, field_name), field_name)
            old_field_dec = old_model_state.get_field_by_name(old_field_name).deconstruct()[1:]
            new_field_dec = new_model_state.get_field_by_name(field_name).deconstruct()[1:]
            if old_field_dec != new_field_dec:
                self.add_to_migration(
                    app_label,
                    operations.AlterField(
                        model_name=model_name,
                        name=field_name,
                        field=new_model_state.get_field_by_name(field_name),
                    )
                )
        for app_label, operation in unique_together_operations:
            self.add_to_migration(app_label, operation)
        # Removing models
        removed_models = set(old_model_keys) - set(new_model_keys)
        for app_label, model_name in removed_models:
            model_state = self.from_state.models[app_label, model_name]
            self.add_to_migration(
                app_label,
                operations.DeleteModel(
                    model_state.name,
                )
            )
        # Alright, now add internal dependencies: each migration in an
        # app's list depends on its predecessor.
        for app_label, migrations in self.migrations.items():
            for m1, m2 in zip(migrations, migrations[1:]):
                m2.dependencies.append((app_label, m1.name))
        # Clean up dependencies (de-duplicate)
        for app_label, migrations in self.migrations.items():
            for migration in migrations:
                migration.dependencies = list(set(migration.dependencies))
        return self.migrations
def add_to_migration(self, app_label, operation, new=False):
migrations = self.migrations.setdefault(app_label, [])
if not migrations or new:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(migrations) + 1), app_label)
migrations.append(instance)
migrations[-1].operations.append(operation)
def add_dependency(self, app_label, other_app_label):
"""
Adds a dependency to app_label's newest migration on
other_app_label's latest migration.
"""
if self.migrations.get(other_app_label):
dependency = (other_app_label, self.migrations[other_app_label][-1].name)
else:
dependency = (other_app_label, "__first__")
self.migrations[app_label][-1].dependencies.append(dependency)
def add_swappable_dependency(self, app_label, setting_name):
"""
Adds a dependency to the value of a swappable model setting.
"""
dependency = ("__setting__", setting_name)
self.migrations[app_label][-1].dependencies.append(dependency)
    def arrange_for_graph(self, changes, graph):
        """
        Takes in a result from changes() and a MigrationGraph,
        and fixes the names and dependencies of the changes so they
        extend the graph from the leaf nodes for each app.

        Returns the (mutated) changes dict. Migrations get sequential
        "%04i_<suggested name>" names; apps for which the user declines an
        initial migration are dropped and their entries remapped to
        "__first__".
        """
        leaves = graph.leaf_nodes()
        name_map = {}
        for app_label, migrations in list(changes.items()):
            if not migrations:
                continue
            # Find the app label's current leaf node
            app_leaf = None
            for leaf in leaves:
                if leaf[0] == app_label:
                    app_leaf = leaf
                    break
            # Do they want an initial migration for this app?
            if app_leaf is None and not self.questioner.ask_initial(app_label):
                # They don't; map every generated name to "__first__" so any
                # dependency on these migrations still resolves, then drop
                # the app entirely.
                for migration in migrations:
                    name_map[(app_label, migration.name)] = (app_label, "__first__")
                del changes[app_label]
                continue
            # Work out the next number in the sequence
            if app_leaf is None:
                next_number = 1
            else:
                # parse_number returns None for unnumbered names; treat as 0.
                next_number = (self.parse_number(app_leaf[1]) or 0) + 1
            # Name each migration
            for i, migration in enumerate(migrations):
                if i == 0 and app_leaf:
                    # The first new migration continues from the current leaf.
                    migration.dependencies.append(app_leaf)
                if i == 0 and not app_leaf:
                    new_name = "0001_initial"
                else:
                    new_name = "%04i_%s" % (
                        next_number,
                        self.suggest_name(migration.operations)[:100],
                    )
                name_map[(app_label, migration.name)] = (app_label, new_name)
                next_number += 1
                migration.name = new_name
        # Now fix dependencies: rewrite every dependency that pointed at an
        # auto-generated name to the final name chosen above.
        for app_label, migrations in changes.items():
            for migration in migrations:
                migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
        return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif len(ops) > 1:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto_%s" % datetime.datetime.now().strftime("%Y%m%d_%H%M")
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
# ---------------------------------------------------------------------------
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import SQLAlchemyError
import urllib
from vistrails.core.db.action import create_action
from vistrails.core.bundles.installbundle import install
from vistrails.core import debug
from vistrails.core.modules.config import ModuleSettings
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import Module, ModuleError
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
from vistrails.core.utils import versions_increasing
from vistrails.packages.tabledata.common import TableObject
class DBConnection(Module):
    """Connects to a database.

    If the URI you enter uses a driver which is not currently installed,
    VisTrails will try to set it up.
    """
    # The input ports map directly onto SQLAlchemy's URL fields; only the
    # protocol and database name are mandatory.
    _input_ports = [('protocol', '(basic:String)'),
                    ('user', '(basic:String)',
                     {'optional': True}),
                    ('password', '(basic:String)',
                     {'optional': True}),
                    ('host', '(basic:String)',
                     {'optional': True}),
                    ('port', '(basic:Integer)',
                     {'optional': True}),
                    ('db_name', '(basic:String)')]
    _output_ports = [('connection', '(DBConnection)')]

    def compute(self):
        # Build the SQLAlchemy connection URL from the input ports; the
        # optional ports default to None and are then omitted from the URL.
        url = URL(drivername=self.get_input('protocol'),
                  username=self.force_get_input('user', None),
                  password=self.force_get_input('password', None),
                  host=self.force_get_input('host', None),
                  port=self.force_get_input('port', None),
                  database=self.get_input('db_name'))

        try:
            engine = create_engine(url)
        except ImportError, e:
            # The DBAPI driver for this protocol is missing: try to install
            # it via pip or the platform's package manager, then retry once.
            driver = url.drivername
            installed = False
            if driver == 'sqlite':
                # sqlite3 ships with Python itself; if it is missing it
                # cannot be installed after the fact.
                raise ModuleError(self,
                                  "Python was built without sqlite3 support")
            elif (driver == 'mysql' or
                    driver == 'drizzle'): # drizzle is a variant of MySQL
                installed = install({
                        'pip': 'mysql-python',
                        'linux-debian': 'python-mysqldb',
                        'linux-ubuntu': 'python-mysqldb',
                        'linux-fedora': 'MySQL-python'})
            elif (driver == 'postgresql' or
                    driver == 'postgre'): # deprecated alias
                installed = install({
                        'pip': 'psycopg2',
                        'linux-debian':'python-psycopg2',
                        'linux-ubuntu':'python-psycopg2',
                        'linux-fedora':'python-psycopg2'})
            elif driver == 'firebird':
                installed = install({
                        'pip': 'fdb',
                        'linux-fedora':'python-fdb'})
            elif driver == 'mssql' or driver == 'sybase':
                installed = install({
                        'pip': 'pyodbc',
                        'linux-debian':'python-pyodbc',
                        'linux-ubuntu':'python-pyodbc',
                        'linux-fedora':'pyodbc'})
            elif driver == 'oracle':
                installed = install({
                        'pip': 'cx_Oracle'})
            else:
                # Unknown driver: nothing we can install; report the
                # original import failure.
                raise ModuleError(self,
                                  "SQLAlchemy couldn't connect: %s" %
                                  debug.format_exception(e))
            if not installed:
                raise ModuleError(self,
                                  "Failed to install required driver")
            # Retry now that the driver should be available.
            try:
                engine = create_engine(url)
            except Exception, e:
                raise ModuleError(self,
                                  "Couldn't connect to the database: %s" %
                                  debug.format_exception(e))
        except SQLAlchemyError:
            # This is NoSuchModuleError in newer versions of SQLAlchemy but we
            # want compatibility here
            raise ModuleError(
                    self,
                    "SQLAlchemy has no support for protocol %r -- are you "
                    "sure you spelled that correctly?" % url.drivername)
        self.set_output('connection', engine.connect())
class SQLSource(Module):
    """Runs an SQL statement (the 'source' input) on a DBConnection.

    Extra input ports added by the user are bound as named parameters of
    the statement. If the statement returns rows, they are published both
    as a Table ('result') and as a raw row list ('resultSet'); otherwise
    both outputs are None.
    """
    _settings = ModuleSettings(configure_widget=
            'vistrails.packages.sql.widgets:SQLSourceConfigurationWidget')
    _input_ports = [('connection', '(DBConnection)'),
                    ('cacheResults', '(basic:Boolean)'),
                    ('source', '(basic:String)')]
    _output_ports = [('result', '(org.vistrails.vistrails.tabledata:Table)'),
                     ('resultSet', '(basic:List)')]

    def is_cacheable(self):
        # Queries may have side effects, so never cache by default;
        # compute() shadows this per-instance when cacheResults is set.
        return False

    def compute(self):
        cached = False
        if self.has_input('cacheResults'):
            cached = self.get_input('cacheResults')
        # Per-instance override of the class-level is_cacheable above.
        self.is_cacheable = lambda: cached
        connection = self.get_input('connection')
        # Every user-added input port becomes a named statement parameter.
        inputs = dict((k, self.get_input(k)) for k in self.inputPorts.iterkeys()
                      if k not in ('source', 'connection', 'cacheResults'))
        # The statement is stored URL-quoted in the pipeline.
        s = urllib.unquote(str(self.get_input('source')))

        try:
            transaction = connection.begin()
            results = connection.execute(s, inputs)
            try:
                rows = results.fetchall()
            except Exception:
                # The statement returned no rows (INSERT/UPDATE/DDL...).
                self.set_output('result', None)
                self.set_output('resultSet', None)
            else:
                # results.returns_rows is True
                # We don't use 'if return_rows' because this attribute didn't
                # use to exist
                table = TableObject.from_dicts(rows, results.keys())
                self.set_output('result', table)
                self.set_output('resultSet', rows)
            transaction.commit()
        except SQLAlchemyError, e:
            raise ModuleError(self, debug.format_exception(e))
# Modules this package registers with VisTrails.
_modules = [DBConnection, SQLSource]
def handle_module_upgrade_request(controller, module_id, pipeline):
    """Upgrade an outdated module of this package inside *pipeline*.

    Returns the list of vistrail actions that replace the old module.
    """
    # Before 0.0.3, SQLSource's resultSet output was type ListOfElements (which
    # doesn't exist anymore)
    # In 0.0.3, SQLSource's resultSet output was type List
    # In 0.1.0, SQLSource's output was renamed to result and is now a Table;
    # this is totally incompatible and no upgrade code is possible
    # the resultSet is kept for now for compatibility

    # Up to 0.0.4, DBConnection would ask for a password if one was necessary;
    # this behavior has not been kept. There is now a password input port, to
    # which you can connect a PasswordDialog from package dialogs if needed

    old_module = pipeline.modules[module_id]
    # DBConnection module from before 0.1.0: automatically add the password
    # prompt module
    if (old_module.name == 'DBConnection' and
            versions_increasing(old_module.version, '0.1.0')):
        reg = get_module_registry()
        # Creates the new module
        new_module = controller.create_module_from_descriptor(
                reg.get_descriptor(DBConnection))
        # Create the password module
        mod_desc = reg.get_descriptor_by_name(
                'org.vistrails.vistrails.dialogs', 'PasswordDialog')
        mod = controller.create_module_from_descriptor(mod_desc)
        # Adds a 'label' function to the password module
        ops = [('add', mod)]
        ops.extend(controller.update_function_ops(mod,
                                                  'label', ['Server password']))
        # Connects the password module to the new module
        conn = controller.create_connection(mod, 'result',
                                            new_module, 'password')
        ops.append(('add', conn))
        # Replaces the old module with the new one
        upgrade_actions = UpgradeWorkflowHandler.replace_generic(
                controller, pipeline,
                old_module, new_module,
                src_port_remap={'self': 'connection'})
        password_fix_action = create_action(ops)
        return upgrade_actions + [password_fix_action]

    # Anything else: port names/types are compatible enough for the generic
    # automatic upgrade.
    return UpgradeWorkflowHandler.attempt_automatic_upgrade(
            controller, pipeline,
            module_id)
###############################################################################
import unittest
class TestSQL(unittest.TestCase):
    def test_query_sqlite3(self):
        """Queries a SQLite3 database.

        Builds a small database with sqlite3 directly, then runs two
        pipelines: an INSERT (no result rows expected) and a parameterized
        SELECT (rows expected), checking SQLSource's outputs for both.
        """
        import os
        import sqlite3
        import tempfile
        import urllib2

        from vistrails.tests.utils import execute, intercept_results

        identifier = 'org.vistrails.vistrails.sql'

        # Create the fixture database outside of VisTrails.
        test_db_fd, test_db = tempfile.mkstemp(suffix='.sqlite3')
        os.close(test_db_fd)
        try:
            conn = sqlite3.connect(test_db)
            cur = conn.cursor()
            cur.execute('''
                    CREATE TABLE test(name VARCHAR(24) PRIMARY KEY,
                                      lastname VARCHAR(32) NOT NULL,
                                      age INTEGER NOT NULL)
                    ''')
            cur.executemany('''
                    INSERT INTO test(name, lastname, age)
                    VALUES(:name, :lastname, :age)
                    ''',
                    [{'name': 'John', 'lastname': 'Smith', 'age': 25},
                     {'name': 'Lara', 'lastname': 'Croft', 'age': 21}])
            conn.commit()
            conn.close()

            # First pipeline: an INSERT whose parameters come from
            # user-added input ports; it must produce no result table.
            source = ('''
                    INSERT INTO test(name, lastname, age)
                    VALUES(:name, :lastname, :age)
                    ''')
            with intercept_results(DBConnection, 'connection', SQLSource, 'result') as (connection, table):
                self.assertFalse(execute([
                        ('DBConnection', identifier, [
                            ('protocol', [('String', 'sqlite')]),
                            ('db_name', [('String', test_db)]),
                        ]),
                        ('SQLSource', identifier, [
                            ('source', [('String', urllib2.quote(source))]),
                            ('name', [('String', 'Michael')]),
                            ('lastname', [('String', 'Buck')]),
                            ('age', [('Integer', '78')]),
                        ]),
                    ],
                    [
                        (0, 'connection', 1, 'connection'),
                    ],
                    add_port_specs=[
                        (1, 'input', 'name',
                         'org.vistrails.vistrails.basic:String'),
                        (1, 'input', 'lastname',
                         'org.vistrails.vistrails.basic:String'),
                        (1, 'input', 'age',
                         'org.vistrails.vistrails.basic:Integer'),
                    ]))
            self.assertEqual(len(connection), 1)
            connection[0].close()
            self.assertEqual(len(table), 1)
            # INSERT returns no rows, so 'result' must be None.
            self.assertIsNone(table[0])

            # Second pipeline: a SELECT with one bound parameter; both rows
            # older than 22 should come back as a 2x3 table.
            source = "SELECT name, lastname, age FROM test WHERE age > :age"
            with intercept_results(DBConnection, 'connection', SQLSource, 'result') as (connection, table):
                self.assertFalse(execute([
                        ('DBConnection', identifier, [
                            ('protocol', [('String', 'sqlite')]),
                            ('db_name', [('String', test_db)]),
                        ]),
                        ('SQLSource', identifier, [
                            ('source', [('String', urllib2.quote(source))]),
                            ('age', [('Integer', '22')]),
                        ]),
                    ],
                    [
                        (0, 'connection', 1, 'connection'),
                    ],
                    add_port_specs=[
                        (1, 'input', 'age',
                         'org.vistrails.vistrails.basic:Integer'),
                    ]))
            self.assertEqual(len(connection), 1)
            connection[0].close()
            self.assertEqual(len(table), 1)
            table, = table
            self.assertEqual(table.names, ['name', 'lastname', 'age'])
            self.assertEqual((table.rows, table.columns), (2, 3))
            self.assertEqual(set(table.get_column(1)),
                             set(['Smith', 'Buck']))
        finally:
            try:
                os.remove(test_db)
            except OSError:
                pass # Oops, we are leaking the file here...
# ---------------------------------------------------------------------------
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import sysconfig
import textwrap
import time
try:
import resource
except ImportError:
resource = None
from test import test_support
from test.script_helper import assert_python_ok
import mmap
import uuid
# The tempnam/tmpnam tests below use these deprecated functions on purpose;
# silence the RuntimeWarnings they would emit at module level.
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
    """Basic tests for file-related functions in os."""

    def setUp(self):
        # Start (and, via tearDown below, end) with TESTFN absent.
        if os.path.exists(test_support.TESTFN):
            os.unlink(test_support.TESTFN)
    tearDown = setUp

    def test_access(self):
        # A file created with O_CREAT|O_RDWR must be reported writable.
        f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(f)
        self.assertTrue(os.access(test_support.TESTFN, os.W_OK))

    def test_closerange(self):
        first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
        # We must allocate two consecutive file descriptors, otherwise
        # it will mess up other file descriptors (perhaps even the three
        # standard ones).
        second = os.dup(first)
        try:
            retries = 0
            while second != first + 1:
                os.close(first)
                retries += 1
                if retries > 10:
                    # XXX test skipped
                    self.skipTest("couldn't allocate two consecutive fds")
                first, second = second, os.dup(second)
        finally:
            os.close(second)
        # close a fd that is open, and one that isn't
        os.closerange(first, first + 2)
        # Both descriptors must now be closed.
        self.assertRaises(OSError, os.write, first, "a")

    @test_support.cpython_only
    def test_rename(self):
        # os.rename() with a bad second argument must not leak a reference
        # to the first argument (CPython refcount regression test).
        path = unicode(test_support.TESTFN)
        old = sys.getrefcount(path)
        self.assertRaises(TypeError, os.rename, path, 0)
        new = sys.getrefcount(path)
        self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
    """Tests for the deprecated os.tempnam/os.tmpfile/os.tmpnam functions."""

    def setUp(self):
        self.files = []
        os.mkdir(test_support.TESTFN)

    def tearDown(self):
        for name in self.files:
            os.unlink(name)
        os.rmdir(test_support.TESTFN)

    def check_tempfile(self, name):
        # make sure it doesn't already exist:
        self.assertFalse(os.path.exists(name),
                         "file already exists for temporary file")
        # make sure we can create the file
        open(name, "w")
        # Remember it so tearDown cleans it up.
        self.files.append(name)

    @unittest.skipUnless(hasattr(os, 'tempnam'), 'test needs os.tempnam()')
    def test_tempnam(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
                                    r"test_os$")
            warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
            # No-argument form, directory form, and prefix form.
            self.check_tempfile(os.tempnam())

            name = os.tempnam(test_support.TESTFN)
            self.check_tempfile(name)

            name = os.tempnam(test_support.TESTFN, "pfx")
            self.assertTrue(os.path.basename(name)[:3] == "pfx")
            self.check_tempfile(name)

    @unittest.skipUnless(hasattr(os, 'tmpfile'), 'test needs os.tmpfile()')
    def test_tmpfile(self):
        # As with test_tmpnam() below, the Windows implementation of tmpfile()
        # attempts to create a file in the root directory of the current drive.
        # On Vista and Server 2008, this test will always fail for normal users
        # as writing to the root directory requires elevated privileges. With
        # XP and below, the semantics of tmpfile() are the same, but the user
        # running the test is more likely to have administrative privileges on
        # their account already. If that's the case, then os.tmpfile() should
        # work. In order to make this test as useful as possible, rather than
        # trying to detect Windows versions or whether or not the user has the
        # right permissions, just try and create a file in the root directory
        # and see if it raises a 'Permission denied' OSError. If it does, then
        # test that a subsequent call to os.tmpfile() raises the same error. If
        # it doesn't, assume we're on XP or below and the user running the test
        # has administrative privileges, and proceed with the test as normal.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)

            if sys.platform == 'win32':
                name = '\\python_test_os_test_tmpfile.txt'
                if os.path.exists(name):
                    os.remove(name)
                try:
                    fp = open(name, 'w')
                except IOError, first:
                    # open() failed, assert tmpfile() fails in the same way.
                    # Although open() raises an IOError and os.tmpfile() raises an
                    # OSError(), 'args' will be (13, 'Permission denied') in both
                    # cases.
                    try:
                        fp = os.tmpfile()
                    except OSError, second:
                        self.assertEqual(first.args, second.args)
                    else:
                        self.fail("expected os.tmpfile() to raise OSError")
                    return
                else:
                    # open() worked, therefore, tmpfile() should work. Close our
                    # dummy file and proceed with the test as normal.
                    fp.close()
                    os.remove(name)

            # Round-trip a small payload through the anonymous temp file.
            fp = os.tmpfile()
            fp.write("foobar")
            fp.seek(0,0)
            s = fp.read()
            fp.close()
            self.assertTrue(s == "foobar")

    @unittest.skipUnless(hasattr(os, 'tmpnam'), 'test needs os.tmpnam()')
    def test_tmpnam(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
                                    r"test_os$")
            warnings.filterwarnings("ignore", "tmpnam", DeprecationWarning)

            name = os.tmpnam()
            if sys.platform in ("win32",):
                # The Windows tmpnam() seems useless.  From the MS docs:
                #
                #     The character string that tmpnam creates consists of
                #     the path prefix, defined by the entry P_tmpdir in the
                #     file STDIO.H, followed by a sequence consisting of the
                #     digit characters '0' through '9'; the numerical value
                #     of this string is in the range 1 - 65,535.  Changing the
                #     definitions of L_tmpnam or P_tmpdir in STDIO.H does not
                #     change the operation of tmpnam.
                #
                # The really bizarre part is that, at least under MSVC6,
                # P_tmpdir is "\\".  That is, the path returned refers to
                # the root of the current drive.  That's a terrible place to
                # put temp files, and, depending on privileges, the user
                # may not even be able to open a file in the root directory.
                self.assertFalse(os.path.exists(name),
                                 "file already exists for temporary file")
            else:
                self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
    """Test attributes on return values from the os.*stat* family."""

    def setUp(self):
        # A 3-byte file inside a private directory; several tests below rely
        # on the known size.
        os.mkdir(test_support.TESTFN)
        self.fname = os.path.join(test_support.TESTFN, "f1")
        f = open(self.fname, 'wb')
        f.write("ABC")
        f.close()

    def tearDown(self):
        os.unlink(self.fname)
        os.rmdir(test_support.TESTFN)

    @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
    def test_stat_attributes(self):
        import stat
        result = os.stat(self.fname)

        # Make sure direct access works
        self.assertEqual(result[stat.ST_SIZE], 3)
        self.assertEqual(result.st_size, 3)

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                if name.endswith("TIME"):
                    # Timestamps are floats as attributes but truncated to
                    # ints in the tuple form.
                    def trunc(x): return int(x)
                else:
                    def trunc(x): return x
                self.assertEqual(trunc(getattr(result, attr)),
                                 result[getattr(stat, name)])
                self.assertIn(attr, members)

        # Indexing out of range must fail.
        try:
            result[200]
            self.fail("No exception raised")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
    def test_statvfs_attributes(self):
        try:
            result = os.statvfs(self.fname)
        except OSError, e:
            # On AtheOS, glibc always returns ENOSYS
            if e.errno == errno.ENOSYS:
                self.skipTest('glibc always returns ENOSYS on AtheOS')

        # Make sure direct access works
        self.assertEqual(result.f_bfree, result[3])

        # Make sure all the attributes are there.
        members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
                    'ffree', 'favail', 'flag', 'namemax')
        for value, member in enumerate(members):
            self.assertEqual(getattr(result, 'f_' + member), result[value])

        # Make sure that assignment really fails
        try:
            result.f_bfree = 1
            self.fail("No exception raised")
        except TypeError:
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the constructor with a too-short tuple.
        try:
            result2 = os.statvfs_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    def test_utime_dir(self):
        delta = 1000000
        st = os.stat(test_support.TESTFN)
        # round to int, because some systems may support sub-second
        # time stamps in stat, but not in utime.
        os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
        st2 = os.stat(test_support.TESTFN)
        self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))

    # Restrict tests to Win32, since there is no guarantee other
    # systems support centiseconds
    def get_file_system(path):
        # NOTE: deliberately not a normal method -- it is called at
        # class-creation time by the skipUnless decorators below.
        if sys.platform == 'win32':
            root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
            import ctypes
            kernel32 = ctypes.windll.kernel32
            buf = ctypes.create_string_buffer("", 100)
            if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
                return buf.value

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS",
                         "requires NTFS")
    def test_1565150(self):
        # NTFS stores sub-second mtimes; they must survive utime/stat.
        t1 = 1159195039.25
        os.utime(self.fname, (t1, t1))
        self.assertEqual(os.stat(self.fname).st_mtime, t1)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS",
                         "requires NTFS")
    def test_large_time(self):
        # Timestamps beyond 32-bit time_t must round-trip on NTFS.
        t1 = 5000000000 # some day in 2128
        os.utime(self.fname, (t1, t1))
        self.assertEqual(os.stat(self.fname).st_mtime, t1)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    def test_1686475(self):
        # Verify that an open file can be stat'ed
        try:
            os.stat(r"c:\pagefile.sys")
        except WindowsError, e:
            if e.errno == 2: # file does not exist; cannot run test
                self.skipTest(r'c:\pagefile.sys does not exist')
            self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
    """check that os.environ object conform to mapping protocol"""
    type2test = None

    def _reference(self):
        # Reference mapping contents required by BasicTestMappingProtocol.
        return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}

    def _empty_mapping(self):
        os.environ.clear()
        return os.environ

    def setUp(self):
        # Save the real environment so the mapping-protocol tests can
        # freely clear and mutate os.environ.
        self.__save = dict(os.environ)
        os.environ.clear()

    def tearDown(self):
        os.environ.clear()
        os.environ.update(self.__save)

    # Bug 1110478
    def test_update2(self):
        # os.environ.update() must be visible to child processes.
        if os.path.exists("/bin/sh"):
            os.environ.update(HELLO="World")
            with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
                value = popen.read().strip()
            self.assertEqual(value, "World")

    # On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
    # #13415).
    @unittest.skipIf(sys.platform.startswith(('freebsd', 'darwin')),
                     "due to known OS bug: see issue #13415")
    def test_unset_error(self):
        if sys.platform == "win32":
            # an environment variable is limited to 32,767 characters
            key = 'x' * 50000
            self.assertRaises(ValueError, os.environ.__delitem__, key)
        else:
            # "=" is not allowed in a variable name
            key = 'key='
            self.assertRaises(OSError, os.environ.__delitem__, key)
class WalkTests(unittest.TestCase):
    """Tests for os.walk()."""

    def test_traversal(self):
        import os
        from os.path import join

        # Build:
        #     TESTFN/
        #       TEST1/              a file kid and two directory kids
        #         tmp1
        #         SUB1/             a file kid and a directory kid
        #           tmp2
        #           SUB11/          no kids
        #         SUB2/             a file kid and a dirsymlink kid
        #           tmp3
        #           link/           a symlink to TESTFN.2
        #       TEST2/
        #         tmp4              a lone file
        walk_path = join(test_support.TESTFN, "TEST1")
        sub1_path = join(walk_path, "SUB1")
        sub11_path = join(sub1_path, "SUB11")
        sub2_path = join(walk_path, "SUB2")
        tmp1_path = join(walk_path, "tmp1")
        tmp2_path = join(sub1_path, "tmp2")
        tmp3_path = join(sub2_path, "tmp3")
        link_path = join(sub2_path, "link")
        t2_path = join(test_support.TESTFN, "TEST2")
        tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")

        # Create stuff.
        os.makedirs(sub11_path)
        os.makedirs(sub2_path)
        os.makedirs(t2_path)
        for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
            f = file(path, "w")
            f.write("I'm " + path + " and proud of it.  Blame test_os.\n")
            f.close()
        if hasattr(os, "symlink"):
            os.symlink(os.path.abspath(t2_path), link_path)
            sub2_tree = (sub2_path, ["link"], ["tmp3"])
        else:
            # No symlink support: SUB2 has no directory kid.
            sub2_tree = (sub2_path, [], ["tmp3"])

        # Walk top-down.
        all = list(os.walk(walk_path))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  TESTFN, SUB1, SUB11, SUB2
        #     flipped:  TESTFN, SUB2, SUB1, SUB11
        flipped = all[0][1][0] != "SUB1"
        all[0][1].sort()
        self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 + flipped], (sub11_path, [], []))
        self.assertEqual(all[3 - 2 * flipped], sub2_tree)

        # Prune the search.
        all = []
        for root, dirs, files in os.walk(walk_path):
            all.append((root, dirs, files))
            # Don't descend into SUB1.
            if 'SUB1' in dirs:
                # Note that this also mutates the dirs we appended to all!
                dirs.remove('SUB1')
        self.assertEqual(len(all), 2)
        self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
        self.assertEqual(all[1], sub2_tree)

        # Walk bottom-up.
        all = list(os.walk(walk_path, topdown=False))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  SUB11, SUB1, SUB2, TESTFN
        #     flipped:  SUB2, SUB11, SUB1, TESTFN
        flipped = all[3][1][0] != "SUB1"
        all[3][1].sort()
        self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[flipped], (sub11_path, [], []))
        self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 - 2 * flipped], sub2_tree)

        if hasattr(os, "symlink"):
            # Walk, following symlinks.
            for root, dirs, files in os.walk(walk_path, followlinks=True):
                if root == link_path:
                    self.assertEqual(dirs, [])
                    self.assertEqual(files, ["tmp4"])
                    break
            else:
                self.fail("Didn't follow symlink with followlinks=True")

    def tearDown(self):
        # Tear everything down.  This is a decent use for bottom-up on
        # Windows, which doesn't have a recursive delete command.  The
        # (not so) subtlety is that rmdir will fail unless the dir's
        # kids are removed first, so bottom up is essential.
        for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                dirname = os.path.join(root, name)
                if not os.path.islink(dirname):
                    os.rmdir(dirname)
                else:
                    os.remove(dirname)
        os.rmdir(test_support.TESTFN)
class MakedirTests (unittest.TestCase):
    """Tests for os.makedirs() / os.removedirs()."""

    def setUp(self):
        os.mkdir(test_support.TESTFN)

    def test_makedir(self):
        base = test_support.TESTFN
        # Creating a fresh chain of directories should succeed...
        deep = os.path.join(base, 'dir1', 'dir2', 'dir3')
        os.makedirs(deep)
        # ...and so should extending an existing chain by one level.
        os.makedirs(os.path.join(deep, 'dir4'))

        # Try paths with a '.' in them.  Asking for just os.curdir (which
        # already exists) must fail, but curdir *components* are fine.
        self.assertRaises(OSError, os.makedirs, os.curdir)
        os.makedirs(os.path.join(deep, 'dir4', 'dir5', os.curdir))
        os.makedirs(os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3',
                                 'dir4', 'dir5', 'dir6'))

    def tearDown(self):
        leaf = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
                            'dir4', 'dir5', 'dir6')
        # If the tests failed, the bottom-most directory ('../dir6') may not
        # have been created, so climb up until we find a directory that
        # actually exists, then remove the whole chain from there.
        while not os.path.exists(leaf) and leaf != test_support.TESTFN:
            leaf = os.path.dirname(leaf)

        os.removedirs(leaf)
class DevNullTests (unittest.TestCase):
    """Tests for os.devnull: writes are discarded and reads see EOF."""

    def test_devnull(self):
        # Use open() + with instead of the bare file() builtin: file() was
        # removed in Python 3, and the original leaked both file handles.
        # open() behaves identically here on 2.x and 3.x.
        with open(os.devnull, 'w') as f:
            f.write('hello')
        # Everything written to the null device is discarded, so a read
        # immediately hits EOF.
        with open(os.devnull, 'r') as f:
            self.assertEqual(f.read(), '')
class URandomTests (unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
# We need to use repr() and eval() to avoid line ending conversions
# under Windows.
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.write(repr(data))',
'sys.stdout.flush()',
'print >> sys.stderr, (len(data), data)'))
cmd_line = [sys.executable, '-c', code]
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
self.assertEqual(p.wait(), 0, (p.wait(), err))
out = eval(out)
self.assertEqual(len(out), count, err)
return out
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
# getentropy()-based builds read entropy without opening /dev/urandom, so
# the fd-exhaustion test below would be meaningless there.
HAVE_GETENTROPY = (sysconfig.get_config_var('HAVE_GETENTROPY') == 1)

@unittest.skipIf(HAVE_GETENTROPY,
                 "getentropy() does not use a file descriptor")
class URandomFDTests(unittest.TestCase):
    @unittest.skipUnless(resource, "test requires the resource module")
    def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource

            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
class ExecTests(unittest.TestCase):
    """Argument validation for the os.exec* family (nothing is ever exec'd)."""
    def test_execvpe_with_bad_arglist(self):
        # An empty argv must be rejected before any exec is attempted.
        self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)

    def test_execve_invalid_env(self):
        args = [sys.executable, '-c', 'pass']

        # null character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT\0VEGETABLE"] = "cabbage"
        with self.assertRaises(TypeError):
            os.execve(args[0], args, newenv)

        # null character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
        with self.assertRaises(TypeError):
            os.execve(args[0], args, newenv)

        # equal character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT=ORANGE"] = "lemon"
        with self.assertRaises(ValueError):
            os.execve(args[0], args, newenv)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
    """os functions must raise WindowsError for missing paths on Windows.

    TESTFN is assumed not to exist when each test starts (test_mkdir creates
    it as a file on purpose, then removes it).
    """
    def test_rename(self):
        self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")

    def test_remove(self):
        self.assertRaises(WindowsError, os.remove, test_support.TESTFN)

    def test_chdir(self):
        self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)

    def test_mkdir(self):
        # Create TESTFN as a *file* so that mkdir on the same name must fail.
        f = open(test_support.TESTFN, "w")
        try:
            self.assertRaises(WindowsError, os.mkdir, test_support.TESTFN)
        finally:
            f.close()
            os.unlink(test_support.TESTFN)

    def test_utime(self):
        self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)

    def test_chmod(self):
        self.assertRaises(WindowsError, os.chmod, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
    """Every fd-taking os.* function must raise OSError(EBADF) for a bad fd."""
    # Functions taking only the fd; a test method is generated for each below.
    singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
               "fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    #singles.append("close")
    #We omit close because it doesn't raise an exception on some platforms
    def get_single(f):
        # Factory producing one test method per function name. It is called
        # at class-body scope, so `f` here is the name string, not `self`.
        def helper(self):
            if hasattr(os, f):
                self.check(getattr(os, f))
        return helper
    # Inject test_<name> methods into the class namespace while the class
    # body is still executing (hence the use of locals()).
    for f in singles:
        locals()["test_"+f] = get_single(f)

    def check(self, f, *args):
        """Assert that f(bad_fd, *args) raises OSError with errno EBADF."""
        try:
            f(test_support.make_bad_fd(), *args)
        except OSError as e:
            self.assertEqual(e.errno, errno.EBADF)
        else:
            self.fail("%r didn't raise an OSError with a bad file descriptor"
                      % f)

    @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
    def test_isatty(self):
        # isatty never raises: it simply reports False for a bad fd.
        self.assertEqual(os.isatty(test_support.make_bad_fd()), False)

    @unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
    def test_closerange(self):
        fd = test_support.make_bad_fd()
        # Make sure none of the descriptors we are about to close are
        # currently valid (issue 6542).
        for i in range(10):
            try: os.fstat(fd+i)
            except OSError:
                pass
            else:
                break
        if i < 2:
            raise unittest.SkipTest(
                "Unable to acquire a range of invalid file descriptors")
        # closerange ignores per-fd errors and must simply return None.
        self.assertEqual(os.closerange(fd, fd + i-1), None)

    @unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
    def test_dup2(self):
        self.check(os.dup2, 20)

    @unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
    def test_fchmod(self):
        self.check(os.fchmod, 0)

    @unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
    def test_fchown(self):
        self.check(os.fchown, -1, -1)

    @unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
    def test_fpathconf(self):
        self.check(os.fpathconf, "PC_NAME_MAX")

    @unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
    def test_ftruncate(self):
        self.check(os.ftruncate, 0)

    @unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
    def test_lseek(self):
        self.check(os.lseek, 0, 0)

    @unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
    def test_read(self):
        self.check(os.read, 1)

    @unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
    def test_tcsetpgrpt(self):
        self.check(os.tcsetpgrp, 0)

    @unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
    def test_write(self):
        self.check(os.write, " ")
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class PosixUidGidTests(unittest.TestCase):
    """Argument checking for the POSIX set*uid / set*gid family.

    When not running as root, setting id 0 must fail with os.error; in all
    cases non-int arguments raise TypeError and values >= 2**32 raise
    OverflowError.
    """
    @unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
    def test_setuid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.setuid, 0)
        self.assertRaises(TypeError, os.setuid, 'not an int')
        self.assertRaises(OverflowError, os.setuid, 1<<32)

    @unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
    def test_setgid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.setgid, 0)
        self.assertRaises(TypeError, os.setgid, 'not an int')
        self.assertRaises(OverflowError, os.setgid, 1<<32)

    @unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
    def test_seteuid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.seteuid, 0)
        self.assertRaises(TypeError, os.seteuid, 'not an int')
        self.assertRaises(OverflowError, os.seteuid, 1<<32)

    @unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
    def test_setegid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.setegid, 0)
        self.assertRaises(TypeError, os.setegid, 'not an int')
        self.assertRaises(OverflowError, os.setegid, 1<<32)

    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.setreuid, 0, 0)
        self.assertRaises(TypeError, os.setreuid, 'not an int', 0)
        self.assertRaises(TypeError, os.setreuid, 0, 'not an int')
        self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
        self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)

    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
            sys.executable, '-c',
            'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])

    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid(self):
        if os.getuid() != 0:
            self.assertRaises(os.error, os.setregid, 0, 0)
        self.assertRaises(TypeError, os.setregid, 'not an int', 0)
        self.assertRaises(TypeError, os.setregid, 0, 'not an int')
        self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
        self.assertRaises(OverflowError, os.setregid, 0, 1<<32)

    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
            sys.executable, '-c',
            'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
    """os.kill() on Windows: plain signals/exit codes and console events."""
    def _kill(self, sig):
        # Start sys.executable as a subprocess and communicate from the
        # subprocess to the parent that the interpreter is ready. When it
        # becomes ready, send *sig* via os.kill to the subprocess and check
        # that the return code is equal to *sig*.
        import ctypes
        from ctypes import wintypes
        import msvcrt

        # Since we can't access the contents of the process' stdout until the
        # process has exited, use PeekNamedPipe to see what's inside stdout
        # without waiting. This is done so we can tell that the interpreter
        # is started and running at a point where it could handle a signal.
        PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
        PeekNamedPipe.restype = wintypes.BOOL
        PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
                                  ctypes.POINTER(ctypes.c_char), # stdout buf
                                  wintypes.DWORD, # Buffer size
                                  ctypes.POINTER(wintypes.DWORD), # bytes read
                                  ctypes.POINTER(wintypes.DWORD), # bytes avail
                                  ctypes.POINTER(wintypes.DWORD)) # bytes left
        msg = "running"
        # Child writes `msg` then blocks in input(), waiting to be killed.
        proc = subprocess.Popen([sys.executable, "-c",
                                 "import sys;"
                                 "sys.stdout.write('{}');"
                                 "sys.stdout.flush();"
                                 "input()".format(msg)],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        self.addCleanup(proc.stdout.close)
        self.addCleanup(proc.stderr.close)
        self.addCleanup(proc.stdin.close)

        # Poll (up to ~10s) until the child has written `msg` to its stdout.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            # Create a string buffer to store the result of stdout from the pipe
            buf = ctypes.create_string_buffer(len(msg))
            # Obtain the text currently in proc.stdout
            # Bytes read/avail/left are left as NULL and unused
            rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
                                 buf, ctypes.sizeof(buf), None, None, None)
            self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
            if buf.value:
                self.assertEqual(msg, buf.value)
                break
            time.sleep(0.1)
            count += 1
        else:
            self.fail("Did not receive communication from the subprocess")

        os.kill(proc.pid, sig)
        # On Windows the exit code of a killed process is the signal value.
        self.assertEqual(proc.wait(), sig)

    @unittest.skipIf(sys.platform == 'cli', 'ipy.exe does not support getting killed by signals.')
    def test_kill_sigterm(self):
        # SIGTERM doesn't mean anything special, but make sure it works
        self._kill(signal.SIGTERM)

    @unittest.skipIf(sys.platform == 'cli', 'ipy.exe does not support getting killed by signals.')
    def test_kill_int(self):
        # os.kill on Windows can take an int which gets set as the exit code
        self._kill(100)

    def _kill_with_event(self, event, name):
        # The child flips the shared mmap byte to '1' once its console
        # control handler is installed; only then is it safe to signal it.
        tagname = "test_os_%s" % uuid.uuid1()
        m = mmap.mmap(-1, 1, tagname)
        m[0] = '0'
        # Run a script which has console control handling enabled.
        proc = subprocess.Popen([sys.executable,
                   os.path.join(os.path.dirname(__file__),
                                "win_console_handler.py"), tagname],
                   creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        # Let the interpreter startup before we send signals. See #3137.
        count, max = 0, 20
        while count < max and proc.poll() is None:
            if m[0] == '1':
                break
            time.sleep(0.5)
            count += 1
        else:
            self.fail("Subprocess didn't finish initialization")
        os.kill(proc.pid, event)
        # proc.send_signal(event) could also be done here.
        # Allow time for the signal to be passed and the process to exit.
        time.sleep(0.5)
        if not proc.poll():
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("subprocess did not stop on {}".format(name))

    @unittest.skip("subprocesses aren't inheriting Ctrl+C property")
    def test_CTRL_C_EVENT(self):
        from ctypes import wintypes
        import ctypes

        # Make a NULL value by creating a pointer with no argument.
        NULL = ctypes.POINTER(ctypes.c_int)()
        SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
        SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
                                          wintypes.BOOL)
        SetConsoleCtrlHandler.restype = wintypes.BOOL

        # Calling this with NULL and FALSE causes the calling process to
        # handle Ctrl+C, rather than ignore it. This property is inherited
        # by subprocesses.
        SetConsoleCtrlHandler(NULL, 0)

        self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")

    def test_CTRL_BREAK_EVENT(self):
        self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ListdirTests(unittest.TestCase):
    """Test listdir on Windows."""
    # NOTE(review): this class references `support` while the rest of the
    # module uses `test_support`, and it calls os.fsencode(), a Python 3
    # API -- confirm the module's imports actually provide both names.
    def setUp(self):
        # Create SUB0/SUB1 directories and FILE0/FILE1 files under TESTFN.
        self.created_paths = []
        for i in range(2):
            dir_name = 'SUB%d' % i
            dir_path = os.path.join(support.TESTFN, dir_name)
            file_name = 'FILE%d' % i
            file_path = os.path.join(support.TESTFN, file_name)
            os.makedirs(dir_path)
            with open(file_path, 'w') as f:
                f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
            self.created_paths.extend([dir_name, file_name])
        self.created_paths.sort()

    def tearDown(self):
        shutil.rmtree(support.TESTFN)

    def test_listdir_no_extended_path(self):
        """Test when the path is not an "extended" path."""
        # unicode
        fs_encoding = sys.getfilesystemencoding()
        self.assertEqual(
                sorted(os.listdir(support.TESTFN.decode(fs_encoding))),
                [path.decode(fs_encoding) for path in self.created_paths])

        # bytes
        self.assertEqual(
                sorted(os.listdir(os.fsencode(support.TESTFN))),
                self.created_paths)

    def test_listdir_extended_path(self):
        """Test when the path starts with '\\\\?\\'."""
        # See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
        # unicode
        fs_encoding = sys.getfilesystemencoding()
        path = u'\\\\?\\' + os.path.abspath(support.TESTFN.decode(fs_encoding))
        self.assertEqual(
                sorted(os.listdir(path)),
                [path.decode(fs_encoding) for path in self.created_paths])

        # bytes
        path = b'\\\\?\\' + os.path.abspath(support.TESTFN)
        self.assertEqual(
                sorted(os.listdir(path)),
                self.created_paths)
class SpawnTests(unittest.TestCase):
    """Environment-dict validation for os.spawnve / os.spawnvpe."""
    def _test_invalid_env(self, spawn):
        # Each bad environment either raises before the spawn, or the spawn
        # itself fails with exit code 127 -- both outcomes are accepted.
        args = [sys.executable, '-c', 'pass']

        # null character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT\0VEGETABLE"] = "cabbage"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except TypeError:
            pass
        else:
            self.assertEqual(exitcode, 127)

        # null character in the environment variable value
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except TypeError:
            pass
        else:
            self.assertEqual(exitcode, 127)

        # equal character in the environment variable name
        newenv = os.environ.copy()
        newenv["FRUIT=ORANGE"] = "lemon"
        try:
            exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        except ValueError:
            pass
        else:
            self.assertEqual(exitcode, 127)

        # equal character in the environment variable value is legal: the
        # child script asserts it receives the full "orange=lemon" value.
        filename = test_support.TESTFN
        self.addCleanup(test_support.unlink, filename)
        with open(filename, "w") as fp:
            fp.write('import sys, os\n'
                     'if os.getenv("FRUIT") != "orange=lemon":\n'
                     '    raise AssertionError')
        args = [sys.executable, filename]
        newenv = os.environ.copy()
        newenv["FRUIT"] = "orange=lemon"
        exitcode = spawn(os.P_WAIT, args[0], args, newenv)
        self.assertEqual(exitcode, 0)

    @unittest.skipUnless(hasattr(os, 'spawnve'), 'test needs os.spawnve()')
    def test_spawnve_invalid_env(self):
        self._test_invalid_env(os.spawnve)

    @unittest.skipUnless(hasattr(os, 'spawnvpe'), 'test needs os.spawnvpe()')
    def test_spawnvpe_invalid_env(self):
        self._test_invalid_env(os.spawnvpe)
def test_main():
    """Run every test class defined in this module.

    Platform-specific classes (Win32*, Posix*) skip themselves via their
    unittest.skip* decorators, so they can be listed unconditionally.
    """
    test_support.run_unittest(
        FileTests,
        TemporaryFileTests,
        StatAttributeTests,
        EnvironTests,
        WalkTests,
        MakedirTests,
        DevNullTests,
        URandomTests,
        URandomFDTests,
        ExecTests,
        Win32ErrorTests,
        TestInvalidFD,
        PosixUidGidTests,
        Win32KillTests,
        # Bug fix: Win32ListdirTests was defined in this module but missing
        # from the run list, so it never executed.
        Win32ListdirTests,
        SpawnTests,
    )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import trace as trace_service
from st2common.transport import consumers, liveaction, publishers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import isotime
from st2common.util import jinja as jinja_utils
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX
from st2common.constants.action import ACTION_RESULTS_KV_PREFIX
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.services.keyvalues import KeyValueLookup
# Public API of this module.
__all__ = [
    'Notifier',
    'get_notifier'
]

LOG = logging.getLogger(__name__)

# Queue bound to liveaction UPDATE events; the Notifier consumes from it.
ACTIONUPDATE_WORK_Q = liveaction.get_queue('st2.notifiers.work',
                                           routing_key=publishers.UPDATE_RK)

# Snapshot of the config flag at import time; toggling the config later has
# no effect on an already-imported module.
ACTION_SENSOR_ENABLED = cfg.CONF.action_sensor.enable

# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][1]
class Notifier(consumers.MessageHandler):
    """Message handler which reacts to completed liveactions.

    For each liveaction that reaches a completed state it:
      1. applies any post_run policies defined for the action,
      2. dispatches the per-action "notify" triggers (if configured),
      3. dispatches the generic action trigger (if enabled in config).

    Fixes over the previous revision: bare ``except:`` clauses replaced with
    ``except Exception:`` so KeyboardInterrupt/SystemExit propagate;
    ``getattr(notify_subsection, 'routes')`` given a default so a subsection
    without that attribute cannot raise AttributeError; log calls use lazy
    %-style arguments; failed route dispatches are logged individually.
    """
    message_type = LiveActionDB

    def __init__(self, connection, queues, trigger_dispatcher=None):
        super(Notifier, self).__init__(connection, queues)
        if not trigger_dispatcher:
            trigger_dispatcher = TriggerDispatcher(LOG)
        self._trigger_dispatcher = trigger_dispatcher
        # Pre-compute the fully qualified trigger references used on every
        # dispatch instead of rebuilding them per message.
        self._notify_trigger = ResourceReference.to_string_reference(
            pack=NOTIFY_TRIGGER_TYPE['pack'],
            name=NOTIFY_TRIGGER_TYPE['name'])
        self._action_trigger = ResourceReference.to_string_reference(
            pack=ACTION_TRIGGER_TYPE['pack'],
            name=ACTION_TRIGGER_TYPE['name'])

    def process(self, liveaction):
        """Entry point invoked for each liveaction message from the queue."""
        live_action_id = str(liveaction.id)
        extra = {'live_action_db': liveaction}
        LOG.debug('Processing liveaction %s', live_action_id, extra=extra)

        # Only completed liveactions are of interest here.
        if liveaction.status not in LIVEACTION_COMPLETED_STATES:
            LOG.debug('Skipping processing of liveaction %s since it\'s not in a completed state',
                      live_action_id, extra=extra)
            return

        execution = self._get_execution_for_liveaction(liveaction)
        if not execution:
            LOG.exception('Execution object corresponding to LiveAction %s not found.',
                          live_action_id, extra=extra)
            return None

        self._apply_post_run_policies(liveaction=liveaction)

        if liveaction.notify is not None:
            self._post_notify_triggers(liveaction=liveaction, execution=execution)

        self._post_generic_trigger(liveaction=liveaction, execution=execution)

    def _get_execution_for_liveaction(self, liveaction):
        """Return the ActionExecution for `liveaction`, or None if absent."""
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))
        if not execution:
            return None
        return execution

    def _post_notify_triggers(self, liveaction=None, execution=None):
        """Dispatch on_complete / on_success / on_failure notify triggers."""
        notify = getattr(liveaction, 'notify', None)
        if not notify:
            return

        if notify.on_complete:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_complete,
                default_message_suffix='completed.')
        if liveaction.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_success,
                default_message_suffix='succeeded.')
        if liveaction.status in LIVEACTION_FAILED_STATES and notify.on_failure:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_failure,
                default_message_suffix='failed.')

    def _post_notify_subsection_triggers(self, liveaction=None, execution=None,
                                         notify_subsection=None,
                                         default_message_suffix=None):
        """Render and dispatch the notify trigger for one notify subsection.

        Raises an Exception listing the routes whose dispatch failed.
        """
        # 'routes' is the current field name; 'channels' is deprecated. Use a
        # default so a subsection carrying neither attribute does not raise
        # AttributeError (previously `getattr(..., 'routes')` had no default).
        routes = (getattr(notify_subsection, 'routes', None) or
                  getattr(notify_subsection, 'channels', None))

        if not routes:
            return

        execution_id = str(execution.id)

        payload = {}
        message = notify_subsection.message or (
            'Action ' + liveaction.action + ' ' + default_message_suffix)
        data = notify_subsection.data or {}
        jinja_context = self._build_jinja_context(liveaction=liveaction, execution=execution)

        # Rendering failures are non-fatal: fall back to the raw values.
        try:
            message = self._transform_message(message=message,
                                              context=jinja_context)
        except Exception:
            LOG.exception('Failed (Jinja) transforming `message`.')

        try:
            data = self._transform_data(data=data, context=jinja_context)
        except Exception:
            LOG.exception('Failed (Jinja) transforming `data`.')

        # At this point convert result to a string. This restricts the rules
        # engine's ability to introspect the result. On the other hand at
        # least a JSON-usable result is sent as part of the notification. If
        # Jinja is required to convert to a string representation it uses
        # str(...) which makes it impossible to parse the result as JSON any
        # longer.
        # TODO: Use to_serializable_dict
        data['result'] = json.dumps(liveaction.result)

        payload['message'] = message
        payload['data'] = data
        payload['execution_id'] = execution_id
        payload['status'] = liveaction.status
        payload['start_timestamp'] = isotime.format(liveaction.start_timestamp)
        payload['end_timestamp'] = isotime.format(liveaction.end_timestamp)
        payload['action_ref'] = liveaction.action
        payload['runner_ref'] = self._get_runner_ref(liveaction.action)

        trace_context = self._get_trace_context(execution_id=execution_id)

        failed_routes = []
        for route in routes:
            try:
                payload['route'] = route
                # Deprecated. Only for backward compatibility reasons.
                payload['channel'] = route
                LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'],
                          liveaction.id, payload)
                self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload,
                                                  trace_context=trace_context)
            except Exception:
                # Log the per-route failure (previously swallowed silently)
                # but keep trying the remaining routes.
                LOG.exception('Failed dispatching notification for route "%s".', route)
                failed_routes.append(route)

        if failed_routes:
            raise Exception('Failed notifications to routes: %s' % ', '.join(failed_routes))

    def _build_jinja_context(self, liveaction, execution):
        """Assemble the context dict used to render notify message/data."""
        context = {SYSTEM_KV_PREFIX: KeyValueLookup()}
        context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction.parameters})
        context.update({ACTION_CONTEXT_KV_PREFIX: liveaction.context})
        context.update({ACTION_RESULTS_KV_PREFIX: execution.result})
        return context

    def _transform_message(self, message, context=None):
        """Render `message` as a Jinja template; return it unchanged on a miss."""
        mapping = {'message': message}
        context = context or {}
        return (jinja_utils.render_values(mapping=mapping, context=context)).get('message',
                                                                                 message)

    def _transform_data(self, data, context=None):
        """Render every value of the `data` mapping as a Jinja template."""
        return jinja_utils.render_values(mapping=data, context=context)

    def _get_trace_context(self, execution_id):
        """Look up the TraceContext for an execution, or None when untraced."""
        trace_db = trace_service.get_trace_db_by_action_execution(
            action_execution_id=execution_id)
        if trace_db:
            return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If
        # necessary it shall be created downstream. Sure this is impl leakage
        # of some sort.
        return None

    def _post_generic_trigger(self, liveaction=None, execution=None):
        """Dispatch the generic st2 action trigger for a completed liveaction."""
        if not ACTION_SENSOR_ENABLED:
            LOG.debug('Action trigger is disabled, skipping trigger dispatch...')
            return

        execution_id = str(execution.id)
        payload = {'execution_id': execution_id,
                   'status': liveaction.status,
                   'start_timestamp': str(liveaction.start_timestamp),
                   # deprecate 'action_name' at some point and switch to 'action_ref'
                   'action_name': liveaction.action,
                   'action_ref': liveaction.action,
                   'runner_ref': self._get_runner_ref(liveaction.action),
                   'parameters': liveaction.get_masked_parameters(),
                   'result': liveaction.result}

        # Use execution_id to extract trace rather than liveaction. execution_id
        # will look-up an exact TraceDB while liveaction depending on context
        # may not end up going to the DB.
        trace_context = self._get_trace_context(execution_id=execution_id)
        LOG.debug('POSTing %s for %s. Payload - %s. TraceContext - %s',
                  ACTION_TRIGGER_TYPE['name'], liveaction.id, payload, trace_context)
        self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload,
                                          trace_context=trace_context)

    def _apply_post_run_policies(self, liveaction=None):
        """Apply all post_run policies defined for the liveaction's action."""
        policy_dbs = Policy.query(resource_ref=liveaction.action)
        LOG.debug('Applying %s post_run policies', len(policy_dbs))

        for policy_db in policy_dbs:
            driver = policies.get_driver(policy_db.ref,
                                         policy_db.policy_type,
                                         **policy_db.parameters)
            try:
                LOG.debug('Applying post_run policy "%s" (%s) for liveaction %s',
                          policy_db.ref, policy_db.policy_type, str(liveaction.id))
                liveaction = driver.apply_after(liveaction)
            except Exception:
                LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)

    def _get_runner_ref(self, action_ref):
        """
        Retrieve a runner reference for the provided action.

        :rtype: ``str``
        """
        action = Action.get_by_ref(action_ref)
        return action['runner_type']['name']
def get_notifier():
    """Create a Notifier bound to the actionupdate work queue."""
    # NOTE(review): the connection context exits as soon as this returns --
    # confirm the Notifier does not need the connection to remain open.
    with Connection(transport_utils.get_messaging_urls()) as conn:
        return Notifier(conn, [ACTIONUPDATE_WORK_Q], trigger_dispatcher=TriggerDispatcher(LOG))
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This StaticWeb WSGI middleware will serve container data as a static web site
with index file and error file resolution and optional file listings. This mode
is normally only active for anonymous requests. When using keystone for
authentication set ``delay_auth_decision = true`` in the authtoken middleware
configuration in your ``/etc/swift/proxy-server.conf`` file. If you want to
use it with authenticated requests, set the ``X-Web-Mode: true`` header on the
request.
The ``staticweb`` filter should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware. Also, the
configuration section for the ``staticweb`` middleware itself needs to be
added. For example::
[DEFAULT]
...
[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache ratelimit tempauth
staticweb proxy-logging proxy-server
...
[filter:staticweb]
use = egg:swift#staticweb
Any publicly readable containers (for example, ``X-Container-Read: .r:*``, see
`acls`_ for more information on this) will be checked for
X-Container-Meta-Web-Index and X-Container-Meta-Web-Error header values::
X-Container-Meta-Web-Index <index.name>
X-Container-Meta-Web-Error <error.name.suffix>
If X-Container-Meta-Web-Index is set, any <index.name> files will be served
without having to specify the <index.name> part. For instance, setting
``X-Container-Meta-Web-Index: index.html`` will be able to serve the object
.../pseudo/path/index.html with just .../pseudo/path or .../pseudo/path/
If X-Container-Meta-Web-Error is set, any errors (currently just 401
Unauthorized and 404 Not Found) will instead serve the
.../<status.code><error.name.suffix> object. For instance, setting
``X-Container-Meta-Web-Error: error.html`` will serve .../404error.html for
requests for paths not found.
For pseudo paths that have no <index.name>, this middleware can serve HTML file
listings if you set the ``X-Container-Meta-Web-Listings: true`` metadata item
on the container.
If listings are enabled, the listings can have a custom style sheet by setting
the X-Container-Meta-Web-Listings-CSS header. For instance, setting
``X-Container-Meta-Web-Listings-CSS: listing.css`` will make listings link to
the .../listing.css style sheet. If you "view source" in your browser on a
listing page, you will see the well defined document structure that can be
styled.
The content-type of directory marker objects can be modified by setting
the ``X-Container-Meta-Web-Directory-Type`` header. If the header is not set,
application/directory is used by default. Directory marker objects are
0-byte objects that represent directories to create a simulated hierarchical
structure.
Example usage of this middleware via ``swift``:
Make the container publicly readable::
swift post -r '.r:*' container
You should be able to get objects directly, but no index.html resolution or
listings.
Set an index file directive::
swift post -m 'web-index:index.html' container
You should be able to hit paths that have an index.html without needing to
type the index.html part.
Turn on listings::
swift post -m 'web-listings: true' container
Now you should see object listings for paths and pseudo paths that have no
index.html.
Enable a custom listings style sheet::
swift post -m 'web-listings-css:listings.css' container
Set an error file::
swift post -m 'web-error:error.html' container
Now 401's should load 401error.html, 404's should load 404error.html, etc.
Set Content-Type of directory marker object::
swift post -m 'web-directory-type:text/directory' container
Now 0-byte objects with a content-type of text/directory will be treated
as directories rather than objects.
"""
import cgi
import time
from urllib import quote as urllib_quote
from swift.common.utils import human_readable, split_path, config_true_value, \
json
from swift.common.wsgi import make_pre_authed_env, WSGIContext
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
from swift.proxy.controllers.base import get_container_info
def ensure_utf8_bytes(value):
    """Return `value` encoded to UTF-8 if it is a unicode string.

    Python 2 only: `unicode` is the Py2 text type; byte strings and other
    values are returned untouched.
    """
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    return value
def quote(value, safe='/'):
    """
    Patched version of urllib.quote that encodes utf-8 strings before quoting.

    :param value: text or byte string to quote
    :param safe: characters to leave unquoted (default '/')
    """
    return urllib_quote(ensure_utf8_bytes(value), safe)
class _StaticWebContext(WSGIContext):
"""
The Static Web WSGI middleware filter; serves container data as a
static web site. See `staticweb`_ for an overview.
This _StaticWebContext is used by StaticWeb with each request
that might need to be handled to make keeping contextual
information about the request a bit simpler than storing it in
the WSGI env.
"""
def __init__(self, staticweb, version, account, container, obj):
    """Capture the request's path components and the wrapped WSGI app.

    :param staticweb: the owning StaticWeb middleware instance
    :param version: API version segment of the request path
    :param account: account name segment
    :param container: container name segment
    :param obj: object name segment (may be empty)
    """
    WSGIContext.__init__(self, staticweb.app)
    self.version = version
    self.account = account
    self.container = container
    self.obj = obj
    self.app = staticweb.app
    # User-agent template for pre-authed subrequests; presumably '%(orig)s'
    # is substituted with the original agent downstream -- confirm in
    # make_pre_authed_env.
    self.agent = '%(orig)s StaticWeb'
    # Results from the last call to self._get_container_info.
    self._index = self._error = self._listings = self._listings_css = \
        self._dir_type = None
def _error_response(self, response, env, start_response):
    """
    Sends the error response to the remote client, possibly resolving a
    custom error response body based on x-container-meta-web-error.

    :param response: The error response we should default to sending.
    :param env: The original request WSGI environment.
    :param start_response: The WSGI start_response hook.
    """
    if not self._error:
        # No custom error page configured: send the default response as-is.
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return response
    # Save the current response state, because the _app_call below will
    # overwrite self._response_* with the subrequest's values.
    save_response_status = self._response_status
    save_response_headers = self._response_headers
    save_response_exc_info = self._response_exc_info
    # Fetch the custom error object named <status-code><error-suffix>
    # from the same container via a pre-authed subrequest.
    resp = self._app_call(make_pre_authed_env(
        env, 'GET', '/%s/%s/%s/%s%s' % (
            self.version, self.account, self.container,
            self._get_status_int(), self._error),
        self.agent, swift_source='SW'))
    if is_success(self._get_status_int()):
        # Serve the custom error body, but keep the ORIGINAL error status
        # (with the subrequest's headers/exc_info).
        start_response(save_response_status, self._response_headers,
                       self._response_exc_info)
        return resp
    # Custom error object could not be fetched: fall back to the default
    # response with its original status and headers.
    start_response(save_response_status, save_response_headers,
                   save_response_exc_info)
    return response
def _get_container_info(self, env):
    """
    Retrieves x-container-meta-web-index, x-container-meta-web-error,
    x-container-meta-web-listings, x-container-meta-web-listings-css,
    and x-container-meta-web-directory-type from memcache or from the
    cluster and stores the result in memcache and in self._index,
    self._error, self._listings, self._listings_css and self._dir_type.

    :param env: The WSGI environment dict.
    """
    # Reset all cached values first so a failed lookup leaves them None.
    self._index = self._error = self._listings = self._listings_css = \
        self._dir_type = None
    container_info = get_container_info(env, self.app, swift_source='SW')
    if not is_success(container_info['status']):
        return
    meta = container_info.get('meta', {})
    self._index = meta.get('web-index', '').strip()
    self._error = meta.get('web-error', '').strip()
    self._listings = meta.get('web-listings', '').strip()
    self._listings_css = meta.get('web-listings-css', '').strip()
    self._dir_type = meta.get('web-directory-type', '').strip()
    def _listing(self, env, start_response, prefix=None):
        """
        Sends an HTML object listing to the remote client.

        :param env: The original WSGI environment dict.
        :param start_response: The original WSGI start_response hook.
        :param prefix: Any prefix desired for the container listing.
        :returns: The WSGI body iterable for the rendered page.
        """
        # Listings must be explicitly enabled via
        # x-container-meta-web-listings; otherwise serve a (possibly
        # customized) 404.
        if not config_true_value(self._listings):
            resp = HTTPNotFound()(env, self._start_response)
            return self._error_response(resp, env, start_response)
        # Pre-authed subrequest for the container's JSON listing, delimited
        # on '/' so pseudo-directories come back as "subdir" entries.
        tmp_env = make_pre_authed_env(
            env, 'GET', '/%s/%s/%s' % (
                self.version, self.account, self.container),
            self.agent, swift_source='SW')
        tmp_env['QUERY_STRING'] = 'delimiter=/&format=json'
        if prefix:
            tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix)
        else:
            prefix = ''
        resp = self._app_call(tmp_env)
        if not is_success(self._get_status_int()):
            return self._error_response(resp, env, start_response)
        listing = None
        body = ''.join(resp)
        if body:
            listing = json.loads(body)
        # An empty listing (container or prefix has nothing in it) is
        # treated as not found.
        if not listing:
            resp = HTTPNotFound()(env, self._start_response)
            return self._error_response(resp, env, start_response)
        headers = {'Content-Type': 'text/html; charset=UTF-8'}
        body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
               'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
               '<html>\n' \
               ' <head>\n' \
               '  <title>Listing of %s</title>\n' % \
               cgi.escape(env['PATH_INFO'])
        # Either link the user-supplied stylesheet or fall back to a small
        # built-in one.
        if self._listings_css:
            body += '  <link rel="stylesheet" type="text/css" ' \
                    'href="%s" />\n' % (self._build_css_path(prefix))
        else:
            body += '  <style type="text/css">\n' \
                    '   h1 {font-size: 1em; font-weight: bold;}\n' \
                    '   th {text-align: left; padding: 0px 1em 0px 1em;}\n' \
                    '   td {padding: 0px 1em 0px 1em;}\n' \
                    '   a {text-decoration: none;}\n' \
                    '  </style>\n'
        body += ' </head>\n' \
                ' <body>\n' \
                '  <h1 id="title">Listing of %s</h1>\n' \
                '  <table id="listing">\n' \
                '   <tr id="heading">\n' \
                '    <th class="colname">Name</th>\n' \
                '    <th class="colsize">Size</th>\n' \
                '    <th class="coldate">Date</th>\n' \
                '   </tr>\n' % \
                cgi.escape(env['PATH_INFO'])
        # Inside a prefix, offer a "../" link back to the parent listing.
        if prefix:
            body += '   <tr id="parent" class="item">\n' \
                    '    <td class="colname"><a href="../">../</a></td>\n' \
                    '    <td class="colsize">&nbsp;</td>\n' \
                    '    <td class="coldate">&nbsp;</td>\n' \
                    '   </tr>\n'
        # First pass: pseudo-directories ("subdir" entries) so they sort
        # ahead of plain objects in the rendered table.
        for item in listing:
            if 'subdir' in item:
                subdir = ensure_utf8_bytes(item['subdir'])
                if prefix:
                    subdir = subdir[len(prefix):]
                body += '   <tr class="item subdir">\n' \
                        '    <td class="colname"><a href="%s">%s</a></td>\n' \
                        '    <td class="colsize">&nbsp;</td>\n' \
                        '    <td class="coldate">&nbsp;</td>\n' \
                        '   </tr>\n' % \
                        (quote(subdir), cgi.escape(subdir))
        # Second pass: actual objects, with size and modification date.
        for item in listing:
            if 'name' in item:
                name = ensure_utf8_bytes(item['name'])
                if prefix:
                    name = name[len(prefix):]
                content_type = ensure_utf8_bytes(item['content_type'])
                bytes = ensure_utf8_bytes(human_readable(item['bytes']))
                # Trim fractional seconds and the ISO 'T' separator.
                last_modified = (cgi.escape(item['last_modified']).
                                 split('.')[0].replace('T', ' '))
                body += '   <tr class="item %s">\n' \
                        '    <td class="colname"><a href="%s">%s</a></td>\n' \
                        '    <td class="colsize">%s</td>\n' \
                        '    <td class="coldate">%s</td>\n' \
                        '   </tr>\n' % \
                        (' '.join('type-' + cgi.escape(t.lower(), quote=True)
                                  for t in content_type.split('/')),
                         quote(name), cgi.escape(name),
                         bytes, ensure_utf8_bytes(last_modified))
        body += '  </table>\n' \
                ' </body>\n' \
                '</html>\n'
        resp = Response(headers=headers, body=body)
        return resp(env, start_response)
def _build_css_path(self, prefix=''):
"""
Constructs a relative path from a given prefix within the container.
URLs and paths starting with '/' are not modified.
:param prefix: The prefix for the container listing.
"""
if self._listings_css.startswith(('/', 'http://', 'https://')):
css_path = quote(self._listings_css, ':/')
else:
css_path = '../' * prefix.count('/') + quote(self._listings_css)
return css_path
    def handle_container(self, env, start_response):
        """
        Handles a possible static web request for a container.

        :param env: The original WSGI environment dict.
        :param start_response: The original WSGI start_response hook.
        :returns: A WSGI body iterable.
        """
        self._get_container_info(env)
        # With neither listings nor an index configured this container is
        # not a static web site: pass through to the app, or 404 when the
        # client explicitly asked for web mode.
        if not self._listings and not self._index:
            if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
                return HTTPNotFound()(env, start_response)
            return self.app(env, start_response)
        # Canonicalize the container URL to a trailing slash so relative
        # links inside the served pages resolve correctly.
        if env['PATH_INFO'][-1] != '/':
            resp = HTTPMovedPermanently(
                location=(env['PATH_INFO'] + '/'))
            return resp(env, start_response)
        if not self._index:
            return self._listing(env, start_response)
        # An index object is configured: try to serve it as a subrequest.
        tmp_env = dict(env)
        tmp_env['HTTP_USER_AGENT'] = \
            '%s StaticWeb' % env.get('HTTP_USER_AGENT')
        tmp_env['swift.source'] = 'SW'
        tmp_env['PATH_INFO'] += self._index
        resp = self._app_call(tmp_env)
        status_int = self._get_status_int()
        if status_int == HTTP_NOT_FOUND:
            # No index object exists; fall back to a listing (if enabled).
            return self._listing(env, start_response)
        elif not is_success(self._get_status_int()) and \
                not is_redirection(self._get_status_int()):
            return self._error_response(resp, env, start_response)
        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)
        return resp
def handle_object(self, env, start_response):
"""
Handles a possible static web request for an object. This object could
resolve into an index or listing request.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
"""
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
self._get_container_info(env)
if is_success(status_int) or is_redirection(status_int):
# Treat directory marker objects as not found
if not self._dir_type:
self._dir_type = 'application/directory'
content_length = self._response_header_value('content-length')
content_length = int(content_length) if content_length else 0
if self._response_header_value('content-type') == self._dir_type \
and content_length <= 1:
status_int = HTTP_NOT_FOUND
else:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int != HTTP_NOT_FOUND:
# Retaining the previous code's behavior of not using custom error
# pages for non-404 errors.
self._error = None
return self._error_response(resp, env, start_response)
if not self._listings and not self._index:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
status_int = HTTP_NOT_FOUND
if self._index:
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
if tmp_env['PATH_INFO'][-1] != '/':
tmp_env['PATH_INFO'] += '/'
tmp_env['PATH_INFO'] += self._index
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
if is_success(status_int) or is_redirection(status_int):
if env['PATH_INFO'][-1] != '/':
resp = HTTPMovedPermanently(
location=env['PATH_INFO'] + '/')
return resp(env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int == HTTP_NOT_FOUND:
if env['PATH_INFO'][-1] != '/':
tmp_env = make_pre_authed_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent, swift_source='SW')
tmp_env['QUERY_STRING'] = 'limit=1&format=json&delimiter' \
'=/&limit=1&prefix=%s' % quote(self.obj + '/')
resp = self._app_call(tmp_env)
body = ''.join(resp)
if not is_success(self._get_status_int()) or not body or \
not json.loads(body):
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
resp = HTTPMovedPermanently(location=env['PATH_INFO'] + '/')
return resp(env, start_response)
return self._listing(env, start_response, self.obj)
class StaticWeb(object):
    """
    The Static Web WSGI middleware filter; serves container data as a static
    web site. See `staticweb`_ for an overview.

    The proxy logs created for any subrequests made will have swift.source set
    to "SW".

    :param app: The next WSGI application/filter in the paste.deploy pipeline.
    :param conf: The filter configuration dict.
    """

    def __init__(self, app, conf):
        #: The next WSGI application/filter in the paste.deploy pipeline.
        self.app = app
        #: The filter configuration dict.
        self.conf = conf

    def __call__(self, env, start_response):
        """
        Main hook into the WSGI paste.deploy filter/app pipeline.

        :param env: The WSGI environment dict.
        :param start_response: The WSGI start_response hook.
        """
        env['staticweb.start_time'] = time.time()
        try:
            version, account, container, obj = split_path(
                env['PATH_INFO'], 2, 4, True)
        except ValueError:
            return self.app(env, start_response)
        # Static web only serves reads; anything else passes through.
        if env['REQUEST_METHOD'] not in ('HEAD', 'GET'):
            return self.app(env, start_response)
        # Authenticated users bypass static web unless they explicitly
        # request web mode.
        web_mode = config_true_value(env.get('HTTP_X_WEB_MODE', 'f'))
        if env.get('REMOTE_USER') and not web_mode:
            return self.app(env, start_response)
        # Account-level requests are never static web requests.
        if not container:
            return self.app(env, start_response)
        context = _StaticWebContext(self, version, account, container, obj)
        if obj:
            return context.handle_object(env, start_response)
        return context.handle_container(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Returns a Static Web WSGI filter for use with paste.deploy."""
    # Local settings override the globals.
    conf = dict(global_conf, **local_conf)

    def staticweb_filter(app):
        return StaticWeb(app, conf)
    return staticweb_filter
| |
import logging
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import HttpResponseRedirect
from oscar.core.loading import get_class, get_model
OrderCreator = get_class('order.utils', 'OrderCreator')
Dispatcher = get_class('customer.utils', 'Dispatcher')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
ShippingAddress = get_model('order', 'ShippingAddress')
OrderNumberGenerator = get_class('order.utils', 'OrderNumberGenerator')
PaymentEventType = get_model('order', 'PaymentEventType')
PaymentEvent = get_model('order', 'PaymentEvent')
PaymentEventQuantity = get_model('order', 'PaymentEventQuantity')
UserAddress = get_model('address', 'UserAddress')
Basket = get_model('basket', 'Basket')
CommunicationEventType = get_model('customer', 'CommunicationEventType')
UnableToPlaceOrder = get_class('order.exceptions', 'UnableToPlaceOrder')
post_checkout = get_class('checkout.signals', 'post_checkout')
# Standard logger for checkout events
logger = logging.getLogger('oscar.checkout')
class OrderPlacementMixin(CheckoutSessionMixin):
    """
    Mixin which provides functionality for placing orders.

    Any view class which needs to place an order should use this mixin.
    """
    # Any payment sources should be added to this list as part of the
    # handle_payment method. If the order is placed successfully, then
    # they will be persisted. We need to have the order instance before the
    # payment sources can be saved.
    _payment_sources = None

    # Any payment events should be added to this list as part of the
    # handle_payment method.
    _payment_events = None

    # Default code for the email to send after successful checkout
    communication_type_code = 'ORDER_PLACED'

    view_signal = post_checkout

    # Payment handling methods
    # ------------------------

    def handle_payment(self, order_number, total, **kwargs):
        """
        Handle any payment processing and record payment sources and events.

        This method is designed to be overridden within your project. The
        default is to do nothing as payment is domain-specific.

        This method is responsible for handling payment and recording the
        payment sources (using the add_payment_source method) and payment
        events (using add_payment_event) so they can be
        linked to the order when it is saved later on.
        """
        pass

    def add_payment_source(self, source):
        """
        Record a payment source for this order
        """
        if self._payment_sources is None:
            self._payment_sources = []
        self._payment_sources.append(source)

    def add_payment_event(self, event_type_name, amount, reference=''):
        """
        Record a payment event for creation once the order is placed
        """
        event_type, __ = PaymentEventType.objects.get_or_create(
            name=event_type_name)
        # We keep a local cache of (unsaved) payment events
        if self._payment_events is None:
            self._payment_events = []
        event = PaymentEvent(
            event_type=event_type, amount=amount,
            reference=reference)
        self._payment_events.append(event)

    # Placing order methods
    # ---------------------

    def generate_order_number(self, basket):
        """
        Return a new order number
        """
        return OrderNumberGenerator().order_number(basket)

    def handle_order_placement(self, order_number, user, basket,
                               shipping_address, shipping_date, shipping_method,
                               shipping_charge, billing_address, order_total,
                               **kwargs):
        """
        Write out the order models and return the appropriate HTTP response

        We deliberately pass the basket in here as the one tied to the request
        isn't necessarily the correct one to use in placing the order. This
        can happen when a basket gets frozen.
        """
        # BUGFIX: removed a stray Python-2 debugging statement
        # (``print shipping_date``) that preceded the docstring.
        order = self.place_order(
            order_number=order_number, user=user, basket=basket,
            shipping_address=shipping_address, shipping_date=shipping_date,
            shipping_method=shipping_method,
            shipping_charge=shipping_charge, order_total=order_total,
            billing_address=billing_address, **kwargs)
        basket.submit()
        return self.handle_successful_order(order)

    def place_order(self, order_number, user, basket, shipping_date, shipping_address,
                    shipping_method, shipping_charge, order_total,
                    billing_address=None, **kwargs):
        """
        Writes the order out to the DB including the payment models
        """
        # Create saved shipping address instance from passed in unsaved
        # instance
        shipping_address = self.create_shipping_address(user, shipping_address)

        # We pass the kwargs as they often include the billing address form
        # which will be needed to save a billing address.
        billing_address = self.create_billing_address(
            billing_address, shipping_address, **kwargs)

        if 'status' not in kwargs:
            status = self.get_initial_order_status(basket)
        else:
            status = kwargs.pop('status')

        order = OrderCreator().place_order(
            user=user,
            order_number=order_number,
            basket=basket,
            shipping_address=shipping_address,
            shipping_date=shipping_date,
            shipping_method=shipping_method,
            shipping_charge=shipping_charge,
            total=order_total,
            billing_address=billing_address,
            status=status, **kwargs)
        self.save_payment_details(order)
        return order

    def create_shipping_address(self, user, shipping_address):
        """
        Create and return the shipping address for the current order.

        Compared to self.get_shipping_address(), ShippingAddress is saved and
        makes sure that appropriate UserAddress exists.
        """
        # For an order that only contains items that don't require shipping we
        # won't have a shipping address, so we have to check for it.
        if not shipping_address:
            return None
        shipping_address.save()
        if user.is_authenticated():
            self.update_address_book(user, shipping_address)
        return shipping_address

    def update_address_book(self, user, shipping_addr):
        """
        Update the user's address book based on the new shipping address
        """
        try:
            user_addr = user.addresses.get(
                hash=shipping_addr.generate_hash())
        except ObjectDoesNotExist:
            # Create a new user address
            user_addr = UserAddress(user=user)
            shipping_addr.populate_alternative_model(user_addr)
        user_addr.num_orders += 1
        user_addr.save()

    def create_billing_address(self, billing_address=None,
                               shipping_address=None, **kwargs):
        """
        Saves any relevant billing data (eg a billing address).
        """
        if billing_address is not None:
            billing_address.save()
        return billing_address

    def save_payment_details(self, order):
        """
        Saves all payment-related details. This could include a billing
        address, payment sources and any order payment events.
        """
        self.save_payment_events(order)
        self.save_payment_sources(order)

    def save_payment_events(self, order):
        """
        Saves any relevant payment events for this order
        """
        if not self._payment_events:
            return
        for event in self._payment_events:
            event.order = order
            event.save()
            # We assume all lines are involved in the initial payment event
            for line in order.lines.all():
                PaymentEventQuantity.objects.create(
                    event=event, line=line, quantity=line.quantity)

    def save_payment_sources(self, order):
        """
        Saves any payment sources used in this order.

        When the payment sources are created, the order model does not exist
        and so they need to have it set before saving.
        """
        if not self._payment_sources:
            return
        for source in self._payment_sources:
            source.order = order
            source.save()

    def get_initial_order_status(self, basket):
        """
        Return the initial status for a new order (None by default).

        Override in your project to start orders in a specific status.
        """
        return None

    # Post-order methods
    # ------------------

    def handle_successful_order(self, order):
        """
        Handle the various steps required after an order has been successfully
        placed.

        Override this view if you want to perform custom actions when an
        order is submitted.
        """
        # Send confirmation message (normally an email)
        self.send_confirmation_message(order, self.communication_type_code)

        # Flush all session data
        self.checkout_session.flush()

        # Save order id in session so thank-you page can load it
        self.request.session['checkout_order_id'] = order.id

        response = HttpResponseRedirect(self.get_success_url())
        self.send_signal(self.request, response, order)
        return response

    def send_signal(self, request, response, order):
        """
        Emit the post-checkout signal for the newly placed order.
        """
        self.view_signal.send(
            sender=self, order=order, user=request.user,
            request=request, response=response)

    def get_success_url(self):
        """
        Return the URL to redirect to after a successful checkout.
        """
        return reverse('checkout:thank-you')

    def send_confirmation_message(self, order, code, **kwargs):
        """
        Send the order-confirmation communication (normally an email).
        """
        ctx = self.get_message_context(order)
        try:
            event_type = CommunicationEventType.objects.get(code=code)
        except CommunicationEventType.DoesNotExist:
            # No event-type in database, attempt to find templates for this
            # type and render them immediately to get the messages. Since we
            # have not CommunicationEventType to link to, we can't create a
            # CommunicationEvent instance.
            messages = CommunicationEventType.objects.get_and_render(code, ctx)
            event_type = None
        else:
            messages = event_type.get_messages(ctx)

        if messages and messages['body']:
            logger.info("Order #%s - sending %s messages", order.number, code)
            dispatcher = Dispatcher(logger)
            dispatcher.dispatch_order_messages(order, messages,
                                               event_type, **kwargs)
        else:
            logger.warning("Order #%s - no %s communication event type",
                           order.number, code)

    def get_message_context(self, order):
        """
        Build the template context used for order communications.
        """
        ctx = {
            'user': self.request.user,
            'order': order,
            'site': get_current_site(self.request),
            'lines': order.lines.all()
        }
        if not self.request.user.is_authenticated():
            # Attempt to add the anon order status URL to the email template
            # ctx.
            try:
                path = reverse('customer:anon-order',
                               kwargs={'order_number': order.number,
                                       'hash': order.verification_hash()})
            except NoReverseMatch:
                # We don't care that much if we can't resolve the URL
                pass
            else:
                site = Site.objects.get_current()
                ctx['status_url'] = 'http://%s%s' % (site.domain, path)
        return ctx

    # Basket helpers
    # --------------

    def get_submitted_basket(self):
        """
        Return the basket submitted earlier in this checkout session.
        """
        basket_id = self.checkout_session.get_submitted_basket_id()
        return Basket._default_manager.get(pk=basket_id)

    def freeze_basket(self, basket):
        """
        Freeze the basket so it can no longer be modified
        """
        # We freeze the basket to prevent it being modified once the payment
        # process has started. If your payment fails, then the basket will
        # need to be "unfrozen". We also store the basket ID in the session
        # so the it can be retrieved by multistage checkout processes.
        basket.freeze()

    def restore_frozen_basket(self):
        """
        Restores a frozen basket as the sole OPEN basket. Note that this also
        merges in any new products that have been added to a basket that has
        been created while payment.
        """
        try:
            fzn_basket = self.get_submitted_basket()
        except Basket.DoesNotExist:
            # Strange place. The previous basket stored in the session does
            # not exist.
            pass
        else:
            fzn_basket.thaw()
            if self.request.basket.id != fzn_basket.id:
                fzn_basket.merge(self.request.basket)
                # Use same strategy as current request basket
                fzn_basket.strategy = self.request.basket.strategy
                self.request.basket = fzn_basket
| |
# The MIT License (MIT)
#
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Run a command with admin privileges on Windows and Posix systems.
Many thanks to the answers in this StackOverflow question:
https://stackoverflow.com/q/19672352/791713
Requires pywin32 on Windows.
"""
from __future__ import print_function
import os
import sys
if __name__ == '__main__':
  # Ensure that the parent directory is not in sys.path as otherwise at
  # some point in Python 3 it may try to import "enum" from the nr package...
  norm = lambda x: os.path.normpath(os.path.abspath(x))
  dirname = os.path.dirname(norm(__file__))
  # Drop every sys.path entry that resolves to this script's own directory,
  # then clean up the temporary helpers so they don't leak into module scope.
  sys.path[:] = [x for x in sys.path if norm(x) != dirname]
  del norm, dirname
import ctypes
import io
import json
import re
import shutil
import shlex
import subprocess
import tempfile
import traceback
if os.name == 'nt':
  import ctypes.wintypes as wintypes

  class winapi:
    """Thin ctypes wrappers around the Win32 calls used by this script."""

    _WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
    _WaitForSingleObject.restype = wintypes.DWORD
    _WaitForSingleObject.argtypes = [wintypes.HANDLE, wintypes.DWORD]

    @staticmethod
    def WaitForSingleObject(handle, ms=0):
      return winapi._WaitForSingleObject(handle, ms)

    _GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
    _GetExitCodeProcess.restype = wintypes.BOOL
    _GetExitCodeProcess.argtypes = [wintypes.HANDLE, ctypes.POINTER(wintypes.DWORD)]

    @staticmethod
    def GetExitCodeProcess(handle):
      result = wintypes.DWORD()
      success = winapi._GetExitCodeProcess(handle, ctypes.byref(result))
      if not success:
        raise ctypes.WinError(ctypes.get_last_error())
      return result.value

    _MessageBox = ctypes.windll.user32.MessageBoxW
    _MessageBox.restype = ctypes.c_int
    _MessageBox.argtypes = [wintypes.HWND, wintypes.LPWSTR, wintypes.LPWSTR, wintypes.UINT]

    @staticmethod
    def MessageBox(hwnd, text, caption, type):
      return winapi._MessageBox(hwnd, text, caption, type)

    class _SHELLEXECUTEINFO(ctypes.Structure):
      _fields_ = [
        ('cbSize', wintypes.DWORD),
        ('fMask', wintypes.ULONG),
        ('hwnd', wintypes.HWND),
        ('lpVerb', wintypes.LPCSTR),
        ('lpFile', wintypes.LPCSTR),
        ('lpParameters', wintypes.LPCSTR),
        ('lpDirectory', wintypes.LPCSTR),
        ('nShow', ctypes.c_int),
        ('hInstApp', wintypes.HINSTANCE),
        ('lpIDList', wintypes.LPVOID),
        ('lpClass', wintypes.LPCSTR),
        ('hkeyClass', wintypes.HKEY),
        ('dwHotKey', wintypes.DWORD),
        ('DUMMYUNIONNAME', wintypes.HANDLE),
        ('hProcess', wintypes.HANDLE),
      ]

    _ShellExecuteEx = ctypes.windll.shell32.ShellExecuteEx
    _ShellExecuteEx.restype = wintypes.BOOL
    _ShellExecuteEx.argtypes = [ctypes.POINTER(_SHELLEXECUTEINFO)]

    SW_HIDE = 0
    SW_MAXIMIMIZE = 3
    SW_MINIMIZE = 6
    SW_RESTORE = 9
    SW_SHOW = 5
    SW_SHOWDEFAULT = 10
    SW_SHOWMAXIMIZED = 3
    SW_SHOWMINIMIZED = 2
    SW_SHOWMINNOACTIVE = 7
    SW_SHOWNA = 8
    SW_SHOWNOACTIVE = 4
    SW_SHOWNORMAL = 1

    @staticmethod
    def ShellExecuteEx(hwnd=None, verb='', file='', parameters=None,
        directory=None, show=SW_SHOW, mask=0):  # TODO: More parameters
      data = winapi._SHELLEXECUTEINFO()
      data.cbSize = ctypes.sizeof(data)
      data.fMask = mask
      data.hwnd = hwnd
      data.lpVerb = verb.encode()
      data.lpFile = file.encode()
      # BUGFIX: the defaults parameters=None/directory=None used to crash
      # with AttributeError on .encode(); LPCSTR fields accept None
      # directly (meaning "no parameters"/"current directory").
      data.lpParameters = parameters.encode() if parameters is not None else None
      data.lpDirectory = directory.encode() if directory is not None else None
      data.nShow = show
      data.hInstApp = None
      data.lpIDList = None
      data.lpClass = None
      data.hkeyClass = None
      data.dwHotKey = 0
      data.DUMMYUNIONNAME = None
      data.hProcess = None
      result = winapi._ShellExecuteEx(ctypes.byref(data))
      if not result:
        raise ctypes.WinError(ctypes.get_last_error())
      return {'hInstApp': data.hInstApp, 'hProcess': data.hProcess}
def alert(*msg):
  """Write *msg* to stderr and, on Windows, also pop up a message box."""
  text = ' '.join(str(part) for part in msg)
  print(text, file=sys.stderr)
  sys.stderr.flush()
  if os.name == 'nt':
    winapi.MessageBox(None, text, "Python", 0)
def quote(s):
  """
  Quote *s* for safe use on a command line.

  On Windows (cmd-style), embedded double quotes are backslash-escaped and
  the string is wrapped in double quotes when it contains whitespace or the
  shell-special '<'/'>' characters.  On other systems shlex.quote() is used.
  """
  if os.name == 'nt' and os.sep == '\\':
    s = s.replace('"', '\\"')
    # BUGFIX: raw string for the regex -- '\s' in a plain string is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in future).
    if re.search(r'\s', s) or any(c in s for c in '<>'):
      s = '"' + s + '"'
  else:
    s = shlex.quote(s)
  return s
def is_admin():
  """
  Return True if the current process has administrative privileges.

  On Windows this asks shell32's IsUserAnAdmin(); on POSIX it checks for
  uid 0.

  :raises RuntimeError: on unsupported operating systems.
  """
  if os.name == 'nt':
    try:
      return ctypes.windll.shell32.IsUserAnAdmin()
    except Exception:
      # BUGFIX: narrowed from a bare ``except:`` so that SystemExit and
      # KeyboardInterrupt are no longer swallowed; any API failure still
      # falls back to "not an admin".
      traceback.print_exc()
      print("ctypes.windll.shell32.IsUserAnAdmin() failed -- "
            "assuming not an admin.", file=sys.stderr)
      sys.stderr.flush()
      return False
  elif os.name == 'posix':
    return os.getuid() == 0
  else:
    raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def run_as_admin(command, cwd=None, environ=None):
  """
  Runs a command as an admin in the specified *cwd* and *environ*.

  On Windows, this creates a temporary directory where this information
  is stored temporarily so that the new process can launch the proper
  subprocess.
  """
  # Accept either a command string or an argument list.
  args = shlex.split(command) if isinstance(command, str) else list(command)
  if os.name == 'nt':
    return _run_as_admin_windows(args, cwd, environ)
  if os.name == 'posix':
    # -E preserves the caller's environment across sudo.
    sys.exit(subprocess.call(['sudo', '-E'] + args))
  raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def _run_as_admin_windows(command, cwd, environ):
  """
  Launch *command* elevated via ShellExecuteEx('runas') on Windows.

  Process data (command, cwd, environ and an output-file path) is exchanged
  through a temporary directory; the elevated child re-runs this script with
  --windows-process-data pointing at that directory and mirrors its output
  into the shared file, which we tail into our own stdout.

  :return: the elevated process's exit code.
  """
  datadir = tempfile.mkdtemp()
  try:
    # TODO: Maybe we could also use named pipes and transfer them
    # via the processdata.json to the elevated process.
    # This file will receive all the process information.
    datafile = os.path.join(datadir, 'processdata.json')
    data = {
      'command': command,
      'cwd': cwd or os.getcwd(),
      'environ': environ or os.environ.copy(),
      'outfile': os.path.join(datadir, 'out.bin')
    }
    with open(datafile, 'w') as fp:
      json.dump(data, fp)
    # Ensure the output file exists.
    open(data['outfile'], 'w').close()
    # Create the windows elevated process that calls this file. This
    # file will then know what to do with the information from the
    # process data directory.
    # NOTE(review): mask=64 is presumably SEE_MASK_NOCLOSEPROCESS so that
    # hProcess stays valid for waiting -- confirm against the Win32 docs.
    hProc = winapi.ShellExecuteEx(
      file=sys.executable,
      verb='runas',
      parameters=' '.join(map(quote, [os.path.abspath(__file__), '--windows-process-data', datadir])),
      directory=datadir,
      mask=64,
      show=winapi.SW_HIDE
    )['hProcess']
    # Read the output from the process and write it to our stdout.
    with open(data['outfile'], 'rb+', 0) as outfile:
      while True:
        # 0x102 is WAIT_TIMEOUT: the child is still running, keep polling.
        hr = winapi.WaitForSingleObject(hProc, 40)
        while True:
          line = outfile.readline()
          if not line: break
          sys.stdout.buffer.write(line)
        if hr != 0x102: break
    return winapi.GetExitCodeProcess(hProc)
  finally:
    # Best-effort cleanup of the exchange directory; never mask the
    # command's own result with a cleanup failure.
    try:
      shutil.rmtree(datadir)
    except:
      print("ERROR: Unable to remove data directory of elevated process.")
      print("ERROR: Directory at \"{}\"".format(datadir))
      traceback.print_exc()
def _run_as_admin_windows_elevated(datadir):
  """
  Entry point inside the elevated Windows process.

  Reads the process data written by _run_as_admin_windows() from *datadir*,
  runs the requested command with its output redirected into the shared
  outfile, and returns the command's exit code.  On any failure a message
  box is shown and the process exits with status 1.
  """
  datafile = os.path.join(datadir, 'processdata.json')
  with open(datafile, 'r') as fp:
    data = json.load(fp)
  try:
    with open(data['outfile'], 'wb', 0) as fp:
      # Also mirror this process's own Python-level stdout/stderr into the
      # shared file so it reaches the waiting parent.
      sys.stderr = sys.stdout = io.TextIOWrapper(io.BufferedWriter(fp))
      os.environ.update(data['environ'])
      return subprocess.call(data['command'], cwd=data['cwd'], stdout=fp, stderr=fp)
  except:
    alert(traceback.format_exc())
    sys.exit(1)
def main(prog=None, argv=None):
  """Command-line entry point; see the module docstring for usage."""
  import argparse
  parser = argparse.ArgumentParser(prog=prog)
  parser.add_argument('--windows-process-data',
    help='The path to a Windows process data directory. This is used to '
         'provide data for the elevated process since no environment variables '
         'can be via ShellExecuteEx().')
  args, command = parser.parse_known_args(argv)
  if args.windows_process_data:
    # Only the elevated child process may use this flag.
    if not is_admin():
      alert("--windows-process-data can only be used in an elevated process.")
      sys.exit(1)
    sys.exit(_run_as_admin_windows_elevated(args.windows_process_data))
  if command:
    # Everything the parser did not recognize is the command to elevate.
    sys.exit(run_as_admin(command))
  parser.print_usage()


if __name__ == '__main__':
  sys.exit(main())
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
import six
from heat.common import exception
from heat.common import identifier
from heat.engine import parameters
from heat.engine import template
from heat.tests import common
def new_parameter(name, schema, value=None, validate_value=True):
    """
    Build a parameters.Parameter from a CFN-style schema snippet.

    A minimal HeatTemplateFormatVersion template is wrapped around the
    schema so the real template parameter-schema machinery is exercised.
    """
    tmpl = template.Template({'HeatTemplateFormatVersion': '2012-12-12',
                              'Parameters': {name: schema}})
    param = parameters.Parameter(name, tmpl.param_schemata()[name], value)
    param.validate(validate_value)
    return param
class ParameterTestCommon(common.HeatTestCase):
    """Parameter behaviour shared by every CFN parameter type.

    Each scenario supplies one type plus representative data: ``value``
    with its ``expected`` string form, an ``allowed_value`` list for
    constraint tests, the type's empty/falsey ``zero`` value and a
    non-empty ``default``.
    """
    scenarios = [
        ('type_string', dict(p_type='String',
                             inst=parameters.StringParam,
                             value='test',
                             expected='test',
                             allowed_value=['foo'],
                             zero='',
                             default='default')),
        ('type_number', dict(p_type='Number',
                             inst=parameters.NumberParam,
                             value=10,
                             expected='10',
                             allowed_value=[42],
                             zero=0,
                             default=13)),
        ('type_list', dict(p_type='CommaDelimitedList',
                           inst=parameters.CommaDelimitedListParam,
                           value=['a', 'b', 'c'],
                           expected='a,b,c',
                           allowed_value=['foo'],
                           zero=[],
                           default=['d', 'e', 'f'])),
        ('type_json', dict(p_type='Json',
                           inst=parameters.JsonParam,
                           value={'a': 1, 'b': '2'},
                           expected='{"a": 1, "b": "2"}',
                           allowed_value=[{'foo': 'bar'}],
                           zero={},
                           default={'d': 1, 'e': 'f'})),
        ('type_boolean', dict(p_type='Boolean',
                              inst=parameters.BooleanParam,
                              value=True,
                              expected='True',
                              allowed_value=[False],
                              zero=False,
                              default=True))
    ]

    def test_new_param(self):
        """Each type name maps to the matching Parameter subclass."""
        p = new_parameter('p', {'Type': self.p_type}, validate_value=False)
        self.assertIsInstance(p, self.inst)

    def test_param_to_str(self):
        """str() of a parameter gives the expected string form."""
        p = new_parameter('p', {'Type': self.p_type}, self.value)
        self.assertEqual(self.expected, str(p))

    def test_default_no_override(self):
        """With no user value, value() falls back to the default."""
        p = new_parameter('defaulted', {'Type': self.p_type,
                                        'Default': self.default})
        self.assertTrue(p.has_default())
        self.assertEqual(self.default, p.default())
        self.assertEqual(self.default, p.value())

    def test_default_override(self):
        """A user-supplied value takes precedence over the default."""
        p = new_parameter('defaulted', {'Type': self.p_type,
                                        'Default': self.default},
                          self.value)
        self.assertTrue(p.has_default())
        self.assertEqual(self.default, p.default())
        self.assertEqual(self.value, p.value())

    def test_default_invalid(self):
        """A default violating the constraints raises InvalidSchemaError."""
        schema = {'Type': self.p_type,
                  'AllowedValues': self.allowed_value,
                  'ConstraintDescription': 'wibble',
                  'Default': self.default}
        # Json parameters reject AllowedValues outright, so the error text
        # differs from the ConstraintDescription used by other types.
        if self.p_type == 'Json':
            err = self.assertRaises(exception.InvalidSchemaError,
                                    new_parameter, 'p', schema)
            self.assertIn('AllowedValues constraint invalid for Json',
                          six.text_type(err))
        else:
            err = self.assertRaises(exception.InvalidSchemaError,
                                    new_parameter, 'p', schema)
            self.assertIn('wibble', six.text_type(err))

    def test_description(self):
        """The schema Description is exposed via description()."""
        description = 'Description of the parameter'
        p = new_parameter('p', {'Type': self.p_type,
                                'Description': description},
                          validate_value=False)
        self.assertEqual(description, p.description())

    def test_no_description(self):
        """Without a Description, description() is the empty string."""
        p = new_parameter('p', {'Type': self.p_type}, validate_value=False)
        self.assertEqual('', p.description())

    def test_no_echo_true(self):
        """NoEcho 'true' hides the value in the string form."""
        p = new_parameter('anechoic', {'Type': self.p_type,
                                       'NoEcho': 'true'},
                          self.value)
        self.assertTrue(p.hidden())
        self.assertEqual('******', str(p))

    def test_no_echo_true_caps(self):
        """NoEcho is matched case-insensitively."""
        p = new_parameter('anechoic', {'Type': self.p_type,
                                       'NoEcho': 'TrUe'},
                          self.value)
        self.assertTrue(p.hidden())
        self.assertEqual('******', str(p))

    def test_no_echo_false(self):
        """NoEcho 'false' leaves the value visible."""
        p = new_parameter('echoic', {'Type': self.p_type,
                                     'NoEcho': 'false'},
                          self.value)
        self.assertFalse(p.hidden())
        self.assertEqual(self.expected, str(p))

    def test_default_empty(self):
        """An empty/falsey default is still treated as a default."""
        p = new_parameter('defaulted', {'Type': self.p_type,
                                        'Default': self.zero})
        self.assertTrue(p.has_default())
        self.assertEqual(self.zero, p.default())
        self.assertEqual(self.zero, p.value())

    def test_default_no_empty_user_value_empty(self):
        """An empty user value overrides a non-empty default."""
        p = new_parameter('defaulted', {'Type': self.p_type,
                                        'Default': self.default},
                          self.zero)
        self.assertTrue(p.has_default())
        self.assertEqual(self.default, p.default())
        self.assertEqual(self.zero, p.value())
class ParameterTestSpecific(common.HeatTestCase):
    """Per-type parameter tests (String, Number, List, Json, Boolean).

    Unlike the scenario-driven tests above, each test here pins the
    behaviour of one concrete parameter Type and constraint combination.
    """
    def test_new_bad_type(self):
        # 'List' is not a valid cfn parameter type name.
        self.assertRaises(exception.InvalidSchemaError, new_parameter,
                          'p', {'Type': 'List'}, validate_value=False)
    def test_string_len_good(self):
        schema = {'Type': 'String',
                  'MinLength': '3',
                  'MaxLength': '3'}
        p = new_parameter('p', schema, 'foo')
        self.assertEqual('foo', p.value())
    def test_string_underflow(self):
        schema = {'Type': 'String',
                  'ConstraintDescription': 'wibble',
                  'MinLength': '4'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, 'foo')
        # The ConstraintDescription is surfaced in the error text.
        self.assertIn('wibble', six.text_type(err))
    def test_string_overflow(self):
        schema = {'Type': 'String',
                  'ConstraintDescription': 'wibble',
                  'MaxLength': '2'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, 'foo')
        self.assertIn('wibble', six.text_type(err))
    def test_string_pattern_good(self):
        schema = {'Type': 'String',
                  'AllowedPattern': '[a-z]*'}
        p = new_parameter('p', schema, 'foo')
        self.assertEqual('foo', p.value())
    def test_string_pattern_bad_prefix(self):
        # The pattern must match the whole value, not just a suffix.
        schema = {'Type': 'String',
                  'ConstraintDescription': 'wibble',
                  'AllowedPattern': '[a-z]*'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, '1foo')
        self.assertIn('wibble', six.text_type(err))
    def test_string_pattern_bad_suffix(self):
        schema = {'Type': 'String',
                  'ConstraintDescription': 'wibble',
                  'AllowedPattern': '[a-z]*'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, 'foo1')
        self.assertIn('wibble', six.text_type(err))
    def test_string_value_list_good(self):
        schema = {'Type': 'String',
                  'AllowedValues': ['foo', 'bar', 'baz']}
        p = new_parameter('p', schema, 'bar')
        self.assertEqual('bar', p.value())
    def test_string_value_unicode(self):
        schema = {'Type': 'String'}
        p = new_parameter('p', schema, u'test\u2665')
        self.assertEqual(u'test\u2665', p.value())
    def test_string_value_list_bad(self):
        schema = {'Type': 'String',
                  'ConstraintDescription': 'wibble',
                  'AllowedValues': ['foo', 'bar', 'baz']}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, 'blarg')
        self.assertIn('wibble', six.text_type(err))
    def test_number_int_good(self):
        # String input is coerced to a Python int.
        schema = {'Type': 'Number',
                  'MinValue': '3',
                  'MaxValue': '3'}
        p = new_parameter('p', schema, '3')
        self.assertEqual(3, p.value())
    def test_number_float_good_string(self):
        schema = {'Type': 'Number',
                  'MinValue': '3.0',
                  'MaxValue': '4.0'}
        p = new_parameter('p', schema, '3.5')
        self.assertEqual(3.5, p.value())
    def test_number_float_good_number(self):
        schema = {'Type': 'Number',
                  'MinValue': '3.0',
                  'MaxValue': '4.0'}
        p = new_parameter('p', schema, 3.5)
        self.assertEqual(3.5, p.value())
    def test_number_low(self):
        schema = {'Type': 'Number',
                  'ConstraintDescription': 'wibble',
                  'MinValue': '4'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, '3')
        self.assertIn('wibble', six.text_type(err))
    def test_number_high(self):
        schema = {'Type': 'Number',
                  'ConstraintDescription': 'wibble',
                  'MaxValue': '2'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, '3')
        self.assertIn('wibble', six.text_type(err))
    def test_number_bad(self):
        # Non-numeric input mentions the failed float conversion.
        schema = {'Type': 'Number'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, 'str')
        self.assertIn('float', six.text_type(err))
    def test_number_value_list_good(self):
        schema = {'Type': 'Number',
                  'AllowedValues': ['1', '3', '5']}
        p = new_parameter('p', schema, '5')
        self.assertEqual(5, p.value())
    def test_number_value_list_bad(self):
        schema = {'Type': 'Number',
                  'ConstraintDescription': 'wibble',
                  'AllowedValues': ['1', '3', '5']}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, '2')
        self.assertIn('wibble', six.text_type(err))
    def test_list_value_list_default_empty(self):
        schema = {'Type': 'CommaDelimitedList', 'Default': ''}
        p = new_parameter('p', schema)
        self.assertEqual([], p.value())
    def test_list_value_list_good(self):
        # Exercises user values, list and string Defaults, and the str()
        # rendering of CommaDelimitedList parameters.
        schema = {'Type': 'CommaDelimitedList',
                  'AllowedValues': ['foo', 'bar', 'baz']}
        p = new_parameter('p', schema, 'baz,foo,bar')
        self.assertEqual('baz,foo,bar'.split(','), p.value())
        schema['Default'] = []
        p = new_parameter('p', schema)
        self.assertEqual([], p.value())
        schema['Default'] = 'baz,foo,bar'
        p = new_parameter('p', schema)
        self.assertEqual('baz,foo,bar'.split(','), p.value())
        schema['AllowedValues'] = ['1', '3', '5']
        schema['Default'] = []
        p = new_parameter('p', schema, [1, 3, 5])
        self.assertEqual('1,3,5', str(p))
        schema['Default'] = [1, 3, 5]
        p = new_parameter('p', schema)
        self.assertEqual('1,3,5'.split(','), p.value())
    def test_list_value_list_bad(self):
        schema = {'Type': 'CommaDelimitedList',
                  'ConstraintDescription': 'wibble',
                  'AllowedValues': ['foo', 'bar', 'baz']}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema,
                                'foo,baz,blarg')
        self.assertIn('wibble', six.text_type(err))
    def test_map_value(self):
        '''Happy path for value that's already a map.'''
        schema = {'Type': 'Json'}
        val = {"foo": "bar", "items": [1, 2, 3]}
        p = new_parameter('p', schema, val)
        self.assertEqual(val, p.value())
        self.assertEqual(val, p.parsed)
    def test_map_value_bad(self):
        '''Map value is not JSON parsable.'''
        # len (a builtin function) cannot be serialised to JSON.
        schema = {'Type': 'Json',
                  'ConstraintDescription': 'wibble'}
        val = {"foo": "bar", "not_json": len}
        err = self.assertRaises(ValueError,
                                new_parameter, 'p', schema, val)
        self.assertIn('Value must be valid JSON', six.text_type(err))
    def test_map_value_parse(self):
        '''Happy path for value that's a string.'''
        schema = {'Type': 'Json'}
        val = {"foo": "bar", "items": [1, 2, 3]}
        val_s = json.dumps(val)
        p = new_parameter('p', schema, val_s)
        self.assertEqual(val, p.value())
        self.assertEqual(val, p.parsed)
    def test_map_value_bad_parse(self):
        '''Test value error for unparsable string value.'''
        schema = {'Type': 'Json',
                  'ConstraintDescription': 'wibble'}
        val = "I am not a map"
        err = self.assertRaises(ValueError,
                                new_parameter, 'p', schema, val)
        self.assertIn('Value must be valid JSON', six.text_type(err))
    def test_map_underrun(self):
        '''Test map length under MIN_LEN.'''
        # For Json parameters, length constraints apply to the number of keys.
        schema = {'Type': 'Json',
                  'MinLength': 3}
        val = {"foo": "bar", "items": [1, 2, 3]}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, val)
        self.assertIn('out of range', six.text_type(err))
    def test_map_overrun(self):
        '''Test map length over MAX_LEN.'''
        schema = {'Type': 'Json',
                  'MaxLength': 1}
        val = {"foo": "bar", "items": [1, 2, 3]}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'p', schema, val)
        self.assertIn('out of range', six.text_type(err))
    def test_json_list(self):
        # A JSON top-level list is also accepted by the Json type.
        schema = {'Type': 'Json'}
        val = ["fizz", "buzz"]
        p = new_parameter('p', schema, val)
        self.assertIsInstance(p.value(), list)
        self.assertIn("fizz", p.value())
        self.assertIn("buzz", p.value())
    def test_json_string_list(self):
        schema = {'Type': 'Json'}
        val = '["fizz", "buzz"]'
        p = new_parameter('p', schema, val)
        self.assertIsInstance(p.value(), list)
        self.assertIn("fizz", p.value())
        self.assertIn("buzz", p.value())
    def test_bool_value_true(self):
        # All of these spellings are accepted as boolean true.
        schema = {'Type': 'Boolean'}
        for val in ('1', 't', 'true', 'on', 'y', 'yes', True, 1):
            bo = new_parameter('bo', schema, val)
            self.assertEqual(True, bo.value())
    def test_bool_value_false(self):
        schema = {'Type': 'Boolean'}
        for val in ('0', 'f', 'false', 'off', 'n', 'no', False, 0):
            bo = new_parameter('bo', schema, val)
            self.assertEqual(False, bo.value())
    def test_bool_value_invalid(self):
        schema = {'Type': 'Boolean'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'bo', schema, 'foo')
        self.assertIn("Unrecognized value 'foo'", six.text_type(err))
    def test_missing_param_str(self):
        '''Test missing user parameter.'''
        self.assertRaises(exception.UserParameterMissing,
                          new_parameter, 'p',
                          {'Type': 'String'})
    def test_missing_param_list(self):
        '''Test missing user parameter.'''
        self.assertRaises(exception.UserParameterMissing,
                          new_parameter, 'p',
                          {'Type': 'CommaDelimitedList'})
    def test_missing_param_map(self):
        '''Test missing user parameter.'''
        self.assertRaises(exception.UserParameterMissing,
                          new_parameter, 'p',
                          {'Type': 'Json'})
    def test_param_name_in_error_message(self):
        schema = {'Type': 'String',
                  'AllowedPattern': '[a-z]*'}
        err = self.assertRaises(exception.StackValidationFailed,
                                new_parameter, 'testparam', schema, '234')
        expected = ("Parameter 'testparam' is invalid: "
                    '"234" does not match pattern "[a-z]*"')
        self.assertEqual(expected, six.text_type(err))
# Template schema shared by the ParametersTest cases below: one required
# string parameter ('User') and one with a default value ('Defaulted').
params_schema = {
    'Parameters': {
        'User': {'Type': 'String'},
        'Defaulted': {
            'Type': 'String',
            'Default': 'foobar',
        },
    },
}
class ParametersTest(common.HeatTestCase):
    """Tests for the Parameters collection built from a cfn template."""
    def new_parameters(self, stack_name, tmpl, user_params=None,
                       stack_id=None, validate_value=True,
                       param_defaults=None):
        # Helper: wrap `tmpl` in a cfn-format Template, build its
        # Parameters against the given stack identity and validate them.
        # NOTE: mutates `tmpl` in place by adding the format-version key.
        user_params = user_params or {}
        tmpl.update({'HeatTemplateFormatVersion': '2012-12-12'})
        tmpl = template.Template(tmpl)
        params = tmpl.parameters(
            identifier.HeatIdentifier('', stack_name, stack_id),
            user_params, param_defaults=param_defaults)
        params.validate(validate_value)
        return params
    def test_pseudo_params(self):
        # AWS::StackName / AWS::StackId / AWS::Region are always present.
        stack_name = 'test_stack'
        params = self.new_parameters(stack_name, {"Parameters": {}})
        self.assertEqual('test_stack', params['AWS::StackName'])
        self.assertEqual(
            'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'None'),
            params['AWS::StackId'])
        self.assertIn('AWS::Region', params)
    def test_pseudo_param_stackid(self):
        stack_name = 'test_stack'
        params = self.new_parameters(stack_name, {'Parameters': {}},
                                     stack_id='abc123')
        self.assertEqual(
            'arn:openstack:heat:::stacks/{0}/{1}'.format(stack_name, 'abc123'),
            params['AWS::StackId'])
        # The stack id can be replaced after construction.
        stack_identifier = identifier.HeatIdentifier('', '', 'def456')
        params.set_stack_id(stack_identifier)
        self.assertEqual(stack_identifier.arn(), params['AWS::StackId'])
    def test_schema_invariance(self):
        # Building one Parameters object must not mutate the shared schema.
        params1 = self.new_parameters('test', params_schema,
                                      {'User': 'foo',
                                       'Defaulted': 'wibble'})
        self.assertEqual('wibble', params1['Defaulted'])
        params2 = self.new_parameters('test', params_schema, {'User': 'foo'})
        self.assertEqual('foobar', params2['Defaulted'])
    def test_to_dict(self):
        # NOTE: the local name `template` shadows the `template` module used
        # by new_parameters; harmless since new_parameters reads the global.
        template = {'Parameters': {'Foo': {'Type': 'String'},
                                   'Bar': {'Type': 'Number', 'Default': '42'}}}
        params = self.new_parameters('test_params', template, {'Foo': 'foo'})
        as_dict = dict(params)
        self.assertEqual('foo', as_dict['Foo'])
        self.assertEqual(42, as_dict['Bar'])
        self.assertEqual('test_params', as_dict['AWS::StackName'])
        self.assertIn('AWS::Region', as_dict)
    def test_map(self):
        template = {'Parameters': {'Foo': {'Type': 'String'},
                                   'Bar': {'Type': 'Number', 'Default': '42'}}}
        params = self.new_parameters('test_params', template, {'Foo': 'foo'})
        # map() applies the callable to every parameter, pseudo ones included.
        expected = {'Foo': False,
                    'Bar': True,
                    'AWS::Region': True,
                    'AWS::StackId': True,
                    'AWS::StackName': True}
        self.assertEqual(expected, params.map(lambda p: p.has_default()))
    def test_map_str(self):
        template = {'Parameters': {'Foo': {'Type': 'String'},
                                   'Bar': {'Type': 'Number'},
                                   'Uni': {'Type': 'String'}}}
        stack_name = 'test_params'
        params = self.new_parameters(stack_name, template,
                                     {'Foo': 'foo',
                                      'Bar': '42',
                                      'Uni': u'test\u2665'})
        expected = {'Foo': 'foo',
                    'Bar': '42',
                    'Uni': 'test\xe2\x99\xa5',
                    'AWS::Region': 'ap-southeast-1',
                    'AWS::StackId':
                    'arn:openstack:heat:::stacks/{0}/{1}'.format(
                        stack_name,
                        'None'),
                    'AWS::StackName': 'test_params'}
        self.assertEqual(expected, params.map(str))
    def test_unknown_params(self):
        # Supplying a parameter the schema does not declare is an error.
        user_params = {'Foo': 'wibble'}
        self.assertRaises(exception.UnknownUserParameter,
                          self.new_parameters,
                          'test',
                          params_schema,
                          user_params)
    def test_missing_params(self):
        # Omitting a required (no-default) parameter is an error.
        user_params = {}
        self.assertRaises(exception.UserParameterMissing,
                          self.new_parameters,
                          'test',
                          params_schema,
                          user_params)
    def test_missing_attribute_params(self):
        # A parameter entry that is not a mapping is an invalid schema.
        params = {'Parameters': {'Foo': {'Type': 'String'},
                                 'NoAttr': 'No attribute.',
                                 'Bar': {'Type': 'Number', 'Default': '1'}}}
        self.assertRaises(exception.InvalidSchemaError,
                          self.new_parameters,
                          'test',
                          params)
    def test_use_user_default(self):
        # param_defaults overrides the schema default when no value is given.
        template = {'Parameters': {'a': {'Type': 'Number', 'Default': '42'}}}
        params = self.new_parameters('test_params', template,
                                     param_defaults={'a': '77'})
        self.assertEqual(77, params['a'])
    def test_dont_use_user_default(self):
        # An explicit user value beats param_defaults.
        template = {'Parameters': {'a': {'Type': 'Number', 'Default': '42'}}}
        params = self.new_parameters('test_params', template,
                                     {'a': '111'},
                                     param_defaults={'a': '77'})
        self.assertEqual(111, params['a'])
class ParameterSchemaTest(common.HeatTestCase):
    """Validation errors raised by ``parameters.Schema.from_dict``."""
    def test_validate_schema_wrong_key(self):
        """An unknown schema key is rejected with a descriptive error."""
        bad_schema = {"foo": "bar"}
        err = self.assertRaises(exception.InvalidSchemaError,
                                parameters.Schema.from_dict,
                                'param_name', bad_schema)
        expected = "Invalid key 'foo' for parameter (param_name)"
        self.assertEqual(expected, six.text_type(err))
    def test_validate_schema_no_type(self):
        """A schema without a Type entry is rejected."""
        typeless = {"Description": "Hi!"}
        err = self.assertRaises(exception.InvalidSchemaError,
                                parameters.Schema.from_dict,
                                'broken', typeless)
        expected = "Missing parameter type for parameter: broken"
        self.assertEqual(expected, six.text_type(err))
| |
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..externals import six
from ..preprocessing import LabelEncoder
from ..utils.validation import check_is_fitted
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
    """Soft Voting/Majority Rule classifier for unfitted estimators.

    .. versionadded:: 0.17

    Read more in the :ref:`User Guide <voting_classifier>`.

    Parameters
    ----------
    estimators : list of (string, estimator) tuples
        Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
        of those original estimators that will be stored in the class attribute
        `self.estimators_`.

    voting : str, {'hard', 'soft'} (default='hard')
        If 'hard', uses predicted class labels for majority rule voting.
        Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
        an ensemble of well-calibrated classifiers.

    weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
        predicted class labels (`hard` voting) or class probabilities
        before averaging (`soft` voting). Uses uniform weights if `None`.

    Attributes
    ----------
    classes_ : array-like, shape = [n_predictions]

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> clf1 = LogisticRegression(random_state=1)
    >>> clf2 = RandomForestClassifier(random_state=1)
    >>> clf3 = GaussianNB()
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> eclf1 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
    >>> eclf1 = eclf1.fit(X, y)
    >>> print(eclf1.predict(X))
    [1 1 1 2 2 2]
    >>> eclf2 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...         voting='soft')
    >>> eclf2 = eclf2.fit(X, y)
    >>> print(eclf2.predict(X))
    [1 1 1 2 2 2]
    >>> eclf3 = VotingClassifier(estimators=[
    ...        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...        voting='soft', weights=[2,1,1])
    >>> eclf3 = eclf3.fit(X, y)
    >>> print(eclf3.predict(X))
    [1 1 1 2 2 2]
    >>>
    """
    def __init__(self, estimators, voting='hard', weights=None):
        self.estimators = estimators
        self.named_estimators = dict(estimators)
        self.voting = voting
        self.weights = weights

    def fit(self, X, y):
        """Fit clones of the estimators on the (label-encoded) targets.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
        """
        if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
            raise NotImplementedError('Multilabel and multi-output'
                                      ' classification is not supported.')
        if self.voting not in ('soft', 'hard'):
            raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
                             % self.voting)
        if self.estimators is None or len(self.estimators) == 0:
            raise AttributeError('Invalid `estimators` attribute, `estimators`'
                                 ' should be a list of (string, estimator)'
                                 ' tuples')
        # Bug fix: `if self.weights` raised "truth value of an array is
        # ambiguous" for ndarray weights and silently skipped the length
        # check for empty sequences; compare against None explicitly.
        if (self.weights is not None
                and len(self.weights) != len(self.estimators)):
            raise ValueError('Number of classifiers and weights must be equal'
                             '; got %d weights, %d estimators'
                             % (len(self.weights), len(self.estimators)))
        # Encode labels so sub-estimators see contiguous integer classes;
        # predictions are decoded back in `predict`.
        self.le_ = LabelEncoder()
        self.le_.fit(y)
        self.classes_ = self.le_.classes_
        self.estimators_ = []
        for name, clf in self.estimators:
            fitted_clf = clone(clf).fit(X, self.le_.transform(y))
            self.estimators_.append(fitted_clf)
        return self

    def predict(self, X):
        """Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        maj : array-like, shape = [n_samples]
            Predicted class labels.
        """
        check_is_fitted(self, 'estimators_')
        if self.voting == 'soft':
            maj = np.argmax(self.predict_proba(X), axis=1)
        else:  # 'hard' voting: weighted per-sample majority over labels
            predictions = self._predict(X)
            maj = np.apply_along_axis(lambda x:
                                      np.argmax(np.bincount(x,
                                                weights=self.weights)),
                                      axis=1,
                                      arr=predictions.astype('int'))
        # Map encoded class indices back to the original labels.
        maj = self.le_.inverse_transform(maj)
        return maj

    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls."""
        # (Docstring fixed: this calls predict_proba, not predict.)
        return np.asarray([clf.predict_proba(X) for clf in self.estimators_])

    def _predict_proba(self, X):
        """Predict class probabilities for X in 'soft' voting."""
        if self.voting == 'hard':
            raise AttributeError("predict_proba is not available when"
                                 " voting=%r" % self.voting)
        check_is_fitted(self, 'estimators_')
        avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
        return avg

    @property
    def predict_proba(self):
        """Compute probabilities of possible outcomes for samples in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        avg : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        # Exposed as a property so that hasattr(clf, 'predict_proba') can
        # still be probed; the AttributeError for hard voting is raised on
        # call, inside _predict_proba.
        return self._predict_proba

    def transform(self, X):
        """Return class labels or probabilities for X for each estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        If `voting='soft'`:
          array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
        If `voting='hard'`:
          array-like = [n_classifiers, n_samples]
            Class labels predicted by each classifier.
        """
        check_is_fitted(self, 'estimators_')
        if self.voting == 'soft':
            return self._collect_probas(X)
        else:
            return self._predict(X)

    def get_params(self, deep=True):
        """Return estimator parameter names for GridSearch support."""
        if not deep:
            return super(VotingClassifier, self).get_params(deep=False)
        else:
            # Expose each sub-estimator and its nested parameters under the
            # '<name>__<param>' convention used by GridSearchCV.
            out = super(VotingClassifier, self).get_params(deep=False)
            out.update(self.named_estimators.copy())
            for name, step in six.iteritems(self.named_estimators):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    def _predict(self, X):
        """Collect results from clf.predict calls."""
        # Transposed so rows are samples and columns are classifiers.
        return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| |
#!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import time
import ctypes
class otApi:
    def __init__(self, nodeid):
        """Create a node wrapper and bind it to the native node API DLL."""
        # VERBOSE may be e.g. '1' or '1.0'; parsing via float accepts both.
        self.verbose = int(float(os.getenv('VERBOSE', 0)))
        self.__init_dll(nodeid)
    def __del__(self):
        # Best-effort teardown of the native node when the wrapper is GC'd.
        self.Api.otNodeFinalize(self.otNode)
    # The wrappers below all follow one pattern: call into the native node
    # API (self.Api, loaded in __init_dll), passing strings as UTF-8 bytes,
    # and raise OSError on any non-zero return code.
    def set_mode(self, mode):
        # mode: Thread mode flags string, forwarded verbatim to the node.
        if self.Api.otNodeSetMode(self.otNode, mode.encode('utf-8')) != 0:
            raise OSError("otNodeSetMode failed!")
    def interface_up(self):
        if self.Api.otNodeInterfaceUp(self.otNode) != 0:
            raise OSError("otNodeInterfaceUp failed!")
    def interface_down(self):
        if self.Api.otNodeInterfaceDown(self.otNode) != 0:
            raise OSError("otNodeInterfaceDown failed!")
    def thread_start(self):
        if self.Api.otNodeThreadStart(self.otNode) != 0:
            raise OSError("otNodeThreadStart failed!")
    def thread_stop(self):
        if self.Api.otNodeThreadStop(self.otNode) != 0:
            raise OSError("otNodeThreadStop failed!")
    def commissioner_start(self):
        if self.Api.otNodeCommissionerStart(self.otNode) != 0:
            raise OSError("otNodeCommissionerStart failed!")
    def commissioner_add_joiner(self, addr, psk):
        # addr: joiner address/EUI string; psk: the joiner's pre-shared key.
        if self.Api.otNodeCommissionerJoinerAdd(self.otNode, addr.encode('utf-8'), psk.encode('utf-8')) != 0:
            raise OSError("otNodeCommissionerJoinerAdd failed!")
    def joiner_start(self, pskd='', provisioning_url=''):
        if self.Api.otNodeJoinerStart(self.otNode, pskd.encode('utf-8'), provisioning_url.encode('utf-8')) != 0:
            raise OSError("otNodeJoinerStart failed!")
    def clear_whitelist(self):
        if self.Api.otNodeClearWhitelist(self.otNode) != 0:
            raise OSError("otNodeClearWhitelist failed!")
    def enable_whitelist(self):
        if self.Api.otNodeEnableWhitelist(self.otNode) != 0:
            raise OSError("otNodeEnableWhitelist failed!")
    def disable_whitelist(self):
        if self.Api.otNodeDisableWhitelist(self.otNode) != 0:
            raise OSError("otNodeDisableWhitelist failed!")
def add_whitelist(self, addr, rssi=None):
if rssi == None:
rssi = 0
if self.Api.otNodeAddWhitelist(self.otNode, addr.encode('utf-8'), ctypes.c_byte(rssi)) != 0:
raise OSError("otNodeAddWhitelist failed!")
    # -- Per-node attribute accessors ---------------------------------------
    # Getters return whatever the native API reports (bytes are decoded as
    # UTF-8); setters wrap the value in the matching ctypes integer type and
    # raise OSError on a non-zero return code.
    def remove_whitelist(self, addr):
        if self.Api.otNodeRemoveWhitelist(self.otNode, addr.encode('utf-8')) != 0:
            raise OSError("otNodeRemoveWhitelist failed!")
    def get_addr16(self):
        return self.Api.otNodeGetAddr16(self.otNode)
    def get_addr64(self):
        # DLL returns bytes; decode to a Python string.
        return self.Api.otNodeGetAddr64(self.otNode).decode('utf-8')
    def get_hashmacaddr(self):
        return self.Api.otNodeGetHashMacAddress(self.otNode).decode('utf-8')
    def get_channel(self):
        return self.Api.otNodeGetChannel(self.otNode)
    def set_channel(self, channel):
        if self.Api.otNodeSetChannel(self.otNode, ctypes.c_ubyte(channel)) != 0:
            raise OSError("otNodeSetChannel failed!")
    def get_masterkey(self):
        return self.Api.otNodeGetMasterkey(self.otNode).decode("utf-8")
    def set_masterkey(self, masterkey):
        # masterkey: hex string, forwarded verbatim.
        if self.Api.otNodeSetMasterkey(self.otNode, masterkey.encode('utf-8')) != 0:
            raise OSError("otNodeSetMasterkey failed!")
    def get_key_sequence_counter(self):
        return self.Api.otNodeGetKeySequenceCounter(self.otNode)
    def set_key_sequence_counter(self, key_sequence_counter):
        if self.Api.otNodeSetKeySequenceCounter(self.otNode, ctypes.c_uint(key_sequence_counter)) != 0:
            raise OSError("otNodeSetKeySequenceCounter failed!")
    def set_key_switch_guardtime(self, key_switch_guardtime):
        if self.Api.otNodeSetKeySwitchGuardTime(self.otNode, ctypes.c_uint(key_switch_guardtime)) != 0:
            raise OSError("otNodeSetKeySwitchGuardTime failed!")
    def set_network_id_timeout(self, network_id_timeout):
        if self.Api.otNodeSetNetworkIdTimeout(self.otNode, ctypes.c_ubyte(network_id_timeout)) != 0:
            raise OSError("otNodeSetNetworkIdTimeout failed!")
    def get_network_name(self):
        return self.Api.otNodeGetNetworkName(self.otNode).decode("utf-8")
    def set_network_name(self, network_name):
        if self.Api.otNodeSetNetworkName(self.otNode, network_name.encode('utf-8')) != 0:
            raise OSError("otNodeSetNetworkName failed!")
    def get_panid(self):
        return int(self.Api.otNodeGetPanId(self.otNode))
    def set_panid(self, panid):
        if self.Api.otNodeSetPanId(self.otNode, ctypes.c_ushort(panid)) != 0:
            raise OSError("otNodeSetPanId failed!")
    def get_partition_id(self):
        return int(self.Api.otNodeGetPartitionId(self.otNode))
    def set_partition_id(self, partition_id):
        if self.Api.otNodeSetPartitionId(self.otNode, ctypes.c_uint(partition_id)) != 0:
            raise OSError("otNodeSetPartitionId failed!")
    def set_router_upgrade_threshold(self, threshold):
        if self.Api.otNodeSetRouterUpgradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
            raise OSError("otNodeSetRouterUpgradeThreshold failed!")
    def set_router_downgrade_threshold(self, threshold):
        if self.Api.otNodeSetRouterDowngradeThreshold(self.otNode, ctypes.c_ubyte(threshold)) != 0:
            raise OSError("otNodeSetRouterDowngradeThreshold failed!")
    def release_router_id(self, router_id):
        if self.Api.otNodeReleaseRouterId(self.otNode, ctypes.c_ubyte(router_id)) != 0:
            raise OSError("otNodeReleaseRouterId failed!")
    def get_state(self):
        # Node role string as reported by the native API.
        return self.Api.otNodeGetState(self.otNode).decode('utf-8')
    def set_state(self, state):
        if self.Api.otNodeSetState(self.otNode, state.encode('utf-8')) != 0:
            raise OSError("otNodeSetState failed!")
    def get_timeout(self):
        return int(self.Api.otNodeGetTimeout(self.otNode))
    def set_timeout(self, timeout):
        if self.Api.otNodeSetTimeout(self.otNode, ctypes.c_uint(timeout)) != 0:
            raise OSError("otNodeSetTimeout failed!")
    def set_max_children(self, number):
        if self.Api.otNodeSetMaxChildren(self.otNode, ctypes.c_ubyte(number)) != 0:
            raise OSError("otNodeSetMaxChildren failed!")
    def get_weight(self):
        return int(self.Api.otNodeGetWeight(self.otNode))
    def set_weight(self, weight):
        if self.Api.otNodeSetWeight(self.otNode, ctypes.c_ubyte(weight)) != 0:
            raise OSError("otNodeSetWeight failed!")
    def add_ipaddr(self, ipaddr):
        if self.Api.otNodeAddIpAddr(self.otNode, ipaddr.encode('utf-8')) != 0:
            raise OSError("otNodeAddIpAddr failed!")
    def get_addrs(self):
        # The DLL reports all addresses as one newline-separated string.
        return self.Api.otNodeGetAddrs(self.otNode).decode("utf-8").split("\n")
    def get_context_reuse_delay(self):
        return int(self.Api.otNodeGetContextReuseDelay(self.otNode))
    def set_context_reuse_delay(self, delay):
        if self.Api.otNodeSetContextReuseDelay(self.otNode, ctypes.c_uint(delay)) != 0:
            raise OSError("otNodeSetContextReuseDelay failed!")
    def add_prefix(self, prefix, flags, prf = 'med'):
        # prefix: on-mesh prefix string; flags: prefix flags string;
        # prf: preference level, defaults to 'med'.
        if self.Api.otNodeAddPrefix(self.otNode, prefix.encode('utf-8'), flags.encode('utf-8'), prf.encode('utf-8')) != 0:
            raise OSError("otNodeAddPrefix failed!")
    def remove_prefix(self, prefix):
        if self.Api.otNodeRemovePrefix(self.otNode, prefix.encode('utf-8')) != 0:
            raise OSError("otNodeRemovePrefix failed!")
    def add_route(self, prefix, prf = 'med'):
        # External route; prf is the preference level as in add_prefix.
        if self.Api.otNodeAddRoute(self.otNode, prefix.encode('utf-8'), prf.encode('utf-8')) != 0:
            raise OSError("otNodeAddRoute failed!")
def remove_route(self, prefix):
if self.Api.otNodeRemoveRoute(self.otNode, prefix.encode('utf-8')) != 0:
raise OSError("otNodeRemovePrefix failed!")
    def register_netdata(self):
        # Push the locally configured network data to the leader.
        if self.Api.otNodeRegisterNetdata(self.otNode) != 0:
            raise OSError("otNodeRegisterNetdata failed!")
    def energy_scan(self, mask, count, period, scan_duration, ipaddr):
        # Commissioner energy scan: channel mask, per-channel count/period/
        # duration, directed at ipaddr.
        if self.Api.otNodeEnergyScan(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ctypes.c_ushort(scan_duration), ipaddr.encode('utf-8')) != 0:
            raise OSError("otNodeEnergyScan failed!")
    def panid_query(self, panid, mask, ipaddr):
        # Query for PAN ID conflicts over the channels in mask.
        if self.Api.otNodePanIdQuery(self.otNode, ctypes.c_ushort(panid), ctypes.c_uint(mask), ipaddr.encode('utf-8')) != 0:
            raise OSError("otNodePanIdQuery failed!")
    def scan(self):
        # Results arrive as one newline-separated string; split into lines.
        return self.Api.otNodeScan(self.otNode).decode("utf-8").split("\n")
def ping(self, ipaddr, num_responses=1, size=None, timeout=5000):
if size == None:
size = 100
numberOfResponders = self.Api.otNodePing(self.otNode, ipaddr.encode('utf-8'), ctypes.c_ushort(size),
ctypes.c_uint(num_responses), ctypes.c_uint16(timeout))
return numberOfResponders >= num_responses
    def set_router_selection_jitter(self, jitter):
        # jitter is passed to the node as an unsigned byte.
        if self.Api.otNodeSetRouterSelectionJitter(self.otNode, ctypes.c_ubyte(jitter)) != 0:
            raise OSError("otNodeSetRouterSelectionJitter failed!")
def set_active_dataset(self, timestamp, panid=None, channel=None, channel_mask=None, master_key=None):
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if master_key == None:
master_key = ""
if self.Api.otNodeSetActiveDataset(
self.otNode,
ctypes.c_ulonglong(timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
master_key.encode('utf-8')
) != 0:
raise OSError("otNodeSetActiveDataset failed!")
def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
if pendingtimestamp == None:
pendingtimestamp = 0
if activetimestamp == None:
activetimestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if self.Api.otNodeSetPendingDataset(
self.otNode,
ctypes.c_ulonglong(activetimestamp),
ctypes.c_ulonglong(pendingtimestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel)
) != 0:
raise OSError("otNodeSetPendingDataset failed!")
    def announce_begin(self, mask, count, period, ipaddr):
        # Commissioner announce-begin request over the channels in mask,
        # directed at ipaddr.
        if self.Api.otNodeCommissionerAnnounceBegin(self.otNode, ctypes.c_uint(mask), ctypes.c_ubyte(count), ctypes.c_ushort(period), ipaddr.encode('utf-8')) != 0:
            raise OSError("otNodeCommissionerAnnounceBegin failed!")
def send_mgmt_active_set(self, active_timestamp=None, channel=None, channel_mask=None, extended_panid=None,
panid=None, master_key=None, mesh_local=None, network_name=None, binary=None):
if active_timestamp == None:
active_timestamp = 0
if panid == None:
panid = 0
if channel == None:
channel = 0
if channel_mask == None:
channel_mask = 0
if extended_panid == None:
extended_panid = ""
if master_key == None:
master_key = ""
if mesh_local == None:
mesh_local = ""
if network_name == None:
network_name = ""
if binary == None:
binary = ""
if self.Api.otNodeSendActiveSet(
self.otNode,
ctypes.c_ulonglong(active_timestamp),
ctypes.c_ushort(panid),
ctypes.c_ushort(channel),
ctypes.c_uint(channel_mask),
extended_panid.encode('utf-8'),
master_key.encode('utf-8'),
mesh_local.encode('utf-8'),
network_name.encode('utf-8'),
binary.encode('utf-8')
) != 0:
raise OSError("otNodeSendActiveSet failed!")
def send_mgmt_pending_set(self, pending_timestamp=None, active_timestamp=None, delay_timer=None, channel=None,
                          panid=None, master_key=None, mesh_local=None, network_name=None):
    """Send a MGMT_PENDING_SET request carrying the given dataset fields.

    Numeric fields default to 0 and string fields to "" when omitted;
    the native API treats those as "field not present".

    Raises:
        OSError: if the native otNodeSendPendingSet call fails.
    """
    # Use identity comparison with None (PEP 8); `== None` invokes __eq__.
    if pending_timestamp is None:
        pending_timestamp = 0
    if active_timestamp is None:
        active_timestamp = 0
    if delay_timer is None:
        delay_timer = 0
    if panid is None:
        panid = 0
    if channel is None:
        channel = 0
    if master_key is None:
        master_key = ""
    if mesh_local is None:
        mesh_local = ""
    if network_name is None:
        network_name = ""
    # Note the native argument order: active timestamp precedes pending.
    if self.Api.otNodeSendPendingSet(
        self.otNode,
        ctypes.c_ulonglong(active_timestamp),
        ctypes.c_ulonglong(pending_timestamp),
        ctypes.c_uint(delay_timer),
        ctypes.c_ushort(panid),
        ctypes.c_ushort(channel),
        master_key.encode('utf-8'),
        mesh_local.encode('utf-8'),
        network_name.encode('utf-8')
    ) != 0:
        raise OSError("otNodeSendPendingSet failed!")
def log(self, message):
    """Write *message* to the node's log through the native API.

    otNodeLog is declared with a ``c_char_p`` argument and every other
    ``c_char_p`` call site in this wrapper encodes utf-8, so encode here
    too for consistency (a raw ``str`` raises under Python 3 ctypes).
    """
    if isinstance(message, str):
        message = message.encode('utf-8')
    self.Api.otNodeLog(message)
def __init_dll(self, nodeid):
    """Initialize the API from a Windows DLL and create the native node.

    Loads otnodeapi.dll, declares ctypes argument/return types for every
    exported function used by this wrapper (ctypes would otherwise assume
    ``int`` everywhere), then calls otNodeInit to allocate the node.

    Raises:
        OSError: if the DLL cannot be loaded or otNodeInit fails.
    """
    # Load the DLL.  WinDLL raises on failure, so the None check below is
    # purely defensive; kept from the original implementation.
    self.Api = ctypes.WinDLL("otnodeapi.dll")
    if self.Api is None:
        raise OSError("Failed to load otnodeapi.dll!")
    # --- Declare function signatures -----------------------------------
    self.Api.otNodeLog.argtypes = [ctypes.c_char_p]
    self.Api.otNodeInit.argtypes = [ctypes.c_uint]
    self.Api.otNodeInit.restype = ctypes.c_void_p
    self.Api.otNodeFinalize.argtypes = [ctypes.c_void_p]
    self.Api.otNodeSetMode.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeInterfaceUp.argtypes = [ctypes.c_void_p]
    self.Api.otNodeInterfaceDown.argtypes = [ctypes.c_void_p]
    self.Api.otNodeThreadStart.argtypes = [ctypes.c_void_p]
    self.Api.otNodeThreadStop.argtypes = [ctypes.c_void_p]
    # Commissioner / joiner operations
    self.Api.otNodeCommissionerStart.argtypes = [ctypes.c_void_p]
    self.Api.otNodeCommissionerJoinerAdd.argtypes = [ctypes.c_void_p,
                                                     ctypes.c_char_p,
                                                     ctypes.c_char_p]
    self.Api.otNodeCommissionerStop.argtypes = [ctypes.c_void_p]
    self.Api.otNodeJoinerStart.argtypes = [ctypes.c_void_p,
                                           ctypes.c_char_p,
                                           ctypes.c_char_p]
    self.Api.otNodeJoinerStop.argtypes = [ctypes.c_void_p]
    # Whitelist management
    self.Api.otNodeClearWhitelist.argtypes = [ctypes.c_void_p]
    self.Api.otNodeEnableWhitelist.argtypes = [ctypes.c_void_p]
    self.Api.otNodeDisableWhitelist.argtypes = [ctypes.c_void_p]
    self.Api.otNodeAddWhitelist.argtypes = [ctypes.c_void_p,
                                            ctypes.c_char_p,
                                            ctypes.c_byte]
    self.Api.otNodeRemoveWhitelist.argtypes = [ctypes.c_void_p,
                                               ctypes.c_char_p]
    # Addressing / identity accessors
    self.Api.otNodeGetAddr16.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetAddr16.restype = ctypes.c_ushort
    self.Api.otNodeGetAddr64.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetAddr64.restype = ctypes.c_char_p
    self.Api.otNodeGetHashMacAddress.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetHashMacAddress.restype = ctypes.c_char_p
    self.Api.otNodeSetChannel.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
    self.Api.otNodeGetChannel.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetChannel.restype = ctypes.c_ubyte
    self.Api.otNodeSetMasterkey.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeGetMasterkey.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetMasterkey.restype = ctypes.c_char_p
    self.Api.otNodeGetKeySequenceCounter.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetKeySequenceCounter.restype = ctypes.c_uint
    self.Api.otNodeSetKeySequenceCounter.argtypes = [ctypes.c_void_p,
                                                     ctypes.c_uint]
    self.Api.otNodeSetKeySwitchGuardTime.argtypes = [ctypes.c_void_p,
                                                     ctypes.c_uint]
    self.Api.otNodeSetNetworkIdTimeout.argtypes = [ctypes.c_void_p,
                                                   ctypes.c_ubyte]
    self.Api.otNodeGetNetworkName.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetNetworkName.restype = ctypes.c_char_p
    self.Api.otNodeSetNetworkName.argtypes = [ctypes.c_void_p,
                                              ctypes.c_char_p]
    self.Api.otNodeGetPanId.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetPanId.restype = ctypes.c_ushort
    self.Api.otNodeSetPanId.argtypes = [ctypes.c_void_p, ctypes.c_ushort]
    self.Api.otNodeGetPartitionId.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetPartitionId.restype = ctypes.c_uint
    self.Api.otNodeSetPartitionId.argtypes = [ctypes.c_void_p, ctypes.c_uint]
    # Router role tuning
    self.Api.otNodeSetRouterUpgradeThreshold.argtypes = [ctypes.c_void_p,
                                                         ctypes.c_ubyte]
    self.Api.otNodeSetRouterDowngradeThreshold.argtypes = [ctypes.c_void_p,
                                                           ctypes.c_ubyte]
    self.Api.otNodeReleaseRouterId.argtypes = [ctypes.c_void_p,
                                               ctypes.c_ubyte]
    self.Api.otNodeGetState.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetState.restype = ctypes.c_char_p
    self.Api.otNodeSetState.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeGetTimeout.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetTimeout.restype = ctypes.c_uint
    self.Api.otNodeSetTimeout.argtypes = [ctypes.c_void_p, ctypes.c_uint]
    self.Api.otNodeGetWeight.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetWeight.restype = ctypes.c_ubyte
    self.Api.otNodeSetWeight.argtypes = [ctypes.c_void_p, ctypes.c_ubyte]
    self.Api.otNodeAddIpAddr.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeGetAddrs.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetAddrs.restype = ctypes.c_char_p
    self.Api.otNodeGetContextReuseDelay.argtypes = [ctypes.c_void_p]
    self.Api.otNodeGetContextReuseDelay.restype = ctypes.c_uint
    self.Api.otNodeSetContextReuseDelay.argtypes = [ctypes.c_void_p,
                                                    ctypes.c_uint]
    # Network data (prefixes / routes)
    self.Api.otNodeAddPrefix.argtypes = [ctypes.c_void_p,
                                         ctypes.c_char_p,
                                         ctypes.c_char_p,
                                         ctypes.c_char_p]
    self.Api.otNodeRemovePrefix.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeAddRoute.argtypes = [ctypes.c_void_p,
                                        ctypes.c_char_p,
                                        ctypes.c_char_p]
    self.Api.otNodeRemoveRoute.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    self.Api.otNodeRegisterNetdata.argtypes = [ctypes.c_void_p]
    # Scanning / diagnostics
    self.Api.otNodeEnergyScan.argtypes = [ctypes.c_void_p,
                                          ctypes.c_uint,
                                          ctypes.c_ubyte,
                                          ctypes.c_ushort,
                                          ctypes.c_ushort,
                                          ctypes.c_char_p]
    self.Api.otNodePanIdQuery.argtypes = [ctypes.c_void_p,
                                          ctypes.c_ushort,
                                          ctypes.c_uint,
                                          ctypes.c_char_p]
    self.Api.otNodeScan.argtypes = [ctypes.c_void_p]
    self.Api.otNodeScan.restype = ctypes.c_char_p
    # c_uint16 is an alias of c_ushort; kept as originally written.
    self.Api.otNodePing.argtypes = [ctypes.c_void_p,
                                    ctypes.c_char_p,
                                    ctypes.c_ushort,
                                    ctypes.c_uint,
                                    ctypes.c_uint16]
    self.Api.otNodePing.restype = ctypes.c_uint
    self.Api.otNodeSetRouterSelectionJitter.argtypes = [ctypes.c_void_p,
                                                        ctypes.c_ubyte]
    self.Api.otNodeCommissionerAnnounceBegin.argtypes = [ctypes.c_void_p,
                                                         ctypes.c_uint,
                                                         ctypes.c_ubyte,
                                                         ctypes.c_ushort,
                                                         ctypes.c_char_p]
    # Dataset management
    self.Api.otNodeSetActiveDataset.argtypes = [ctypes.c_void_p,
                                                ctypes.c_ulonglong,
                                                ctypes.c_ushort,
                                                ctypes.c_ushort,
                                                ctypes.c_uint,
                                                ctypes.c_char_p]
    self.Api.otNodeSetPendingDataset.argtypes = [ctypes.c_void_p,
                                                 ctypes.c_ulonglong,
                                                 ctypes.c_ulonglong,
                                                 ctypes.c_ushort,
                                                 ctypes.c_ushort]
    self.Api.otNodeSendPendingSet.argtypes = [ctypes.c_void_p,
                                              ctypes.c_ulonglong,
                                              ctypes.c_ulonglong,
                                              ctypes.c_uint,
                                              ctypes.c_ushort,
                                              ctypes.c_ushort,
                                              ctypes.c_char_p,
                                              ctypes.c_char_p,
                                              ctypes.c_char_p]
    self.Api.otNodeSendActiveSet.argtypes = [ctypes.c_void_p,
                                             ctypes.c_ulonglong,
                                             ctypes.c_ushort,
                                             ctypes.c_ushort,
                                             ctypes.c_uint,
                                             ctypes.c_char_p,
                                             ctypes.c_char_p,
                                             ctypes.c_char_p,
                                             ctypes.c_char_p,
                                             ctypes.c_char_p]
    self.Api.otNodeSetMaxChildren.argtypes = [ctypes.c_void_p,
                                              ctypes.c_ubyte]
    # Initialize a new node (use `is None`, not `== None`)
    self.otNode = self.Api.otNodeInit(ctypes.c_uint(nodeid))
    if self.otNode is None:
        raise OSError("otNodeInit failed!")
| |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Varnish Software AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Builders define actions that the Jenkins job should execute. Examples
include shell scripts or maven targets. The ``builders`` attribute in
the :ref:`Job` definition accepts a list of builders to invoke. They
may be components defined below, locally defined macros (using the top
level definition of ``builder:``), or locally defined components found
via the ``jenkins_jobs.builders`` entry point.
**Component**: builders
:Macro: builder
:Entry Point: jenkins_jobs.builders
Example::
job:
name: test_job
builders:
- shell: "make test"
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
import logging
logger = logging.getLogger(__name__)
def shell(parser, xml_parent, data):
    """yaml: shell
    Execute a shell command.
    :Parameter: the shell command to execute
    Example::
      builders:
        - shell: "make test"
    """
    node = XML.SubElement(xml_parent, 'hudson.tasks.Shell')
    XML.SubElement(node, 'command').text = data
def copyartifact(parser, xml_parent, data):
    """yaml: copyartifact
    Copy artifact from another project. Requires the Jenkins `Copy Artifact
    plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Copy+Artifact+Plugin>`_
    :arg str project: Project to copy from
    :arg str filter: what files to copy
    :arg str target: Target base directory for copy, blank means use workspace
    :arg bool flatten: Flatten directories (default: false)
    :arg bool optional: do not fail the build if the artifact is missing
        for any reason (default: false)
    :arg str which-build: which build to get artifacts from
        (optional, default last-successful); one of last-successful,
        specific-build, last-saved, upstream-build, permalink,
        workspace-latest or build-param
    :arg str build-number: the build number to copy from when
        specific-build is specified as which-build
    :arg str permalink: the permalink to copy from when permalink is
        specified as which-build; one of last, last-stable,
        last-successful, last-failed, last-unstable or last-unsuccessful
    :arg bool stable: only use the last stable build when last-successful
        is specified as which-build
    :arg bool fallback-to-last-successful: fall back to the last successful
        build when upstream-build is specified as which-build
    :arg string param: name of the build parameter to use when build-param
        is specified as which-build
    :arg string parameter-filters: Filter matching jobs based on these
        parameters (optional)
    """
    node = XML.SubElement(xml_parent,
                          'hudson.plugins.copyartifact.CopyArtifact')
    # Warning: this only works with copy artifact version 1.26+,
    # for copy artifact version 1.25- the 'projectName' element needs
    # to be used instead of 'project'
    XML.SubElement(node, 'project').text = data["project"]
    XML.SubElement(node, 'filter').text = data.get("filter", "")
    XML.SubElement(node, 'target').text = data.get("target", "")
    XML.SubElement(node, 'flatten').text = str(
        data.get("flatten", False)).lower()
    XML.SubElement(node, 'optional').text = str(
        data.get('optional', False)).lower()
    XML.SubElement(node, 'parameters').text = data.get("parameter-filters", "")
    select = data.get('which-build', 'last-successful')
    selectors = {'last-successful': 'StatusBuildSelector',
                 'specific-build': 'SpecificBuildSelector',
                 'last-saved': 'SavedBuildSelector',
                 'upstream-build': 'TriggeredBuildSelector',
                 'permalink': 'PermalinkBuildSelector',
                 'workspace-latest': 'WorkspaceSelector',
                 'build-param': 'ParameterizedBuildSelector'}
    if select not in selectors:
        raise JenkinsJobsException("which-build entered is not valid must be "
                                   "one of: last-successful, specific-build, "
                                   "last-saved, upstream-build, permalink, "
                                   "workspace-latest, or build-param")
    permalink = data.get('permalink', 'last')
    permalinks = {'last': 'lastBuild',
                  'last-stable': 'lastStableBuild',
                  'last-successful': 'lastSuccessfulBuild',
                  'last-failed': 'lastFailedBuild',
                  'last-unstable': 'lastUnstableBuild',
                  'last-unsuccessful': 'lastUnsuccessfulBuild'}
    if permalink not in permalinks:
        raise JenkinsJobsException("permalink entered is not valid must be "
                                   "one of: last, last-stable, "
                                   "last-successful, last-failed, "
                                   "last-unstable, or last-unsuccessful")
    selector = XML.SubElement(node, 'selector',
                              {'class': 'hudson.plugins.copyartifact.' +
                               selectors[select]})
    # Selector-specific child elements.
    if select == 'specific-build':
        XML.SubElement(selector, 'buildNumber').text = data['build-number']
    if select == 'last-successful':
        XML.SubElement(selector, 'stable').text = str(
            data.get('stable', False)).lower()
    if select == 'upstream-build':
        XML.SubElement(selector, 'fallbackToLastSuccessful').text = str(
            data.get('fallback-to-last-successful', False)).lower()
    if select == 'permalink':
        XML.SubElement(selector, 'id').text = permalinks[permalink]
    if select == 'build-param':
        XML.SubElement(selector, 'parameterName').text = data['param']
def ant(parser, xml_parent, data):
    """yaml: ant
    Execute an ant target. Requires the Jenkins `Ant Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Ant+Plugin>`_
    To setup this builder you can either reference the list of targets
    or use named parameters. Below is a description of both forms:
    *1) Listing targets:*
    After the ant directive, simply pass as argument a space separated list
    of targets to build.
    :Parameter: space separated list of Ant targets
    :arg str ant-name: the name of the ant installation,
        defaults to 'default' (optional)
    Example to call two Ant targets::
      builders:
        - ant: "target1 target2"
          ant-name: "Standard Ant"
    The build file would be whatever the Jenkins Ant Plugin is set to use
    per default (i.e build.xml in the workspace root).
    *2) Using named parameters:*
    :arg str targets: the space separated list of ANT targets.
    :arg str buildfile: the path to the ANT build file.
    :arg list properties: Passed to ant script using -Dkey=value (optional)
    :arg str ant-name: the name of the ant installation,
        defaults to 'default' (optional)
    :arg str java-opts: java options for ant, can have multiples,
        must be in quotes (optional)
    Example specifying the build file too and several targets::
      builders:
        - ant:
            targets: "debug test install"
            buildfile: "build.xml"
            properties:
              builddir: "/tmp/"
              failonerror: true
            java-opts:
              - "-ea"
              - "-Xmx512m"
            ant-name: "Standard Ant"
    """
    ant = XML.SubElement(xml_parent, 'hudson.tasks.Ant')
    # Support for short form: -ant: "target"
    if isinstance(data, str):
        data = {'targets': data}
    # Use items() instead of the Python-2-only iteritems() so the module
    # also runs under Python 3; the settings are mutually exclusive, so an
    # elif chain replaces the original repeated ifs.
    for setting, value in data.items():
        if setting == 'targets':
            XML.SubElement(ant, 'targets').text = value
        elif setting == 'buildfile':
            XML.SubElement(ant, 'buildFile').text = value
        elif setting == 'properties':
            # Rendered as one "key=value" per line for the plugin.
            prop_string = ''
            for prop, val in value.items():
                prop_string += "%s=%s\n" % (prop, val)
            XML.SubElement(ant, 'properties').text = prop_string
        elif setting == 'java-opts':
            XML.SubElement(ant, 'antOpts').text = ' '.join(value)
    XML.SubElement(ant, 'antName').text = data.get('ant-name', 'default')
def trigger_builds(parser, xml_parent, data):
    """yaml: trigger-builds
    Trigger builds of other jobs.
    Requires the Jenkins `Parameterized Trigger Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/
    Parameterized+Trigger+Plugin>`_
    :arg str project: the Jenkins project to trigger
    :arg str predefined-parameters: key/value pairs to be passed to the
        job (optional)
    :arg bool current-parameters: Whether to include the parameters passed
        to the current build to the triggered job.
    :arg bool svn-revision: Whether to pass the svn revision to the
        triggered job
    :arg bool block: whether to wait for the triggered jobs to finish or
        not (default false)
    Example::
      builders:
        - trigger-builds:
            - project: "build_started"
              predefined-parameters: FOO="bar"
              block: true
    """
    tbuilder = XML.SubElement(xml_parent,
                              'hudson.plugins.parameterizedtrigger.'
                              'TriggerBuilder')
    configs = XML.SubElement(tbuilder, 'configs')
    for pdef in data:
        # Skip entries with a missing or empty project name.
        if pdef.get('project', '') == '':
            logger.debug("No project specified - skipping trigger-build")
            continue
        tconfig = XML.SubElement(configs,
                                 'hudson.plugins.parameterizedtrigger.'
                                 'BlockableBuildTriggerConfig')
        param_configs = XML.SubElement(tconfig, 'configs')
        if pdef.get('current-parameters'):
            XML.SubElement(param_configs,
                           'hudson.plugins.parameterizedtrigger.'
                           'CurrentBuildParameters')
        if pdef.get('svn-revision'):
            XML.SubElement(param_configs,
                           'hudson.plugins.parameterizedtrigger.'
                           'SubversionRevisionBuildParameters')
        if 'predefined-parameters' in pdef:
            params = XML.SubElement(param_configs,
                                    'hudson.plugins.parameterizedtrigger.'
                                    'PredefinedBuildParameters')
            XML.SubElement(params, 'properties').text = \
                pdef['predefined-parameters']
        if len(list(param_configs)) == 0:
            # Jenkins expects an explicit empty-collection marker when no
            # parameter source was configured.
            param_configs.set('class', 'java.util.Collections$EmptyList')
        XML.SubElement(tconfig, 'projects').text = pdef['project']
        XML.SubElement(tconfig, 'condition').text = 'ALWAYS'
        XML.SubElement(tconfig, 'triggerWithNoParameters').text = 'false'
        XML.SubElement(tconfig, 'buildAllNodesWithLabel').text = 'false'
        if pdef.get('block', False):
            blk = XML.SubElement(tconfig, 'block')
            # (tag, name, ordinal, color) triples mirror the plugin's
            # built-in result thresholds.
            for tag, name, ordinal, color in (
                    ('buildStepFailureThreshold', 'FAILURE', '2', 'RED'),
                    ('unstableThreshold', 'UNSTABLE', '1', 'Yellow'),
                    ('failureThreshold', 'FAILURE', '2', 'RED')):
                threshold = XML.SubElement(blk, tag)
                XML.SubElement(threshold, 'name').text = name
                XML.SubElement(threshold, 'ordinal').text = ordinal
                XML.SubElement(threshold, 'color').text = color
    # If configs is empty, remove the entire tbuilder tree.
    if len(configs) == 0:
        logger.debug("Pruning empty TriggerBuilder tree.")
        xml_parent.remove(tbuilder)
def builders_from(parser, xml_parent, data):
    """yaml: builders-from
    Use builders from another project.
    Requires the Jenkins `Template Project Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Template+Project+Plugin>`_
    :arg str projectName: the name of the other project
    Example::
      builders:
        - builders-from:
            - project: "base-build"
    """
    proxy = XML.SubElement(xml_parent,
                           'hudson.plugins.templateproject.ProxyBuilder')
    XML.SubElement(proxy, 'projectName').text = data
def inject(parser, xml_parent, data):
    """yaml: inject
    Inject an environment for the job.
    Requires the Jenkins `EnvInject Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/EnvInject+Plugin>`_
    :arg str properties-file: the name of the property file (optional)
    :arg str properties-content: the properties content (optional)
    Example::
      builders:
        - inject:
            properties-file: example.prop
            properties-content: EXAMPLE=foo-bar
    """
    builder = XML.SubElement(xml_parent, 'EnvInjectBuilder')
    info = XML.SubElement(builder, 'info')
    # Helper only emits the element when the value is non-blank.
    add_nonblank = jenkins_jobs.modules.base.add_nonblank_xml_subelement
    add_nonblank(info, 'propertiesFilePath', data.get('properties-file'))
    add_nonblank(info, 'propertiesContent', data.get('properties-content'))
def artifact_resolver(parser, xml_parent, data):
    """yaml: artifact-resolver
    Allows one to resolve artifacts from a maven repository like nexus
    (without having maven installed)
    Requires the Jenkins `Repository Connector Plugin
    <https://wiki.jenkins-ci.org/display/JENKINS/Repository+Connector+Plugin>`_
    :arg bool fail-on-error: Whether to fail the build on error (default false)
    :arg bool repository-logging: Enable repository logging (default false)
    :arg str target-directory: Where to resolve artifacts to
    :arg list artifacts: list of artifacts to resolve
    :Artifact: * **group-id** (`str`) -- Group ID of the artifact
               * **artifact-id** (`str`) -- Artifact ID of the artifact
               * **version** (`str`) -- Version of the artifact
               * **classifier** (`str`) -- Classifier of the artifact
                 (default '')
               * **extension** (`str`) -- Extension of the artifact
                 (default 'jar')
               * **target-file-name** (`str`) -- What to name the artifact
                 (default '')
    """
    resolver = XML.SubElement(xml_parent,
                              'org.jvnet.hudson.plugins.repositoryconnector.'
                              'ArtifactResolver')
    XML.SubElement(resolver, 'targetDirectory').text = \
        data['target-directory']
    container = XML.SubElement(resolver, 'artifacts')
    for spec in data['artifacts']:
        entry = XML.SubElement(container,
                               'org.jvnet.hudson.plugins.'
                               'repositoryconnector.Artifact')
        XML.SubElement(entry, 'groupId').text = spec['group-id']
        XML.SubElement(entry, 'artifactId').text = spec['artifact-id']
        XML.SubElement(entry, 'classifier').text = spec.get('classifier', '')
        XML.SubElement(entry, 'version').text = spec['version']
        XML.SubElement(entry, 'extension').text = spec.get('extension', 'jar')
        XML.SubElement(entry, 'targetFileName').text = spec.get(
            'target-file-name', '')
    XML.SubElement(resolver, 'failOnError').text = str(
        data.get('fail-on-error', False)).lower()
    XML.SubElement(resolver, 'enableRepoLogging').text = str(
        data.get('repository-logging', False)).lower()
    # Update/checksum policies are hard-coded, matching the plugin defaults
    # this builder has always emitted.
    XML.SubElement(resolver, 'snapshotUpdatePolicy').text = 'never'
    XML.SubElement(resolver, 'releaseUpdatePolicy').text = 'never'
    XML.SubElement(resolver, 'snapshotChecksumPolicy').text = 'warn'
    XML.SubElement(resolver, 'releaseChecksumPolicy').text = 'warn'
def gradle(parser, xml_parent, data):
    """yaml: gradle
    Execute gradle tasks. Requires the Jenkins `Gradle Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Gradle+Plugin>`_
    :arg str tasks: List of tasks to execute
    :arg str gradle-name: Use a custom gradle name (optional)
    :arg bool wrapper: use gradle wrapper (default false)
    :arg bool executable: make gradlew executable (default false)
    :arg list switches: Switches for gradle, can have multiples
    :arg bool use-root-dir: Whether to run the gradle script from the
        top level directory or from a different location (default false)
    :arg str root-build-script-dir: If your workspace has the
        top-level build.gradle in somewhere other than the module
        root directory, specify the path (relative to the module
        root) here, such as ${workspace}/parent/ instead of just
        ${workspace}.
    """
    node = XML.SubElement(xml_parent, 'hudson.plugins.gradle.Gradle')

    def _flag(key):
        # Render an optional boolean option as the lowercase string
        # Jenkins expects ('true'/'false'), defaulting to false.
        return str(data.get(key, False)).lower()

    XML.SubElement(node, 'description').text = ''
    XML.SubElement(node, 'tasks').text = data['tasks']
    XML.SubElement(node, 'buildFile').text = ''
    XML.SubElement(node, 'rootBuildScriptDir').text = data.get(
        'root-build-script-dir', '')
    XML.SubElement(node, 'gradleName').text = data.get('gradle-name', '')
    XML.SubElement(node, 'useWrapper').text = _flag('wrapper')
    XML.SubElement(node, 'makeExecutable').text = _flag('executable')
    XML.SubElement(node, 'switches').text = '\n'.join(
        data.get('switches', []))
    XML.SubElement(node, 'fromRootBuildScriptDir').text = _flag(
        'use-root-dir')
def batch(parser, xml_parent, data):
    """yaml: batch
    Execute a batch command.
    :Parameter: the batch command to execute
    Example::
      builders:
        - batch: "foo/foo.bat"
    """
    node = XML.SubElement(xml_parent, 'hudson.tasks.BatchFile')
    XML.SubElement(node, 'command').text = data
def msbuild(parser, xml_parent, data):
    """yaml: msbuild
    Build .NET project using msbuild. Requires the `Jenkins MSBuild Plugin
    <https://wiki.jenkins-ci.org/display/JENKINS/MSBuild+Plugin>`_.
    :arg str msbuild-version: which msbuild configured in Jenkins to use
        (optional)
    :arg str solution-file: location of the solution file to build
    :arg str extra-parameters: extra parameters to pass to msbuild (optional)
    :arg bool pass-build-variables: should build variables be passed
        to msbuild (defaults to true)
    :arg bool continue-on-build-failure: should the build continue if
        msbuild returns an error (defaults to false)
    """
    builder = XML.SubElement(xml_parent,
                             'hudson.plugins.msbuild.MsBuildBuilder')
    XML.SubElement(builder, 'msBuildName').text = data.get(
        'msbuild-version', '(Default)')
    XML.SubElement(builder, 'msBuildFile').text = data['solution-file']
    XML.SubElement(builder, 'cmdLineArgs').text = data.get(
        'extra-parameters', '')
    XML.SubElement(builder, 'buildVariablesAsProperties').text = str(
        data.get('pass-build-variables', True)).lower()
    XML.SubElement(builder, 'continueOnBuildFailure').text = str(
        data.get('continue-on-build-failure', False)).lower()
def create_builders(parser, step):
    """Render *step* through the registered builder components and return
    the produced XML nodes as a list."""
    staging_area = XML.Element("dummy")
    parser.registry.dispatch('builder', parser, staging_area, step)
    return list(staging_area)
def conditional_step(parser, xml_parent, data):
    """yaml: conditional-step
    Conditionally execute some build steps. Requires the Jenkins `Conditional
    BuildStep Plugin`_.
    Depending on the number of declared steps, a `Conditional step (single)`
    or a `Conditional steps (multiple)` is created in Jenkins.
    :arg str condition-kind: Condition kind that must be verified before the
        steps are executed. Valid values and their additional attributes are
        described in the conditions_ table.
    :arg str on-evaluation-failure: What should be the outcome of the build
        if the evaluation of the condition fails. Possible values are `fail`,
        `mark-unstable`, `run-and-mark-unstable`, `run` and `dont-run`.
        Default is `fail`.
    :arg list steps: List of steps to run if the condition is verified. Items
        in the list can be any builder known by Jenkins Job Builder.
    .. _conditions:
    ================== ====================================================
    Condition kind     Description
    ================== ====================================================
    always             Condition is always verified
    never              Condition is never verified
    boolean-expression Run the step if the expression expands to a
                       representation of true
                           :condition-expression: Expression to expand
    current-status     Run the build step if the current build status is
                       within the configured range
                           :condition-worst: Worst status
                           :condition-best: Best status
    shell              Run the step if the shell command succeeds
                           :condition-command: Shell command to execute
    windows-shell      Similar to shell, except that commands will be
                       executed by cmd, under Windows
                           :condition-command: Command to execute
    file-exists        Run the step if a file exists
                           :condition-filename: Check existence of this file
                           :condition-basedir: If condition-filename is
                           relative, it will be considered relative to
                           either `workspace`, `artifact-directory`,
                           or `jenkins-home`. Default is `workspace`.
    ================== ====================================================
    Example::
      builders:
        - conditional-step:
            condition-kind: boolean-expression
            condition-expression: "${ENV,var=IS_STABLE_BRANCH}"
            on-evaluation-failure: mark-unstable
            steps:
                - shell: "echo Making extra checks"
    .. _Conditional BuildStep Plugin: https://wiki.jenkins-ci.org/display/
      JENKINS/Conditional+BuildStep+Plugin
    """
    # Closure: reads root_tag / condition_tag bound later in the main body.
    # Maps the YAML 'condition-kind' onto the run-condition plugin's Java
    # class and emits that kind's specific child elements.
    def build_condition(cdata):
        kind = cdata['condition-kind']
        ctag = XML.SubElement(root_tag, condition_tag)
        if kind == "always":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.core.AlwaysRun')
        elif kind == "never":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.core.NeverRun')
        elif kind == "boolean-expression":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.core.'
                     'BooleanCondition')
            XML.SubElement(ctag, "token").text = cdata['condition-expression']
        elif kind == "current-status":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.core.'
                     'StatusCondition')
            wr = XML.SubElement(ctag, 'worstResult')
            XML.SubElement(wr, "name").text = cdata['condition-worst']
            br = XML.SubElement(ctag, 'bestResult')
            XML.SubElement(br, "name").text = cdata['condition-best']
        elif kind == "shell":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.contributed.'
                     'ShellCondition')
            XML.SubElement(ctag, "command").text = cdata['condition-command']
        elif kind == "windows-shell":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.contributed.'
                     'BatchFileCondition')
            XML.SubElement(ctag, "command").text = cdata['condition-command']
        elif kind == "file-exists":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.core.'
                     'FileExistsCondition')
            XML.SubElement(ctag, "file").text = cdata['condition-filename']
            # condition-basedir selects which BaseDirectory inner class a
            # relative condition-filename is resolved against.
            basedir = cdata.get('condition-basedir', 'workspace')
            basedir_tag = XML.SubElement(ctag, "baseDir")
            if "workspace" == basedir:
                basedir_tag.set('class',
                                'org.jenkins_ci.plugins.run_condition.common.'
                                'BaseDirectory$Workspace')
            elif "artifact-directory" == basedir:
                basedir_tag.set('class',
                                'org.jenkins_ci.plugins.run_condition.common.'
                                'BaseDirectory$ArtifactsDir')
            elif "jenkins-home" == basedir:
                basedir_tag.set('class',
                                'org.jenkins_ci.plugins.run_condition.common.'
                                'BaseDirectory$JenkinsHome')
    # Closure: renders one nested builder via create_builders().  In the
    # single-step variant each node is re-tagged as <buildStep> with its
    # original tag preserved in the 'class' attribute.
    def build_step(parent, step):
        for edited_node in create_builders(parser, step):
            if not has_multiple_steps:
                edited_node.set('class', edited_node.tag)
                edited_node.tag = 'buildStep'
            parent.append(edited_node)
    cond_builder_tag = 'org.jenkinsci.plugins.conditionalbuildstep.' \
                       'singlestep.SingleConditionalBuilder'
    cond_builders_tag = 'org.jenkinsci.plugins.conditionalbuildstep.' \
                        'ConditionalBuilder'
    # One declared step uses SingleConditionalBuilder; several use
    # ConditionalBuilder, which nests its steps under <conditionalbuilders>
    # and names the condition element differently.
    steps = data['steps']
    has_multiple_steps = len(steps) > 1
    if has_multiple_steps:
        root_tag = XML.SubElement(xml_parent, cond_builders_tag)
        steps_parent = XML.SubElement(root_tag, "conditionalbuilders")
        condition_tag = "runCondition"
    else:
        root_tag = XML.SubElement(xml_parent, cond_builder_tag)
        steps_parent = root_tag
        condition_tag = "condition"
    build_condition(data)
    # Map 'on-evaluation-failure' onto the plugin's BuildStepRunner class;
    # unknown values raise KeyError, the default is 'fail'.
    evaluation_classes_pkg = 'org.jenkins_ci.plugins.run_condition'
    evaluation_classes = {
        'fail': evaluation_classes_pkg + '.BuildStepRunner$Fail',
        'mark-unstable': evaluation_classes_pkg + '.BuildStepRunner$Unstable',
        'run-and-mark-unstable': evaluation_classes_pkg +
        '.BuildStepRunner$RunUnstable',
        'run': evaluation_classes_pkg + '.BuildStepRunner$Run',
        'dont-run': evaluation_classes_pkg + '.BuildStepRunner$DontRun',
    }
    evaluation_class = evaluation_classes[data.get('on-evaluation-failure',
                                                   'fail')]
    XML.SubElement(root_tag, "runner").set('class',
                                           evaluation_class)
    for step in steps:
        build_step(steps_parent, step)
def maven_target(parser, xml_parent, data):
    """yaml: maven-target
    Execute top-level Maven targets
    :arg str goals: Goals to execute
    :arg str properties: Properties for maven, can have multiples
    :arg str pom: Location of pom.xml (defaults to pom.xml)
    :arg bool private-repository: Use private maven repository for this
      job (defaults to false)
    :arg str maven-version: Installation of maven which should be used
      (optional)
    :arg str java-opts: java options for maven, can have multiples,
        must be in quotes (optional)
    :arg str settings: Path to use as user settings.xml (optional)
    :arg str global-settings: Path to use as global settings.xml (optional)
    Example:
    .. literalinclude:: ../../tests/builders/fixtures/maven-target-doc.yaml
    """
    maven = XML.SubElement(xml_parent, 'hudson.tasks.Maven')
    XML.SubElement(maven, 'targets').text = data['goals']
    # Multiple properties are joined into one newline-separated string.
    XML.SubElement(maven, 'properties').text = '\n'.join(
        data.get('properties', []))
    if 'maven-version' in data:
        XML.SubElement(maven, 'mavenName').text = str(data['maven-version'])
    if 'pom' in data:
        XML.SubElement(maven, 'pom').text = str(data['pom'])
    XML.SubElement(maven, 'usePrivateRepository').text = str(
        data.get('private-repository', False)).lower()
    if 'java-opts' in data:
        XML.SubElement(maven, 'jvmOptions').text = ' '.join(
            data.get('java-opts', []))
    # User settings: explicit file path provider when configured,
    # otherwise fall back to the Jenkins default provider.
    if 'settings' in data:
        settings = XML.SubElement(
            maven, 'settings',
            {'class': 'jenkins.mvn.FilePathSettingsProvider'})
        XML.SubElement(settings, 'path').text = data.get('settings')
    else:
        XML.SubElement(maven, 'settings',
                       {'class': 'jenkins.mvn.DefaultSettingsProvider'})
    # Same pattern for the global settings file.
    if 'global-settings' in data:
        global_settings = XML.SubElement(
            maven, 'globalSettings',
            {'class': 'jenkins.mvn.FilePathGlobalSettingsProvider'})
        XML.SubElement(global_settings, 'path').text = data.get(
            'global-settings')
    else:
        XML.SubElement(maven, 'globalSettings',
                       {'class': 'jenkins.mvn.DefaultGlobalSettingsProvider'})
def multijob(parser, xml_parent, data):
    """yaml: multijob
    Define a multijob phase. Requires the Jenkins `Multijob Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Multijob+Plugin>`_
    This builder may only be used in \
    :py:class:`jenkins_jobs.modules.project_multijob.MultiJob` projects.
    :arg str name: MultiJob phase name
    :arg str condition: when to trigger the other job (default 'SUCCESSFUL')
    :arg list projects: list of projects to include in the MultiJob phase
    :Project: * **name** (`str`) -- Project name
              * **current-parameters** (`bool`) -- Pass current build
                parameters to the other job (default false)
              * **node-label-name** (`str`) -- Define a list of nodes
                on which the job should be allowed to be executed on.
                Requires NodeLabel Parameter Plugin (optional)
              * **node-label** (`str`) -- Define a label
                of 'Restrict where this project can be run' on the fly.
                Requires NodeLabel Parameter Plugin (optional)
              * **git-revision** (`bool`) -- Pass current git-revision
                to the other job (default false)
              * **property-file** (`str`) -- Pass properties from file
                to the other job (optional)
              * **predefined-parameters** (`str`) -- Pass predefined
                parameters to the other job (optional)
    Example::
      builders:
        - multijob:
            name: PhaseOne
            condition: SUCCESSFUL
            projects:
              - name: PhaseOneJobA
                current-parameters: true
                node-label-name: "vm_name"
                node-label: "agent-${BUILD_NUMBER}"
                git-revision: true
              - name: PhaseOneJobB
                current-parameters: true
                property-file: build.props
        - multijob:
            name: PhaseTwo
            condition: UNSTABLE
            projects:
              - name: PhaseTwoJobA
                current-parameters: true
                predefined-parameters: foo=bar
              - name: PhaseTwoJobB
                current-parameters: false
    """
    # Root element understood by the Multijob plugin.
    builder = XML.SubElement(xml_parent, 'com.tikal.jenkins.plugins.multijob.'
                                         'MultiJobBuilder')
    XML.SubElement(builder, 'phaseName').text = data['name']
    condition = data.get('condition', 'SUCCESSFUL')
    XML.SubElement(builder, 'continuationCondition').text = condition
    phaseJobs = XML.SubElement(builder, 'phaseJobs')
    # One PhaseJobsConfig element per downstream project in this phase.
    for project in data.get('projects', []):
        phaseJob = XML.SubElement(phaseJobs, 'com.tikal.jenkins.plugins.'
                                             'multijob.PhaseJobsConfig')
        XML.SubElement(phaseJob, 'jobName').text = project['name']
        # Pass through the current build params
        currParams = str(project.get('current-parameters', False)).lower()
        XML.SubElement(phaseJob, 'currParams').text = currParams
        # Pass through other params
        configs = XML.SubElement(phaseJob, 'configs')
        # Node label restriction: only emitted when BOTH the parameter
        # name and the label value are supplied.
        nodeLabelName = project.get('node-label-name')
        nodeLabel = project.get('node-label')
        if (nodeLabelName and nodeLabel):
            node = XML.SubElement(
                configs, 'org.jvnet.jenkins.plugins.nodelabelparameter.'
                         'parameterizedtrigger.NodeLabelBuildParameter')
            XML.SubElement(node, 'name').text = nodeLabelName
            XML.SubElement(node, 'nodeLabel').text = nodeLabel
        # Git Revision
        if project.get('git-revision', False):
            param = XML.SubElement(configs,
                                   'hudson.plugins.git.'
                                   'GitRevisionBuildParameters')
            combine = XML.SubElement(param, 'combineQueuedCommits')
            combine.text = 'false'
        # Properties File
        properties_file = project.get('property-file', False)
        if properties_file:
            param = XML.SubElement(configs,
                                   'hudson.plugins.parameterizedtrigger.'
                                   'FileBuildParameters')
            propertiesFile = XML.SubElement(param, 'propertiesFile')
            propertiesFile.text = properties_file
            failOnMissing = XML.SubElement(param, 'failTriggerOnMissing')
            failOnMissing.text = 'true'
        # Predefined Parameters
        predefined_parameters = project.get('predefined-parameters', False)
        if predefined_parameters:
            param = XML.SubElement(configs,
                                   'hudson.plugins.parameterizedtrigger.'
                                   'PredefinedBuildParameters')
            properties = XML.SubElement(param, 'properties')
            properties.text = predefined_parameters
def grails(parser, xml_parent, data):
    """yaml: grails
    Execute a grails build step. Requires the `Jenkins Grails Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Grails+Plugin>`_
    :arg bool use-wrapper: Use a grails wrapper (default false)
    :arg str name: Select a grails installation to use (optional)
    :arg bool force-upgrade: Run 'grails upgrade --non-interactive'
                             first (default false)
    :arg bool non-interactive: append --non-interactive to all build targets
                               (default false)
    :arg str targets: Specify target(s) to run separated by spaces
    :arg str server-port: Specify a value for the server.port system
                          property (optional)
    :arg str work-dir: Specify a value for the grails.work.dir system
                       property (optional)
    :arg str project-dir: Specify a value for the grails.project.work.dir
                          system property (optional)
    :arg str base-dir: Specify a path to the root of the Grails
                       project (optional)
    :arg str properties: Additional system properties to set (optional)
    :arg bool plain-output: append --plain-output to all build targets
                            (default false)
    :arg bool stack-trace: append --stack-trace to all build targets
                           (default false)
    :arg bool verbose: append --verbose to all build targets
                       (default false)
    :arg bool refresh-dependencies: append --refresh-dependencies to all
                                    build targets (default false)
    Example::
      builders:
        - grails:
            use-wrapper: "true"
            name: "grails-2.2.2"
            force-upgrade: "true"
            non-interactive: "true"
            targets: "war ear"
            server-port: "8003"
            work-dir: "./grails-work"
            project-dir: "./project-work"
            base-dir: "./grails/project"
            properties: "program.name=foo"
            plain-output: "true"
            stack-trace: "true"
            verbose: "true"
            refresh-dependencies: "true"
    """
    grails = XML.SubElement(xml_parent,
                            'com.g2one.hudson.grails.GrailsBuilder')
    XML.SubElement(grails, 'targets').text = data['targets']
    # String settings: (xml tag, yaml key, default when absent).
    string_opts = (
        ('name', 'name', '(Default)'),
        ('grailsWorkDir', 'work-dir', ''),
        ('projectWorkDir', 'project-dir', ''),
        ('projectBaseDir', 'base-dir', ''),
        ('serverPort', 'server-port', ''),
        ('properties', 'properties', ''),
    )
    for tag, key, fallback in string_opts:
        XML.SubElement(grails, tag).text = data.get(key, fallback)
    # Boolean flags, serialized as 'true'/'false' strings.
    bool_opts = (
        ('forceUpgrade', 'force-upgrade'),
        ('nonInteractive', 'non-interactive'),
        ('useWrapper', 'use-wrapper'),
        ('plainOutput', 'plain-output'),
        ('stackTrace', 'stack-trace'),
        ('verbose', 'verbose'),
        ('refreshDependencies', 'refresh-dependencies'),
    )
    for tag, key in bool_opts:
        XML.SubElement(grails, tag).text = str(data.get(key, False)).lower()
def sbt(parser, xml_parent, data):
    """yaml: sbt
    Execute a sbt build step. Requires the Jenkins `Sbt Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/sbt+plugin>`_
    :arg str name: Select a sbt installation to use. If no name is
                   provided, the first in the list of defined SBT
                   builders will be used. (default to first in list)
    :arg str jvm-flags: Parameters to pass to the JVM (default '')
    :arg str actions: Select the sbt tasks to execute (default '')
    :arg str sbt-flags: Add flags to SBT launcher
                        (default '-Dsbt.log.noformat=true')
    :arg str subdir-path: Path relative to workspace to run sbt in (default '')
    Example::
      builders:
        - sbt:
            name: "default"
            actions: "clean package"
            jvm-flags: "-Xmx8G"
    """
    sbt = XML.SubElement(xml_parent,
                         'org.jvnet.hudson.plugins.SbtPluginBuilder')
    # (xml tag, yaml key, default) -- emitted in plugin-expected order.
    options = (('name', 'name', ''),
               ('jvmFlags', 'jvm-flags', ''),
               ('sbtFlags', 'sbt-flags', '-Dsbt.log.noformat=true'),
               ('actions', 'actions', ''),
               ('subdirPath', 'subdir-path', ''))
    for tag, key, fallback in options:
        XML.SubElement(sbt, tag).text = data.get(key, fallback)
class Builders(jenkins_jobs.modules.base.Base):
    """Jenkins Job Builder module that renders the build-step sections.

    Dispatches every entry found under 'prebuilders', 'builders' and
    'postbuilders' to the registered 'builder' components.
    """
    sequence = 60  # ordering of this module within overall XML generation
    component_type = 'builder'
    component_list_type = 'builders'
    def gen_xml(self, parser, xml_parent, data):
        # Emit one container element per alias that is present in the
        # job definition, filling it via the component registry.
        for alias in ['prebuilders', 'builders', 'postbuilders']:
            if alias in data:
                builders = XML.SubElement(xml_parent, alias)
                for builder in data[alias]:
                    self.registry.dispatch('builder', parser, builders,
                                           builder)
        # Make sure freestyle projects always have a <builders> entry
        # or Jenkins v1.472 (at least) will NPE.
        project_type = data.get('project-type', 'freestyle')
        if project_type in ('freestyle', 'matrix') and 'builders' not in data:
            XML.SubElement(xml_parent, 'builders')
| |
""" Commandline scripts """
import argparse
import getpass
import gzip
import json
import logging
import os
import sys
from base64 import b64encode
import transaction
from jinja2 import Template
from pkg_resources import resource_string # pylint: disable=E0611
from pyramid.paster import bootstrap
from pypicloud.access import SCHEMES, get_pwd_context
def gen_password(argv=None):
    """Generate a salted password"""
    if argv is None:
        argv = sys.argv[1:]
    # Pass the doc as 'description'; the original passed it positionally,
    # which argparse interprets as 'prog' (the program name shown in the
    # usage line). This matches the sibling commands in this module.
    parser = argparse.ArgumentParser(description=gen_password.__doc__)
    parser.add_argument("-i", help="Read password from stdin", action="store_true")
    parser.add_argument("-r", help="Number of rounds", type=int)
    parser.add_argument(
        "-s",
        help="Hashing scheme (default %(default)s)",
        default=SCHEMES[0],
        choices=SCHEMES,
    )
    args = parser.parse_args(argv)
    if args.i:
        password = sys.stdin.readline()
    else:
        # Interactive double-entry prompt.
        password = _get_password()
    print(_gen_password(password, args.s, args.r))
def _get_password():
    """Prompt user for a password twice for safety"""
    while True:
        first = getpass.getpass()
        second = getpass.getpass()
        if first != second:
            print("Passwords do not match!")
            continue
        return first
def _gen_password(password: str, scheme: str = None, rounds: int = None) -> str:
    """Hash *password* using the configured passlib context.

    scheme/rounds default to the context's own defaults when None.
    """
    pwd_context = get_pwd_context(scheme, rounds)
    return pwd_context.hash(password)
# Sentinel meaning "no default supplied", so that None (and other falsy
# values) remain valid defaults for prompt().
NO_DEFAULT = object()
def wrapped_input(msg):
    """Wraps input for tests"""
    # Indirection point so tests can monkeypatch user input.
    return input(msg)
def prompt(msg, default=NO_DEFAULT, validate=None):
    """Prompt user for input"""
    # Re-ask until a validated answer (or an allowed default) is available.
    while True:
        answer = wrapped_input(msg + " ").strip()
        if answer:
            if validate is None or validate(answer):
                return answer
        elif default is not NO_DEFAULT:
            return default
def prompt_option(text, choices, default=NO_DEFAULT):
    """Prompt the user to choose one of a list of options"""
    while True:
        # Display a 1-based menu of the available choices.
        for num, label in enumerate(choices, 1):
            print("[%d] %s" % (num, label))
        response = prompt(text, default=default)
        try:
            return choices[int(response) - 1]
        except (ValueError, IndexError):
            print("Invalid choice\n")
def promptyn(msg, default=None):
    """Display a blocking prompt until the user confirms"""
    while True:
        # Capitalize the letter matching the default answer.
        yes = "Y" if default else "y"
        no = "n" if (default or default is None) else "N"
        confirm = prompt("%s [%s/%s]" % (msg, yes, no), "").lower()
        if confirm in ("y", "yes"):
            return True
        if confirm in ("n", "no"):
            return False
        if not confirm and default is not None:
            return default
def bucket_validate(name):
    """Check for valid bucket name"""
    # First failing rule is reported; remaining rules are not checked.
    rules = (
        (name.startswith("."), "Bucket names cannot start with '.'"),
        (name.endswith("."), "Bucket names cannot end with '.'"),
        (".." in name, "Bucket names cannot contain '..'"),
    )
    for violated, message in rules:
        if violated:
            print(message)
            return False
    return True
def storage_account_name_validate(name):
    """Check for valid storage account name"""
    if "." not in name:
        return True
    print("Storage account names cannot contain '.'")
    return False
def make_config(argv=None):
    """Create a server config file"""
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=make_config.__doc__)
    # The environment flags are mutually exclusive; when none is given
    # the user is asked interactively below.
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-d", action="store_true", help="Create config file for development"
    )
    group.add_argument("-t", action="store_true", help="Create config file for testing")
    group.add_argument(
        "-p", action="store_true", help="Create config file for production"
    )
    group.add_argument(
        "-r", action="store_true", help="Create config file for docker image"
    )
    parser.add_argument(
        "outfile", nargs="?", help="Name of output file (default stdout)"
    )
    args = parser.parse_args(argv)
    # Refuse to clobber an existing file without confirmation.
    if args.outfile is not None and os.path.exists(args.outfile):
        msg = "'%s' already exists. Overwrite?" % args.outfile
        if not promptyn(msg, False):
            return
    if args.d:
        env = "dev"
    elif args.t:
        env = "test"
    elif args.p:
        env = "prod"
    elif args.r:
        env = "docker"
    else:
        env = prompt_option("What is this config file for?", ["dev", "test", "prod"])
    # 'data' collects the variables rendered into the Jinja2 template.
    data = {
        "env": env,
        "workdir": "/var/lib/pypicloud" if env == "docker" else r"%(here)s",
    }
    data["reload_templates"] = env == "dev"
    storage = prompt_option(
        "Where do you want to store your packages?",
        ["s3", "gcs", "filesystem", "azure-blob"],
    )
    if storage == "filesystem":
        storage = "file"
    data["storage"] = storage
    if storage == "s3":
        # Prefer AWS credentials from the environment; only prompt when absent.
        if "AWS_ACCESS_KEY_ID" in os.environ:
            data["access_key"] = os.environ["AWS_ACCESS_KEY_ID"]
        else:
            data["access_key"] = prompt("AWS access key id?")
        if "AWS_SECRET_ACCESS_KEY" in os.environ:
            data["secret_key"] = os.environ["AWS_SECRET_ACCESS_KEY"]
        else:
            data["secret_key"] = prompt("AWS secret access key?")
        data["s3_bucket"] = prompt("S3 bucket name?", validate=bucket_validate)
        # Dotted bucket names require an explicit region for S3 addressing.
        if "." in data["s3_bucket"]:
            data["bucket_region"] = prompt("S3 bucket region?")
    if storage == "gcs":
        data["gcs_bucket"] = prompt("GCS bucket name?", validate=bucket_validate)
    if storage == "azure-blob":
        data["storage_account_name"] = prompt(
            "Storage account name?", validate=storage_account_name_validate
        )
        data["storage_account_key"] = prompt("Storage account key?")
        data["storage_container_name"] = prompt("Container name?")
    # Random keys for the beaker session cookies.
    data["encrypt_key"] = b64encode(os.urandom(32)).decode("utf-8")
    data["validate_key"] = b64encode(os.urandom(32)).decode("utf-8")
    data["admin"] = prompt("Admin username?")
    data["password"] = _gen_password(_get_password())
    data["session_secure"] = env == "prod"
    data["env"] = env
    if env == "dev" or env == "test":
        data["wsgi"] = "waitress"
    else:
        # hasattr(sys, 'real_prefix') detects an active virtualenv.
        if hasattr(sys, "real_prefix"):
            data["venv"] = sys.prefix
        data["wsgi"] = "uwsgi"
    tmpl_str = resource_string("pypicloud", "templates/config.ini.jinja2").decode(
        "utf-8"
    )
    template = Template(tmpl_str)
    config_file = template.render(**data)
    if args.outfile is None:
        sys.stdout.write(config_file)
        sys.stdout.write(os.linesep)
    else:
        with open(args.outfile, "w", encoding="utf-8") as ofile:
            ofile.write(config_file)
        print("Config file written to '%s'" % args.outfile)
def migrate_packages(argv=None):
    """
    Migrate packages from one storage backend to another
    Create two config.ini files that are configured to use different storage
    backends. All packages will be migrated from the storage backend in the
    first to the storage backend in the second.
    ex: pypicloud-migrate-packages file_config.ini s3_config.ini
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description=migrate_packages.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config_from", help="Name of config file to migrate from")
    parser.add_argument("config_to", help="Name of config file to migrate to")
    args = parser.parse_args(argv)
    logging.basicConfig()
    # Boot both pyramid apps to obtain their configured storage backends.
    old_env = bootstrap(args.config_from)
    old_storage = old_env["request"].db.storage
    all_packages = old_storage.list()
    new_env = bootstrap(args.config_to)
    new_storage = new_env["request"].db.storage
    for package in all_packages:
        print("Migrating %s" % package)
        with old_storage.open(package) as data:
            # we need to recalculate the path for the new storage config
            package.data.pop("path", None)
            new_storage.upload(package, data)
def export_access(argv=None):
    """Dump the access control data to a universal format"""
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=export_access.__doc__)
    parser.add_argument("config", help="Name of config file")
    parser.add_argument("-o", help="Name of output file")
    args = parser.parse_args(argv)
    logging.basicConfig()
    env = bootstrap(args.config)
    access = env["request"].access
    data = access.dump()
    if args.o:
        # gzip.open defaults to binary mode, but json.dump writes str
        # chunks -- opening in text mode ("wt") is required on Python 3,
        # otherwise this raises TypeError.
        with gzip.open(args.o, "wt", encoding="utf-8") as ofile:
            json.dump(data, ofile)
    else:
        print(json.dumps(data, indent=2))
def import_access(argv=None):
    """
    Load the access control data from a dump file or stdin
    This operation is idempotent and graceful. It will not clobber your
    existing ACL.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description=import_access.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config", help="Name of config file")
    parser.add_argument("-i", help="Name of input file")
    args = parser.parse_args(argv)
    logging.basicConfig()
    if args.i:
        # Binary read is fine here: json.load accepts bytes (Python 3.6+).
        with gzip.open(args.i, "r") as ifile:
            data = json.load(ifile)
    else:
        print("Reading data from stdin...")
        data = json.load(sys.stdin)
    env = bootstrap(args.config)
    access = env["request"].access
    result = access.load(data)
    # Persist the changes made by the access backend.
    transaction.commit()
    if result is not None:
        print(result)
| |
#!/usr/bin/python -u
import sys
import os
import subprocess
import time
from datetime import datetime
import shutil
import tempfile
import hashlib
import re
import logging
import argparse
################
#### Telegraf Variables
################
# Packaging variables
PACKAGE_NAME = "telegraf"
INSTALL_ROOT_DIR = "/usr/bin"
LOG_DIR = "/var/log/telegraf"
SCRIPT_DIR = "/usr/lib/telegraf/scripts"
CONFIG_DIR = "/etc/telegraf"
CONFIG_DIR_D = "/etc/telegraf/telegraf.d"
LOGROTATE_DIR = "/etc/logrotate.d"
# Files shipped inside the repository that get installed by the packages.
INIT_SCRIPT = "scripts/init.sh"
SYSTEMD_SCRIPT = "scripts/telegraf.service"
LOGROTATE_SCRIPT = "etc/logrotate.d/telegraf"
DEFAULT_CONFIG = "etc/telegraf.conf"
DEFAULT_WINDOWS_CONFIG = "etc/telegraf_windows.conf"
POSTINST_SCRIPT = "scripts/post-install.sh"
PREINST_SCRIPT = "scripts/pre-install.sh"
POSTREMOVE_SCRIPT = "scripts/post-remove.sh"
PREREMOVE_SCRIPT = "scripts/pre-remove.sh"
# Default AWS S3 bucket for uploads
DEFAULT_BUCKET = "dl.influxdata.com/telegraf/artifacts"
CONFIGURATION_FILES = [
    CONFIG_DIR + '/telegraf.conf',
    LOGROTATE_DIR + '/telegraf',
]
# META-PACKAGE VARIABLES
PACKAGE_LICENSE = "MIT"
PACKAGE_URL = "https://github.com/influxdata/telegraf"
MAINTAINER = "support@influxdb.com"
VENDOR = "InfluxData"
DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB."
# SCRIPT START
prereqs = [ 'git', 'go' ]
go_vet_command = "go tool vet -composites=true ./"
optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ]
# Arguments shared by every fpm packaging invocation (deb/rpm); the
# placeholders are filled from the metadata constants above.
fpm_common_args = "-f -s dir --log error \
--vendor {} \
--url {} \
--license {} \
--maintainer {} \
--config-files {} \
--config-files {} \
--after-install {} \
--before-install {} \
--after-remove {} \
--before-remove {} \
--description \"{}\"".format(
    VENDOR,
    PACKAGE_URL,
    PACKAGE_LICENSE,
    MAINTAINER,
    CONFIG_DIR + '/telegraf.conf',
    LOGROTATE_DIR + '/telegraf',
    POSTINST_SCRIPT,
    PREINST_SCRIPT,
    POSTREMOVE_SCRIPT,
    PREREMOVE_SCRIPT,
    DESCRIPTION)
# Build targets: output binary name -> Go package path.
targets = {
    'telegraf' : './cmd/telegraf',
}
# Platform -> supported architectures / package formats.
supported_builds = {
    "windows": [ "amd64", "i386" ],
    "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x"],
    "freebsd": [ "amd64", "i386" ]
}
supported_packages = {
    "linux": [ "deb", "rpm", "tar" ],
    "windows": [ "zip" ],
    "freebsd": [ "tar" ]
}
# Version used for nightly builds when no git tag is available.
next_version = '1.7.0'
################
#### Telegraf Functions
################
def print_banner():
    """Log the Telegraf ASCII-art banner at INFO level."""
    logging.info("""
 _____ _ __
 /__ \\___| | ___ __ _ _ __ __ _ / _|
 / /\\/ _ \\ |/ _ \\/ _` | '__/ _` | |_
/ / | __/ | __/ (_| | | | (_| | _|
\\/ \\___|_|\\___|\\__, |_| \\__,_|_|
 |___/
 Build Script
""")
def create_package_fs(build_root):
    """Create a filesystem structure to mimic the package filesystem.
    """
    logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root))
    # The configured paths are absolute; strip the leading '/' ([1:]) so
    # os.path.join nests them under build_root instead of discarding it.
    package_dirs = (INSTALL_ROOT_DIR, LOG_DIR, SCRIPT_DIR, CONFIG_DIR,
                    LOGROTATE_DIR, CONFIG_DIR_D)
    for abs_path in package_dirs:
        target = os.path.join(build_root, abs_path[1:])
        os.makedirs(target)
        os.chmod(target, 0o755)
def package_scripts(build_root, config_only=False, windows=False):
    """Copy the necessary scripts and configuration files to the package
    filesystem.
    """
    if config_only or windows:
        # Config-only packages (and all Windows packages) ship just the
        # telegraf.conf file at the package root.
        logging.info("Copying configuration to build directory")
        if windows:
            shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf"))
        else:
            shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf"))
        os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644)
    else:
        # Full packages: init/systemd scripts, logrotate config and the
        # default telegraf.conf, each placed at its install path ([1:]
        # strips the leading '/' so paths nest under build_root).
        logging.info("Copying scripts and configuration to build directory")
        shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]), 0o644)
        shutil.copyfile(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))
        os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644)
        shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"))
        os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644)
        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"))
        os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644)
def run_generate():
    """Run the code-generation step (a no-op for Telegraf)."""
    # Telegraf has no generate step; report success unconditionally.
    return True
def go_get(branch, update=False, no_uncommitted=False):
    """Retrieve build dependencies or restore pinned dependencies.

    'branch' and 'update' are accepted for interface compatibility but
    are not used by this implementation.
    """
    # Refuse to touch dependencies over uncommitted work when asked.
    if local_changes() and no_uncommitted:
        logging.error("There are uncommitted changes in the current directory.")
        return False
    # Bootstrap the gdm dependency manager if it is not on the PATH.
    if not check_path_for("gdm"):
        logging.info("Downloading `gdm`...")
        get_command = "go get github.com/sparrc/gdm"
        run(get_command)
    logging.info("Retrieving dependencies with `gdm`...")
    run("{}/bin/gdm restore -v".format(os.environ.get("GOPATH",
                                       os.path.expanduser("~/go"))))
    return True
def run_tests(race, parallel, timeout, no_vet):
    """Run the test suite (currently a no-op for Telegraf).

    All arguments are accepted for interface compatibility and ignored.
    """
    return True
################
#### All Telegraf-specific content above this line
################
def run(command, allow_failure=False, shell=False):
    """Run shell command (convenience wrapper around subprocess).

    Returns the command's decoded, stripped combined stdout/stderr on
    success. On failure: returns None when allow_failure is True,
    otherwise logs the error and exits the process with status 1.
    """
    logging.debug("{}".format(command))
    try:
        if shell:
            out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=shell)
        else:
            out = subprocess.check_output(command.split(), stderr=subprocess.STDOUT)
        return out.decode('utf-8').strip()
    except subprocess.CalledProcessError as e:
        # Command ran but exited non-zero; its output is on the exception.
        if allow_failure:
            # logging.warn is a deprecated alias; use warning().
            logging.warning("Command '{}' failed with error: {}".format(command, e.output))
            return None
        logging.error("Command '{}' failed with error: {}".format(command, e.output))
        sys.exit(1)
    except OSError as e:
        # Command could not be executed at all (e.g. binary not found).
        # The original returned the never-assigned 'out' here, which was
        # always None; return None explicitly.
        if allow_failure:
            logging.warning("Command '{}' failed with error: {}".format(command, e))
            return None
        logging.error("Command '{}' failed with error: {}".format(command, e))
        sys.exit(1)
def create_temp_dir(prefix = None):
    """ Create temporary directory with optional prefix.
    """
    # Default prefix derives from the package name, e.g. 'telegraf-build.'.
    chosen = "{}-build.".format(PACKAGE_NAME) if prefix is None else prefix
    return tempfile.mkdtemp(prefix=chosen)
def increment_minor_version(version):
    """Return the version with the minor version incremented and patch
    version set to zero.
    """
    parts = version.split('.')
    # Anything that is not plain X.Y.Z is passed through unchanged.
    if len(parts) != 3:
        logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version))
        return version
    major, minor, _patch = parts
    bumped = '.'.join([major, str(int(minor) + 1), '0'])
    logging.debug("Incremented version from '{}' to '{}'.".format(version, bumped))
    return bumped
def get_current_version_tag():
    """Retrieve the raw git version tag.

    Returns None (via run's allow_failure path) when HEAD is not exactly
    on a tag.
    """
    # shell=True so the '2>/dev/null' redirection is honored.
    version = run("git describe --exact-match --tags 2>/dev/null",
                  allow_failure=True, shell=True)
    return version
def get_current_version():
    """Parse version information from git tag output.
    """
    tag = get_current_version_tag()
    if not tag:
        return None
    # Strip a leading 'v' prefix.
    if tag.startswith('v'):
        tag = tag[1:]
    # Normalize '-' and '_' separators to '~' (package-version friendly).
    return tag.replace("-", "~").replace("_", "~")
def get_current_commit(short=False):
    """Retrieve the current git commit.
    """
    # Short form uses an abbreviated hash; long form the full SHA.
    if short:
        command = "git log --pretty=format:'%h' -n 1"
    else:
        command = "git rev-parse HEAD"
    return run(command).strip('\'\n\r ')
def get_current_branch():
    """Retrieve the current git branch.
    """
    return run("git rev-parse --abbrev-ref HEAD").strip()
def local_changes():
    """Return True if there are local un-committed changes.
    """
    diff = run("git diff-files --ignore-submodules --").strip()
    return len(diff) > 0
def get_system_arch():
    """Retrieve current system architecture.
    """
    machine = os.uname()[4]
    if machine == "x86_64":
        return "amd64"
    if machine == "386":
        return "i386"
    if "arm64" in machine:
        return "arm64"
    if 'arm' in machine:
        # Collapse full ARM arch names (eg 'armv7l') to plain 'arm'
        return "arm"
    return machine
def get_system_platform():
    """Retrieve current system platform.
    """
    # All Linux variants ('linux', 'linux2', ...) collapse to 'linux'.
    return "linux" if sys.platform.startswith("linux") else sys.platform
def get_go_version():
    """Retrieve version information for Go.

    Returns the version string (e.g. '1.10.1') or None when it cannot
    be parsed.
    """
    out = run("go version")
    # Raw string for the pattern: '\S' in a normal string literal is an
    # invalid escape sequence (DeprecationWarning on modern Python).
    matches = re.search(r'go version go(\S+)', out)
    if matches is not None:
        return matches.groups()[0].strip()
    return None
def check_path_for(b):
    """Check the user's PATH for the provided binary.

    Returns the full path of the first matching executable, or None if
    it is not found.
    """
    def is_exe(fpath):
        # Must be a regular file and executable by the current user.
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        full_path = os.path.join(path, b)
        # Use the helper (it was defined but unused in the original,
        # which duplicated the same condition inline).
        if is_exe(full_path):
            return full_path
    return None
def check_environ(build_dir = None):
    """Check environment for common Go variables.
    """
    logging.info("Checking environment...")
    for var in [ "GOPATH", "GOBIN", "GOROOT" ]:
        logging.debug("Using '{}' for {}".format(os.environ.get(var), var))
    # Warn when building outside the GOPATH tree (pre-modules Go setups).
    gopath = os.environ.get("GOPATH")
    if build_dir is None and gopath and gopath not in os.getcwd():
        logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.")
    return True
def check_prereqs():
    """Check user path for required dependencies.
    """
    logging.info("Checking for dependencies...")
    # Fail fast on the first missing dependency.
    for req in prereqs:
        if check_path_for(req):
            continue
        logging.error("Could not find dependency: {}".format(req))
        return False
    return True
def upload_packages(packages, bucket_name=None, overwrite=False):
    """Upload provided package output to AWS S3.

    Returns False when boto is unavailable, True otherwise. Existing
    keys are skipped unless overwrite is True.
    """
    logging.debug("Uploading files to bucket '{}': {}".format(bucket_name, packages))
    try:
        # boto is an optional dependency; import lazily and bail politely.
        import boto
        from boto.s3.key import Key
        from boto.s3.connection import OrdinaryCallingFormat
        logging.getLogger("boto").setLevel(logging.WARNING)
    except ImportError:
        logging.warn("Cannot upload packages without 'boto' Python library!")
        return False
    logging.info("Connecting to AWS S3...")
    # Up the number of attempts to 10 from default of 1
    boto.config.add_section("Boto")
    boto.config.set("Boto", "metadata_service_num_attempts", "10")
    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())
    if bucket_name is None:
        bucket_name = DEFAULT_BUCKET
    # Only the first path segment is the actual bucket; the rest is a
    # key prefix handled below.
    bucket = c.get_bucket(bucket_name.split('/')[0])
    for p in packages:
        if '/' in bucket_name:
            # Allow for nested paths within the bucket name (ex:
            # bucket/folder). Assuming forward-slashes as path
            # delimiter.
            name = os.path.join('/'.join(bucket_name.split('/')[1:]),
                                os.path.basename(p))
        else:
            name = os.path.basename(p)
        logging.debug("Using key: {}".format(name))
        if bucket.get_key(name) is None or overwrite:
            logging.info("Uploading file {}".format(name))
            k = Key(bucket)
            k.key = name
            if overwrite:
                n = k.set_contents_from_filename(p, replace=True)
            else:
                n = k.set_contents_from_filename(p, replace=False)
            # Artifacts are published for public download.
            k.make_public()
        else:
            logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name))
    return True
def go_list(vendor=False, relative=False):
    """
    Return a list of packages
    If vendor is False vendor package are not included
    If relative is True the package prefix defined by PACKAGE_URL is stripped
    """
    p = subprocess.Popen(["go", "list", "./..."], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # Popen.communicate() returns bytes on Python 3; decode before
    # splitting (the original called out.split('\n') on bytes, which
    # raises TypeError under Python 3).
    packages = out.decode('utf-8').split('\n')
    # Drop the trailing empty entry produced by the final newline.
    if packages[-1] == '':
        packages = packages[:-1]
    if not vendor:
        # Avoid reusing 'p' (the Popen handle) as the loop variable.
        packages = [pkg for pkg in packages if '/vendor/' not in pkg]
    if relative:
        relative_pkgs = []
        for pkg in packages:
            rel = pkg.replace(PACKAGE_URL, '.')
            if rel != '.':
                relative_pkgs.append(rel)
        packages = relative_pkgs
    return packages
def build(version=None,
          platform=None,
          arch=None,
          nightly=False,
          race=False,
          clean=False,
          outdir=".",
          tags=None,
          static=False):
    """Build each target for the specified architecture and platform.

    Returns True on success, False on an invalid ARM architecture.
    """
    # Avoid a mutable default argument ('tags=[]' is shared between
    # calls); None is normalized to a fresh list here. Backward
    # compatible: an omitted 'tags' still behaves as an empty list.
    if tags is None:
        tags = []
    logging.info("Starting build for {}/{}...".format(platform, arch))
    logging.info("Using Go version: {}".format(get_go_version()))
    logging.info("Using git branch: {}".format(get_current_branch()))
    logging.info("Using git commit: {}".format(get_current_commit()))
    if static:
        logging.info("Using statically-compiled output.")
    if race:
        logging.info("Race is enabled.")
    if len(tags) > 0:
        logging.info("Using build tags: {}".format(','.join(tags)))
    logging.info("Sending build output to: {}".format(outdir))
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif clean and outdir != '/' and outdir != ".":
        # Never wipe '/' or the current directory, even when asked.
        logging.info("Cleaning build directory '{}' before building.".format(outdir))
        shutil.rmtree(outdir)
        os.makedirs(outdir)
    logging.info("Using version '{}' for build.".format(version))
    # NOTE(review): this temp dir is created but never used below --
    # looks vestigial; kept for behavioral compatibility.
    tmp_build_dir = create_temp_dir()
    for target, path in targets.items():
        logging.info("Building target: {}".format(target))
        build_command = ""
        # Handle static binary output
        if static is True or "static_" in arch:
            if "static_" in arch:
                static = True
                arch = arch.replace("static_", "")
            build_command += "CGO_ENABLED=0 "
        # Handle variations in architecture output
        if arch == "i386" or arch == "i686":
            arch = "386"
        elif "arm64" in arch:
            arch = "arm64"
        elif "arm" in arch:
            arch = "arm"
        build_command += "GOOS={} GOARCH={} ".format(platform, arch)
        if "arm" in arch:
            # Map Debian-style ARM flavors onto GOARM levels.
            if arch == "armel":
                build_command += "GOARM=5 "
            elif arch == "armhf" or arch == "arm":
                build_command += "GOARM=6 "
            elif arch == "arm64":
                # TODO(rossmcdonald) - Verify this is the correct setting for arm64
                build_command += "GOARM=7 "
            else:
                logging.error("Invalid ARM architecture specified: {}".format(arch))
                logging.error("Please specify either 'armel', 'armhf', or 'arm64'.")
                return False
        if platform == 'windows':
            target = target + '.exe'
        build_command += "go build -o {} ".format(os.path.join(outdir, target))
        if race:
            build_command += "-race "
        if len(tags) > 0:
            build_command += "-tags {} ".format(','.join(tags))
        # Strip symbols (-w -s) and stamp version metadata into main.
        ldflags = [
            '-w', '-s',
            '-X', 'main.branch={}'.format(get_current_branch()),
            '-X', 'main.commit={}'.format(get_current_commit(short=True))]
        if version:
            ldflags.append('-X')
            ldflags.append('main.version={}'.format(version))
        build_command += ' -ldflags="{}" '.format(' '.join(ldflags))
        if static:
            build_command += " -a -installsuffix cgo "
        build_command += path
        start_time = datetime.utcnow()
        run(build_command, shell=True)
        end_time = datetime.utcnow()
        logging.info("Time taken: {}s".format((end_time - start_time).total_seconds()))
    return True
def generate_sha256_from_file(path):
    """Return the SHA256 hex digest of the file at *path*.

    Reads the file in fixed-size chunks so arbitrarily large artifacts can
    be hashed without loading them fully into memory.
    """
    m = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            m.update(chunk)
    return m.hexdigest()
def generate_sig_from_file(path):
    """Generate a detached, ASCII-armored GPG signature for the file at *path*.

    Honors GNUPG_HOME as the GPG home directory when set.  Returns False when
    no gpg binary is available (signing is skipped), True otherwise.
    """
    logging.debug("Generating GPG signature for file: {}".format(path))
    gpg_path = check_path_for('gpg')
    if gpg_path is None:
        # logging.warn is a deprecated alias; use logging.warning.
        logging.warning("gpg binary not found on path! Skipping signature creation.")
        return False
    if os.environ.get("GNUPG_HOME") is not None:
        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path))
    else:
        run('gpg --armor --detach-sign --yes {}'.format(path))
    return True
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.

    Walks ``build_output`` ({platform: {arch: dir}}), stages each
    platform/arch build tree under a temporary directory, then produces
    every package type listed for that platform in ``supported_packages``
    (tar/zip via shell tools, others via the ``fpm`` gem).

    Returns the list of produced package file paths.  The temporary
    staging directory is always removed, even on failure.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]
                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          PACKAGE_NAME)
                os.makedirs(build_root)
                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)
                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)
                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    elif package_type == "rpm" and arch == 'armhf':
                        # rpm uses its own name for the hard-float ARMv6 target
                        package_arch = 'armv6hl'
                    else:
                        package_arch = arch
                    if not version:
                        # No explicit version: synthesize a prerelease one from
                        # the upcoming version and the current commit.
                        package_version = "{}~{}".format(next_version, get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]
                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                    if nightly:
                        if static or "static_" in arch:
                            name = '{}-static-nightly_{}_{}'.format(name,
                                                                    platform,
                                                                    package_arch)
                        else:
                            name = '{}-nightly_{}_{}'.format(name,
                                                             platform,
                                                             package_arch)
                    else:
                        if static or "static_" in arch:
                            name = '{}-{}-static_{}_{}'.format(name,
                                                               package_version,
                                                               platform,
                                                               package_arch)
                        else:
                            name = '{}-{}_{}_{}'.format(name,
                                                        package_version,
                                                        platform,
                                                        package_arch)
                    current_location = os.path.join(os.getcwd(), current_location)
                    if package_type == 'tar':
                        tar_command = "cd {} && tar -cvzf {}.tar.gz ./*".format(package_build_root, name)
                        run(tar_command, shell=True)
                        run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                        outfile = os.path.join(current_location, name + ".tar.gz")
                        outfiles.append(outfile)
                    elif package_type == 'zip':
                        zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                        run(zip_command, shell=True)
                        run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                        outfile = os.path.join(current_location, name + ".zip")
                        outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and static or "static_" in arch:
                        # Static builds only get tar/zip; skip deb/rpm/etc.
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        if package_type == 'rpm' and release and '~' in package_version:
                            package_version, suffix = package_version.split('~', 1)
                            # The ~ indicates that this is a prerelease so we give it a leading 0.
                            package_iteration = "0.%s" % suffix
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT)
                        out = run(fpm_command, shell=True)
                        # fpm prints the produced path as :path=>"..."; scrape it.
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        # Cleanup
        shutil.rmtree(tmp_build_dir)
def main(args):
    """Top-level driver: validate arguments, check out the requested
    branch/commit, optionally run generate/tests, build every requested
    platform/arch combination, then optionally package, sign and upload.

    Returns a process exit code (0 on success, 1 on failure).
    """
    global PACKAGE_NAME
    if args.release and args.nightly:
        logging.error("Cannot be both a nightly and a release.")
        return 1
    if args.nightly:
        # Nightly builds always use iteration 0 so they sort before releases.
        args.iteration = 0
    # Pre-build checks
    check_environ()
    if not check_prereqs():
        return 1
    if args.build_tags is None:
        args.build_tags = []
    else:
        args.build_tags = args.build_tags.split(',')
    # Remember where we started so we can restore the checkout afterwards.
    orig_commit = get_current_commit(short=True)
    orig_branch = get_current_branch()
    if args.platform not in supported_builds and args.platform != 'all':
        logging.error("Invalid build platform: {}".format(args.platform))
        return 1
    build_output = {}
    if args.branch != orig_branch and args.commit != orig_commit:
        logging.error("Can only specify one branch or commit to build from.")
        return 1
    elif args.branch != orig_branch:
        logging.info("Moving to git branch: {}".format(args.branch))
        run("git checkout {}".format(args.branch))
    elif args.commit != orig_commit:
        logging.info("Moving to git commit: {}".format(args.commit))
        run("git checkout {}".format(args.commit))
    if not args.no_get:
        if not go_get(args.branch, update=args.update, no_uncommitted=args.no_uncommitted):
            return 1
    if args.generate:
        if not run_generate():
            return 1
    if args.test:
        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):
            return 1
    platforms = []
    single_build = True
    if args.platform == 'all':
        platforms = supported_builds.keys()
        single_build = False
    else:
        platforms = [args.platform]
    for platform in platforms:
        build_output.update( { platform : {} } )
        archs = []
        if args.arch == "all":
            single_build = False
            archs = supported_builds.get(platform)
        else:
            archs = [args.arch]
        for arch in archs:
            od = args.outdir
            if not single_build:
                # Matrix builds get a separate output dir per platform/arch.
                od = os.path.join(args.outdir, platform, arch)
            if not build(version=args.version,
                         platform=platform,
                         arch=arch,
                         nightly=args.nightly,
                         race=args.race,
                         clean=args.clean,
                         outdir=od,
                         tags=args.build_tags,
                         static=args.static):
                return 1
            build_output.get(platform).update( { arch : od } )
    # Build packages
    if args.package:
        if not check_path_for("fpm"):
            logging.error("FPM ruby gem required for packaging. Stopping.")
            return 1
        packages = package(build_output,
                           args.name,
                           args.version,
                           nightly=args.nightly,
                           iteration=args.iteration,
                           static=args.static,
                           release=args.release)
        if args.sign:
            logging.debug("Generating GPG signatures for packages: {}".format(packages))
            sigs = [] # retain signatures so they can be uploaded with packages
            for p in packages:
                if generate_sig_from_file(p):
                    sigs.append(p + '.asc')
                else:
                    logging.error("Creation of signature for package [{}] failed!".format(p))
                    return 1
            packages += sigs
        if args.upload:
            logging.debug("Files staged for upload: {}".format(packages))
            if args.nightly:
                # Nightlies reuse the same object names, so always overwrite.
                args.upload_overwrite = True
            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):
                return 1
        logging.info("Packages created:")
        for filename in packages:
            logging.info("%s (SHA256=%s)",
                         os.path.basename(filename),
                         generate_sha256_from_file(filename))
    if orig_branch != get_current_branch():
        logging.info("Moving back to original git branch: {}".format(args.branch))
        run("git checkout {}".format(orig_branch))
    return 0
if __name__ == '__main__':
    # Configure logging before building the parser: several argument
    # defaults below shell out to git and may log.
    LOG_LEVEL = logging.INFO
    if '--debug' in sys.argv[1:]:
        LOG_LEVEL = logging.DEBUG
    log_format = '[%(levelname)s] %(funcName)s: %(message)s'
    logging.basicConfig(level=LOG_LEVEL,
                        format=log_format)
    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')
    parser.add_argument('--verbose','-v','--debug',
                        action='store_true',
                        help='Use debug output')
    parser.add_argument('--outdir', '-o',
                        metavar='<output directory>',
                        default='./build/',
                        type=os.path.abspath,
                        help='Output directory')
    parser.add_argument('--name', '-n',
                        metavar='<name>',
                        default=PACKAGE_NAME,
                        type=str,
                        help='Name to use for package name (when package is specified)')
    parser.add_argument('--arch',
                        metavar='<amd64|i386|armhf|arm64|armel|all>',
                        type=str,
                        default=get_system_arch(),
                        help='Target architecture for build output')
    parser.add_argument('--platform',
                        metavar='<linux|darwin|windows|all>',
                        type=str,
                        default=get_system_platform(),
                        help='Target platform for build output')
    parser.add_argument('--branch',
                        metavar='<branch>',
                        type=str,
                        default=get_current_branch(),
                        help='Build from a specific branch')
    parser.add_argument('--commit',
                        metavar='<commit>',
                        type=str,
                        default=get_current_commit(short=True),
                        help='Build from a specific commit')
    parser.add_argument('--version',
                        metavar='<version>',
                        type=str,
                        default=get_current_version(),
                        help='Version information to apply to build output (ex: 0.12.0)')
    parser.add_argument('--iteration',
                        metavar='<package iteration>',
                        type=str,
                        default="1",
                        help='Package iteration to apply to build output (defaults to 1)')
    parser.add_argument('--stats',
                        action='store_true',
                        help='Emit build metrics (requires InfluxDB Python client)')
    parser.add_argument('--stats-server',
                        metavar='<hostname:port>',
                        type=str,
                        help='Send build stats to InfluxDB using provided hostname and port')
    parser.add_argument('--stats-db',
                        metavar='<database name>',
                        type=str,
                        help='Send build stats to InfluxDB using provided database name')
    parser.add_argument('--nightly',
                        action='store_true',
                        help='Mark build output as nightly build (will increment the minor version)')
    parser.add_argument('--update',
                        action='store_true',
                        help='Update build dependencies prior to building')
    parser.add_argument('--package',
                        action='store_true',
                        help='Package binary output')
    parser.add_argument('--release',
                        action='store_true',
                        help='Mark build output as release')
    parser.add_argument('--clean',
                        action='store_true',
                        help='Clean output directory before building')
    parser.add_argument('--no-get',
                        action='store_true',
                        help='Do not retrieve pinned dependencies when building')
    parser.add_argument('--no-uncommitted',
                        action='store_true',
                        help='Fail if uncommitted changes exist in the working directory')
    parser.add_argument('--upload',
                        action='store_true',
                        help='Upload output packages to AWS S3')
    # BUG FIX: the help text below was a copy-paste of --upload's; it now
    # describes what the flag actually does.
    parser.add_argument('--upload-overwrite','-w',
                        action='store_true',
                        help='Overwrite existing packages in AWS S3 when uploading')
    parser.add_argument('--bucket',
                        metavar='<S3 bucket name>',
                        type=str,
                        default=DEFAULT_BUCKET,
                        help='Destination bucket for uploads')
    parser.add_argument('--generate',
                        action='store_true',
                        help='Run "go generate" before building')
    parser.add_argument('--build-tags',
                        metavar='<tags>',
                        help='Optional build tags to use for compilation')
    parser.add_argument('--static',
                        action='store_true',
                        help='Create statically-compiled binary output')
    parser.add_argument('--sign',
                        action='store_true',
                        help='Create GPG detached signatures for packages (when package is specified)')
    parser.add_argument('--test',
                        action='store_true',
                        help='Run tests (does not produce build output)')
    parser.add_argument('--no-vet',
                        action='store_true',
                        help='Do not run "go vet" when running tests')
    parser.add_argument('--race',
                        action='store_true',
                        help='Enable race flag for build output')
    parser.add_argument('--parallel',
                        metavar='<num threads>',
                        type=int,
                        help='Number of tests to run simultaneously')
    parser.add_argument('--timeout',
                        metavar='<timeout>',
                        type=str,
                        help='Timeout for tests before failing')
    args = parser.parse_args()
    print_banner()
    sys.exit(main(args))
| |
"""
pghoard
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
from __future__ import print_function
from .common import lzma_decompressor, lzma_open_read, default_log_format_str
from .errors import Error
from psycopg2.extensions import adapt
from requests import Session
import argparse
import datetime
import errno
import logging
import os
import random
import shutil
import socket
import sys
import tarfile
import time
try:
from . object_storage import google as google_storage
except ImportError as ex:
google_storage = ex
try:
from . object_storage import azure as azure_storage
except ImportError as ex:
azure_storage = ex
try:
from . object_storage import s3 as s3_storage
except ImportError as ex:
s3_storage = ex
class RestoreError(Exception):
    """Raised when a basebackup restore operation cannot be completed."""
def store_response_to_file(filepath, response):
    """Stream an HTTP response body to *filepath*, LZMA-decompressing on the fly."""
    decompressor = lzma_decompressor()
    with open(filepath, "wb") as output:
        for piece in response.iter_content(chunk_size=8192):
            # Keep-alive heartbeats arrive as empty chunks; skip them.
            if not piece:
                continue
            output.write(decompressor.decompress(piece))
def create_pgdata_dir(pgdata):
    """Ensure the PGDATA directory exists with owner-only (0700) permissions.

    Uses EAFP instead of the previous exists()/makedirs() pair so that a
    concurrent creation of the directory cannot race us into an error.
    """
    try:
        os.makedirs(pgdata)
    except OSError as ex:
        # Already existing is fine; any other failure is real.
        if ex.errno != errno.EEXIST:
            raise
    # PostgreSQL refuses to start if PGDATA is accessible to group/others.
    os.chmod(pgdata, 0o700)
def create_recovery_conf(dirpath, site, primary_conninfo):
    """Write a recovery.conf into *dirpath* that points the restored server
    at pghoard for WAL recovery (and at *primary_conninfo* for streaming).

    Values are quoted with psycopg2's adapt() to produce valid conf literals.
    """
    content = """# pghoard created recovery.conf
standby_mode = 'on'
primary_conninfo = {primary_conninfo}
trigger_file = {trigger_file}
restore_command = 'pghoard_restore get %f %p --site {site}'
recovery_target_timeline = 'latest'
""".format(primary_conninfo=adapt(primary_conninfo),
           trigger_file=adapt(os.path.join(dirpath, "trigger_file")),
           site=site)
    filepath = os.path.join(dirpath, "recovery.conf")
    # Use a context manager so the handle is flushed and closed
    # deterministically (the original leaked an open file object).
    with open(filepath, "w") as fp:
        fp.write(content)
class Restore(object):
    """Command-line frontend for listing and restoring pghoard basebackups
    from a pghoard HTTP server, Microsoft Azure, Google Cloud or S3.

    Each backend gets get-basebackup-*/list-basebackups-* sub-commands;
    backends whose client library failed to import at module load time are
    wired to an error handler that reports the missing dependency.
    """
    def __init__(self):
        self.storage = None  # backend instance chosen by the selected sub-command
        self.log = logging.getLogger("PGHoardRestore")
    def missing_libs(self, arg):
        """Handler installed for sub-commands whose backend library is unavailable."""
        raise RestoreError("Command not available: {}: {}".format(arg.ex.__class__.__name__, arg.ex))
    def add_cmd(self, sub, method, precondition=None):
        """Register *method* as a sub-command (underscores become dashes).

        If *precondition* is the ImportError captured at import time the
        sub-command reports the missing library instead of running.
        """
        cmd = sub.add_parser(method.__name__.replace("_", "-"), help=method.__doc__)
        if isinstance(precondition, Exception):
            cmd.set_defaults(func=self.missing_libs, ex=precondition)
        else:
            cmd.set_defaults(func=method)
        return cmd
    def create_parser(self):
        """Build the argparse parser with one sub-command per backend/action."""
        parser = argparse.ArgumentParser()
        sub = parser.add_subparsers(help="sub-command help")
        def host_port_site_args():
            cmd.add_argument("--host", help="pghoard repository host", default="localhost")
            cmd.add_argument("--port", help="pghoard repository port", default=16000)
            cmd.add_argument("--site", help="pghoard site", default="default")
        cmd = self.add_cmd(sub, self.get)
        # BUG FIX: self.get() reads arg.filename and arg.target_path but the
        # positionals were never declared, so "get" always crashed.  They
        # match the recovery.conf restore_command: 'pghoard_restore get %f %p'.
        cmd.add_argument("filename", help="archived file to fetch")
        cmd.add_argument("target_path", help="local path to store the fetched file in")
        host_port_site_args()
        cmd.add_argument("--path-prefix", help="path_prefix (useful for testing)")
        def target_args():
            cmd.add_argument("--basebackup", help="pghoard basebackup", default="latest")
            cmd.add_argument("--primary-conninfo", help="replication.conf primary_conninfo", default="")
            cmd.add_argument("--target-dir", help="pghoard restore target 'pgdata' dir", required=True)
            cmd.add_argument("--overwrite", help="overwrite existing target directory",
                             default=False, action="store_true")
        def azure_args():
            cmd.add_argument("--account", help="Azure storage account name [AZURE_STORAGE_ACCOUNT]",
                             default=os.environ.get("AZURE_STORAGE_ACCOUNT"))
            cmd.add_argument("--access-key", help="Azure storage access key [AZURE_STORAGE_ACCESS_KEY]",
                             default=os.environ.get("AZURE_STORAGE_ACCESS_KEY"))
            cmd.add_argument("--container", help="Azure container name", default="pghoard")
            cmd.add_argument("--site", help="pghoard site", default="default")
        cmd = self.add_cmd(sub, self.get_basebackup_azure, precondition=azure_storage)
        azure_args()
        target_args()
        cmd = self.add_cmd(sub, self.list_basebackups_azure, precondition=azure_storage)
        azure_args()
        def google_args():
            cmd.add_argument("--project-id", help="Google Cloud project ID", required=True)
            cmd.add_argument("--credentials-file", metavar="FILE", help="Google credential file path [GOOGLE_APPLICATION_CREDENTIALS]",
                             default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"))
            cmd.add_argument("--bucket", help="Google Cloud container name", default="pghoard")
            cmd.add_argument("--site", help="pghoard site", default="default")
        cmd = self.add_cmd(sub, self.get_basebackup_google, precondition=google_storage)
        google_args()
        target_args()
        cmd = self.add_cmd(sub, self.list_basebackups_google, precondition=google_storage)
        google_args()
        cmd = self.add_cmd(sub, self.get_basebackup_http)
        host_port_site_args()
        target_args()
        cmd = self.add_cmd(sub, self.list_basebackups_http)
        host_port_site_args()
        def aws_args():
            cmd.add_argument("--aws-access-key-id", help="AWS Access Key ID [AWS_ACCESS_KEY_ID]", default=os.environ.get("AWS_ACCESS_KEY_ID"))
            cmd.add_argument("--aws-secret-access-key", help="AWS Secret Access Key [AWS_SECRET_ACCESS_KEY]", default=os.environ.get("AWS_SECRET_ACCESS_KEY"))
            cmd.add_argument("--region", help="AWS S3 region", default="eu-west-1")
            cmd.add_argument("--bucket", help="AWS S3 bucket name", required=True)
            cmd.add_argument("--site", help="pghoard site", default="default")
            cmd.add_argument("--host", help="S3 host address (non-AWS S3 implementations)")
            cmd.add_argument("--port", help="S3 port (non-AWS S3 implementations)", default=9090, type=int)
            cmd.add_argument("--insecure", help="Use plaintext HTTP (non-AWS S3 implementations)", action="store_true", default=False)
        cmd = self.add_cmd(sub, self.get_basebackup_s3, precondition=s3_storage)
        aws_args()
        target_args()
        cmd = self.add_cmd(sub, self.list_basebackups_s3, precondition=s3_storage)
        aws_args()
        return parser
    def get(self, arg):
        """Download a basebackup archive from a HTTP server"""
        self.storage = HTTPRestore(arg.host, arg.port, arg.site)
        if not self.storage.get_archive_file(arg.filename, arg.target_path, arg.path_prefix):
            return 1
    def get_basebackup_azure(self, arg):
        """Download a basebackup from Microsoft Azure"""
        try:
            self.storage = AzureRestore(arg.account, arg.access_key, arg.container, pgdata=arg.target_dir, site=arg.site)
            self.get_basebackup(arg.target_dir, arg.basebackup, arg.site, arg.primary_conninfo, overwrite=arg.overwrite)
        except socket.gaierror as ex:
            raise RestoreError("{}: {} (wrong account name?)".format(ex.__class__.__name__, ex))
        except (Error, azure_storage.WindowsAzureError) as ex:
            raise RestoreError("{}: {!r}".format(ex.__class__.__name__, ex))
    def list_basebackups_azure(self, arg):
        """List available basebackups from Microsoft Azure"""
        try:
            self.storage = AzureRestore(arg.account, arg.access_key, arg.container, arg.site)
            self.storage.show_basebackup_list()
        except socket.gaierror as ex:
            raise RestoreError("{}: {} (wrong account name?)".format(ex.__class__.__name__, ex))
        except (Error, azure_storage.WindowsAzureError) as ex:
            raise RestoreError("{}: {!r}".format(ex.__class__.__name__, ex))
    def get_basebackup_google(self, arg):
        """Download a basebackup from Google Cloud"""
        try:
            self.storage = GoogleRestore(arg.project_id, arg.credentials_file, arg.bucket, pgdata=arg.target_dir, site=arg.site)
            self.get_basebackup(arg.target_dir, arg.basebackup, arg.site, arg.primary_conninfo, overwrite=arg.overwrite)
        except google_storage.OAuth2Error as ex:
            raise RestoreError("{}: {} (invalid GOOGLE_APPLICATION_CREDENTIALS file?)".format(ex.__class__.__name__, ex))
        except (Error, google_storage.HttpError) as ex:
            raise RestoreError("{}: {}".format(ex.__class__.__name__, ex))
    def list_basebackups_google(self, arg):
        """List available basebackups from Google Cloud"""
        try:
            self.storage = GoogleRestore(arg.project_id, arg.credentials_file, arg.bucket, site=arg.site)
            self.storage.show_basebackup_list()
        except google_storage.OAuth2Error as ex:
            raise RestoreError("{}: {} (invalid GOOGLE_APPLICATION_CREDENTIALS file?)".format(ex.__class__.__name__, ex))
        except (Error, google_storage.HttpError) as ex:
            raise RestoreError("{}: {}".format(ex.__class__.__name__, ex))
    def get_basebackup_http(self, arg):
        """Download a basebackup from a HTTP server"""
        # (Docstring fixed: it previously said "Google Cloud", which also
        # produced a wrong help string for the get-basebackup-http command.)
        self.storage = HTTPRestore(arg.host, arg.port, arg.site, arg.target_dir)
        self.get_basebackup(arg.target_dir, arg.basebackup, arg.site, arg.primary_conninfo, overwrite=arg.overwrite)
    def list_basebackups_http(self, arg):
        """List available basebackups from a HTTP source"""
        self.storage = HTTPRestore(arg.host, arg.port, arg.site)
        self.storage.show_basebackup_list()
    def get_basebackup_s3(self, arg):
        """Download a basebackup from S3"""
        try:
            self.storage = S3Restore(arg.aws_access_key_id, arg.aws_secret_access_key, arg.region, arg.bucket, arg.site,
                                     host=arg.host, port=arg.port, is_secure=(not arg.insecure), pgdata=arg.target_dir)
            self.get_basebackup(arg.target_dir, arg.basebackup, arg.site, arg.primary_conninfo, overwrite=arg.overwrite)
        except (Error, s3_storage.boto.exception.BotoServerError) as ex:
            raise RestoreError("{}: {}".format(ex.__class__.__name__, ex))
    def list_basebackups_s3(self, arg):
        """List available basebackups from S3"""
        try:
            self.storage = S3Restore(arg.aws_access_key_id, arg.aws_secret_access_key, arg.region, arg.bucket, arg.site,
                                     host=arg.host, port=arg.port, is_secure=(not arg.insecure), pgdata=None)
            self.storage.show_basebackup_list()
        except (Error, s3_storage.boto.exception.BotoServerError) as ex:
            raise RestoreError("{}: {}".format(ex.__class__.__name__, ex))
    def get_basebackup(self, pgdata, basebackup, site, primary_conninfo, overwrite=False):
        """Fetch *basebackup*, extract it into *pgdata* and write recovery.conf."""
        # If the requested basebackup is "latest", figure out which one that is.
        if basebackup == "latest":
            basebackups = self.storage.list_basebackups()  # pylint: disable=protected-access
            if not basebackups:
                print("No basebackups found, exiting")
                sys.exit(-1)
            basebackup = max(entry["name"] for entry in basebackups)
            print("Found: {} basebackups, selecting: {} for restore".format(basebackups, basebackup))
        if os.path.exists(pgdata):
            if overwrite:
                shutil.rmtree(pgdata)
            else:
                raise Error("Target directory '{}' exists and --overwrite not specified, aborting.".format(pgdata))
        create_pgdata_dir(pgdata)
        _, tar_path, tar = self.storage.get_basebackup_file(basebackup)
        tar.extractall(pgdata)
        tar.close()
        os.unlink(tar_path)
        create_recovery_conf(pgdata, site, primary_conninfo)
        print("Basebackup complete.")
        print("You can start PostgreSQL by running pg_ctl -D %s start" % pgdata)
        print("On systemd based systems you can run systemctl start postgresql")
        print("On SYSV Init based systems you can run /etc/init.d/postgresql start")
    def run(self):
        """Parse argv, dispatch to the selected sub-command, return an exit code."""
        parser = self.create_parser()
        args = parser.parse_args()
        if not hasattr(args, "func"):
            parser.print_help()
            return 1
        try:
            exit_code = args.func(args)
            return exit_code
        except KeyboardInterrupt:
            print("*** interrupted by keyboard ***")
            return 1
class ObjectStore(object):
    """Shared behaviour for object-storage restore backends: listing the
    site's basebackups and fetching a basebackup archive into PGDATA."""
    def __init__(self, storage, site, pgdata):
        self.storage = storage
        self.site = site
        self.pgdata = pgdata
        self.log = logging.getLogger(self.__class__.__name__)
    def list_basebackups(self):
        """Return the storage entries stored under <site>/basebackup/."""
        return self.storage.list_path(self.site + "/basebackup/")
    def show_basebackup_list(self):
        """Print a human-readable table of the available basebackups."""
        entries = self.list_basebackups()
        header = "Available %r basebackups:" % self.site
        print(header)
        print("=" * len(header))
        print("basebackup\t\t\tsize\tlast_modified\t\t\tmetadata")
        for entry in entries:
            print("%s\t%s\t%s\t%s" % (entry["name"], entry["size"], entry["last_modified"], entry["metadata"]))
    def get_basebackup_file(self, basebackup):
        """Download *basebackup* into PGDATA as a uniquely named .tar.xz and open it.

        Returns a tuple (first WAL segment needed, archive path, TarFile).
        """
        metadata = self.storage.get_metadata_for_key(basebackup)
        stamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S")
        archive_name = "base-{}-{:08x}.tar.xz".format(stamp, random.getrandbits(32))
        basebackup_path = os.path.join(self.pgdata, archive_name)
        self.storage.get_contents_to_file(basebackup, basebackup_path)
        tar = tarfile.TarFile(fileobj=lzma_open_read(basebackup_path, "rb"))
        return metadata["start-wal-segment"], basebackup_path, tar
class AzureRestore(ObjectStore):
    """Restore backend backed by Microsoft Azure blob storage."""
    def __init__(self, account_name, account_key, container, site, pgdata=None):
        transfer = azure_storage.AzureTransfer(account_name, account_key, container)
        super(AzureRestore, self).__init__(transfer, site, pgdata)
class GoogleRestore(ObjectStore):
    """Restore backend backed by Google Cloud Storage."""
    def __init__(self, project_id, credential_file, bucket, site, pgdata=None):
        transfer = google_storage.GoogleTransfer(project_id=project_id, bucket_name=bucket, credential_file=credential_file)
        super(GoogleRestore, self).__init__(transfer, site, pgdata)
class S3Restore(ObjectStore):
    """Restore backend backed by AWS S3 (or an S3-compatible server)."""
    def __init__(self, aws_access_key_id, aws_secret_access_key, region, bucket_name, site, host=None, port=None, is_secure=None, pgdata=None):
        transfer = s3_storage.S3Transfer(aws_access_key_id, aws_secret_access_key, region, bucket_name, host=host, port=port, is_secure=is_secure)
        super(S3Restore, self).__init__(transfer, site, pgdata)
class HTTPRestore(object):
    """Restore backend that talks to a running pghoard HTTP server."""
    def __init__(self, host, port, site, pgdata=None):
        self.log = logging.getLogger("HTTPRestore")
        self.host = host
        self.port = port
        self.site = site
        self.pgdata = pgdata
        self.session = Session()
    def list_basebackups(self):
        """Return [{"name": ..., "size": ...}] for the site's basebackups."""
        uri = "http://" + self.host + ":" + str(self.port) + "/" + self.site + "/basebackups"
        response = self.session.get(uri)
        basebackups = []
        for basebackup, values in response.json()["basebackups"].items():
            basebackups.append({"name": basebackup, "size": values["size"]})
        return basebackups
    def show_basebackup_list(self):
        """Print a human-readable table of the available basebackups."""
        basebackups = self.list_basebackups()
        line = "Available %r basebackups:" % self.site
        print(line)
        print("=" * len(line))
        print("basebackup\t\tsize")
        for r in basebackups:
            print("{}\t{}".format(r["name"], r["size"]))
    def get_basebackup_file(self, basebackup):
        """Download *basebackup* into pgdata as base.tar.xz and open it.

        Returns a tuple (first WAL segment needed, archive path, TarFile).
        Raises Error when the server does not know the backup or site.
        """
        uri = "http://" + self.host + ":" + str(self.port) + "/" + self.site + "/basebackups/" + basebackup
        response = self.session.get(uri, stream=True)
        if response.status_code != 200:
            # BUG FIX: the message contained a stray '%' before the first
            # format field ("%{!r}"), which str.format rendered literally.
            raise Error("Incorrect basebackup: {!r} or site: {!r} defined".format(basebackup, self.site))
        basebackup_path = os.path.join(self.pgdata, "base.tar.xz")
        store_response_to_file(basebackup_path, response)
        tar = tarfile.TarFile(fileobj=open(basebackup_path, "rb"))
        return response.headers["x-pghoard-start-wal-segment"], basebackup_path, tar
    def get_archive_file(self, filename, target_path, path_prefix=None):
        """Ask the pghoard server to deliver an archived file to target_path.

        Returns True when the server responded 200 or 206.
        """
        start_time = time.time()
        self.log.debug("Getting archived file: %r, target_path: %r, path_prefix: %r",
                       filename, target_path, path_prefix)
        uri = "http://" + self.host + ":" + str(self.port) + "/" + self.site + "/" + filename
        if not path_prefix:
            final_target_path = os.path.join(os.getcwd(), target_path)
        else:
            final_target_path = os.path.join(path_prefix, target_path)
        headers = {"x-pghoard-target-path": final_target_path}
        response = self.session.get(uri, headers=headers, stream=True)
        self.log.debug("Got archived file: %r, %r status_code: %r took: %.2fs", filename, target_path,
                       response.status_code, time.time() - start_time)
        return response.status_code in (200, 206)
def main():
    """Script entry point: run the restore CLI, mapping RestoreError to exit code 1."""
    logging.basicConfig(level=logging.INFO, format=default_log_format_str)
    try:
        return Restore().run()
    except RestoreError as ex:
        print("FATAL: {}: {}".format(ex.__class__.__name__, ex))
        return 1
if __name__ == "__main__":
    # main() returns an exit status or None; treat None as success (0).
    sys.exit(main() or 0)
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
from six import raise_from, b
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import io
import signal
import warnings
from os.path import abspath, devnull
from tempfile import mkstemp
# External imports
# Bokeh imports
from ..embed import file_html
from ..resources import INLINE
from ..util.dependencies import import_required, detect_phantomjs
from ..util.string import decode_utf8
from .util import default_filename
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def export_png(obj, filename=None, height=None, width=None, webdriver=None):
    ''' Export the LayoutDOM object or document as a PNG.

    If the filename is not given, it is derived from the script name
    (e.g. ``/foo/myplot.py`` will create ``/foo/myplot.png``)

    Args:
        obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget
            object or Document to export.

        filename (str, optional) : filename to save document under (default: None)
            If None, infer from the filename.

        height (int) : the desired height of the exported layout obj only if
            it's a Plot instance. Otherwise the height kwarg is ignored.

        width (int) : the desired width of the exported layout obj only if
            it's a Plot instance. Otherwise the width kwarg is ignored.

        webdriver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.

    Returns:
        filename (str) : the filename where the static file is saved.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    png_image = get_screenshot_as_png(obj, height=height, width=width, driver=webdriver)
    target = filename if filename is not None else default_filename("png")
    png_image.save(target)
    return abspath(target)
def export_svgs(obj, filename=None, height=None, width=None, webdriver=None):
    ''' Export the SVG-enabled plots within a layout. Each plot will result
    in a distinct SVG file.

    If the filename is not given, it is derived from the script name
    (e.g. ``/foo/myplot.py`` will create ``/foo/myplot.svg``)

    Args:
        obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display

        filename (str, optional) : filename to save document under (default: None)
            If None, infer from the filename.

        height (int) : the desired height of the exported layout obj only if
            it's a Plot instance. Otherwise the height kwarg is ignored.

        width (int) : the desired width of the exported layout obj only if
            it's a Plot instance. Otherwise the width kwarg is ignored.

        webdriver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.

    Returns:
        filenames (list(str)) : the list of filenames where the SVGs files
        are saved.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    svgs = get_svgs(obj, height=height, width=width, driver=webdriver)

    if len(svgs) == 0:
        log.warn("No SVG Plots were found.")
        return

    if filename is None:
        filename = default_filename("svg")

    # Split the name into base/extension ONCE, so per-plot suffixes do not
    # accumulate. The old code re-spliced the previous iteration's result,
    # producing "name.svg", "name_1.svg", "name_1_2.svg", ... It also mangled
    # filenames without a ".svg" extension (find() returned -1).
    idx = filename.find(".svg")
    if idx == -1:
        base, ext = filename, ""
    else:
        base, ext = filename[:idx], filename[idx:]

    filenames = []

    for i, svg in enumerate(svgs):
        # first plot keeps the unsuffixed name, matching previous behavior
        fname = base + ext if i == 0 else "{}_{}{}".format(base, i, ext)

        with io.open(fname, mode="w", encoding="utf-8") as f:
            f.write(svg)

        filenames.append(fname)

    return filenames
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def get_screenshot_as_png(obj, driver=None, **kwargs):
    ''' Get a screenshot of a LayoutDOM object.

    Renders ``obj`` to a standalone HTML page in a temporary file, loads it
    in a selenium webdriver (creating a throwaway PhantomJS driver when none
    is supplied), screenshots the page, and crops the result to the layout's
    bounding rect.

    Args:
        obj (LayoutDOM or Document) : a Layout (Row/Column), Plot or Widget
            object or Document to export.

        driver (selenium.webdriver) : a selenium webdriver instance to use
            to export the image.

        **kwargs : forwarded to ``get_layout_html`` (``height``/``width``).

    Returns:
        cropped_image (PIL.Image.Image) : a pillow image loaded from PNG.

    .. warning::
        Responsive sizing_modes may generate layouts with unexpected size and
        aspect ratios. It is recommended to use the default ``fixed`` sizing mode.

    '''
    # pillow is an optional dependency; fail with an install hint if missing
    Image = import_required('PIL.Image',
                            'To use bokeh.io.export_png you need pillow ' +
                            '("conda install pillow" or "pip install pillow")')
    with _tmp_html() as tmp:
        html = get_layout_html(obj, **kwargs)
        with io.open(tmp.path, mode="w", encoding="utf-8") as file:
            file.write(decode_utf8(html))

        web_driver = driver if driver is not None else create_webdriver()
        try:
            web_driver.get("file:///" + tmp.path)
            web_driver.maximize_window()
            ## resize for PhantomJS compat
            web_driver.execute_script("document.body.style.width = '100%';")
            wait_until_render_complete(web_driver)
            png = web_driver.get_screenshot_as_png()
            # bounding rect of the rendered layout, used below to crop the shot
            b_rect = web_driver.execute_script(_BOUNDING_RECT_SCRIPT)
        finally:
            if driver is None:  # only quit webdriver if not passed in as arg
                terminate_webdriver(web_driver)

    image = Image.open(io.BytesIO(png))
    # extra rect keys beyond left/top/right/bottom are swallowed by **kwargs
    cropped_image = _crop_image(image, **b_rect)

    return cropped_image
def get_svgs(obj, driver=None, **kwargs):
    ''' Render ``obj`` in a webdriver and return the serialized text of every
    SVG element under the Bokeh root.

    Args:
        obj (LayoutDOM or Document) : object to render

        driver (selenium.webdriver) : optional webdriver to reuse; when None,
            a temporary driver is created and terminated afterwards.

    Returns:
        list[str] : serialized SVG documents (possibly empty)

    '''
    with _tmp_html() as tmp:
        html = get_layout_html(obj, **kwargs)
        # Write the page as UTF-8 text, mirroring get_screenshot_as_png.
        # (The old code opened the file in binary mode and called the
        # undefined name ``b``, raising NameError at runtime.)
        with io.open(tmp.path, mode="w", encoding="utf-8") as file:
            file.write(decode_utf8(html))

        web_driver = driver if driver is not None else create_webdriver()
        try:
            web_driver.get("file:///" + tmp.path)
            wait_until_render_complete(web_driver)
            svgs = web_driver.execute_script(_SVG_SCRIPT)
        finally:
            if driver is None:  # only quit webdriver if not passed in as arg
                terminate_webdriver(web_driver)

    return svgs
def get_layout_html(obj, resources=INLINE, **kwargs):
    ''' Serialize ``obj`` to a standalone HTML page.

    When ``height``/``width`` kwargs are given and ``obj`` is a Plot, the plot
    is temporarily resized for rendering and restored afterwards; on any other
    layout the size kwargs are ignored (with a warning).

    Args:
        obj (LayoutDOM or Document) : object to serialize
        resources (Resources) : resources to embed (default: INLINE)

    Returns:
        str : the HTML page

    '''
    resize = False
    if kwargs.get('height') is not None or kwargs.get('width') is not None:
        # Defer this import, it is expensive
        from ..models.plots import Plot
        if not isinstance(obj, Plot):
            warnings.warn("Export method called with height or width kwargs on a non-Plot layout. The size values will be ignored.")
        else:
            resize = True
            old_height = obj.plot_height
            old_width = obj.plot_width
            obj.plot_height = kwargs.get('height', old_height)
            obj.plot_width = kwargs.get('width', old_width)

    try:
        html = file_html(obj, resources, title="")
    finally:
        # Always restore the caller's plot size, even if rendering raised;
        # previously an exception in file_html left the plot resized.
        if resize:
            obj.plot_height = old_height
            obj.plot_width = old_width

    return html
def wait_until_render_complete(driver):
    ''' Block until Bokeh has finished rendering in the given webdriver.

    First waits (up to 5s) for Bokeh to load and create a Document, then
    injects a script that flips a flag on the document's idle signal and
    polls (up to 5s) until it fires. Severe browser console errors are
    logged either way.

    Args:
        driver (selenium.webdriver) : the webdriver displaying the page

    Raises:
        RuntimeError : if Bokeh never loads in the page

    '''
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.common.exceptions import TimeoutException

    def is_bokeh_loaded(driver):
        return driver.execute_script('''
            const b = window.Bokeh;
            return b && b.documents && b.documents.length > 0;
        ''')

    try:
        WebDriverWait(driver, 5, poll_frequency=0.1).until(is_bokeh_loaded)
    except TimeoutException:
        # NOTE: the original code called ``raise_from`` here, a name that is
        # never imported in this module, so the failure path itself raised
        # NameError. A plain raise preserves the intended message.
        raise RuntimeError('Bokeh was not loaded in time. Something may have gone wrong.')

    driver.execute_script(_WAIT_SCRIPT)

    def is_bokeh_render_complete(driver):
        return driver.execute_script('return window._bokeh_render_complete;')

    try:
        WebDriverWait(driver, 5, poll_frequency=0.1).until(is_bokeh_render_complete)
    except TimeoutException:
        log.warn("The webdriver raised a TimeoutException while waiting for \
a 'bokeh:idle' event to signify that the layout has rendered. \
Something may have gone wrong.")
    finally:
        # surface severe browser console errors regardless of the outcome
        browser_logs = driver.get_log('browser')
        severe_errors = [l for l in browser_logs if l.get('level') == 'SEVERE']
        if len(severe_errors) > 0:
            log.warn("There were severe browser errors that may have affected your export: {}".format(severe_errors))
def create_webdriver():
    ''' Build a PhantomJS selenium webdriver, suppressing the UserWarning
    that selenium emits for the PhantomJS backend.
    '''
    with warnings.catch_warnings():
        # the filter must cover driver construction below, not just the import
        warnings.filterwarnings("ignore", ".*", UserWarning, "selenium.webdriver.phantomjs.webdriver")

        selenium_webdriver = import_required(
            'selenium.webdriver',
            'To use bokeh.io image export functions you need selenium '
            '("conda install -c bokeh selenium" or "pip install selenium")')

        return selenium_webdriver.PhantomJS(executable_path=detect_phantomjs(),
                                            service_log_path=devnull)
def terminate_webdriver(driver):
    ''' Cleanly shut down a selenium webdriver, working around PhantomJS not
    exiting on quit() alone.
    '''
    # https://github.com/seleniumhq/selenium/issues/767
    if driver.name == "phantomjs":
        driver.service.process.send_signal(signal.SIGTERM)

    try:
        driver.quit()
    except (IOError, OSError):  # IOError for Python 2.7
        pass
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_BOUNDING_RECT_SCRIPT = """
return document.getElementsByClassName('bk-root')[0].children[0].getBoundingClientRect()
"""
_SVG_SCRIPT = """
var serialized_svgs = [];
var svgs = document.getElementsByClassName('bk-root')[0].getElementsByTagName("svg");
for (var i = 0; i < svgs.length; i++) {
var source = (new XMLSerializer()).serializeToString(svgs[i]);
serialized_svgs.push(source);
};
return serialized_svgs
"""
_WAIT_SCRIPT = """
// add private window prop to check that render is complete
window._bokeh_render_complete = false;
function done() {
window._bokeh_render_complete = true;
}
var doc = window.Bokeh.documents[0];
if (doc.is_idle)
done();
else
doc.idle.connect(done);
"""
def _crop_image(image, left=0, top=0, right=0, bottom=0, **kwargs):
    ''' Crop the border from the layout.

    Extra keyword arguments (e.g. other getBoundingClientRect keys) are
    accepted and ignored.
    '''
    box = (left, top, right, bottom)
    return image.crop(box)
class _TempFile(object):
    ''' A context-managed temporary file that removes itself on close,
    interpreter exit, or context exit. close() is idempotent.
    '''

    _closed = False

    def __init__(self, prefix="tmp", suffix=""):
        # mkstemp returns an already-open OS-level handle plus the path
        self.fd, self.path = mkstemp(prefix=prefix, suffix=suffix)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    def __del__(self):
        self.close()

    def close(self):
        if self._closed:
            return

        # best-effort cleanup: the handle may already be closed and the
        # file may already be gone
        try:
            os.close(self.fd)
        except (OSError, IOError):
            pass
        finally:
            self.fd = None

        try:
            os.unlink(self.path)
        except (OSError, IOError):
            pass
        finally:
            self.path = None

        self._closed = True
def _tmp_html():
    ''' Return a self-deleting temporary file for a throwaway HTML page. '''
    return _TempFile(prefix="bokeh", suffix=".html")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing GAMESS-US, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Pablo Escobar (sciCORE, SIB, University of Basel)
@author: Benjamin Roberts (The University of Auckland)
"""
import fileinput
import glob
import os
import re
import shutil
import sys
import tempfile
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_platform_name
from easybuild.tools import toolchain
class EB_GAMESS_minus_US(EasyBlock):
    """Support for building/installing GAMESS-US.

    The build is driven by GAMESS-US' own interactive 'config' csh script
    (answered via run_cmd_qa), followed by in-place patching of the 'rungms'
    launcher. The build happens directly in the installation directory.

    NOTE(review): this file uses Python 2-only ``except X, err`` syntax
    throughout, so it will not parse under Python 3.
    """

    @staticmethod
    def extra_options():
        """Define custom easyconfig parameters for GAMESS-US."""
        extra_vars = {
            'ddi_comm': ['mpi', "DDI communication layer to use", CUSTOM],
            'maxcpus': [None, "Maximum number of cores per node", MANDATORY],
            'maxnodes': [None, "Maximum number of nodes", MANDATORY],
            'runtest': [True, "Run GAMESS-US tests", CUSTOM],
            'scratch_dir': ['$TMPDIR', "Scratch dir to be used in rungms script", CUSTOM],
        }
        return EasyBlock.extra_options(extra_vars)

    def __init__(self, *args, **kwargs):
        """Easyblock constructor, enable building in installation directory."""
        super(EB_GAMESS_minus_US, self).__init__(*args, **kwargs)
        self.build_in_installdir = True

        # temporary directory used by test_step(), created eagerly so its
        # name can be validated before any build work happens
        self.testdir = None
        if self.cfg['runtest']:
            self.testdir = tempfile.mkdtemp()
            # make sure test dir doesn't contain [ or ], rungms csh script doesn't handle that well ("set: No match")
            if re.search(r'[\[\]]', self.testdir):
                raise EasyBuildError("Temporary dir for tests '%s' will cause problems with rungms csh script", self.testdir)

    def extract_step(self):
        """Extract sources."""
        # strip off 'gamess' part to avoid having everything in a 'gamess' subdirectory
        self.cfg['unpack_options'] = "--strip-components=1"
        super(EB_GAMESS_minus_US, self).extract_step()

    def configure_step(self):
        """Configure GAMESS-US build via provided interactive 'config' script."""
        # machine type: only 64-bit x86 Linux is supported by this easyblock
        platform_name = get_platform_name()
        x86_64_linux_re = re.compile('^x86_64-.*$')
        if x86_64_linux_re.match(platform_name):
            machinetype = "linux64"
        else:
            raise EasyBuildError("Build target %s currently unsupported", platform_name)

        # compiler config: the config script needs the compiler name and a
        # (major or major.minor) version string
        comp_fam = self.toolchain.comp_family()
        fortran_comp, fortran_ver = None, None
        if comp_fam == toolchain.INTELCOMP:
            fortran_comp = 'ifort'
            (out, _) = run_cmd("ifort -v", simple=False)
            res = re.search(r"^ifort version ([0-9]+)\.[0-9.]+$", out)
            if res:
                fortran_ver = res.group(1)
            else:
                raise EasyBuildError("Failed to determine ifort major version number")
        elif comp_fam == toolchain.GCC:
            fortran_comp = 'gfortran'
            # gfortran is identified by its major.minor version
            fortran_ver = '.'.join(get_software_version('GCC').split('.')[:2])
        else:
            raise EasyBuildError("Compiler family '%s' currently unsupported.", comp_fam)

        # math library config: pick the first known math library that is
        # loaded as a dependency
        known_mathlibs = ['imkl', 'OpenBLAS', 'ATLAS', 'ACML']
        mathlib, mathlib_root = None, None
        for mathlib in known_mathlibs:
            mathlib_root = get_software_root(mathlib)
            if mathlib_root is not None:
                break
        if mathlib_root is None:
            raise EasyBuildError("None of the known math libraries (%s) available, giving up.", known_mathlibs)
        if mathlib == 'imkl':
            # config script expects 'mkl' and the mkl subdirectory of imkl
            mathlib = 'mkl'
            mathlib_root = os.path.join(mathlib_root, 'mkl')
        else:
            mathlib = mathlib.lower()

        # verify selected DDI communication layer
        known_ddi_comms = ['mpi', 'mixed', 'shmem', 'sockets']
        if not self.cfg['ddi_comm'] in known_ddi_comms:
            raise EasyBuildError("Unsupported DDI communication layer specified (known: %s): %s",
                                 known_ddi_comms, self.cfg['ddi_comm'])

        # MPI library config: only needed for the 'mpi' DDI layer
        mpilib, mpilib_root, mpilib_path = None, None, None
        if self.cfg['ddi_comm'] == 'mpi':
            known_mpilibs = ['impi', 'OpenMPI', 'MVAPICH2', 'MPICH2']
            for mpilib in known_mpilibs:
                mpilib_root = get_software_root(mpilib)
                if mpilib_root is not None:
                    break
            if mpilib_root is None:
                raise EasyBuildError("None of the known MPI libraries (%s) available, giving up.", known_mpilibs)
            mpilib_path = mpilib_root
            if mpilib == 'impi':
                # Intel MPI keeps its 64-bit tree in a subdirectory
                mpilib_path = os.path.join(mpilib_root, 'intel64')
            else:
                mpilib = mpilib.lower()

        # run interactive 'config' script to generate install.info file
        cmd = "%(preconfigopts)s ./config %(configopts)s" % {
            'preconfigopts': self.cfg['preconfigopts'],
            'configopts': self.cfg['configopts'],
        }
        # literal prompts asked by the config script, mapped to our answers
        qa = {
            "After the new window is open, please hit <return> to go on.": '',
            "please enter your target machine name: ": machinetype,
            "Version? [00] ": self.version,
            "Please enter your choice of FORTRAN: ": fortran_comp,
            "hit <return> to continue to the math library setup.": '',
            "MKL pathname? ": mathlib_root,
            "MKL version (or 'skip')? ": 'skip',
            "please hit <return> to compile the GAMESS source code activator": '',
            "please hit <return> to set up your network for Linux clusters.": '',
            "communication library ('sockets' or 'mpi')? ": self.cfg['ddi_comm'],
            "Enter MPI library (impi, mvapich2, mpt, sockets):": mpilib,
            "Please enter your %s's location: " % mpilib: mpilib_root,
            "Do you want to try LIBCCHEM? (yes/no): ": 'no',
            "Enter full path to OpenBLAS libraries (without 'lib' subdirectory):": mathlib_root,
        }
        # regex-matched prompts (multi-line or variable text)
        stdqa = {
            r"GAMESS directory\? \[.*\] ": self.builddir,
            r"GAMESS build directory\? \[.*\] ": self.installdir,  # building in install directory
            r"Enter only the main version number, such as .*\nVersion\? ": fortran_ver,
            r"gfortran version.\nPlease enter only the first decimal place, such as .*:": fortran_ver,
            "Enter your choice of 'mkl' or .* 'none': ": mathlib,
        }
        run_cmd_qa(cmd, qa=qa, std_qa=stdqa, log_all=True, simple=True)

        self.log.debug("Contents of install.info:\n%s" % read_file(os.path.join(self.builddir, 'install.info')))

        # patch hardcoded settings in rungms to use values specified in easyconfig file
        rungms = os.path.join(self.builddir, 'rungms')
        extra_gmspath_lines = "set ERICFMT=$GMSPATH/auxdata/ericfmt.dat\nset MCPPATH=$GMSPATH/auxdata/MCP\n"
        try:
            # fileinput with inplace=1 redirects stdout into the file, so the
            # sys.stdout.write() below is what writes each (edited) line back
            for line in fileinput.input(rungms, inplace=1, backup='.orig'):
                line = re.sub(r"^(\s*set\s*TARGET)=.*", r"\1=%s" % self.cfg['ddi_comm'], line)
                line = re.sub(r"^(\s*set\s*GMSPATH)=.*", r"\1=%s\n%s" % (self.installdir, extra_gmspath_lines), line)
                line = re.sub(r"(null\) set VERNO)=.*", r"\1=%s" % self.version, line)
                line = re.sub(r"^(\s*set DDI_MPI_CHOICE)=.*", r"\1=%s" % mpilib, line)
                line = re.sub(r"^(\s*set DDI_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
                line = re.sub(r"^(\s*set GA_MPI_ROOT)=.*%s.*" % mpilib.lower(), r"\1=%s" % mpilib_path, line)
                # comment out all adjustments to $LD_LIBRARY_PATH that involves hardcoded paths
                line = re.sub(r"^(\s*)(setenv\s*LD_LIBRARY_PATH\s*/.*)", r"\1#\2", line)
                if self.cfg['scratch_dir']:
                    line = re.sub(r"^(\s*set\s*SCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
                    line = re.sub(r"^(\s*set\s*USERSCR)=.*", r"\1=%s" % self.cfg['scratch_dir'], line)
                sys.stdout.write(line)
        except IOError, err:
            raise EasyBuildError("Failed to patch %s: %s", rungms, err)

    def build_step(self):
        """Custom build procedure for GAMESS-US: using compddi, compall and lked scripts."""
        compddi = os.path.join(self.cfg['start_dir'], 'ddi', 'compddi')
        run_cmd(compddi, log_all=True, simple=True)

        # make sure the libddi.a library is present
        libddi = os.path.join(self.cfg['start_dir'], 'ddi', 'libddi.a')
        if not os.path.isfile(libddi):
            raise EasyBuildError("The libddi.a library (%s) was never built", libddi)
        else:
            self.log.info("The libddi.a library (%s) was successfully built." % libddi)

        compall_cmd = os.path.join(self.cfg['start_dir'], 'compall')
        compall = "%s %s %s" % (self.cfg['prebuildopts'], compall_cmd, self.cfg['buildopts'])
        run_cmd(compall, log_all=True, simple=True)

        # link the final gamess.<version>.x executable
        cmd = "%s gamess %s" % (os.path.join(self.cfg['start_dir'], 'lked'), self.version)
        run_cmd(cmd, log_all=True, simple=True)

    def test_step(self):
        """Run GAMESS-US tests (if 'runtest' easyconfig parameter is set to True)."""
        # don't use provided 'runall' script for tests, since that only runs the tests single-core
        if self.cfg['runtest']:
            try:
                cwd = os.getcwd()
                os.chdir(self.testdir)
            except OSError, err:
                raise EasyBuildError("Failed to move to temporary directory for running tests: %s", err)

            # copy input files for exam<id> standard tests
            for test_input in glob.glob(os.path.join(self.installdir, 'tests', 'standard', 'exam*.inp')):
                try:
                    shutil.copy2(test_input, os.getcwd())
                except OSError, err:
                    raise EasyBuildError("Failed to copy %s to %s: %s", test_input, os.getcwd(), err)

            rungms = os.path.join(self.installdir, 'rungms')
            test_env_vars = ['TMPDIR=%s' % self.testdir]
            if self.toolchain.mpi_family() == toolchain.INTELMPI:
                test_env_vars.extend([
                    'I_MPI_FALLBACK=enable',  # enable fallback in case first fabric fails (see $I_MPI_FABRICS_LIST)
                    'I_MPI_HYDRA_BOOTSTRAP=fork',  # tests are only run locally (2 processes), so no SSH required
                ])

            # run all exam<id> tests, dump output to exam<id>.log
            n_tests = 47
            for i in range(1, n_tests+1):
                test_cmd = ' '.join(test_env_vars + [rungms, 'exam%02d' % i, self.version, '1', '2'])
                (out, _) = run_cmd(test_cmd, log_all=True, simple=False)
                write_file('exam%02d.log' % i, out)

            # verify output of tests via the bundled checktst script
            check_cmd = os.path.join(self.installdir, 'tests', 'standard', 'checktst')
            (out, _) = run_cmd(check_cmd, log_all=True, simple=False)
            success_regex = re.compile("^All %d test results are correct" % n_tests, re.M)
            if success_regex.search(out):
                self.log.info("All tests ran successfully!")
            else:
                raise EasyBuildError("Not all tests ran successfully...")

            # cleanup
            os.chdir(cwd)
            try:
                shutil.rmtree(self.testdir)
            except OSError, err:
                raise EasyBuildError("Failed to remove test directory %s: %s", self.testdir, err)

    def install_step(self):
        """Skip install step, since we're building in the install directory."""
        pass

    def sanity_check_step(self):
        """Custom sanity check for GAMESS-US."""
        custom_paths = {
            'files': ['gamess.%s.x' % self.version, 'rungms'],
            'dirs': [],
        }
        super(EB_GAMESS_minus_US, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Define GAMESS-US specific variables in generated module file, i.e. $GAMESSUSROOT."""
        txt = super(EB_GAMESS_minus_US, self).make_module_extra()
        txt += self.module_generator.set_environment('GAMESSUSROOT', self.installdir)
        # put the install root itself on $PATH (rungms/gamess live at top level)
        txt += self.module_generator.prepend_paths("PATH", [''])
        return txt
| |
#!/usr/bin/env python
#
# rbssh.py -- A custom SSH client for use in Review Board.
#
# This is used as an ssh replacement that can be used across platforms with
# a custom .ssh directory. OpenSSH doesn't respect $HOME, instead reading
# /etc/passwd directly, which causes problems for us. Using rbssh, we can
# work around this.
#
#
# Copyright (c) 2010-2011 Beanbag, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import unicode_literals
import getpass
import logging
import os
import select
import sys
from optparse import OptionParser
if 'RBSITE_PYTHONPATH' in os.environ:
for path in reversed(os.environ['RBSITE_PYTHONPATH'].split(':')):
sys.path.insert(1, path)
import paramiko
from reviewboard import get_version_string
from reviewboard.scmtools.core import SCMTool
from reviewboard.ssh.client import SSHClient
# Verbose debug logging to a file is enabled when $DEBUG_RBSSH is set.
DEBUG = os.getenv('DEBUG_RBSSH')
# Directory for the rbssh-<pid>.log debug file (current directory if unset).
DEBUG_LOGDIR = os.getenv('RBSSH_LOG_DIR')
# Default SSH port, used when -p/--port is not supplied.
SSH_PORT = 22
# Parsed command line options; set as a side effect of parse_options().
options = None
class PlatformHandler(object):
    """Base class shuttling data between the local process and an SSH channel.

    Subclasses implement shell() and transfer() with platform-specific event
    loops; the process_* helpers below are shared by both.
    """

    def __init__(self, channel):
        # the open paramiko channel to the remote host
        self.channel = channel

    def shell(self):
        """Run an interactive shell session. Implemented per platform."""
        raise NotImplementedError

    def transfer(self):
        """Run a non-interactive data transfer. Implemented per platform."""
        raise NotImplementedError

    def process_channel(self, channel):
        """Pump pending remote stdout/stderr data to the local streams.

        Returns False (stop looping) when the channel is closed, either
        stream reports EOF (empty read), or the remote exit status is ready;
        True otherwise.
        """
        if channel.closed:
            return False

        logging.debug('!! process_channel\n')

        if channel.recv_ready():
            data = channel.recv(4096)

            if not data:
                logging.debug('!! stdout empty\n')
                return False

            sys.stdout.write(data)
            sys.stdout.flush()

        if channel.recv_stderr_ready():
            data = channel.recv_stderr(4096)

            if not data:
                logging.debug('!! stderr empty\n')
                return False

            sys.stderr.write(data)
            sys.stderr.flush()

        # checked last so any remaining output is flushed first
        if channel.exit_status_ready():
            logging.debug('!!! exit_status_ready\n')
            return False

        return True

    def process_stdin(self, channel):
        """Forward one byte of local stdin to the remote channel.

        Returns False on local EOF or read error, True otherwise.
        """
        logging.debug('!! process_stdin\n')

        try:
            # single-byte reads keep interactive sessions responsive
            buf = os.read(sys.stdin.fileno(), 1)
        except OSError:
            buf = None

        if not buf:
            logging.debug('!! stdin empty\n')
            return False

        channel.send(buf)

        return True
class PosixHandler(PlatformHandler):
    """Channel I/O for POSIX platforms, driven by a select() loop."""

    def shell(self):
        import termios
        import tty

        # Put the local terminal into raw/cbreak mode for the duration of the
        # interactive session, restoring the saved settings afterwards.
        saved_attrs = termios.tcgetattr(sys.stdin)

        try:
            tty.setraw(sys.stdin.fileno())
            tty.setcbreak(sys.stdin.fileno())
            self.handle_communications()
        finally:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, saved_attrs)

    def transfer(self):
        import fcntl

        # Make stdin non-blocking so the select() loop never stalls on it.
        stdin_fd = sys.stdin.fileno()
        flags = fcntl.fcntl(stdin_fd, fcntl.F_GETFL)
        fcntl.fcntl(stdin_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

        self.handle_communications()

    def handle_communications(self):
        """Multiplex the SSH channel and local stdin until either side ends."""
        while True:
            readable, _, _ = select.select([self.channel, sys.stdin], [], [])

            if self.channel in readable and not self.process_channel(self.channel):
                break

            if sys.stdin in readable and not self.process_stdin(self.channel):
                self.channel.shutdown_write()
                break
class WindowsHandler(PlatformHandler):
    """Channel I/O for Windows, using a background reader thread instead of
    select() (which only works on sockets there).
    """

    def shell(self):
        self.handle_communications()

    def transfer(self):
        self.handle_communications()

    def handle_communications(self):
        import threading

        logging.debug('!! begin_windows_transfer\n')
        self.channel.setblocking(0)

        def writeall(channel):
            # Drain remote output until the channel ends, then stop reading.
            while self.process_channel(channel):
                pass

            logging.debug('!! Shutting down reading\n')
            channel.shutdown_read()

        reader_thread = threading.Thread(target=writeall, args=(self.channel,))
        reader_thread.start()

        # Meanwhile, this thread pumps local stdin to the remote side.
        try:
            while self.process_stdin(self.channel):
                pass
        except EOFError:
            pass

        logging.debug('!! Shutting down writing\n')
        self.channel.shutdown_write()
def print_version(option, opt, value, parser):
    """optparse callback for -V: print the version banner and exit cleanly."""
    parser.print_version()
    sys.exit(0)
def parse_options(args):
    """Parse the rbssh command line.

    Returns a ``(hostname, port, remaining_args)`` tuple, and sets the
    module-level ``options`` (used by main()) as a side effect.
    """
    global options

    hostname = None

    parser = OptionParser(usage='%prog [options] [user@]hostname [command]',
                          version='%prog ' + get_version_string())
    # stop parsing at the first positional argument, so options belonging to
    # the remote command are not consumed by rbssh itself
    parser.disable_interspersed_args()
    parser.add_option('-l',
                      dest='username', metavar='USERNAME', default=None,
                      help='the user to log in as on the remote machine')
    parser.add_option('-p', '--port',
                      type='int', dest='port', metavar='PORT', default=None,
                      help='the port to connect to')
    parser.add_option('-q', '--quiet',
                      action='store_true', dest='quiet', default=False,
                      help='suppress any unnecessary output')
    # -s takes TWO values (hostname and subsystem name), see nargs=2 below
    parser.add_option('-s',
                      dest='subsystem', metavar='SUBSYSTEM', default=None,
                      nargs=2,
                      help='the subsystem to use (ssh or sftp)')
    parser.add_option('-V',
                      action='callback', callback=print_version,
                      help='display the version information and exit')
    parser.add_option('--rb-disallow-agent',
                      action='store_false', dest='allow_agent',
                      default=os.getenv('RBSSH_ALLOW_AGENT') != '0',
                      help='disable using the SSH agent for authentication')
    parser.add_option('--rb-local-site',
                      dest='local_site_name', metavar='NAME',
                      default=os.getenv('RB_LOCAL_SITE'),
                      help='the local site name containing the SSH keys to '
                           'use')

    (options, args) = parser.parse_args(args)

    if options.subsystem:
        if len(options.subsystem) != 2:
            parser.error('-s requires a hostname and a valid subsystem')
        elif options.subsystem[1] not in ('sftp', 'ssh'):
            parser.error('Invalid subsystem %s' % options.subsystem[1])

        # unpack the (hostname, subsystem-name) pair collected by -s
        hostname, options.subsystem = options.subsystem

    if len(args) == 0 and not hostname:
        parser.print_help()
        sys.exit(1)

    if not hostname:
        # first positional argument is the host; the rest is the command
        hostname = args[0]
        args = args[1:]

    if options.port:
        port = options.port
    else:
        port = SSH_PORT

    return hostname, port, args
def main():
    """Connect to the remote host and run a shell, command, or subsystem.

    Returns the remote process's exit status.
    """
    if DEBUG:
        # verbose logging to rbssh-<pid>.log when $DEBUG_RBSSH is set
        pid = os.getpid()
        log_filename = 'rbssh-%s.log' % pid

        if DEBUG_LOGDIR:
            log_path = os.path.join(DEBUG_LOGDIR, log_filename)
        else:
            log_path = log_filename

        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(name)-18s %(levelname)-8s '
                                   '%(message)s',
                            datefmt='%m-%d %H:%M',
                            filename=log_path,
                            filemode='w')
        logging.debug('%s' % sys.argv)
        logging.debug('PID %s' % pid)

    # Mirror INFO-level records from the root logger onto the console.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(logging.Formatter('%(message)s'))
    ch.addFilter(logging.Filter('root'))
    logging.getLogger('').addHandler(ch)

    path, port, command = parse_options(sys.argv[1:])

    # normalize to a URI so SCMTool can split user@host
    if '://' not in path:
        path = 'ssh://' + path

    username, hostname = SCMTool.get_auth_from_uri(path, options.username)

    if username is None:
        username = getpass.getuser()

    logging.debug('!!! %s, %s, %s' % (hostname, username, command))

    client = SSHClient(namespace=options.local_site_name)
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    attempts = 0
    password = None

    key = client.get_user_key()

    # Try key/agent auth first; on auth failure, prompt for a password up to
    # 3 times (only when attached to a TTY).
    while True:
        try:
            client.connect(hostname, port, username=username,
                           password=password, pkey=key,
                           allow_agent=options.allow_agent)
            break
        except paramiko.AuthenticationException as e:
            if attempts == 3 or not sys.stdin.isatty():
                logging.error('Too many authentication failures for %s' %
                              username)
                sys.exit(1)

            attempts += 1
            password = getpass.getpass("%s@%s's password: " %
                                       (username, hostname))
        except paramiko.SSHException as e:
            logging.error('Error connecting to server: %s' % e)
            sys.exit(1)
        except Exception as e:
            logging.error('Unknown exception during connect: %s (%s)' %
                          (e, type(e)))
            sys.exit(1)

    transport = client.get_transport()
    channel = transport.open_session()

    if sys.platform in ('cygwin', 'win32'):
        logging.debug('!!! Using WindowsHandler')
        handler = WindowsHandler(channel)
    else:
        logging.debug('!!! Using PosixHandler')
        handler = PosixHandler(channel)

    # Dispatch: sftp subsystem, explicit remote command, or interactive shell.
    if options.subsystem == 'sftp':
        logging.debug('!!! Invoking sftp subsystem')
        channel.invoke_subsystem('sftp')
        handler.transfer()
    elif command:
        logging.debug('!!! Sending command %s' % command)
        channel.exec_command(' '.join(command))
        handler.transfer()
    else:
        logging.debug('!!! Opening shell')
        channel.get_pty()
        channel.invoke_shell()
        handler.shell()

    logging.debug('!!! Done')
    status = channel.recv_exit_status()
    client.close()

    return status
if __name__ == '__main__':
    # Propagate the remote command's exit status to our caller. Previously the
    # status returned by main() was discarded, so rbssh always exited with 0
    # even when the remote command failed.
    sys.exit(main())
# ... with blackjack, and hookers.
| |
'''Tests for VCDWriter.'''
import os
import sys
import timeit
import pytest
from vcd.writer import (
CompoundVectorVariable,
ScopeType,
TimescaleMagnitude,
TimescaleUnit,
Variable,
VCDPhaseError,
VCDWriter,
VectorVariable,
)
def split_lines(capsys):
    """Return the stdout captured so far as a list of lines."""
    captured_out, _ = capsys.readouterr()
    return captured_out.splitlines()
def test_vcd_init(capsys):
    """Writer construction succeeds; an invalid default_scope_type raises."""
    VCDWriter(sys.stdout, date='today')
    with pytest.raises(ValueError):
        VCDWriter(sys.stdout, default_scope_type='InVaLiD')
@pytest.mark.parametrize(
    'timescale, expected',
    [
        ('1 us', '1 us'),
        # bare unit defaults to magnitude 1
        ('us', '1 us'),
        # (magnitude, unit) tuple form
        ((1, 'ns'), '1 ns'),
        # no space between magnitude and unit
        ('100ps', '100 ps'),
        # enum magnitude/unit pair
        ((TimescaleMagnitude.ten, TimescaleUnit.femtosecond), '10 fs'),
    ],
)
def test_vcd_timescales(capsys, timescale, expected):
    """All accepted timescale spellings normalize into the $timescale line."""
    with VCDWriter(sys.stdout, date='', timescale=timescale):
        pass
    lines = split_lines(capsys)
    assert lines == ['$timescale {} $end'.format(expected), '$enddefinitions $end']
@pytest.mark.parametrize(
    'timescale, exc_type',
    [
        # only magnitudes 1/10/100 and real VCD units are valid
        ('2 us', ValueError),
        ('1 Gs', ValueError),
        ((), ValueError),
        (('1', 'ns'), ValueError),
        ((1, 'us', 'ns'), ValueError),
        # wholly wrong type
        (100, TypeError),
    ],
)
def test_vcd_timescale_invalid(capsys, timescale, exc_type):
    """Malformed timescale specs raise at construction time."""
    with pytest.raises(exc_type):
        VCDWriter(sys.stdout, timescale=timescale)
def test_vcd_init_empty_date(capsys):
    """An empty date string suppresses the $date section entirely."""
    with VCDWriter(sys.stdout, date=''):
        pass
    captured = capsys.readouterr()[0]
    assert '$date' not in captured
def test_vcd_init_none_date(capsys):
    """date=None produces a default $date section."""
    with VCDWriter(sys.stdout, date=None):
        pass
    captured = capsys.readouterr()[0]
    assert '$date' in captured
def test_vcd_flush(capsys):
    """flush() writes the buffered header followed by the given timestamp."""
    writer = VCDWriter(sys.stdout, date='today')
    # nothing should be emitted before the explicit flush
    assert not split_lines(capsys)
    writer.flush(17)
    emitted = split_lines(capsys)
    assert emitted[-1] == '#17'
def test_vcd_close(capsys):
    """close() emits the header and makes further registration illegal."""
    writer = VCDWriter(sys.stdout, date='')
    assert not split_lines(capsys)
    writer.close()
    assert split_lines(capsys) == ['$timescale 1 us $end', '$enddefinitions $end']
    with pytest.raises(VCDPhaseError):
        writer.register_var('a', 'b', 'integer')
    # closing twice is a no-op and emits nothing further
    writer.close()
    assert not split_lines(capsys)
def test_vcd_change_after_close(capsys):
    """change() and flush() raise VCDPhaseError once the writer is closed."""
    writer = VCDWriter(sys.stdout, date='')
    integer_var = writer.register_var('a', 'b', 'integer')
    assert not split_lines(capsys)
    writer.close()
    with pytest.raises(VCDPhaseError):
        writer.change(integer_var, 1, 1)
    with pytest.raises(VCDPhaseError):
        writer.flush()
def test_vcd_alias_after_close(capsys):
    """register_alias() raises VCDPhaseError once the writer is closed."""
    writer = VCDWriter(sys.stdout)
    integer_var = writer.register_var('a', 'b', 'integer')
    assert not split_lines(capsys)
    writer.close()
    with pytest.raises(VCDPhaseError):
        writer.register_alias('c', 'd', integer_var)
def test_vcd_no_scopes(capsys):
    """Header sections appear in order even with no variables registered."""
    with VCDWriter(sys.stdout, date='today', version='some\nversion', comment='hello'):
        pass
    lines = split_lines(capsys)
    # multi-line version text is emitted indented inside a $version block
    expected_lines = [
        '$comment hello $end',
        '$date today $end',
        '$timescale 1 us $end',
        '$version',
        '\tsome',
        '\tversion',
        '$end',
        '$enddefinitions $end',
    ]
    assert expected_lines == lines
def test_vcd_one_var(capsys):
    """A single 32-bit integer variable is declared and its changes dumped."""
    with VCDWriter(sys.stdout, date='today') as writer:
        counter = writer.register_var('sss', 'nnn', 'integer', 32)
        writer.change(counter, 0, 0)
        writer.change(counter, 1, 10)
    output = split_lines(capsys)
    assert output[-1] == 'b1010 !'
    assert '$var integer 32 ! nnn $end' in output
def test_vcd_invalid_vector_init():
    """Vector variables reject non-integer init values."""
    with VCDWriter(sys.stdout) as writer:
        with pytest.raises(ValueError):
            writer.register_var('scope', 'a', 'integer', 8, init='eight')
        with pytest.raises(ValueError):
            writer.register_var('scope', 'b', 'integer', 8, init=8.0)
def test_vcd_no_duplicates(capsys):
    """Consecutive changes to the same value are deduplicated in the dump."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        var = vcd.register_var('sss', 'nnn', 'integer', 32)
        vcd.change(var, 0, 'x')
        vcd.change(var, 1, 10)
        # repeats of 10 at t=2,3 and 15 at t=5 must not be re-emitted
        vcd.change(var, 2, 10)
        vcd.change(var, 3, 10)
        vcd.change(var, 4, 15)
        vcd.change(var, 5, 15)
        vcd.change(var, 6, 10)
    assert split_lines(capsys) == [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module sss $end',
        '$var integer 32 ! nnn $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        '$end',
        '#1',
        'b1010 !',
        '#4',
        'b1111 !',
        '#6',
        'b1010 !',
    ]
def test_vcd_aliases(capsys):
    """Aliases re-declare the same id code ('!') under other scopes/names,
    so a single change drives all aliased declarations."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        var = vcd.register_var('sss', 'nnn', 'integer', 32)
        vcd.register_alias('sss', 'mmm', var)
        vcd.register_alias('ttt', 'nnn', var)
        vcd.change(var, 0, 'x')
        vcd.change(var, 1, 10)
        vcd.change(var, 2, 11)
    assert split_lines(capsys) == [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module sss $end',
        '$var integer 32 ! nnn $end',
        '$var integer 32 ! mmm $end',
        '$upscope $end',
        '$scope module ttt $end',
        '$var integer 32 ! nnn $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        '$end',
        '#1',
        'b1010 !',
        '#2',
        'b1011 !',
    ]
def test_vcd_scopes(capsys):
    """Nested scopes are emitted in registration order with their set types;
    a non-string, non-sequence scope argument raises TypeError."""
    with VCDWriter(sys.stdout, date='today') as writer:
        writer.set_scope_type('eee.fff.ggg', ScopeType.task)
        writer.register_var('aaa.bbb', 'nn0', 'integer', 8, init='z')
        writer.register_var('aaa.bbb', 'nn1', 'integer', 8)
        writer.register_var('aaa', 'nn2', 'integer', 8)
        writer.register_var('aaa.bbb.ccc', 'nn3', 'integer', 8)
        writer.register_var('aaa.bbb.ddd', 'nn4', 'integer', 8)
        writer.register_var('eee.fff.ggg', 'nn5', 'integer', 8)
        writer.set_scope_type('aaa.bbb', 'fork')
        with pytest.raises(TypeError):
            writer.set_scope_type({'a', 'b', 'c'}, 'module')
    prefixes = [
        '$date',
        '$timescale',
        '$scope module aaa',
        '$var',
        '$scope fork bbb',
        '$var',
        '$var',
        '$scope module ccc',
        '$var',
        '$upscope',
        '$scope module ddd',
        '$var',
        '$upscope',
        '$upscope',
        '$upscope',
        '$scope module eee',
        '$scope module fff',
        '$scope task ggg',
        '$var',
        '$upscope',
        '$upscope',
        '$upscope',
        '$enddefinitions',
        '#0',
        '$dumpvars',
        'bz !',
        'bx "',
        'bx #',
        'bx $',
        'bx %',
        'bx &',
        '$end',
    ]
    for actual, prefix in zip(split_lines(capsys), prefixes):
        print(actual, '|', prefix)
        assert actual.startswith(prefix)
def test_vcd_init_timestamp(capsys):
    """The initial $dumpvars section is stamped with init_timestamp."""
    with VCDWriter(sys.stdout, date='today', init_timestamp=123) as writer:
        writer.register_var('a', 'n', 'integer', 8, init='z')
    prefixes = [
        '$date',
        '$timescale',
        '$scope module a',
        '$var integer 8 ! n $end',
        '$upscope',
        '$enddefinitions',
        '#123',
        '$dumpvars',
        'bz !',
        '$end',
    ]
    for actual, prefix in zip(split_lines(capsys), prefixes):
        print(actual, '|', prefix)
        assert actual.startswith(prefix)
def test_vcd_scope_tuple(capsys):
    """Scopes may be given as tuples as well as dot-separated strings."""
    with VCDWriter(sys.stdout, date='today') as writer:
        writer.register_var(('aaa',), 'nn0', 'integer', 8)
        writer.register_var(('aaa', 'bbb'), 'nn1', 'integer', 8)
        writer.register_var('aaa.bbb.ccc', 'nn2', 'integer', 8)
    prefixes = [
        '$date',
        '$timescale',
        '$scope module aaa',
        '$var',
        '$scope module bbb',
        '$var',
        '$scope module ccc',
        '$var',
    ]
    for actual, prefix in zip(split_lines(capsys), prefixes):
        assert actual.startswith(prefix)
def test_vcd_late_registration(capsys):
    """Variables may be registered until the timestamp first advances."""
    with VCDWriter(sys.stdout, date='today') as writer:
        first = writer.register_var('aaa.bbb', 'nn0', 'integer', 8)
        writer.change(first, 0, 123)
        # Still at t0, registration okay...
        writer.register_var('aaa.bbb', 'nn1', 'integer', 8)
        # ...but once time moves past t0 it is a phase error.
        writer.change(first, 1, 210)
        with pytest.raises(VCDPhaseError):
            writer.register_var('aaa.bbb', 'nn2', 'integer', 8)
def test_vcd_late_alias_registration(capsys):
    """Aliases may be registered until the timestamp first advances."""
    with VCDWriter(sys.stdout, date='today') as writer:
        first = writer.register_var('aaa.bbb', 'nn0', 'integer', 8)
        writer.change(first, 0, 123)
        # Still at t0, registration okay...
        writer.register_alias('aaa.bbb', 'nn1', first)
        # ...but once time moves past t0 it is a phase error.
        writer.change(first, 1, 210)
        with pytest.raises(VCDPhaseError):
            writer.register_alias('aaa.bbb', 'nn2', first)
def test_vcd_missing_size(capsys):
    """A wire variable without an explicit size is rejected."""
    with VCDWriter(sys.stdout, date='today') as writer:
        with pytest.raises(ValueError):
            writer.register_var('a.b.c', 'name', 'wire', size=None)
def test_vcd_invalid_var_type(capsys):
    """Unknown variable types are rejected at registration time."""
    with VCDWriter(sys.stdout, date='today') as writer:
        with pytest.raises(ValueError):
            writer.register_var('aaa.bbb', 'nn0', 'InVaLiD', 8)
def test_vcd_invalid_scope_type(capsys):
    """Unknown scope types are rejected by set_scope_type()."""
    with VCDWriter(sys.stdout, date='today') as writer:
        with pytest.raises(ValueError):
            writer.set_scope_type('aaa.bbb', 'InVaLiD')
def test_vcd_duplicate_var_name(capsys):
    """Registering the same name twice in one scope raises KeyError."""
    with VCDWriter(sys.stdout, date='today') as writer:
        writer.register_var('aaa.bbb', 'nn0', 'integer', 8)
        with pytest.raises(KeyError):
            writer.register_var('aaa.bbb', 'nn0', 'wire', 1)
def test_vcd_duplicate_alias(capsys):
    """An alias may not collide with any existing name in the scope."""
    with VCDWriter(sys.stdout, date='today') as writer:
        original = writer.register_var('aaa.bbb', 'nn0', 'integer', 8)
        writer.register_alias('aaa.bbb', 'nn1', original)
        for taken in ('nn0', 'nn1'):
            with pytest.raises(KeyError):
                writer.register_alias('aaa.bbb', taken, original)
def test_vcd_change_out_of_order(capsys):
    """Timestamps passed to change() must not go backwards."""
    with VCDWriter(sys.stdout, date='') as writer:
        wire = writer.register_var('scope', 'a', 'wire', 1)
        writer.change(wire, 3, True)
        with pytest.raises(VCDPhaseError):
            writer.change(wire, 1, False)
def test_vcd_register_int(capsys):
    """Integer variables default to 64 bits and an 'x' initial value."""
    with VCDWriter(sys.stdout, date='') as writer:
        writer.register_var('scope', 'a', 'integer')
    captured = capsys.readouterr()[0]
    assert '$var integer 64 ! a $end' in captured
    assert 'bx !' in captured
def test_vcd_register_int_tuple(capsys):
    """A tuple size yields a vector whose width is the sum of the parts."""
    with VCDWriter(sys.stdout, date='') as writer:
        writer.register_var('scope', 'a', 'integer', (8, 4, 1))
    captured = capsys.readouterr()[0]
    assert '$var integer 13 ! a $end' in captured
    assert 'bx !' in captured
def test_vcd_register_int_tuple_invalid_init_type():
    """A compound-size variable rejects a scalar init value."""
    with VCDWriter(sys.stdout, date='') as writer:
        with pytest.raises(ValueError):
            writer.register_var('scope', 'a', 'integer', (8, 4, 1), 0)
def test_vcd_register_int_tuple_invalid_init_len():
    """A compound init tuple must match the size tuple's length."""
    with VCDWriter(sys.stdout, date='') as writer:
        with pytest.raises(ValueError):
            writer.register_var('scope', 'a', 'integer', (8, 4, 1), (0, 0, 0, 0))
def test_vcd_register_int_tuple_invalid_init_values():
    """Each element of a compound init tuple must be an integer-like value."""
    with VCDWriter(sys.stdout, date='') as writer:
        with pytest.raises(ValueError):
            writer.register_var('scope', 'a', 'integer', (8, 4, 1), (1.0, 0, 0))
def test_vcd_register_real(capsys):
    """Real variables are always 64-bit; init must be numeric."""
    with VCDWriter(sys.stdout, date='') as writer:
        writer.register_var('scope', 'a', 'real')
        writer.register_var('scope', 'b', 'real', init=123)
        writer.register_var('scope', 'c', 'real', init=1.23)
        with pytest.raises(ValueError):
            writer.register_var('scope', 'f', 'real', init='real')
    tail = [
        '$scope module scope $end',
        '$var real 64 ! a $end',
        '$var real 64 " b $end',
        '$var real 64 # c $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'r0 !',
        'r123 "',
        'r1.23 #',
        '$end',
    ]
    output = split_lines(capsys)
    assert output[-len(tail):] == tail
def test_vcd_register_event(capsys):
    """Events are 1-bit, accept boolean init, and never appear in $dumpvars."""
    with VCDWriter(sys.stdout, date='') as writer:
        writer.register_var('scope', 'a', 'event')
        writer.register_var('scope', 'b', 'event', init=True)
        with pytest.raises(ValueError):
            writer.register_var('scope', 'f', 'event', init='yes')
    tail = [
        '$scope module scope $end',
        '$var event 1 ! a $end',
        '$var event 1 " b $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        '$end',
    ]
    output = split_lines(capsys)
    assert output[-len(tail):] == tail
def test_vcd_bad_event():
    """An event may only be changed to a truthy value."""
    with VCDWriter(sys.stdout, date='') as writer:
        event = writer.register_var('scope', 'a', 'event')
        writer.change(event, 1, True)
        with pytest.raises(ValueError):
            writer.change(event, 2, False)
def test_vcd_multiple_events(capsys):
    """Events are not deduplicated: every change() at the same timestamp
    emits its own '1!' record."""
    with VCDWriter(sys.stdout, date='') as vcd:
        var = vcd.register_var('scope', 'a', 'event')
        vcd.change(var, 1, True)
        vcd.change(var, 2, True)
        vcd.change(var, 2, True)
        vcd.change(var, 2, True)
        vcd.change(var, 3, True)
    expected_lines = [
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var event 1 ! a $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        '$end',
        '#1',
        '1!',
        '#2',
        '1!',
        '1!',
        '1!',
        '#3',
        '1!',
    ]
    assert expected_lines == split_lines(capsys)
def test_vcd_scalar_var(capsys):
    """1-bit integers accept bool, 0/1, 'x', 'z' and None (rendered as 'z');
    other values and float inits are rejected."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('aaa', 'nn0', 'integer', 1)
        vcd.register_var('aaa', 'nn1', 'integer', 1, False)
        with pytest.raises(ValueError):
            vcd.register_var('aaa', 'fff', 'integer', 1, init=1.23)
        vcd.change(v0, 1, True)
        vcd.change(v0, 2, False)
        vcd.change(v0, 3, 'z')
        vcd.change(v0, 4, 'x')
        vcd.change(v0, 5, 0)
        vcd.change(v0, 6, 1)
        with pytest.raises(ValueError):
            vcd.change(v0, 7, 'bogus')
        vcd.change(v0, 7, None)
    lines = split_lines(capsys)
    expected = [
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'x!',
        '0"',
        '$end',
        '#1',
        '1!',
        '#2',
        '0!',
        '#3',
        'z!',
        '#4',
        'x!',
        '#5',
        '0!',
        '#6',
        '1!',
        '#7',
        'z!',
    ]
    assert lines[-len(expected) :] == expected
def test_vcd_real_var(capsys):
    """Real variables accept numeric values only; 'x'/'z' strings raise."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('aaa', 'nn0', 'real', 32)
        v1 = vcd.register_var('aaa', 'nn1', 'real', 64)
        vcd.change(v0, 1, 1234.5)
        vcd.change(v1, 1, 5432.1)
        vcd.change(v0, 2, 0)
        vcd.change(v1, 2, 1)
        vcd.change(v0, 3, 999.9)
        vcd.change(v1, 3, -999.9)
        with pytest.raises(ValueError):
            vcd.change(v0, 4, 'z')
        with pytest.raises(ValueError):
            vcd.change(v0, 4, 'x')
        with pytest.raises(ValueError):
            vcd.change(v0, 4, 'InVaLiD')
    lines = split_lines(capsys)
    expected_last = [
        '#1',
        'r1234.5 !',
        'r5432.1 "',
        '#2',
        'r0 !',
        'r1 "',
        '#3',
        'r999.9 !',
        'r-999.9 "',
    ]
    assert lines[-len(expected_last) :] == expected_last
def test_vcd_integer_var(capsys):
    """Multi-bit integers accept ints (negatives as two's complement),
    'x'/'z'/None and bit strings; out-of-range or float values raise."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('aaa', 'nn0', 'integer', 16)
        v1 = vcd.register_var('aaa', 'nn1', 'integer', 8)
        vcd.change(v0, 1, 4)
        vcd.change(v1, 1, -4)
        vcd.change(v0, 2, 'z')
        vcd.change(v1, 2, 'X')
        vcd.change(v1, 3, None)
        vcd.change(v0, 3, '1010')
        with pytest.raises(ValueError):
            vcd.change(v1, 4, -129)
        with pytest.raises(ValueError):
            vcd.change(v1, 4, '111100001')  # Too long
        with pytest.raises(ValueError):
            vcd.change(v1, 4, 1.234)
    expected_last = [
        '#1',
        'b100 !',
        'b11111100 "',
        '#2',
        'bz !',
        'bX "',
        '#3',
        'bz "',
        'b1010 !',
    ]
    lines = split_lines(capsys)
    assert lines[-len(expected_last) :] == expected_last
def test_vcd_dump_on_no_op(capsys):
    """Calling dump_on() while dumping is already enabled emits nothing."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('scope', 'a', 'integer', 8)
        vcd.dump_on(0)  # Should be a no-op
        vcd.change(v0, 1, 1)
        vcd.dump_on(2)  # Also a no-op
    expected_lines = [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var integer 8 ! a $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        '$end',
        '#1',
        'b1 !',
    ]
    assert expected_lines == split_lines(capsys)
def test_vcd_dump_off_early(capsys):
    """dump_off() at t0 emits $dumpoff right after $dumpvars; the change at
    t5 is suppressed and its effect only appears in the t10 $dumpon."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('scope', 'a', 'integer', 8, init=7)
        vcd.dump_off(0)
        vcd.change(v0, 5, 1)
        vcd.dump_on(10)
        vcd.change(v0, 15, 2)
    expected_lines = [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var integer 8 ! a $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'b111 !',
        '$end',
        '$dumpoff',
        'bx !',
        '$end',
        '#10',
        '$dumpon',
        'b1 !',
        '$end',
        '#15',
        'b10 !',
    ]
    assert expected_lines == split_lines(capsys)
def test_vcd_dump_off_real(capsys):
    """Real variables are omitted from $dumpoff (no 'x' form exists) and
    reappear with their current value in the following $dumpon."""
    with VCDWriter(sys.stdout, date='') as vcd:
        v0 = vcd.register_var('scope', 'a', 'real')
        vcd.change(v0, 1, 1.0)
        vcd.dump_off(2)
        vcd.change(v0, 3, 3.0)
        vcd.dump_on(4)
        vcd.change(v0, 5, 5.0)
        assert v0.ident == '!'
    expected_lines = [
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var real 64 ! a $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'r0 !',
        '$end',
        '#1',
        'r1 !',
        '#2',
        '$dumpoff',
        '$end',
        '#4',
        '$dumpon',
        'r3 !',
        '$end',
        '#5',
        'r5 !',
    ]
    assert expected_lines == split_lines(capsys)
def test_vcd_dump_off_on(capsys):
    """While dumping is off, changes are tracked but not emitted; $dumpon
    replays current values. dump_off() is idempotent, and events are never
    included in $dumpoff/$dumpon sections."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('scope', 'a', 'integer', 8)
        v1 = vcd.register_var('scope', 'b', 'wire', 1)
        v2 = vcd.register_var('scope', 'c', 'event')
        v3 = vcd.register_var('scope', 'd', 'real', init=1.23)
        vcd.change(v0, 1, 10)
        vcd.change(v1, 2, True)
        vcd.dump_off(4)
        vcd.dump_off(5)  # Idempotent
        vcd.change(v0, 6, 11)
        vcd.change(v1, 7, False)
        vcd.change(v2, 8, True)  # should not show up in next dump
        vcd.dump_on(9)
        vcd.dump_off(10)
        vcd.dump_on(10)
        vcd.change(v0, 11, 12)
        vcd.change(v1, 11, True)
        vcd.change(v3, 11, 3.21)
    expected_lines = [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var integer 8 ! a $end',
        '$var wire 1 " b $end',
        '$var event 1 # c $end',
        '$var real 64 $ d $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        'x"',
        'r1.23 $',
        '$end',
        '#1',
        'b1010 !',
        '#2',
        '1"',
        '#4',
        '$dumpoff',
        'bx !',
        'x"',
        '$end',
        '#9',
        '$dumpon',
        'b1011 !',
        '0"',
        'r1.23 $',
        '$end',
        '#10',
        '$dumpoff',
        'bx !',
        'x"',
        '$end',
        '$dumpon',
        'b1011 !',
        '0"',
        'r1.23 $',
        '$end',
        '#11',
        'b1100 !',
        '1"',
        'r3.21 $',
    ]
    assert expected_lines == split_lines(capsys)
def test_vcd_dump_off_time_order(capsys):
    """dump_off() timestamps must not go backwards; a change made while
    dumping is off produces no output but updates the tracked value."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('scope', 'a', 'integer', 8)
        vcd.dump_off(1)
        with pytest.raises(VCDPhaseError):
            vcd.dump_off(0)
        assert v0.value == 'x'
        vcd.change(v0, 1, 10)
    expected_lines = [
        '$date today $end',
        '$timescale 1 us $end',
        '$scope module scope $end',
        '$var integer 8 ! a $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        '$end',
        '#1',
        '$dumpoff',
        'bx !',
        '$end',
    ]
    assert expected_lines == split_lines(capsys)
def test_variable():
    """The base Variable class leaves format_value() unimplemented."""
    base_var = Variable('ident0', 'integer', 16, 0)
    with pytest.raises(NotImplementedError):
        base_var.format_value(0)
@pytest.mark.parametrize(
    'expected, unsigned, signed',
    [
        ('b0 v', 0, 0),
        ('b1 v', 1, 1),
        ('b10 v', 2, 2),
        ('b11 v', 3, 3),
        ('b100 v', 4, -4),
        ('b101 v', 5, -3),
        ('b110 v', 6, -2),
        ('b111 v', 7, -1),
    ],
)
def test_vector_var_3bit(expected, unsigned, signed):
    """Unsigned and signed ints with the same 3-bit two's-complement
    pattern format to the same VCD string."""
    var = VectorVariable('v', 'integer', 3, unsigned)
    assert expected == var.format_value(unsigned)
    assert expected == var.format_value(signed)
def test_vector_var_3bit_invalid():
    """Values outside the 3-bit signed/unsigned range raise ValueError."""
    variable = VectorVariable('v', 'integer', 3, 0)
    for out_of_range in (8, -5):
        with pytest.raises(ValueError):
            variable.format_value(out_of_range)
@pytest.mark.parametrize(
    'size, value, expected',
    [
        ((8, 4, 1), (0, 0, 0), 'b0 v'),
        ((8, 4, 1), (1, 0, 0), 'b100000 v'),
        ((8, 4, 1), (0, 0, 1), 'b1 v'),
        ((8, 4, 1), (1, 1, 1), 'b100011 v'),
        ((8, 4, 1), ('z', 'x', '-'), 'bzxxxx- v'),
        ((8, 4, 1), ('0', '1', None), 'b1z v'),
        ((8, 4, 1), (0xF, 0, 1), 'b111100001 v'),
        ((8, 4, 1), (None, 'x', None), 'bzxxxxz v'),
        ((8,), (1,), 'b1 v'),
        ((8, 32), (0b1010, 0xFF00FF00), 'b101011111111000000001111111100000000 v'),
    ],
)
def test_compound_vector(size, value, expected):
    """Compound vectors concatenate each field's bits, left-extending
    'x'/'z'/None fields, and strip redundant leading bits overall."""
    var = CompoundVectorVariable('v', 'integer', size, value)
    assert expected == var.format_value(value)
@pytest.mark.parametrize(
    'size, value', [((1, 2, 3), (0, 0)), ((1, 2, 3), (0, 0, 0, 0)), ((1,), (0, 0))]
)
def test_compound_vector_invalid_values(size, value):
    """A value tuple whose length differs from the size tuple raises."""
    variable = CompoundVectorVariable('v', 'integer', size, None)
    with pytest.raises(ValueError):
        variable.format_value(value)
def test_dump_off_compound_vector(capsys):
    """Compound vectors dump as all-'x' while off and replay their tracked
    value (including changes made while off) in the following $dumpon."""
    with VCDWriter(sys.stdout) as vcd:
        v0 = vcd.register_var('aaa', 'n0', 'integer', size=(4, 4, 8), init=None)
        vcd.register_var('aaa', 'n1', 'integer', size=(4, 4, 8), init=('z', 'x', '-'))
        vcd.register_var('aaa', 'n2', 'integer', size=(1, 1), init=(True, False))
        v3 = vcd.register_var('aaa', 'n3', 'integer', size=(1, 2, 3), init='xxx')
        with pytest.raises(ValueError):
            vcd.register_var('aaa', 'n4', 'integer', size=(1, 2, 3), init=(1, 2))
        vcd.change(v0, 1, (0, 0, 0))
        vcd.change(v0, 2, (15, 0, 0xFF))
        vcd.dump_off(3)
        vcd.change(v3, 4, '1-1')
        vcd.dump_on(5)
    expected = [
        '$var integer 16 ! n0 $end',
        '$var integer 16 " n1 $end',
        '$var integer 2 # n2 $end',
        '$var integer 6 $ n3 $end',
        '$upscope $end',
        '$enddefinitions $end',
        '#0',
        '$dumpvars',
        'bx !',
        'bzxxxx-------- "',
        'b10 #',
        'bx $',
        '$end',
        '#1',
        'b0 !',
        '#2',
        'b1111000011111111 !',
        '#3',
        '$dumpoff',
        'bx !',
        'bx "',
        'bx #',
        'bx $',
        '$end',
        '#5',
        '$dumpon',
        'b1111000011111111 !',
        'bzxxxx-------- "',
        'b10 #',
        'b1--001 $',
        '$end',
    ]
    lines = split_lines(capsys)
    assert expected == lines[-len(expected) :]
def test_vcd_string_var(capsys):
    """String variables escape spaces, control characters and backslashes;
    None renders as the empty string and non-strings raise ValueError."""
    with VCDWriter(sys.stdout, date='today') as vcd:
        v0 = vcd.register_var('aaa', 'nn0', 'string')
        vcd.register_var('aaa', 'nn1', 'string', init='foobar')
        with pytest.raises(ValueError):
            vcd.register_var('aaa', 'fff', 'string', init=123)
        vcd.change(v0, 1, 'hello')
        vcd.change(v0, 2, '')
        vcd.change(v0, 3, 'world')
        vcd.change(v0, 4, 'spaces are\tok')
        vcd.change(v0, 5, 'newlines\r\ntoo')
        vcd.change(v0, 6, 'slash\\slash')
        vcd.change(v0, 7, None)
        vcd.change(v0, 8, '!')
        with pytest.raises(ValueError) as e:
            vcd.change(v0, 9, 42)
        assert 'Invalid string value' in e.value.args[0]
        vcd.dump_off(8)
    expected = [
        '#0',
        '$dumpvars',
        's !',
        'sfoobar "',
        '$end',
        '#1',
        'shello !',
        '#2',
        's !',
        '#3',
        'sworld !',
        '#4',
        'sspaces\\x20are\\tok !',
        '#5',
        'snewlines\\r\\ntoo !',
        '#6',
        'sslash\\\\slash !',
        '#7',
        's !',
        '#8',
        's! !',
        '$dumpoff',
        '$end',
    ]
    lines = split_lines(capsys)
    assert expected == lines[-len(expected) :]
def test_execution_speed():
    """Manual test for how fast we can write to a VCD file

    See https://github.com/westerndigitalcorporation/pyvcd/issues/9

    pytest -vvs -k test_execution_speed
    """
    start = timeit.default_timer()
    with open(os.devnull, 'w') as sink:
        with VCDWriter(sink, timescale=(10, 'ns'), date='today') as writer:
            counter = writer.register_var('a.b.c', 'counter', 'integer', size=8)
            compound = writer.register_var(
                'a.b.c', 'compound', 'integer', size=(1, 3, 4)
            )
            for base in range(1000, 300000, 300):
                for offset, value in enumerate(range(10, 200, 2)):
                    writer.change(counter, base + offset, value)
                    writer.change(
                        compound,
                        base + offset,
                        (offset & 0b1, offset & 0b111, offset & 0b1111),
                    )
    print('Elapsed:', timeit.default_timer() - start)
| |
'''@file trainer.py
neural network trainer environment'''
import os
import time
import cPickle as pickle
from abc import ABCMeta, abstractmethod
from math import ceil
import tensorflow as tf
from tensorflow.python.client import device_lib
from nabu.processing import input_pipeline
from nabu.neuralnetworks.trainers import loss_functions
from nabu.neuralnetworks.models.model import Model
from nabu.neuralnetworks.evaluators import evaluator_factory
from nabu.neuralnetworks.components import hooks, ops, constraints
from nabu.tools.default_conf import apply_defaults
class Trainer(object):
'''General class outlining the training environment of a model.'''
__metaclass__ = ABCMeta
    def __init__(self,
                 conf,
                 dataconf,
                 modelconf,
                 evaluatorconf,
                 expdir,
                 server,
                 task_index):
        '''
        NnetTrainer constructor, creates the training graph

        Args:
            conf: the trainer config as a ConfigParser
            dataconf: the data configuration as a ConfigParser
            modelconf: the neural net model configuration
            evaluatorconf: the evaluator configuration for evaluating
                if None no evaluation will be done
            expdir: directory where the summaries will be written
            server: optional server to be used for distributed training
            task_index: optional index of the worker task in the cluster
        '''

        #save some inputs
        self.conf = dict(conf.items('trainer'))

        #apply default configuration: missing keys are filled in from the
        #per-trainer-class defaults file (named after the concrete subclass)
        default = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'defaults',
            type(self).__name__.lower() + '.cfg')
        apply_defaults(self.conf, default)

        self.dataconf = dataconf
        self.evaluatorconf = evaluatorconf
        self.expdir = expdir
        self.server = server
        self.task_index = task_index

        #an optional max-norm constraint on the model weights; the string
        #'None' in the config means no constraint
        if ('norm_constraint' in self.conf
                and self.conf['norm_constraint'] != 'None'):
            constraint = constraints.MaxNorm(int(self.conf['norm_constraint']))
        else:
            constraint = None

        #create the model
        self.model = Model(
            conf=modelconf,
            trainlabels=int(self.conf['trainlabels']),
            constraint=constraint)
    def _create_graph(self):
        '''
        create the trainer computational graph

        Builds the training subgraph (data pipeline, model forward pass,
        loss and update op) and, if an evaluator is configured, the
        validation subgraph with its bookkeeping variables.

        Returns:
            - a dictionary of graph outputs
        '''

        cluster = tf.train.ClusterSpec(self.server.server_def.cluster)

        #the outputs of the graph
        outputs = {}

        device, chief_ps = self._device(cluster)

        #a variable to hold the amount of steps already taken
        outputs['global_step'] = tf.get_variable(
            name='global_step',
            shape=[],
            dtype=tf.int32,
            initializer=tf.constant_initializer(0),
            trainable=False)

        outputs['increment_step'] = outputs['global_step'].assign_add(1).op

        #a flag that lets any worker request early termination
        should_terminate = tf.get_variable(
            name='should_terminate',
            shape=[],
            dtype=tf.bool,
            initializer=tf.constant_initializer(False),
            trainable=False)

        outputs['terminate'] = should_terminate.assign(True).op

        #create an op that measures the memory usage (only meaningful when
        #a local GPU is present)
        if [x for x in device_lib.list_local_devices()
                if x.device_type == 'GPU']:
            outputs['memory_usage']\
                = tf.contrib.memory_stats.MaxBytesInUse()
            outputs['memory_limit'] = tf.contrib.memory_stats.BytesLimit()
        else:
            outputs['memory_usage'] = outputs['memory_limit'] = tf.no_op()

        with tf.device(device):

            #training part
            with tf.variable_scope('train'):

                #create the op to execute when done
                outputs['done'] = self._done(cluster)

                #get a batch of data
                (inputs,
                 input_seq_length,
                 targets,
                 target_seq_length,
                 num_steps,
                 outputs['read_data'],
                 outputs['local_steps']) = self._data(chief_ps)

                #total number of steps: steps per epoch times epochs
                outputs['num_steps'] \
                    = num_steps*int(self.conf['num_epochs'])

                #create a check if training should continue
                outputs['should_stop'] = tf.logical_or(
                    tf.greater_equal(
                        outputs['global_step'],
                        outputs['num_steps']),
                    should_terminate)

                #compute the training outputs of the model
                logits, logit_seq_length = self.model(
                    inputs=inputs,
                    input_seq_length=input_seq_length,
                    targets=targets,
                    target_seq_length=target_seq_length,
                    is_training=True)

                #a variable to scale the learning rate (used to reduce the
                #learning rate in case validation performance drops)
                learning_rate_fact = tf.get_variable(
                    name='learning_rate_fact',
                    shape=[],
                    initializer=tf.constant_initializer(1.0),
                    trainable=False)

                #compute the learning rate with exponential decay and scale
                #with the learning rate factor
                outputs['learning_rate'] = (tf.train.exponential_decay(
                    learning_rate=float(self.conf['initial_learning_rate']),
                    global_step=outputs['global_step'],
                    decay_steps=outputs['num_steps'],
                    decay_rate=float(self.conf['learning_rate_decay']))
                                            * learning_rate_fact)

                #compute the loss
                outputs['loss'] = loss_functions.factory(
                    self.conf['loss'])(
                        targets,
                        logits,
                        logit_seq_length,
                        target_seq_length)

                #subclasses may contribute an extra loss term
                #(NOTE: method name 'aditional_loss' is a historical typo,
                #kept because subclasses override it)
                aditional_loss = self.aditional_loss()
                if aditional_loss is not None:
                    outputs['loss'] += aditional_loss

                #add the regularization losses
                outputs['loss'] += tf.reduce_sum(
                    tf.losses.get_regularization_losses())

                outputs['update_op'] = self._update(
                    loss=outputs['loss'],
                    learning_rate=outputs['learning_rate'],
                    cluster=cluster)

            if self.evaluatorconf.get('evaluator', 'evaluator') != 'None':

                #validation part
                with tf.variable_scope('validate'):

                    #create a variable to save the last step where the model
                    #was validated; initialized so validation is due at step 0
                    validated_step = tf.get_variable(
                        name='validated_step',
                        shape=[],
                        dtype=tf.int32,
                        initializer=tf.constant_initializer(
                            -int(self.conf['valid_frequency'])),
                        trainable=False)

                    #a check if validation is due
                    outputs['should_validate'] = tf.greater_equal(
                        outputs['global_step'] - validated_step,
                        int(self.conf['valid_frequency']))

                    with tf.variable_scope('validation'):
                        outputs['validation_loss'], outputs['update_loss'], \
                            outputs['valbatches'] = self._validate()

                    #update the learning rate factor (halved when validation
                    #performance gets worse)
                    outputs['half_lr'] = learning_rate_fact.assign(
                        learning_rate_fact/2).op

                    #create an operation to update the validated step
                    outputs['update_validated_step'] \
                        = validated_step.assign(
                            outputs['global_step']).op

                    #variable to hold the best validation loss so far
                    outputs['best_validation'] = tf.get_variable(
                        name='best_validation',
                        shape=[],
                        dtype=tf.float32,
                        initializer=tf.constant_initializer(1.79e+308),
                        trainable=False)

                    #op to update the best validation loss
                    outputs['update_best'] \
                        = outputs['best_validation'].assign(
                            outputs['validation_loss']).op

                    #a variable that holds the amount of workers at the
                    #validation point
                    waiting_workers = tf.get_variable(
                        name='waiting_workers',
                        shape=[],
                        dtype=tf.int32,
                        initializer=tf.constant_initializer(0),
                        trainable=False)

                    #an operation to signal a waiting worker
                    outputs['waiting'] = waiting_workers.assign_add(1).op

                    #an operation to set the waiting workers to zero
                    outputs['reset_waiting'] = waiting_workers.initializer

                    #an operation to check if all workers are waiting
                    if 'local' in cluster.as_dict():
                        outputs['all_waiting'] = tf.constant(True)
                    else:
                        outputs['all_waiting'] = tf.equal(
                            waiting_workers,
                            len(cluster.as_dict()['worker'])-1)

                    outputs['val_loss_summary'] = tf.summary.scalar(
                        'validation loss',
                        outputs['validation_loss'])

                #create an operation to initialize validation
                outputs['init_validation'] = tf.variables_initializer(
                    tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                      'validate/validation'))
            else:
                outputs['update_loss'] = None

            tf.summary.scalar('learning rate', outputs['learning_rate'],
                              collections=['training_summaries'])

            #create a histogram for all trainable parameters
            for param in tf.trainable_variables():
                tf.summary.histogram(param.name, param,
                                     collections=['training_summaries'])

            outputs['training_summaries'] = tf.summary.merge_all(
                'training_summaries')

            #merge_all returns None when the collection is empty
            outputs['eval_summaries'] = tf.summary.merge_all('eval_summaries')
            if outputs['eval_summaries'] is None:
                outputs['eval_summaries'] = tf.no_op()

        return outputs
    def _data(self, chief_ps):
        '''
        create the input pipeline

        args:
            -chief_ps: the chief parameter server device, or None when
                training locally (a filename queue is then created here)

        returns:
            - the inputs
            - the input sequence lengths
            - the targets
            - the target sequence lengths
            - the number of global steps in an epoch
            - an operation to read a batch data
            - the number of local steps in this step
        '''

        with tf.name_scope('get_batch'):

            #get the database configurations: every input/target name maps
            #to one or more dataconf sections
            input_names = self.model.conf.get('io', 'inputs').split(' ')
            if input_names == ['']:
                input_names = []
            input_sections = [self.conf[i].split(' ') for i in input_names]
            input_dataconfs = []
            for sectionset in input_sections:
                input_dataconfs.append([])
                for section in sectionset:
                    input_dataconfs[-1].append(
                        dict(self.dataconf.items(section)))
            output_names = self.conf['targets'].split(' ')
            if output_names == ['']:
                output_names = []
            target_sections = [self.conf[o].split(' ') for o in output_names]
            target_dataconfs = []
            for sectionset in target_sections:
                target_dataconfs.append([])
                for section in sectionset:
                    target_dataconfs[-1].append(
                        dict(self.dataconf.items(section)))

            #check if running in distributed model
            if chief_ps is None:

                #get the filenames
                data_queue_elements, _ = input_pipeline.get_filenames(
                    input_dataconfs + target_dataconfs)

                #create the data queue and queue runners
                data_queue = tf.train.string_input_producer(
                    string_tensor=data_queue_elements,
                    shuffle=True,
                    seed=None,
                    capacity=int(self.conf['batch_size'])*2,
                    shared_name='data_queue')
            else:
                with tf.device(chief_ps):

                    #get the data queue (shared_name ties this to the queue
                    #that the parameter server fills)
                    data_queue = tf.FIFOQueue(
                        capacity=int(self.conf['batch_size'])*2,
                        shared_name='data_queue',
                        name='data_queue',
                        dtypes=[tf.string],
                        shapes=[[]])

            #create the input pipeline
            data, seq_length, num_steps, max_length = \
                input_pipeline.input_pipeline(
                    data_queue=data_queue,
                    batch_size=int(self.conf['batch_size']),
                    numbuckets=int(self.conf['numbuckets']),
                    dataconfs=input_dataconfs + target_dataconfs,
                    variable_batch_size=(
                        self.conf['variable_batch_size'] == 'True')
                )

            if int(self.conf['cut_sequence_length']):
                #make sure that all the sequence lengths are the same
                assertops = [tf.assert_equal(seq_length[0], l)
                             for l in seq_length]
                with tf.control_dependencies(assertops):
                    #cut each data component
                    #NOTE(review): num_local_steps ends up being the value
                    #from the last component only — presumably all components
                    #yield the same count; confirm against _cut_sequence
                    read_ops = []
                    components = []
                    component_lengths = []
                    for i, batch in enumerate(data):
                        cut, cut_length, read_op, num_local_steps = \
                            _cut_sequence(
                                batch,
                                seq_length[i],
                                int(self.conf['cut_sequence_length']),
                                max_length)
                        components.append(cut)
                        component_lengths.append(cut_length)
                        read_ops.append(read_op)
            else:
                num_local_steps = tf.constant(1)
                #stage each batch through a single-element queue so reading
                #and consuming the data are decoupled
                queues = [tf.FIFOQueue(1, b.dtype) for b in data]
                length_queues = [tf.FIFOQueue(1, b.dtype) for b in seq_length]
                components = [q.dequeue() for q in queues]
                component_lengths = [q.dequeue() for q in length_queues]
                for i, c in enumerate(components):
                    c.set_shape(data[i].shape)
                    component_lengths[i].set_shape(seq_length[i].shape)

                #create an op to read the data into the queues
                read_ops = [q.enqueue(data[i]) for i, q in enumerate(queues)]
                read_ops += [q.enqueue(seq_length[i])
                             for i, q in enumerate(length_queues)]

            #create an op for reading the data
            read_data = tf.group(*read_ops)

            #split the components back into named inputs and targets: the
            #first len(input_sections) components are inputs, the rest are
            #targets
            inputs = {
                input_names[i]: d
                for i, d in enumerate(components[:len(input_sections)])}
            input_seq_length = {
                input_names[i]: d
                for i, d in enumerate(component_lengths[:len(input_sections)])}
            targets = {
                output_names[i]: d
                for i, d in enumerate(components[len(input_sections):])}
            target_seq_length = {
                output_names[i]: d
                for i, d in enumerate(component_lengths[len(input_sections):])}

        return (inputs,
                input_seq_length,
                targets,
                target_seq_length,
                num_steps,
                read_data,
                num_local_steps)
def _done(self, cluster):
'''
create the op to run when finished
args:
cluster: the tf cluster
returns: the done op
'''
if 'local' in cluster.as_dict():
done = tf.no_op()
else:
#get the done queues
num_servers = len(cluster.as_dict()['ps'])
num_replicas = len(cluster.as_dict()['worker'])
done_ops = []
for i in range(num_servers):
with tf.device('job:ps/task:%d' % i):
done_queue = tf.FIFOQueue(
capacity=num_replicas,
dtypes=[tf.bool],
shapes=[[]],
shared_name='done_queue%d' % i,
name='done_queue%d' % i
)
done_ops.append(done_queue.enqueue(True))
done = tf.group(*done_ops)
return done
def _validate(self):
'''
get the validation loss
returns:
- the validation loss
- an op to update the validation loss
- the number of validation batches
'''
#create the evaluator
evaltype = self.evaluatorconf.get('evaluator', 'evaluator')
if evaltype != 'None':
evaluator = evaluator_factory.factory(evaltype)(
conf=self.evaluatorconf,
dataconf=self.dataconf,
model=self.model
)
return evaluator.evaluate()
def _device(self, cluster):
'''
get the device
args:
cluster: a tf cluster
returns:
- the device specification
- the chief paramater server device
'''
if 'local' in cluster.as_dict():
device = tf.DeviceSpec(job='local')
chief_ps = None
else:
#distributed training
num_servers = len(cluster.as_dict()['ps'])
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
num_tasks=num_servers,
load_fn=tf.contrib.training.byte_size_load_fn
)
device = tf.train.replica_device_setter(
ps_tasks=num_servers,
ps_strategy=ps_strategy,
worker_device='/job:worker/task:%d' % self.task_index,
cluster=cluster)
chief_ps = tf.DeviceSpec(
job='ps',
task=0)
return device, chief_ps
def _update(self, loss, learning_rate, cluster):
'''
create the op to update the model
args:
loss: the loss to minimize
learning_rate: the learning rate
cluster: the tf cluster
returns: the update op
'''
#create the optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
#create an optimizer that aggregates gradients
if int(self.conf['numbatches_to_aggregate']) > 0:
if 'local' in cluster.as_dict():
num_workers = 1
else:
num_workers = len(cluster.as_dict()['worker'])
optimizer = tf.train.SyncReplicasOptimizer(
opt=optimizer,
replicas_to_aggregate=int(
self.conf['numbatches_to_aggregate']),
total_num_replicas=num_workers)
tf.summary.scalar('training_loss', loss,
collections=['training_summaries'])
#get the list of trainable variables
trainable = tf.trainable_variables()
#get the list of variables to be removed from the trainable
#variables
untrainable = tf.get_collection('untrainable')
#remove the variables
trainable = [var for var in trainable
if var not in untrainable]
#compute the gradients
grads_and_vars = optimizer.compute_gradients(
loss=loss,
var_list=trainable)
with tf.variable_scope('clip'):
#clip the gradients
grads_and_vars = [(tf.clip_by_value(grad, -1., 1.), var)
for grad, var in grads_and_vars]
#opperation to apply the gradients
apply_gradients_op = optimizer.apply_gradients(
grads_and_vars=grads_and_vars,
name='apply_gradients')
#all remaining operations with the UPDATE_OPS GraphKeys
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#create an operation to update the gradients, the batch_loss
#and do all other update ops
update_op = tf.group(
*([apply_gradients_op] + update_ops),
name='update')
return update_op
    def train(self, testing=False):
        '''train the model

        Runs the (possibly distributed) training loop: builds the graph,
        opens a MonitoredTrainingSession, periodically validates the model
        (chief worker only), optionally halves the learning rate or rolls
        back to the last validated checkpoint when validation degrades, and
        finally pickles the model object to <expdir>/model/model.pkl.

        args:
            testing: if true only the graph will be created for debugging
                purposes (the method returns before any session is opened)
        '''

        #look for the master if distributed training is done
        master = self.server.target

        #session configuration: grow GPU memory on demand and allow TF to
        #fall back to another device when an op has no kernel for the
        #requested one
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True

        #number of times validation performance was worse
        num_tries = 0

        #check if this is the chief worker (task 0)
        is_chief = self.task_index == 0

        #create the graph
        graph = tf.Graph()

        with graph.as_default():
            outputs = self._create_graph()
            scaffold = tf.train.Scaffold()

        #debug mode: only the graph construction was exercised
        if testing:
            return

        with graph.as_default():
            #create a hook for saving the final model
            save_hook = hooks.SaveAtEnd(
                os.path.join(self.expdir, 'model', 'network.ckpt'),
                self.model.variables)

            #create a hook for saving and restoring the validated model
            validation_hook = hooks.ValidationSaveHook(
                os.path.join(self.expdir, 'logdir', 'validated.ckpt'),
                self.model)

            with tf.train.MonitoredTrainingSession(
                master=master,
                is_chief=is_chief,
                checkpoint_dir=os.path.join(self.expdir, 'logdir'),
                scaffold=scaffold,
                hooks=[hooks.StopHook(outputs['done'])] + self.hooks(outputs),
                chief_only_hooks=[save_hook, validation_hook] \
                    + self.chief_only_hooks(outputs),
                config=config) as sess:

                #create the summary writer
                summary_writer = tf.summary.FileWriter(
                    os.path.join(self.expdir, 'logdir'), graph)

                #start the training loop
                #pylint: disable=E1101
                while not (sess.should_stop() or
                           outputs['should_stop'].eval(session=sess)):

                    #check if validation is due
                    if (outputs['update_loss'] is not None
                            and outputs['should_validate'].eval(session=sess)):
                        #only the chief runs validation; the other workers
                        #wait for it to finish (see the else branch below)
                        if is_chief:
                            print ('WORKER %d: validating model'
                                   % self.task_index)

                            #get the previous validation loss
                            prev_val_loss = outputs['best_validation'].eval(
                                session=sess)

                            #initialize validation
                            outputs['init_validation'].run(session=sess)

                            #compute the validation loss over all batches
                            for i in range(outputs['valbatches']):
                                _, summary = sess.run(fetches=[
                                    outputs['update_loss'],
                                    outputs['eval_summaries']])
                                if summary is not None:
                                    summary_writer.add_summary(summary, i)
                            summary, global_step = sess.run(fetches=[
                                outputs['val_loss_summary'],
                                outputs['global_step']
                            ])
                            summary_writer.add_summary(summary, global_step)

                            #get the current validation loss
                            validation_loss = outputs['validation_loss'].eval(
                                session=sess)

                            print ('WORKER %d: validation loss: %f' %
                                   (self.task_index, validation_loss))

                            #check if the validation loss is better
                            if validation_loss >= prev_val_loss:

                                print ('WORKER %d: validation loss is worse' %
                                       self.task_index)

                                #check how many times validation performance was
                                #worse; terminate when the configured budget of
                                #tries is exhausted
                                if self.conf['num_tries'] != 'None':
                                    if num_tries == int(self.conf['num_tries']):
                                        validation_hook.restore()
                                        print ('WORKER %d: terminating training'
                                               % self.task_index)
                                        outputs['terminate'].run(session=sess)
                                        break

                                num_tries += 1

                                if self.conf['go_back'] == 'True':

                                    #wait untill all workers are at validation
                                    #point
                                    while not outputs['all_waiting'].eval(
                                            session=sess):
                                        time.sleep(1)
                                    outputs['reset_waiting'].run(session=sess)

                                    print ('WORKER %d: loading previous model'
                                           % self.task_index)

                                    #load the previous model
                                    validation_hook.restore()
                                else:
                                    outputs['update_validated_step'].run(
                                        session=sess)

                                if self.conf['valid_adapt'] == 'True':
                                    print ('WORKER %d: halving learning rate'
                                           % self.task_index)
                                    outputs['half_lr'].run(session=sess)
                                    validation_hook.save()

                            else:
                                #validation improved: optionally reset the
                                #retry counter and store the validated model
                                if self.conf['reset_tries'] == 'True':
                                    num_tries = 0

                                #set the validated step
                                outputs['update_validated_step'].run(
                                    session=sess)
                                outputs['update_best'].run(session=sess)
                                outputs['reset_waiting'].run(session=sess)

                                #store the validated model
                                validation_hook.save()

                        else:
                            #NOTE(review): this reads self.update_loss while
                            #the chief branch reads outputs['update_loss'];
                            #verify both refer to the same condition
                            if (self.conf['go_back'] == 'True'
                                    and self.update_loss is not None):
                                #signal that this worker reached the
                                #validation point, then wait
                                outputs['waiting'].run(session=sess)
                                while (
                                        outputs['should_validate'].eval(
                                            session=sess)
                                        and not
                                        outputs['should_stop'].eval(
                                            session=sess)):
                                    time.sleep(1)

                                if outputs['should_stop'].eval(session=sess):
                                    break

                    #start time
                    start = time.time()

                    #read in the next batch of data
                    local_steps, _ = sess.run([outputs['local_steps'],
                                               outputs['read_data']])

                    for _ in range(local_steps):
                        #update the model
                        _, loss, lr, global_step, memory, limit, summary = \
                            sess.run(
                                fetches=[outputs['update_op'],
                                         outputs['loss'],
                                         outputs['learning_rate'],
                                         outputs['global_step'],
                                         outputs['memory_usage'],
                                         outputs['memory_limit'],
                                         outputs['training_summaries']])
                        summary_writer.add_summary(summary, global_step)

                        #report peak memory usage when available
                        if memory is not None:
                            memory_line = '\n\t peak memory usage: %d/%d MB' % (
                                memory/1e6,
                                limit/1e6
                            )
                        else:
                            memory_line = ''

                        print(('WORKER %d: step %d/%d loss: %f, learning rate:'
                               ' %f \n\t time elapsed: %f sec%s')
                              %(self.task_index,
                                global_step,
                                outputs['num_steps'],
                                loss, lr, time.time()-start,
                                memory_line))

                    outputs['increment_step'].run(session=sess)

        #store the model file (the python object, not the TF checkpoint)
        modelfile = os.path.join(self.expdir, 'model', 'model.pkl')
        with open(modelfile, 'wb') as fid:
            pickle.dump(self.model, fid)
    @abstractmethod
    def chief_only_hooks(self, outputs):
        '''create the session hooks that should run on the chief worker only

        Args:
            outputs: the outputs generated by the create graph method

        Returns:
            a list of hooks (may be empty)'''
    @abstractmethod
    def hooks(self, outputs):
        '''create the session hooks that should run on every worker

        Args:
            outputs: the outputs generated by the create graph method

        Returns:
            a list of hooks (may be empty)
        '''
    @abstractmethod
    def aditional_loss(self):
        '''add an additional loss term to the training loss

        NOTE(review): the method name keeps its original (misspelled) form
        because subclasses override and callers invoke it by this name.

        returns:
            the additional loss or None'''
class ParameterServer(object):
    '''a class for parameter servers

    A parameter server hosts shared state for distributed training: the
    chief parameter server (task 0) additionally owns the shared input data
    queue.  Each parameter server also owns a "done" queue that the workers
    enqueue into when they finish, so join() can block until all workers
    are done.
    '''

    def __init__(self,
                 conf,
                 modelconf,
                 dataconf,
                 server,
                 task_index):
        '''
        NnetTrainer constructor, creates the training graph

        Args:
            conf: the trainer config
            modelconf: the model configuration
            dataconf: the data configuration as a ConfigParser
            server: optional server to be used for distributed training
            task_index: optional index of the worker task in the cluster
        '''

        self.graph = tf.Graph()
        self.server = server
        self.task_index = task_index

        #read the config
        conf = dict(conf.items('trainer'))

        #distributed training: one done-queue slot per worker replica
        cluster = tf.train.ClusterSpec(server.server_def.cluster)
        num_replicas = len(cluster.as_dict()['worker'])

        with self.graph.as_default():

            #the chief parameter server should create the data queue
            if task_index == 0:
                #get the database configurations
                input_names = modelconf.get('io', 'inputs').split(' ')
                if input_names == ['']:
                    input_names = []
                input_sections = [conf[i].split(' ') for i in input_names]
                input_dataconfs = []
                for sectionset in input_sections:
                    input_dataconfs.append([])
                    for section in sectionset:
                        input_dataconfs[-1].append(
                            dict(dataconf.items(section)))

                output_names = conf['targets'].split(' ')
                if output_names == ['']:
                    output_names = []
                target_sections = [conf[o].split(' ') for o in output_names]
                target_dataconfs = []
                for sectionset in target_sections:
                    target_dataconfs.append([])
                    for section in sectionset:
                        target_dataconfs[-1].append(
                            dict(dataconf.items(section)))

                #get the filenames
                data_queue_elements, _ = input_pipeline.get_filenames(
                    input_dataconfs + target_dataconfs)

                #create the data queue and queue runners; the shared_name
                #makes the queue visible to the worker tasks
                tf.train.string_input_producer(
                    string_tensor=data_queue_elements,
                    shuffle=True,
                    seed=None,
                    capacity=int(conf['batch_size'])*2,
                    shared_name='data_queue')

            #create a queue for the workers to signify that they are done
            done_queue = tf.FIFOQueue(
                capacity=num_replicas,
                dtypes=[tf.bool],
                shapes=[[]],
                shared_name='done_queue%d' % task_index,
                name='done_queue%d' % task_index
            )

            #dequeuing num_replicas elements blocks until every worker has
            #enqueued its "done" token
            self.wait_op = done_queue.dequeue_many(num_replicas).op

            self.scaffold = tf.train.Scaffold()

    def join(self):
        '''wait for the workers to finish'''

        with self.graph.as_default():
            with tf.train.MonitoredTrainingSession(
                    master=self.server.target,
                    is_chief=False,
                    scaffold=self.scaffold) as sess:
                self.wait_op.run(session=sess)
def _cut_sequence(sequence, sequence_length, cut_length, max_length):
    '''cut a sequence into equal parts and route them through FIFO queues

    The sequence is padded to a multiple of cut_length, split into numcuts
    pieces, and the pieces (plus their per-piece lengths) are pushed through
    two parallel FIFO queues so they can be consumed one cut at a time.

    args:
        sequence: a [batch_size x time x ...] tensor
        sequence_length: a [batch_size] tensor containing the sequence length
        cut_length: int, the length of each sequence piece
        max_length: int, the maximum length the sequence can be

    returns:
        - an element from the queue
        - the sequence length from the element in the queue
        - an op to enqueue the cut sequences
        - the number of elements in the queue
    '''

    #pad the sequence to a multiple of cut_length
    numcuts = tf.to_int32(tf.ceil(tf.shape(sequence)[1]/cut_length))
    length = numcuts*cut_length
    padded = ops.pad_to(sequence, length, 1)

    #cut the data into equal parts into a TensorArray
    #NOTE(review): this concatenates the *full* input shape with
    #[cut_length] and the trailing dims; confirm the intended element shape
    #(it looks like only the batch dimension should precede cut_length).
    element_shape = sequence.shape.concatenate(
        tf.TensorShape([cut_length])).concatenate(sequence.shape[2:])
    cut = tf.TensorArray(
        dtype=sequence.dtype,
        size=numcuts,
        element_shape=element_shape
    )
    lengths = tf.fill([numcuts], cut_length)
    cut = cut.split(padded, lengths)

    #put the tensorarray values in the FIFOQueue.
    queue = tf.FIFOQueue(
        capacity=int(ceil(float(max_length)/cut_length)),
        dtypes=sequence.dtype
    )
    enqueue_op = queue.enqueue_many(cut.stack())
    element = queue.dequeue()
    element.set_shape(element_shape)

    #get the sequence lengths of the queue elements: cumulative cut lengths
    #clipped to the true sequence length, then differenced back to
    #per-piece lengths
    batch_size = tf.size(sequence_length)
    element_lengths = tf.fill([numcuts, batch_size], cut_length)
    element_lengths = tf.cumsum(element_lengths)
    element_lengths = tf.minimum(element_lengths, sequence_length)
    element_lengths = element_lengths - tf.pad(element_lengths[:-1],
                                               [[1, 0], [0, 0]])

    #FIX: the original wrote ``length_queue = queue = tf.FIFOQueue(...)``,
    #silently rebinding ``queue`` (the data queue above) to the length
    #queue; the alias was never used but was confusing, so the second
    #queue is now bound to ``length_queue`` only.
    length_queue = tf.FIFOQueue(
        capacity=int(ceil(float(max_length)/cut_length)),
        dtypes=sequence_length.dtype
    )
    enqueue_length = length_queue.enqueue_many(element_lengths)
    enqueue_op = tf.group(*[enqueue_op, enqueue_length])
    element_length = length_queue.dequeue()

    return element, element_length, enqueue_op, numcuts
| |
import cgi
from owslib.etree import etree
from datetime import datetime
from urllib import urlencode
from owslib import ows
from owslib.crs import Crs
from owslib.fes import FilterCapabilities
from owslib.util import openURL, testXMLValue, nspath_eval, nspath, extract_time
from owslib.namespaces import Namespaces
def get_namespaces():
    """Build the XML namespace map used when parsing SOS 1.0 documents.

    The generic OWS entry is pinned to the OWS 1.1.0 namespace, which is
    what SOS 1.0.0 capabilities documents use.
    """
    manager = Namespaces()
    ns = manager.get_namespaces(
        ["ogc", "sml", "gml", "om", "sos", "swe", "xlink"])
    ns["ows"] = manager.get_namespace("ows110")
    return ns
# Module-level namespace map shared by all parsing code below.
namespaces = get_namespaces()
class SensorObservationService_1_0_0(object):
    """
    Abstraction for OGC Sensor Observation Service (SOS), version 1.0.0.
    Implements ISensorObservationService.

    Capabilities are fetched from the server (or parsed from a supplied XML
    string) at construction time and exposed through the ``identification``,
    ``provider``, ``operations``, ``filters``, ``contents`` and
    ``offerings`` attributes.
    """

    def __new__(self, url, version, xml=None, username=None, password=None):
        """overridden __new__ method"""
        obj = object.__new__(self)
        obj.__init__(url, version, xml, username, password)
        return obj

    def __getitem__(self, id):
        ''' check contents dictionary to allow dict like access to service observational offerings'''
        # BUG FIX: the original tested the undefined name ``name`` here,
        # which raised NameError on every lookup; test the ``id`` argument.
        if id in self.__getattribute__('contents').keys():
            return self.__getattribute__('contents')[id]
        else:
            # call-form raise works on both Python 2 and 3
            raise KeyError("No Observational Offering with id: %s" % id)

    def __init__(self, url, version='1.0.0', xml=None, username=None, password=None):
        """Initialize.

        url      -- base URL of the SOS endpoint
        version  -- service version (only '1.0.0' is supported here)
        xml      -- optional capabilities document; when given, no request
                    is made to the server
        username/password -- optional HTTP credentials
        """
        self.url = url
        self.username = username
        self.password = password
        self.version = version
        self._capabilities = None

        # Authentication handled by Reader
        reader = SosCapabilitiesReader(
            version=self.version, url=self.url, username=self.username, password=self.password
        )

        if xml:  # read from stored xml
            self._capabilities = reader.read_string(xml)
        else:  # read from server
            self._capabilities = reader.read(self.url)

        # Avoid building metadata if the response is an Exception
        if self._capabilities.tag == nspath_eval("ows:ExceptionReport", namespaces):
            raise ows.ExceptionReport(self._capabilities)

        # build metadata objects
        self._build_metadata()

    def getOperationByName(self, name):
        """Return a named operation item (case sensitive).

        Raises KeyError when no operation with that exact name exists.
        """
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError("No operation named %s" % name)

    def _build_metadata(self):
        """
        Set up capabilities metadata objects
        """
        # ows:ServiceIdentification metadata
        service_id_element = self._capabilities.find(nspath_eval('ows:ServiceIdentification', namespaces))
        self.identification = ows.ServiceIdentification(service_id_element)

        # ows:ServiceProvider metadata
        service_provider_element = self._capabilities.find(nspath_eval('ows:ServiceProvider', namespaces))
        self.provider = ows.ServiceProvider(service_provider_element)

        # ows:OperationsMetadata metadata
        self.operations = []
        for elem in self._capabilities.findall(nspath_eval('ows:OperationsMetadata/ows:Operation', namespaces)):
            self.operations.append(ows.OperationsMetadata(elem))

        # sos:Filter_Capabilities (optional in the document)
        filters = self._capabilities.find(nspath_eval('sos:Filter_Capabilities', namespaces))
        if filters is not None:
            self.filters = FilterCapabilities(filters)
        else:
            self.filters = None

        # sos:Contents metadata: offerings keyed by id plus an ordered list
        self.contents = {}
        self.offerings = []
        for offering in self._capabilities.findall(nspath_eval('sos:Contents/sos:ObservationOfferingList/sos:ObservationOffering', namespaces)):
            off = SosObservationOffering(offering)
            self.contents[off.id] = off
            self.offerings.append(off)

    def describe_sensor(self, outputFormat=None,
                        procedure=None,
                        method='Get',
                        **kwargs):
        """Issue a DescribeSensor request and return the raw response.

        Parameters
        ----------
        outputFormat : string
            Required. Desired output format for the sensor description.
        procedure : string
            Required. Identifier of the procedure (sensor) to describe.
        method : string
            Optional. HTTP DCP method name: Get or Post.
        **kwargs : extra arguments
            anything else e.g. vendor specific parameters

        Raises ows.ExceptionReport when the service returns one.
        """
        try:
            base_url = next((m.get('url') for m in self.getOperationByName('DescribeSensor').methods if m.get('type').lower() == method.lower()))
        except StopIteration:
            # fall back to the service base URL when the capabilities do
            # not advertise an endpoint for this method
            base_url = self.url

        request = {'service': 'SOS', 'version': self.version, 'request': 'DescribeSensor'}

        # Required Fields
        assert isinstance(outputFormat, str)
        request['outputFormat'] = outputFormat

        assert isinstance(procedure, str)
        request['procedure'] = procedure

        # Optional Fields
        if kwargs:
            for kw in kwargs:
                request[kw] = kwargs[kw]

        data = urlencode(request)

        response = openURL(base_url, data, method, username=self.username, password=self.password).read()
        tr = etree.fromstring(response)

        if tr.tag == nspath_eval("ows:ExceptionReport", namespaces):
            raise ows.ExceptionReport(tr)

        return response

    def get_observation(self, responseFormat=None,
                        offerings=None,
                        observedProperties=None,
                        eventTime=None,
                        method='Get',
                        **kwargs):
        """Issue a GetObservation request and return the raw response.

        Parameters
        ----------
        responseFormat : string
            Required. Output format; provide one that is available for all
            requested offerings.
        offerings : list of string
            Required. Non-empty list of offering ids.
        observedProperties : list of string
            Required. Non-empty list of observed property URIs.
        eventTime : string
            Optional time filter.
        method : string
            Optional. HTTP DCP method name: Get or Post.
        **kwargs : extra arguments
            anything else e.g. vendor specific parameters
        """
        try:
            base_url = next((m.get('url') for m in self.getOperationByName('GetObservation').methods if m.get('type').lower() == method.lower()))
        except StopIteration:
            base_url = self.url

        request = {'service': 'SOS', 'version': self.version, 'request': 'GetObservation'}

        # Required Fields
        assert isinstance(offerings, list) and len(offerings) > 0
        request['offering'] = ','.join(offerings)

        assert isinstance(observedProperties, list) and len(observedProperties) > 0
        request['observedProperty'] = ','.join(observedProperties)

        assert isinstance(responseFormat, str)
        request['responseFormat'] = responseFormat

        # Optional Fields
        if eventTime is not None:
            request['eventTime'] = eventTime

        if kwargs:
            for kw in kwargs:
                request[kw] = kwargs[kw]

        data = urlencode(request)

        response = openURL(base_url, data, method, username=self.username, password=self.password).read()
        try:
            tr = etree.fromstring(response)
            if tr.tag == nspath_eval("ows:ExceptionReport", namespaces):
                raise ows.ExceptionReport(tr)
            else:
                return response
        except ows.ExceptionReport:
            raise
        except BaseException:
            # non-XML payloads (e.g. O&M binary encodings) are returned
            # to the caller untouched
            return response

    def get_operation_by_name(self, name):
        """
        Return a Operation item by name, case insensitive
        """
        for item in self.operations:
            if item.name.lower() == name.lower():
                return item
        raise KeyError("No Operation named %s" % name)
class SosObservationOffering(object):
    # Parses one sos:ObservationOffering element from the capabilities
    # document into plain Python attributes (id, name, bbox, time range,
    # procedures, observed properties, features of interest, formats).
    def __init__(self, element):
        self._root = element

        self.id = testXMLValue(self._root.attrib.get(nspath_eval('gml:id', namespaces)), True)
        self.description = testXMLValue(self._root.find(nspath_eval('gml:description', namespaces)))
        self.name = testXMLValue(self._root.find(nspath_eval('gml:name', namespaces)))
        # NOTE(review): self.srs is only assigned when gml:srsName is
        # present, so the attribute may be missing on some instances.
        val = testXMLValue(self._root.find(nspath_eval('gml:srsName', namespaces)))
        if val is not None:
            self.srs = Crs(val)

        # LOOK: Check on GML boundedBy to make sure we handle all of the cases
        # gml:boundedBy
        try:
            envelope = self._root.find(nspath_eval('gml:boundedBy/gml:Envelope', namespaces))
            lower_left_corner = testXMLValue(envelope.find(nspath_eval('gml:lowerCorner', namespaces))).split()
            upper_right_corner = testXMLValue(envelope.find(nspath_eval('gml:upperCorner', namespaces))).split()
            # (left, bottom, right, top) in self.bbox_srs units
            self.bbox = (float(lower_left_corner[1]), float(lower_left_corner[0]), float(upper_right_corner[1]), float(upper_right_corner[0]))
            self.bbox_srs = Crs(testXMLValue(envelope.attrib.get('srsName'), True))
        except Exception, err:
            # any parse failure leaves the bounding box undefined
            self.bbox = None
            self.bbox_srs = None

        # LOOK: Support all gml:TimeGeometricPrimitivePropertyType
        # Right now we are just supporting gml:TimePeriod
        # sos:Time
        begin_position_element = self._root.find(nspath_eval('sos:time/gml:TimePeriod/gml:beginPosition', namespaces))
        self.begin_position = extract_time(begin_position_element)
        end_position_element = self._root.find(nspath_eval('sos:time/gml:TimePeriod/gml:endPosition', namespaces))
        self.end_position = extract_time(end_position_element)

        self.result_model = testXMLValue(self._root.find(nspath_eval('sos:resultModel', namespaces)))

        # xlink:href references to the procedures (sensors) in this offering
        self.procedures = []
        for proc in self._root.findall(nspath_eval('sos:procedure', namespaces)):
            self.procedures.append(testXMLValue(proc.attrib.get(nspath_eval('xlink:href', namespaces)), True))

        # LOOK: Support swe:Phenomenon here
        # this includes compound properties
        self.observed_properties = []
        for op in self._root.findall(nspath_eval('sos:observedProperty', namespaces)):
            self.observed_properties.append(testXMLValue(op.attrib.get(nspath_eval('xlink:href', namespaces)), True))

        self.features_of_interest = []
        for fot in self._root.findall(nspath_eval('sos:featureOfInterest', namespaces)):
            self.features_of_interest.append(testXMLValue(fot.attrib.get(nspath_eval('xlink:href', namespaces)), True))

        self.response_formats = []
        for rf in self._root.findall(nspath_eval('sos:responseFormat', namespaces)):
            self.response_formats.append(testXMLValue(rf))

        self.response_modes = []
        for rm in self._root.findall(nspath_eval('sos:responseMode', namespaces)):
            self.response_modes.append(testXMLValue(rm))

    def __str__(self):
        return 'Offering id: %s, name: %s' % (self.id, self.name)
class SosCapabilitiesReader(object):
    # Fetches and parses SOS GetCapabilities documents, either from a
    # service URL or from an XML string already in hand.
    def __init__(self, version="1.0.0", url=None, username=None, password=None):
        self.version = version
        self.url = url
        self.username = username
        self.password = password

    def capabilities_url(self, service_url):
        """
        Return a capabilities url

        Existing query-string parameters on service_url are preserved;
        service/request/acceptVersions are only appended when missing.
        """
        qs = []
        if service_url.find('?') != -1:
            # NOTE(review): cgi.parse_qsl is deprecated/removed on modern
            # Python 3 (use urllib.parse.parse_qsl); this module is
            # Python 2 code.
            qs = cgi.parse_qsl(service_url.split('?')[1])

        params = [x[0] for x in qs]

        if 'service' not in params:
            qs.append(('service', 'SOS'))
        if 'request' not in params:
            qs.append(('request', 'GetCapabilities'))
        if 'acceptVersions' not in params:
            qs.append(('acceptVersions', self.version))

        urlqs = urlencode(tuple(qs))
        return service_url.split('?')[0] + '?' + urlqs

    def read(self, service_url):
        """
        Get and parse a WMS capabilities document, returning an
        elementtree instance

        service_url is the base url, to which is appended the service,
        acceptVersions, and request parameters
        """
        getcaprequest = self.capabilities_url(service_url)
        spliturl = getcaprequest.split('?')
        u = openURL(spliturl[0], spliturl[1], method='Get', username=self.username, password=self.password)
        return etree.fromstring(u.read())

    def read_string(self, st):
        """
        Parse a SOS capabilities document, returning an elementtree instance

        st should be an XML capabilities document
        """
        if not isinstance(st, str):
            raise ValueError("String must be of type string, not %s" % type(st))
        return etree.fromstring(st)
| |
#!/usr/bin/env python
# 6.034 Lab 5
# Neural Net
# - In this file we have an incomplete skeleton of
# a neural network implementation. Follow the online instructions
# and complete the NotImplemented methods below.
#
import math
import random
class ValuedElement(object):
    """
    Abstract base class for every named, valued network element.

    Holds a display name and a mutable numeric value, with simple
    accessors used throughout the network code.
    """
    def __init__(self, name, val):
        self.my_name = name
        self.my_value = val

    def set_value(self, val):
        """Overwrite the stored value."""
        self.my_value = val

    def get_value(self):
        """Return the currently stored value."""
        return self.my_value

    def get_name(self):
        """Return this element's name."""
        return self.my_name

    def __repr__(self):
        return "%s(%s)" % (self.my_name, self.my_value)
class DifferentiableElement(object):
    """
    Abstract interface class implemented by all Network parts that must
    expose an output value and its derivative with respect to a weight.
    """
    def output(self):
        """Return this element's output value; subclasses must override."""
        # FIX: the call form of raise replaces the Python-2-only statement
        # form (``raise E, msg``), keeping behavior identical on Python 2
        # while remaining valid syntax on Python 3.
        raise NotImplementedError("This is an abstract method")

    def dOutdX(self, elem):
        """Return d(output)/d(elem); subclasses must override.

        elem: the Weight to differentiate with respect to.
        """
        raise NotImplementedError("This is an abstract method")

    def clear_cache(self):
        """clears any precalculated cached value"""
        pass
class Input(ValuedElement, DifferentiableElement):
    """
    Representation of an Input into the network.
    These may represent variable inputs as well as fixed inputs
    (Thresholds) that are always set to -1.
    """
    def __init__(self, name, val):
        ValuedElement.__init__(self, name, val)
        DifferentiableElement.__init__(self)

    def output(self):
        """
        Returns the output of this Input node.

        returns: number (float or int)
        """
        # Lab skeleton: intentionally left for the student to implement.
        # FIX: call-form raise is valid on both Python 2 and 3.
        raise NotImplementedError("Implement me!")

    def dOutdX(self, elem):
        """
        Returns the derivative of this Input node with respect to
        elem.

        elem: an instance of Weight
        returns: number (float or int)
        """
        raise NotImplementedError("Implement me!")
class Weight(ValuedElement):
    """
    Representation of a weight into a Neural Unit.

    Supports two-phase updates: stage the new value with set_next_value,
    then commit it with update, so every weight in the network can change
    simultaneously after all gradients have been computed.
    """
    def __init__(self, name, val):
        ValuedElement.__init__(self, name, val)
        self.next_value = None

    def set_next_value(self, val):
        """Stage val as this weight's pending value."""
        self.next_value = val

    def update(self):
        """Commit the staged value as the current value."""
        self.my_value = self.next_value
class Neuron(DifferentiableElement):
    """
    Representation of a single sigmoid Neural Unit.
    """
    def __init__(self, name, inputs, input_weights, use_cache=True):
        assert len(inputs) == len(input_weights)
        for i in range(len(inputs)):
            assert isinstance(inputs[i], (Neuron, Input))
            assert isinstance(input_weights[i], Weight)
        DifferentiableElement.__init__(self)
        self.my_name = name
        self.my_inputs = inputs  # list of Neuron or Input instances
        self.my_weights = input_weights  # list of Weight instances
        self.use_cache = use_cache
        self.clear_cache()
        self.my_descendant_weights = None

    def get_descendant_weights(self):
        """
        Returns a mapping of the names of direct weights into this neuron,
        to all descendant weights.

        Computed lazily on first access and memoized thereafter.
        """
        if self.my_descendant_weights is None:
            self.my_descendant_weights = {}
            inputs = self.get_inputs()
            weights = self.get_weights()
            # FIX: ``range`` replaces the Python-2-only ``xrange``; the
            # iteration is identical.
            for i in range(len(weights)):
                weight = weights[i]
                weight_name = weight.get_name()
                self.my_descendant_weights[weight_name] = set()
                # FIX: renamed local from ``input`` to avoid shadowing the
                # builtin of the same name.
                inp = inputs[i]
                if not isinstance(inp, Input):
                    descendants = inp.get_descendant_weights()
                    for name, s in descendants.items():
                        st = self.my_descendant_weights[weight_name]
                        st = st.union(s)
                        st.add(name)
                        self.my_descendant_weights[weight_name] = st

        return self.my_descendant_weights

    def isa_descendant_weight_of(self, target, weight):
        """
        Checks if [target] is a indirect input weight into this Neuron
        via the direct input weight [weight].
        """
        weights = self.get_descendant_weights()
        if weight.get_name() in weights:
            return target.get_name() in weights[weight.get_name()]
        else:
            raise Exception("weight %s is not connect to this node: %s"
                            % (weight, self))

    def has_weight(self, weight):
        """
        Checks if [weight] is a direct input weight into this Neuron.
        """
        # FIX: the original first computed an unused local copy of the
        # mapping; a single membership test against the keys is enough.
        return weight.get_name() in self.get_descendant_weights()

    def get_weight_nodes(self):
        return self.my_weights

    def clear_cache(self):
        # invalidate both the cached output and the cached derivatives
        self.my_output = None
        self.my_doutdx = {}

    def output(self):
        # Implement compute_output instead!!
        if self.use_cache:
            # caching optimization, saves previously computed output.
            if self.my_output is None:
                self.my_output = self.compute_output()
            return self.my_output
        return self.compute_output()

    def compute_output(self):
        """
        Returns the output of this Neuron node, using a sigmoid as
        the threshold function.

        returns: number (float or int)
        """
        # Lab skeleton: intentionally left for the student to implement.
        raise NotImplementedError("Implement me!")

    def dOutdX(self, elem):
        # Implement compute_doutdx instead!!
        if self.use_cache:
            # caching optimization, saves previously computed dOutDx.
            if elem not in self.my_doutdx:
                self.my_doutdx[elem] = self.compute_doutdx(elem)
            return self.my_doutdx[elem]
        return self.compute_doutdx(elem)

    def compute_doutdx(self, elem):
        """
        Returns the derivative of this Neuron node, with respect to weight
        elem, calling output() and/or dOutdX() recursively over the inputs.

        elem: an instance of Weight
        returns: number (float/int)
        """
        raise NotImplementedError("Implement me!")

    def get_weights(self):
        return self.my_weights

    def get_inputs(self):
        return self.my_inputs

    def get_name(self):
        return self.my_name

    def __repr__(self):
        return "Neuron(%s)" % (self.my_name)
class PerformanceElem(DifferentiableElement):
    """
    Representation of a performance computing output node.
    This element contains methods for setting the
    desired output (d) and also computing the final
    performance P of the network.

    This implementation assumes a single output.
    """
    def __init__(self, input, desired_value):
        assert isinstance(input, (Input, Neuron))
        DifferentiableElement.__init__(self)
        self.my_input = input
        self.my_desired_val = desired_value

    def output(self):
        """
        Returns the output of this PerformanceElem node.

        returns: number (float/int)
        """
        # Lab skeleton: intentionally left for the student to implement.
        # FIX: call-form raise is valid on both Python 2 and 3.
        raise NotImplementedError("Implement me!")

    def dOutdX(self, elem):
        """
        Returns the derivative of this PerformanceElem node with respect
        to some weight, given by elem.

        elem: an instance of Weight
        returns: number (int/float)
        """
        raise NotImplementedError("Implement me!")

    def set_desired(self, new_desired):
        """Change the target value the network should produce."""
        self.my_desired_val = new_desired

    def get_input(self):
        """Return the node (Input or Neuron) whose output is scored."""
        return self.my_input
def alphabetize(x, y):
    """Comparator ordering two named elements by name, ascending.

    Returns 1 when x's name sorts after y's, otherwise -1 (never 0,
    matching the original comparator's behavior for equal names).
    """
    return 1 if x.get_name() > y.get_name() else -1
class Network(object):
    """Container wiring neurons, weights, inputs and the performance node.

    Attributes:
        inputs: distinct variable Input nodes (the fixed 'i0' threshold
            input is excluded)
        weights: every Weight node, in neuron (alphabetical) order
        performance: the PerformanceElem scoring the network output
        output: the node whose value is the network's output
        neurons: the neurons sorted alphabetically by name
    """
    def __init__(self, performance_node, neurons):
        self.inputs = []
        self.performance = performance_node
        self.output = performance_node.get_input()
        self.neurons = neurons[:]
        self.neurons.sort(cmp=alphabetize)
        # collect distinct variable inputs, skipping the fixed 'i0'
        # threshold input
        for neuron in self.neurons:
            for i in neuron.get_inputs():
                if isinstance(i, Input) and not i.get_name() == 'i0' and not i in self.inputs:
                    self.inputs.append(i)
        # FIX (dead code): the original also built self.weights inside the
        # loop above and reversed it, then immediately discarded that list
        # and rebuilt it below; only this final build is observable, so the
        # discarded build and reverse were removed.
        self.weights = []
        for n in self.neurons:
            self.weights += n.get_weight_nodes()

    def clear_cache(self):
        """Invalidate every neuron's cached output/derivative values."""
        for n in self.neurons:
            n.clear_cache()
def seed_random():
    """Seed the PRNG with a fixed value so 'random' draws are reproducible."""
    random.seed(0)
def random_weight():
    """Generate a deterministic pseudo-random initial weight.

    randrange(-1, 2) was found to work well empirically even though it
    yields only the three integers -1, 0 and 1.
    """
    return random.randrange(-1, 2)
    # Uncomment the following if you want to try a uniform distribution
    # of random numbers, compare, and see what the difference is.
    # return random.uniform(-1, 1)
def make_neural_net_basic():
    """
    Constructs a 2-input, 1-output Network with a single neuron.
    This network is used to test your network implementation
    and a guide for constructing more complex networks.

    Naming convention for each of the elements:

    Input: 'i'+ input_number
        Example: 'i1', 'i2', etc.
        Conventions: Start numbering at 1.
                     For the -1 inputs, use 'i0' for everything

    Weight: 'w' + from_identifier + to_identifier
        Examples: 'w1A' for weight from Input i1 to Neuron A
                  'wAB' for weight from Neuron A to Neuron B

    Neuron: alphabet_letter
        Convention: Order names by distance to the inputs.
                    If equal distant, then order them left to right.
        Example: 'A' is the neuron closest to the inputs.

    All names should be unique.
    You must follow these conventions in order to pass all the tests.
    """
    # the fixed -1 threshold input is immutable
    threshold = Input('i0', -1.0)
    in1 = Input('i1', 0.0)
    in2 = Input('i2', 0.0)

    w1A = Weight('w1A', 1)
    w2A = Weight('w2A', 1)
    wA = Weight('wA', 1)

    # Inputs must be in the same order as their associated weights
    neuron_a = Neuron('A', [in1, in2, threshold], [w1A, w2A, wA])
    performance = PerformanceElem(neuron_a, 0.0)

    return Network(performance, [neuron_a])
def make_neural_net_two_layer():
    """
    Create a 2-input, 1-output Network with three neurons.

    There should be two neurons at the first level, each receiving both inputs
    Both of the first level neurons should feed into the second layer neuron.

    See 'make_neural_net_basic' for required naming convention for inputs,
    weights, and neurons.
    """
    # Lab skeleton: intentionally left unimplemented for the student.
    raise NotImplementedError, "Implement me!"
def make_neural_net_challenging():
    """
    Design a network that can in-theory solve all 3 problems described in
    the lab instructions.  Your final network should contain
    at most 5 neuron units.

    See 'make_neural_net_basic' for required naming convention for inputs,
    weights, and neurons.
    """
    # Lab skeleton: intentionally left unimplemented for the student.
    raise NotImplementedError, "Implement me!"
def make_neural_net_with_weights():
    """
    In this method you are to use the network you designed earlier
    and set pre-determined weights.  Your goal is to set the weights
    to values that will allow the "patchy" problem to converge quickly.
    Your output network should be able to learn the "patchy"
    dataset within 1000 iterations of back-propagation.
    """
    # You can preset weights for the network by completing
    # and uncommenting the init_weights dictionary below.
    #
    # init_weights = { 'w1A' : 0.0,
    #                  'w2A' : 0.0,
    #                  'w1B' : 0.0,
    #                  'w2B' : 0.0,
    #                  .... # finish me!
    #
    # Lab skeleton: the return below references init_weights, which only
    # exists once the dictionary above is completed and uncommented and
    # this raise is removed.
    raise NotImplementedError, "Implement me!"
    return make_net_with_init_weights_from_dict(make_neural_net_challenging,
                                                init_weights)
def make_net_with_init_weights_from_dict(net_fn, init_weights):
    """Build a network via net_fn and set each weight from a name->value dict.

    net_fn: zero-argument factory returning a Network
    init_weights: mapping from weight name to initial value
    """
    net = net_fn()
    for weight in net.weights:
        weight.set_value(init_weights[weight.get_name()])
    return net
def make_net_with_init_weights_from_list(net_fn, init_weights):
    """Build a network via net_fn and set its weights from a list, in order.

    net_fn: zero-argument factory returning a Network
    init_weights: sequence of values, indexed in net.weights order
    """
    net = net_fn()
    for idx, weight in enumerate(net.weights):
        weight.set_value(init_weights[idx])
    return net
def abs_mean(values):
    """Compute the mean of the absolute values of a set of numbers.

    Used for computing the stopping condition for training neural nets.
    Raises ZeroDivisionError on an empty input, as before.
    """
    # FIX: the original did ``len(map(abs, values))``, which breaks on
    # Python 3 where map() is lazy; a list comprehension keeps the exact
    # Python 2 behavior (including generator inputs) on both versions.
    abs_vals = [abs(x) for x in values]
    return sum(abs_vals) / float(len(abs_vals))
def train(network,
          data, # training data
          rate=1.0, # learning rate
          target_abs_mean_performance=0.0001,
          max_iterations=10000,
          verbose=False):
    """Run back-propagation training algorithm on a given network.
    with training [data].  The training runs for [max_iterations]
    or until [target_abs_mean_performance] is reached.

    Each datum is a sequence whose leading elements are the input values
    (in network.inputs order) and whose last element is the desired output.
    Weight updates are staged per datum and committed together.
    """
    iteration = 0
    while iteration < max_iterations:
        # NOTE(review): fully_trained is assigned but never read; looks
        # like leftover state from an earlier version.
        fully_trained = False
        performances = []  # store performance on each data point

        for datum in data:
            # set network inputs
            for i in xrange(len(network.inputs)):
                network.inputs[i].set_value(datum[i])

            # set network desired output
            network.performance.set_desired(datum[-1])

            # clear cached calculations
            network.clear_cache()

            # compute all the weight updates (staged, not yet applied)
            for w in network.weights:
                w.set_next_value(w.get_value() +
                                 rate * network.performance.dOutdX(w))

            # set the new weights
            for w in network.weights:
                w.update()

            # save the performance value
            performances.append(network.performance.output())

            # clear cached calculations
            network.clear_cache()

        # compute the mean performance value
        abs_mean_performance = abs_mean(performances)

        if abs_mean_performance < target_abs_mean_performance:
            if verbose:
                print "iter %d: training complete.\n"\
                      "mean-abs-performance threshold %s reached (%1.6f)"\
                      %(iteration,
                        target_abs_mean_performance,
                        abs_mean_performance)
            break

        iteration += 1

        if iteration % 1000 == 0 and verbose:
            print "iter %d: mean-abs-performance = %1.6f"\
                  %(iteration,
                    abs_mean_performance)
def test(network, data, verbose=False):
    """Test the neural net on some given data.

    Returns the fraction (0.0 to 1.0) of data points for which the rounded
    network output equals the datum's target value (its last element).
    """
    correct = 0
    for datum in data:

        # set network inputs from the datum's leading values
        for i in range(len(network.inputs)):
            network.inputs[i].set_value(datum[i])

        # clear cached calculations
        network.clear_cache()
        result = network.output.output()
        network.clear_cache()
        rounded_result = round(result)

        # a prediction counts as correct when it rounds to the target
        if round(result)==datum[-1]:
            correct+=1
            if verbose:
                print "test(%s) returned: %s => %s [%s]" %(str(datum),
                                                           str(result),
                                                           rounded_result,
                                                           "correct")
        else:
            if verbose:
                print "test(%s) returned: %s => %s [%s]" %(str(datum),
                                                           str(result),
                                                           rounded_result,
                                                           "wrong")
    return float(correct)/len(data)
| |
from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
    """Warning carrying a refreshed OAuth 2 token for the caller to store."""

    def __init__(self, token):
        super(TokenUpdated, self).__init__()
        self.token = token
class OAuth2Session(requests.Session):
    """Versatile OAuth 2 extension to :class:`requests.Session`.
    Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
    including the four core OAuth 2 grants.
    Can be used to create authorization urls, fetch tokens and access protected
    resources using the :class:`requests.Session` interface you are used to.
    - :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
    - :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
    - :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
    - :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
    Note that the only time you will be using Implicit Grant from python is if
    you are driving a user agent able to obtain URL fragments.
    """
    def __init__(self, client_id=None, client=None, auto_refresh_url=None,
            auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
            state=None, token_updater=None, **kwargs):
        """Construct a new OAuth 2 client session.
        :param client_id: Client id obtained during registration
        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                       WebApplicationClient which is useful for any
                       hosted application but not mobile or desktop.
        :param scope: List of scopes you wish to request access to
        :param redirect_uri: Redirect URI you registered as callback
        :param token: Token dictionary, must include access_token
                      and token_type.
        :param state: State string used to prevent CSRF. This will be given
                      when creating the authorization url and must be supplied
                      when parsing the authorization response.
                      Can be either a string or a no argument callable.
        :auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
                           this if you wish the client to automatically refresh
                           your access tokens.
        :auto_refresh_kwargs: Extra arguments to pass to the refresh token
                              endpoint.
        :token_updater: Method with one argument, token, to be used to update
                        your token database on automatic token refresh. If not
                        set a TokenUpdated warning will be raised when a token
                        has been refreshed. This warning will carry the token
                        in its token argument.
        :param kwargs: Arguments to pass to the Session constructor.
        """
        super(OAuth2Session, self).__init__(**kwargs)
        self._client = client or WebApplicationClient(client_id, token=token)
        self.token = token or {}
        self.scope = scope
        self.redirect_uri = redirect_uri
        # self.state may be a plain string or a zero-argument callable
        # producing one (see new_state()).
        self.state = state or generate_token
        self._state = state
        self.auto_refresh_url = auto_refresh_url
        self.auto_refresh_kwargs = auto_refresh_kwargs or {}
        self.token_updater = token_updater
        # Allow customizations for non compliant providers through various
        # hooks to adjust requests and responses.
        self.compliance_hook = {
            'access_token_response': set(),
            'refresh_token_response': set(),
            'protected_request': set(),
        }
    def new_state(self):
        """Generates a state string to be used in authorizations."""
        try:
            # state was supplied as a callable
            self._state = self.state()
            log.debug('Generated new state %s.', self._state)
        except TypeError:
            # state is a plain string; re-use it as-is
            self._state = self.state
            log.debug('Re-using previously supplied state %s.', self._state)
        return self._state
    @property
    def client_id(self):
        """Client id, delegated to the underlying oauthlib client."""
        return getattr(self._client, "client_id", None)
    @client_id.setter
    def client_id(self, value):
        self._client.client_id = value
    @client_id.deleter
    def client_id(self):
        del self._client.client_id
    @property
    def token(self):
        """Token dict, delegated to the underlying oauthlib client."""
        return getattr(self._client, "token", None)
    @token.setter
    def token(self, value):
        self._client.token = value
        # keep the client's derived attributes (access_token, etc.) in sync
        self._client.populate_token_attributes(value)
    @property
    def access_token(self):
        """Access token string, delegated to the underlying oauthlib client."""
        return getattr(self._client, "access_token", None)
    @access_token.setter
    def access_token(self, value):
        self._client.access_token = value
    @access_token.deleter
    def access_token(self):
        del self._client.access_token
    @property
    def authorized(self):
        """Boolean that indicates whether this session has an OAuth token
        or not. If `self.authorized` is True, you can reasonably expect
        OAuth-protected requests to the resource to succeed. If
        `self.authorized` is False, you need the user to go through the OAuth
        authentication dance before OAuth-protected requests to the resource
        will succeed.
        """
        return bool(self.access_token)
    def authorization_url(self, url, state=None, **kwargs):
        """Form an authorization URL.
        :param url: Authorization endpoint url, must be HTTPS.
        :param state: An optional state string for CSRF protection. If not
                      given it will be generated for you.
        :param kwargs: Extra parameters to include.
        :return: authorization_url, state
        """
        state = state or self.new_state()
        return self._client.prepare_request_uri(url,
                redirect_uri=self.redirect_uri,
                scope=self.scope,
                state=state,
                **kwargs), state
    def fetch_token(self, token_url, code=None, authorization_response=None,
            body='', auth=None, username=None, password=None, method='POST',
            timeout=None, headers=None, verify=True, proxies=None, **kwargs):
        """Generic method for fetching an access token from the token endpoint.
        If you are using the MobileApplicationClient you will want to use
        token_from_fragment instead of fetch_token.
        :param token_url: Token endpoint URL, must use HTTPS.
        :param code: Authorization code (used by WebApplicationClients).
        :param authorization_response: Authorization response URL, the callback
                                       URL of the request back to you. Used by
                                       WebApplicationClients instead of code.
        :param body: Optional application/x-www-form-urlencoded body to add the
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by requests.
        :param username: Username used by LegacyApplicationClients.
        :param password: Password used by LegacyApplicationClients.
        :param method: The HTTP method used to make the request. Defaults
                       to POST, but may also be GET. Other methods should
                       be added as needed.
        :param headers: Dict to default request headers with.
        :param timeout: Timeout of the request in seconds.
        :param verify: Verify SSL certificate.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not is_secure_transport(token_url):
            raise InsecureTransportError()
        # Derive the authorization code from the redirect URL if it was not
        # given explicitly.
        if not code and authorization_response:
            self._client.parse_request_uri_response(authorization_response,
                    state=self._state)
            code = self._client.code
        elif not code and isinstance(self._client, WebApplicationClient):
            code = self._client.code
        if not code:
            raise ValueError('Please supply either code or '
                             'authorization_response parameters.')
        body = self._client.prepare_request_body(code=code, body=body,
                redirect_uri=self.redirect_uri, username=username,
                password=password, **kwargs)
        # If no explicit auth was given, fall back to HTTP Basic auth built
        # from client_id/client_secret (or username/password) kwargs.
        client_id = kwargs.get('client_id', '')
        if auth is None:
            if client_id:
                log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
                client_secret = kwargs.get('client_secret', '')
                client_secret = client_secret if client_secret is not None else ''
                auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
            elif username:
                if password is None:
                    raise ValueError('Username was supplied, but not password.')
                log.debug('Encoding username, password as Basic auth credentials.')
                auth = requests.auth.HTTPBasicAuth(username, password)
        headers = headers or {
            'Accept': 'application/json',
            'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
        }
        # Clear any existing token before requesting a new one.
        self.token = {}
        if method.upper() == 'POST':
            r = self.post(token_url, data=dict(urldecode(body)),
                timeout=timeout, headers=headers, auth=auth,
                verify=verify, proxies=proxies)
            log.debug('Prepared fetch token request body %s', body)
        elif method.upper() == 'GET':
            # if method is not 'POST', switch body to querystring and GET
            r = self.get(token_url, params=dict(urldecode(body)),
                timeout=timeout, headers=headers, auth=auth,
                verify=verify, proxies=proxies)
            log.debug('Prepared fetch token request querystring %s', body)
        else:
            raise ValueError('The method kwarg must be POST or GET.')
        log.debug('Request to fetch token completed with status %s.',
                  r.status_code)
        log.debug('Request headers were %s', r.request.headers)
        log.debug('Request body was %s', r.request.body)
        log.debug('Response headers were %s and content %s.',
                  r.headers, r.text)
        log.debug('Invoking %d token response hooks.',
                  len(self.compliance_hook['access_token_response']))
        # Let registered hooks massage non-compliant provider responses.
        for hook in self.compliance_hook['access_token_response']:
            log.debug('Invoking hook %s.', hook)
            r = hook(r)
        self._client.parse_request_body_response(r.text, scope=self.scope)
        self.token = self._client.token
        log.debug('Obtained token %s.', self.token)
        return self.token
    def token_from_fragment(self, authorization_response):
        """Parse token from the URI fragment, used by MobileApplicationClients.
        :param authorization_response: The full URL of the redirect back to you
        :return: A token dict
        """
        self._client.parse_request_uri_response(authorization_response,
                state=self._state)
        self.token = self._client.token
        return self.token
    def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
            timeout=None, headers=None, verify=True, proxies=None, **kwargs):
        """Fetch a new access token using a refresh token.
        :param token_url: The token endpoint, must be HTTPS.
        :param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to add the
                     include in the token request. Prefer kwargs over body.
        :param auth: An auth tuple or method as accepted by requests.
        :param timeout: Timeout of the request in seconds.
        :param verify: Verify SSL certificate.
        :param kwargs: Extra parameters to include in the token request.
        :return: A token dict
        """
        if not token_url:
            raise ValueError('No token endpoint set for auto_refresh.')
        if not is_secure_transport(token_url):
            raise InsecureTransportError()
        refresh_token = refresh_token or self.token.get('refresh_token')
        log.debug('Adding auto refresh key word arguments %s.',
                  self.auto_refresh_kwargs)
        kwargs.update(self.auto_refresh_kwargs)
        body = self._client.prepare_refresh_body(body=body,
                refresh_token=refresh_token, scope=self.scope, **kwargs)
        log.debug('Prepared refresh token request body %s', body)
        if headers is None:
            headers = {
                'Accept': 'application/json',
                'Content-Type': (
                    'application/x-www-form-urlencoded;charset=UTF-8'
                ),
            }
        # withhold_token=True keeps the (possibly expired) access token from
        # being attached to this refresh request itself (see request()).
        r = self.post(token_url, data=dict(urldecode(body)), auth=auth,
            timeout=timeout, headers=headers, verify=verify, withhold_token=True, proxies=proxies)
        log.debug('Request to refresh token completed with status %s.',
                  r.status_code)
        log.debug('Response headers were %s and content %s.',
                  r.headers, r.text)
        log.debug('Invoking %d token response hooks.',
                  len(self.compliance_hook['refresh_token_response']))
        for hook in self.compliance_hook['refresh_token_response']:
            log.debug('Invoking hook %s.', hook)
            r = hook(r)
        self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        if not 'refresh_token' in self.token:
            log.debug('No new refresh token given. Re-using old.')
            self.token['refresh_token'] = refresh_token
        return self.token
    def request(self, method, url, data=None, headers=None, withhold_token=False,
            client_id=None, client_secret=None, **kwargs):
        """Intercept all requests and add the OAuth 2 token if present."""
        if not is_secure_transport(url):
            raise InsecureTransportError()
        if self.token and not withhold_token:
            log.debug('Invoking %d protected resource request hooks.',
                      len(self.compliance_hook['protected_request']))
            for hook in self.compliance_hook['protected_request']:
                log.debug('Invoking hook %s.', hook)
                url, headers, data = hook(url, headers, data)
            log.debug('Adding token %s to request.', self.token)
            try:
                url, headers, data = self._client.add_token(url,
                        http_method=method, body=data, headers=headers)
            # Attempt to retrieve and save new access token if expired
            except TokenExpiredError:
                if self.auto_refresh_url:
                    log.debug('Auto refresh is set, attempting to refresh at %s.',
                              self.auto_refresh_url)
                    # We mustn't pass auth twice.
                    auth = kwargs.pop('auth', None)
                    if client_id and client_secret and (auth is None):
                        log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
                        auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
                    token = self.refresh_token(
                        self.auto_refresh_url, auth=auth, **kwargs
                    )
                    if self.token_updater:
                        log.debug('Updating token to %s using %s.',
                                  token, self.token_updater)
                        self.token_updater(token)
                        # retry adding the (now refreshed) token
                        url, headers, data = self._client.add_token(url,
                                http_method=method, body=data, headers=headers)
                    else:
                        # No persistence callback configured: surface the new
                        # token to the caller via a TokenUpdated warning.
                        raise TokenUpdated(token)
                else:
                    raise
        log.debug('Requesting url %s using method %s.', url, method)
        log.debug('Supplying headers %s and data %s', headers, data)
        log.debug('Passing through key word arguments %s.', kwargs)
        return super(OAuth2Session, self).request(method, url,
                headers=headers, data=data, **kwargs)
    def register_compliance_hook(self, hook_type, hook):
        """Register a hook for request/response tweaking.
        Available hooks are:
        access_token_response invoked before token parsing.
        refresh_token_response invoked before refresh token parsing.
        protected_request invoked before making a request.
        If you find a new hook is needed please send a GitHub PR request
        or open an issue.
        """
        if hook_type not in self.compliance_hook:
            # NOTE(review): ValueError is given printf-style args but never
            # formats them; the message will render as a tuple.
            raise ValueError('Hook type %s is not in %s.',
                    hook_type, self.compliance_hook)
        self.compliance_hook[hook_type].add(hook)
| |
from os import path
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable
from torch.utils.cpp_extension import load
# JIT-compile and load the C++/CUDA extension at import time; all heavy
# lifting (mean/var, fused BN forward/backward, activations) lives there.
_src_path = path.join(path.dirname(path.abspath(__file__)), "src")
_backend = load(name="inplace_abn",
                extra_cflags=["-O3"],
                sources=[path.join(_src_path, f) for f in [
                    "inplace_abn.cpp",
                    "inplace_abn_cpu.cpp",
                    "inplace_abn_cuda.cu"
                ]],
                extra_cuda_cflags=["--expt-extended-lambda"])
# Activation names
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"
def _check(fn, *args, **kwargs):
success = fn(*args, **kwargs)
if not success:
raise RuntimeError("CUDA Error encountered in {}".format(fn))
def _broadcast_shape(x):
out_size = []
for i, s in enumerate(x.size()):
if i != 1:
out_size.append(1)
else:
out_size.append(s)
return out_size
def _reduce(x):
if len(x.size()) == 2:
return x.sum(dim=0)
else:
n, c = x.size()[0:2]
return x.contiguous().view((n, c, -1)).sum(2).sum(0)
def _count_samples(x):
count = 1
for i, s in enumerate(x.size()):
if i != 1:
count *= s
return count
def _act_forward(ctx, x):
    """Apply ctx.activation to *x* in-place via the native backend."""
    activation = ctx.activation
    if activation == ACT_LEAKY_RELU:
        _backend.leaky_relu_forward(x, ctx.slope)
    elif activation == ACT_ELU:
        _backend.elu_forward(x)
    # ACT_NONE (or anything else): identity, nothing to do
def _act_backward(ctx, x, dx):
    """Backpropagate ctx.activation through *x*/*dx* in-place via the backend."""
    activation = ctx.activation
    if activation == ACT_LEAKY_RELU:
        _backend.leaky_relu_backward(x, dx, ctx.slope)
    elif activation == ACT_ELU:
        _backend.elu_backward(x, dx)
    # ACT_NONE (or anything else): identity, nothing to do
class InPlaceABN(autograd.Function):
    """Single-device In-Place Activated Batch Normalization.

    Fuses batch normalization with an optional activation and overwrites
    the input tensor with the result to save memory (note the
    ``mark_dirty`` calls below).
    """
    @staticmethod
    def forward(ctx, x, weight, bias, running_mean, running_var,
                training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01):
        # Save context
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        # Affine transform is applied only when both weight and bias are given.
        ctx.affine = weight is not None and bias is not None
        # Prepare inputs
        count = _count_samples(x)
        x = x.contiguous()
        weight = weight.contiguous() if ctx.affine else x.new_empty(0)
        bias = bias.contiguous() if ctx.affine else x.new_empty(0)
        if ctx.training:
            mean, var = _backend.mean_var(x)
            # Update running stats
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
            # count / (count - 1) converts the biased batch variance into the
            # unbiased estimate stored in running_var.
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * count / (count - 1))
            # Mark in-place modified tensors
            ctx.mark_dirty(x, running_mean, running_var)
        else:
            mean, var = running_mean.contiguous(), running_var.contiguous()
            ctx.mark_dirty(x)
        # BN forward + activation
        _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps)
        _act_forward(ctx, x)
        # Output
        ctx.var = var
        # The saved x is the post-activation output; backward undoes the
        # activation first via _act_backward.
        ctx.save_for_backward(x, var, weight, bias)
        return x
    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        z, var, weight, bias = ctx.saved_tensors
        dz = dz.contiguous()
        # Undo activation
        _act_backward(ctx, z, dz)
        if ctx.training:
            edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps)
        else:
            # TODO: implement simplified CUDA backward for inference mode
            edz = dz.new_zeros(dz.size(1))
            eydz = dz.new_zeros(dz.size(1))
        dx, dweight, dbias = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps)
        dweight = dweight if ctx.affine else None
        dbias = dbias if ctx.affine else None
        # One gradient slot per forward argument; non-tensor args get None.
        return dx, dweight, dbias, None, None, None, None, None, None, None
class InPlaceABNSync(autograd.Function):
    """In-Place Activated Batch Normalization with cross-device statistics.

    One replica acts as the master; the others exchange per-device
    mean/var (forward) and gradient sums (backward) with it through the
    queues supplied in ``extra`` (see ``_parse_extra``).
    """
    @classmethod
    def forward(cls, ctx, x, weight, bias, running_mean, running_var,
                extra, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01):
        # Save context
        cls._parse_extra(ctx, extra)
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        ctx.affine = weight is not None and bias is not None
        # Prepare inputs
        # master_queue.maxsize is used as the worker count throughout, so
        # this is the global sample count across all replicas.
        count = _count_samples(x) * (ctx.master_queue.maxsize + 1)
        x = x.contiguous()
        weight = weight.contiguous() if ctx.affine else x.new_empty(0)
        bias = bias.contiguous() if ctx.affine else x.new_empty(0)
        if ctx.training:
            mean, var = _backend.mean_var(x)
            if ctx.is_master:
                means, vars = [mean.unsqueeze(0)], [var.unsqueeze(0)]
                # Collect per-device statistics from every worker.
                for _ in range(ctx.master_queue.maxsize):
                    mean_w, var_w = ctx.master_queue.get()
                    ctx.master_queue.task_done()
                    means.append(mean_w.unsqueeze(0))
                    vars.append(var_w.unsqueeze(0))
                means = comm.gather(means)
                vars = comm.gather(vars)
                mean = means.mean(0)
                # Total variance = mean of per-device variances plus the
                # variance of the per-device means.
                var = (vars + (mean - means) ** 2).mean(0)
                # Broadcast the global statistics back to all workers.
                tensors = comm.broadcast_coalesced((mean, var), [mean.get_device()] + ctx.worker_ids)
                for ts, queue in zip(tensors[1:], ctx.worker_queues):
                    queue.put(ts)
            else:
                # Send local statistics to the master, then block until the
                # reduced global statistics come back.
                ctx.master_queue.put((mean, var))
                mean, var = ctx.worker_queue.get()
                ctx.worker_queue.task_done()
            # Update running stats
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * count / (count - 1))
            # Mark in-place modified tensors
            ctx.mark_dirty(x, running_mean, running_var)
        else:
            mean, var = running_mean.contiguous(), running_var.contiguous()
            ctx.mark_dirty(x)
        # BN forward + activation
        _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps)
        _act_forward(ctx, x)
        # Output
        ctx.var = var
        # The saved x is the post-activation output; backward undoes the
        # activation first via _act_backward.
        ctx.save_for_backward(x, var, weight, bias)
        return x
    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        z, var, weight, bias = ctx.saved_tensors
        dz = dz.contiguous()
        # Undo activation
        _act_backward(ctx, z, dz)
        if ctx.training:
            edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps)
            if ctx.is_master:
                edzs, eydzs = [edz], [eydz]
                for _ in range(len(ctx.worker_queues)):
                    edz_w, eydz_w = ctx.master_queue.get()
                    ctx.master_queue.task_done()
                    edzs.append(edz_w)
                    eydzs.append(eydz_w)
                # Average the gradient statistics over all replicas.
                edz = comm.reduce_add(edzs) / (ctx.master_queue.maxsize + 1)
                eydz = comm.reduce_add(eydzs) / (ctx.master_queue.maxsize + 1)
                tensors = comm.broadcast_coalesced((edz, eydz), [edz.get_device()] + ctx.worker_ids)
                for ts, queue in zip(tensors[1:], ctx.worker_queues):
                    queue.put(ts)
            else:
                ctx.master_queue.put((edz, eydz))
                edz, eydz = ctx.worker_queue.get()
                ctx.worker_queue.task_done()
        else:
            edz = dz.new_zeros(dz.size(1))
            eydz = dz.new_zeros(dz.size(1))
        dx, dweight, dbias = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps)
        dweight = dweight if ctx.affine else None
        dbias = dbias if ctx.affine else None
        # One gradient slot per forward argument; non-tensor args get None.
        return dx, dweight, dbias, None, None, None, None, None, None, None, None
    @staticmethod
    def _parse_extra(ctx, extra):
        # Unpack the synchronisation primitives shared across replicas.
        ctx.is_master = extra["is_master"]
        if ctx.is_master:
            ctx.master_queue = extra["master_queue"]
            ctx.worker_queues = extra["worker_queues"]
            ctx.worker_ids = extra["worker_ids"]
        else:
            ctx.master_queue = extra["master_queue"]
            ctx.worker_queue = extra["worker_queue"]
# Functional entry points for the autograd Functions above.
inplace_abn = InPlaceABN.apply
inplace_abn_sync = InPlaceABNSync.apply
__all__ = ["inplace_abn", "inplace_abn_sync", "ACT_LEAKY_RELU", "ACT_ELU", "ACT_NONE"]
| |
# -*- coding: utf8 -*-
from __future__ import print_function
import ast
import re
import warnings
import yaml # use yaml instead of json to get non unicode (works with ascii only data)
from rlp.utils import decode_hex, encode_hex
from ethereum import utils
from ethereum.utils import (
big_endian_to_int, ceil32, int_to_big_endian, encode_int, is_numeric, isnumeric, is_string,
rzpad, TT255, TT256, zpad,
)
# The number of bytes is encoded as a uint256
# Type used to encode a string/bytes length
# (base, sub, arraylist) 3-tuple as consumed by encode_single.
INT256 = 'uint', '256', []
lentyp = INT256 # pylint: disable=invalid-name
class EncodingError(Exception):
    """Base error raised when a value cannot be ABI encoded."""
class ValueOutOfBounds(EncodingError):
    """Raised when a value does not fit the binary range of its type."""
def json_decode(data):
    """Decode a JSON document; yaml is used instead of json to get
    non-unicode strings on ascii-only data (see the import comment)."""
    return yaml.safe_load(data)
def split32(data):
    """ Split data into pieces of 32 bytes. """
    return [data[pos:pos + 32] for pos in range(0, len(data), 32)]
def _canonical_type(name): # pylint: disable=too-many-return-statements
""" Replace aliases to the corresponding type to compute the ids. """
if name == 'int':
return 'int256'
if name == 'uint':
return 'uint256'
if name == 'fixed':
return 'fixed128x128'
if name == 'ufixed':
return 'ufixed128x128'
if name.startswith('int['):
return 'int256' + name[3:]
if name.startswith('uint['):
return 'uint256' + name[4:]
if name.startswith('fixed['):
return 'fixed128x128' + name[5:]
if name.startswith('ufixed['):
return 'ufixed128x128' + name[6:]
return name
def normalize_name(name):
    """ Return normalized event/function name. """
    # partition() yields the full string unchanged when '(' is absent.
    return name.partition('(')[0]
def method_id(name, encode_types):
    """ Return the unique method id.
    The signature is defined as the canonical expression of the basic
    prototype, i.e. the function name with the parenthesised list of parameter
    types. Parameter types are split by a single comma - no spaces are used.
    The method id is defined as the first four bytes (left, high-order in
    big-endian) of the Keccak (SHA-3) hash of the signature of the function.
    """
    canonical_types = ','.join(_canonical_type(type_) for type_ in encode_types)
    function_signature = '{0}({1})'.format(name, canonical_types)
    return big_endian_to_int(utils.sha3(function_signature)[:4])
def event_id(name, encode_types):
    """ Return the event id.
    Defined as:
        `keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")`
    Where `canonical_type_of` is a function that simply returns the canonical
    type of a given argument, e.g. for uint indexed foo, it would return
    uint256). Note the lack of spaces.
    """
    canonical_types = ','.join(_canonical_type(type_) for type_ in encode_types)
    event_signature = '{0}({1})'.format(name, canonical_types)
    return big_endian_to_int(utils.sha3(event_signature))
def decint(n, signed=False): # pylint: disable=invalid-name,too-many-branches
    ''' Decode an unsigned/signed integer.

    Accepts booleans, ``None``, numbers and big-endian byte strings
    (40-character values are treated as hex-encoded). Raises
    :class:`EncodingError` when the value does not fit the 256-bit
    signed/unsigned range.
    '''
    if isinstance(n, str):
        n = utils.to_string(n)
    if n is True:
        return 1
    if n is False:
        return 0
    if n is None:
        return 0
    if is_numeric(n):
        if signed:
            if not -TT255 <= n <= TT255 - 1:
                raise EncodingError('Number out of range: %r' % n)
        else:
            if not 0 <= n <= TT256 - 1:
                raise EncodingError('Number out of range: %r' % n)
        return n
    if is_string(n):
        # Hex-decode 40-character values (e.g. addresses) *before* the
        # length check. Previously the `len(n) > 32` check came first and
        # raised 'String too long' for every 40-character input, making
        # the hex branch unreachable.
        if len(n) == 40:
            int_bigendian = decode_hex(n)
        else:
            int_bigendian = n # pylint: disable=redefined-variable-type
        if len(int_bigendian) > 32:
            raise EncodingError('String too long: %r' % n)
        result = big_endian_to_int(int_bigendian)
        if signed:
            # reinterpret the unsigned value as two's complement
            if result >= TT255:
                result -= TT256
            if not -TT255 <= result <= TT255 - 1:
                raise EncodingError('Number out of range: %r' % n)
        else:
            if not 0 <= result <= TT256 - 1:
                raise EncodingError('Number out of range: %r' % n)
        return result
    raise EncodingError('Cannot decode integer: %r' % n)
def encode_single(typ, arg): # pylint: disable=too-many-return-statements,too-many-branches,too-many-statements,too-many-locals
    ''' Encode `arg` as `typ`.
    `arg` will be encoded in a best effort manner, were necessary the function
    will try to correctly define the underlying binary representation (ie.
    decoding a hex-encoded address/hash).
    Args:
        typ (Tuple[(str, int, list)]): A 3-tuple defining the `arg` type.
            The first element defines the type name.
            The second element defines the type length in bits.
            The third element defines if it's an array type.
            Together the first and second defines the elementary type, the third
            element must be present but is ignored.
            Valid type names are:
            - uint
            - int
            - bool
            - ufixed
            - fixed
            - string
            - bytes
            - hash
            - address
        arg (object): The object to be encoded, it must be a python object
            compatible with the `typ`.
    Raises:
        ValueError: when an invalid `typ` is supplied.
        ValueOutOfBounds: when `arg` cannot be encoded as `typ` because of the
            binary constraints.
    Note:
        This function doesn't work with array types, for that use the `enc`
        function.
    '''
    base, sub, _ = typ
    if base == 'uint':
        sub = int(sub)
        if not (0 < sub <= 256 and sub % 8 == 0):
            raise ValueError('invalid unsigned integer bit length {}'.format(sub))
        try:
            i = decint(arg, signed=False)
        except EncodingError:
            # arg is larger than 2**256
            raise ValueOutOfBounds(repr(arg))
        if not 0 <= i < 2 ** sub:
            raise ValueOutOfBounds(repr(arg))
        value_encoded = int_to_big_endian(i)
        return zpad(value_encoded, 32)
    if base == 'int':
        sub = int(sub)
        bits = sub - 1
        if not (0 < sub <= 256 and sub % 8 == 0):
            raise ValueError('invalid integer bit length {}'.format(sub))
        try:
            i = decint(arg, signed=True)
        except EncodingError:
            # arg is larger than 2**255
            raise ValueOutOfBounds(repr(arg))
        if not -2 ** bits <= i < 2 ** bits:
            raise ValueOutOfBounds(repr(arg))
        value = i % 2 ** sub  # convert negative to "equivalent" positive
        value_encoded = int_to_big_endian(value)
        return zpad(value_encoded, 32)
    if base == 'bool':
        if arg is True:
            value_encoded = int_to_big_endian(1)
        elif arg is False:
            value_encoded = int_to_big_endian(0)
        else:
            raise ValueError('%r is not bool' % arg)
        return zpad(value_encoded, 32)
    if base == 'ufixed':
        sub = str(sub)  # pylint: disable=redefined-variable-type
        high_str, low_str = sub.split('x')
        high = int(high_str)
        low = int(low_str)
        if not (0 < high + low <= 256 and high % 8 == 0 and low % 8 == 0):
            raise ValueError('invalid unsigned fixed length {}'.format(sub))
        if not 0 <= arg < 2 ** high:
            raise ValueOutOfBounds(repr(arg))
        # shift the fractional part into the integer representation
        float_point = arg * 2 ** low
        fixed_point = int(float_point)
        return zpad(int_to_big_endian(fixed_point), 32)
    if base == 'fixed':
        sub = str(sub)  # pylint: disable=redefined-variable-type
        high_str, low_str = sub.split('x')
        high = int(high_str)
        low = int(low_str)
        bits = high - 1
        if not (0 < high + low <= 256 and high % 8 == 0 and low % 8 == 0):
            raise ValueError('invalid unsigned fixed length {}'.format(sub))
        if not -2 ** bits <= arg < 2 ** bits:
            raise ValueOutOfBounds(repr(arg))
        float_point = arg * 2 ** low
        fixed_point = int(float_point)
        value = fixed_point % 2 ** 256  # two's complement for negatives
        return zpad(int_to_big_endian(value), 32)
    if base == 'string':
        if isinstance(arg, utils.unicode):
            arg = arg.encode('utf8')
        else:
            try:
                arg.decode('utf8')
            except UnicodeDecodeError:
                raise ValueError('string must be utf8 encoded')
        if len(sub):  # fixed length
            if not 0 <= len(arg) <= int(sub):
                raise ValueError('invalid string length {}'.format(sub))
            if not 0 <= int(sub) <= 32:
                raise ValueError('invalid string length {}'.format(sub))
            return rzpad(arg, 32)
        if not 0 <= len(arg) < TT256:
            raise Exception('Integer invalid or out of range: %r' % arg)
        # dynamic value: 32-byte length prefix, then right-padded payload
        length_encoded = zpad(int_to_big_endian(len(arg)), 32)
        value_encoded = rzpad(arg, utils.ceil32(len(arg)))
        return length_encoded + value_encoded
    if base == 'bytes':
        if not is_string(arg):
            raise EncodingError('Expecting string: %r' % arg)
        arg = utils.to_string(arg)  # py2: force unicode into str
        if len(sub):  # fixed length
            # Fixed: these errors previously said 'string must be utf8
            # encoded' (copy-pasted from the string branch) although they
            # report invalid lengths.
            if not 0 <= len(arg) <= int(sub):
                raise ValueError('invalid bytes length {}'.format(sub))
            if not 0 <= int(sub) <= 32:
                raise ValueError('invalid bytes length {}'.format(sub))
            return rzpad(arg, 32)
        if not 0 <= len(arg) < TT256:
            raise Exception('Integer invalid or out of range: %r' % arg)
        length_encoded = zpad(int_to_big_endian(len(arg)), 32)
        value_encoded = rzpad(arg, utils.ceil32(len(arg)))
        return length_encoded + value_encoded
    if base == 'hash':
        if not (int(sub) and int(sub) <= 32):
            raise EncodingError('too long: %r' % arg)
        if isnumeric(arg):
            return zpad(encode_int(arg), 32)
        if len(arg) == int(sub):
            return zpad(arg, 32)
        if len(arg) == int(sub) * 2:
            # hex-encoded hash
            return zpad(decode_hex(arg), 32)
        raise EncodingError('Could not parse hash: %r' % arg)
    if base == 'address':
        assert sub == ''
        if isnumeric(arg):
            return zpad(encode_int(arg), 32)
        if len(arg) == 20:
            return zpad(arg, 32)
        if len(arg) == 40:
            return zpad(decode_hex(arg), 32)
        if len(arg) == 42 and arg[:2] == '0x':
            return zpad(decode_hex(arg[2:]), 32)
        raise EncodingError('Could not parse address: %r' % arg)
    raise EncodingError('Unhandled type: %r %r' % (base, sub))
class ContractTranslator(object):
    def __init__(self, contract_interface):
        """Build function/event/constructor lookup tables from a contract ABI.

        Args:
            contract_interface: The contract ABI, either as a JSON string
                or as an already-decoded list of description dicts.
        """
        if is_string(contract_interface):
            contract_interface = json_decode(contract_interface)
        self.constructor_data = None
        self.function_data = {}
        self.event_data = {}
        for description in contract_interface:
            # Input types and (type, name) pairs are used by all kinds.
            encode_types = [
                element['type']
                for element in description['inputs']
            ]
            signature = [
                (element['type'], element['name'])
                for element in description['inputs']
            ]
            # type can be omitted, defaulting to function
            if description.get('type', 'function') == 'function':
                normalized_name = normalize_name(description['name'])
                decode_types = [
                    element['type']
                    for element in description['outputs']
                ]
                self.function_data[normalized_name] = {
                    'prefix': method_id(normalized_name, encode_types),
                    'encode_types': encode_types,
                    'decode_types': decode_types,
                    'is_constant': description.get('constant', False),
                    'signature': signature,
                }
            elif description['type'] == 'event':
                normalized_name = normalize_name(description['name'])
                indexed = [
                    element['indexed']
                    for element in description['inputs']
                ]
                names = [
                    element['name']
                    for element in description['inputs']
                ]
                # event_id == topics[0]
                self.event_data[event_id(normalized_name, encode_types)] = {
                    'types': encode_types,
                    'name': normalized_name,
                    'names': names,
                    'indexed': indexed,
                    'anonymous': description.get('anonymous', False),
                }
            elif description['type'] == 'constructor':
                if self.constructor_data is not None:
                    raise ValueError('Only one constructor is supported.')
                self.constructor_data = {
                    'encode_types': encode_types,
                    'signature': signature,
                }
            else:
                raise ValueError('Unknown type {}'.format(description['type']))
    def encode(self, function_name, args):
        """Deprecated alias for :meth:`encode_function_call`."""
        warnings.warn('encode is deprecated, please use encode_function_call', DeprecationWarning)
        return self.encode_function_call(function_name, args)
    def decode(self, function_name, data):
        """Deprecated alias for :meth:`decode_function_result`."""
        warnings.warn('decode is deprecated, please use decode_function_result', DeprecationWarning)
        return self.decode_function_result(function_name, data)
def encode_function_call(self, function_name, args):
""" Return the encoded function call.
Args:
function_name (str): One of the existing functions described in the
contract interface.
args (List[object]): The function arguments that wll be encoded and
used in the contract execution in the vm.
Return:
bin: The encoded function name and arguments so that it can be used
with the evm to execute a funcion call, the binary string follows
the Ethereum Contract ABI.
"""
if function_name not in self.function_data:
raise ValueError('Unkown function {}'.format(function_name))
description = self.function_data[function_name]
function_selector = zpad(encode_int(description['prefix']), 4)
arguments = encode_abi(description['encode_types'], args)
return function_selector + arguments
def decode_function_result(self, function_name, data):
""" Return the function call result decoded.
Args:
function_name (str): One of the existing functions described in the
contract interface.
data (bin): The encoded result from calling `function_name`.
Return:
List[object]: The values returned by the call to `function_name`.
"""
description = self.function_data[function_name]
arguments = decode_abi(description['decode_types'], data)
return arguments
def encode_constructor_arguments(self, args):
""" Return the encoded constructor call. """
if self.constructor_data is None:
raise ValueError("The contract interface didn't have a constructor")
return encode_abi(self.constructor_data['encode_types'], args)
def decode_event(self, log_topics, log_data):
""" Return a dictionary representation the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments.
"""
# https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI#function-selector-and-argument-encoding
# topics[0]: keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")
# If the event is declared as anonymous the topics[0] is not generated;
if not len(log_topics) or log_topics[0] not in self.event_data:
raise ValueError('Unknow log type')
event_id_ = log_topics[0]
event = self.event_data[event_id_]
# data: abi_serialise(EVENT_NON_INDEXED_ARGS)
# EVENT_NON_INDEXED_ARGS is the series of EVENT_ARGS that are not
# indexed, abi_serialise is the ABI serialisation function used for
# returning a series of typed values from a function.
unindexed_types = [
type_
for type_, indexed in zip(event['types'], event['indexed'])
if not indexed
]
unindexed_args = decode_abi(unindexed_types, log_data)
# topics[n]: EVENT_INDEXED_ARGS[n - 1]
# EVENT_INDEXED_ARGS is the series of EVENT_ARGS that are indexed
indexed_count = 1 # skip topics[0]
result = {}
for name, type_, indexed in zip(event['names'], event['types'], event['indexed']):
if indexed:
topic_bytes = utils.zpad(
utils.encode_int(log_topics[indexed_count]),
32,
)
indexed_count += 1
value = decode_single(process_type(type_), topic_bytes)
else:
value = unindexed_args.pop(0)
result[name] = value
result['_event_type'] = utils.to_string(event['name'])
return result
def listen(self, log, noprint=True):
"""
Return a dictionary representation of the Log instance.
Note:
This function won't work with anonymous events.
Args:
log (processblock.Log): The Log instance that needs to be parsed.
noprint (bool): Flag to turn off priting of the decoded log instance.
"""
try:
result = self.decode_event(log.topics, log.data)
except ValueError:
return # api compatibility
if not noprint:
print(result)
return result
def process_type(typ):
    """Parse an ABI type string into its component parts.

    Args:
        typ: ABI type such as 'uint256', 'bytes32', 'fixed128x128' or
            'address[2][]'.

    Return:
        tuple: (base, sub, arrlist) where base is the textual base type,
            sub the size suffix as a string, and arrlist the list of
            array dimensions (each either [] or [n]).
    """
    # Crazy reg expression to separate out base type component (eg. uint),
    # size (eg. 256, 128x128, none), array component (eg. [], [45], none)
    # Raw strings so '\[' is a regex escape rather than an (invalid)
    # string escape sequence.
    regexp = r'([a-z]*)([0-9]*x?[0-9]*)((\[[0-9]*\])*)'
    base, sub, arr, _ = re.match(regexp, utils.to_string_for_regexp(typ)).groups()
    arrlist = re.findall(r'\[[0-9]*\]', arr)
    assert len(''.join(arrlist)) == len(arr), \
        "Unknown characters found in array declaration"
    # Check validity of string type
    if base == 'string' or base == 'bytes':
        assert re.match('^[0-9]*$', sub), \
            "String type must have no suffix or numerical suffix"
        assert not sub or int(sub) <= 32, \
            "Maximum 32 bytes for fixed-length str or bytes"
    # Check validity of integer type
    elif base == 'uint' or base == 'int':
        assert re.match('^[0-9]+$', sub), \
            "Integer type must have numerical suffix"
        assert 8 <= int(sub) <= 256, \
            "Integer size out of bounds"
        assert int(sub) % 8 == 0, \
            "Integer size must be multiple of 8"
    # Check validity of fixed type
    elif base == 'ufixed' or base == 'fixed':
        assert re.match('^[0-9]+x[0-9]+$', sub), \
            "Real type must have suffix of form <high>x<low>, eg. 128x128"
        high, low = [int(x) for x in sub.split('x')]
        assert 8 <= (high + low) <= 256, \
            "Real size out of bounds (max 32 bytes)"
        assert high % 8 == 0 and low % 8 == 0, \
            "Real high/low sizes must be multiples of 8"
    # Check validity of hash type
    elif base == 'hash':
        assert re.match('^[0-9]+$', sub), \
            "Hash type must have numerical suffix"
    # Check validity of address type
    elif base == 'address':
        assert sub == '', "Address cannot have suffix"
    return base, sub, [ast.literal_eval(x) for x in arrlist]
# Returns the static size of a type, or None if dynamic
def get_size(typ):
    """Return the fixed byte width of processed type *typ*, or None.

    Dynamic types - unsized string/bytes, arrays with a [] dimension,
    or arrays of dynamic elements - have no static size.
    """
    base, sub, arrlist = typ
    if not arrlist:
        # Bare value: unsized string/bytes are dynamic, everything else
        # occupies one 32-byte word.
        if base in ('string', 'bytes') and not sub:
            return None
        return 32
    outermost = arrlist[-1]
    if outermost == []:
        return None
    element_size = get_size((base, sub, arrlist[:-1]))
    if element_size is None:
        return None
    return outermost[0] * element_size
# Encodes a single value (static or dynamic)
def enc(typ, arg):
    """Encode one value *arg* of processed type *typ* into ABI bytes.

    Dynamic strings/bytes delegate to encode_single; dynamic arrays use
    the head/tail layout; static values and arrays pack sequentially.
    """
    base, sub, arrlist = typ
    type_size = get_size(typ)
    if base in ('string', 'bytes') and not sub:
        return encode_single(typ, arg)
    # Encode dynamic-sized lists via the head/tail mechanism described in
    # https://github.com/ethereum/wiki/wiki/Proposal-for-new-ABI-value-encoding
    if type_size is None:
        assert isinstance(arg, list), \
            "Expecting a list argument"
        subtyp = base, sub, arrlist[:-1]
        subsize = get_size(subtyp)
        myhead, mytail = b'', b''
        if arrlist[-1] == []:
            # Unsized array: the element count is prefixed in the head
            myhead += enc(INT256, len(arg))
        else:
            assert len(arg) == arrlist[-1][0], \
                "Wrong array size: found %d, expecting %d" % \
                (len(arg), arrlist[-1][0])
        for i in range(len(arg)):
            if subsize is None:
                # Dynamic children: head holds their offsets into the tail
                myhead += enc(INT256, 32 * len(arg) + len(mytail))
                mytail += enc(subtyp, arg[i])
            else:
                myhead += enc(subtyp, arg[i])
        return myhead + mytail
    # Encode static-sized lists via sequential packing
    else:
        if arrlist == []:
            return utils.to_string(encode_single(typ, arg))
        else:
            subtyp = base, sub, arrlist[:-1]
            o = b''
            for x in arg:
                o += enc(subtyp, x)
            return o
# Encodes multiple arguments using the head/tail mechanism
def encode_abi(types, args):
    """Encode *args* according to ABI *types* into one byte string."""
    proctypes = [process_type(typ) for typ in types]
    sizes = [get_size(proctype) for proctype in proctypes]
    # The head stores static values in place and a 32-byte offset slot
    # for every dynamic value.
    headsize = 0
    for idx in range(len(args)):
        headsize += 32 if sizes[idx] is None else sizes[idx]
    head = b''
    tail = b''
    for idx in range(len(args)):
        if sizes[idx] is None:
            # Offset of this dynamic value, measured from the head start
            head += enc(INT256, headsize + len(tail))
            tail += enc(proctypes[idx], args[idx])
        else:
            head += enc(proctypes[idx], args[idx])
    return head + tail
# Decodes a single base datum
def decode_single(typ, data):
    """Decode one 32-byte-padded ABI word *data* of processed type *typ*.

    Returns hex for address, raw bytes for hash/string/bytes, int for
    uint/int, float for ufixed/fixed, bool for bool; an unrecognized
    base falls through and returns None.
    """
    base, sub, _ = typ
    if base == 'address':
        # Address occupies the low 20 bytes of the word
        return encode_hex(data[12:])
    elif base == 'hash':
        return data[32 - int(sub):]
    elif base == 'string' or base == 'bytes':
        if len(sub):
            # Fixed size: value sits in the leading bytes
            return data[:int(sub)]
        else:
            # Dynamic: 32-byte length prefix followed by the payload
            l = big_endian_to_int(data[0:32])
            return data[32:][:l]
    elif base == 'uint':
        return big_endian_to_int(data)
    elif base == 'int':
        # Two's complement: fold values >= 2**(bits-1) to negatives
        o = big_endian_to_int(data)
        return (o - 2 ** int(sub)) if o >= 2 ** (int(sub) - 1) else o
    elif base == 'ufixed':
        high, low = [int(x) for x in sub.split('x')]
        # NOTE(review): '* 1.0 //' floor-divides and so discards the
        # fractional part of the fixed-point value - confirm a true
        # division was not intended here.
        return big_endian_to_int(data) * 1.0 // 2 ** low
    elif base == 'fixed':
        high, low = [int(x) for x in sub.split('x')]
        o = big_endian_to_int(data)
        i = (o - 2 ** (high + low)) if o >= 2 ** (high + low - 1) else o
        # NOTE(review): same floor-division caveat as 'ufixed' above
        return (i * 1.0 // 2 ** low)
    elif base == 'bool':
        return bool(int(encode_hex(data), 16))
# Decodes multiple arguments using the head/tail mechanism
def decode_abi(types, data):
    """Decode *data* into the list of values described by ABI *types*."""
    # Process types
    proctypes = [process_type(typ) for typ in types]
    # Get sizes of everything
    sizes = [get_size(typ) for typ in proctypes]
    # Initialize array of outputs
    outs = [None] * len(types)
    # Initialize array of start positions
    start_positions = [None] * len(types) + [len(data)]
    # If a type is static, grab the data directly, otherwise record
    # its start position
    pos = 0
    for i, typ in enumerate(types):
        if sizes[i] is None:
            start_positions[i] = big_endian_to_int(data[pos:pos + 32])
            # Propagate this offset back over preceding still-None
            # (static) slots so every start_positions[i + 1] lookup
            # below resolves to the next dynamic value's offset.
            j = i - 1
            while j >= 0 and start_positions[j] is None:
                start_positions[j] = start_positions[i]
                j -= 1
            pos += 32
        else:
            outs[i] = data[pos:pos + sizes[i]]
            pos += sizes[i]
    # We add a start position equal to the length of the entire data
    # for convenience.
    j = len(types) - 1
    while j >= 0 and start_positions[j] is None:
        start_positions[j] = start_positions[len(types)]
        j -= 1
    assert pos <= len(data), "Not enough data for head"
    # Grab the data for tail arguments using the start positions
    # calculated above
    for i, typ in enumerate(types):
        if sizes[i] is None:
            offset = start_positions[i]
            next_offset = start_positions[i + 1]
            outs[i] = data[offset:next_offset]
    # Recursively decode them all
    return [dec(proctypes[i], outs[i]) for i in range(len(outs))]
# Decode a single value (static or dynamic)
def dec(typ, arg):
    """Decode one ABI value *arg* of processed type *typ*.

    Handles dynamic strings/bytes, dynamic and static arrays
    (recursively), and falls back to decode_single for base values.
    """
    base, sub, arrlist = typ
    sz = get_size(typ)
    # Dynamic-sized strings are encoded as <len(str)> + <str>
    if base in ('string', 'bytes') and not sub:
        L = big_endian_to_int(arg[:32])
        assert len(arg[32:]) == ceil32(L), "Wrong data size for string/bytes object"
        return arg[32:][:L]
    # Dynamic-sized arrays
    elif sz is None:
        L = big_endian_to_int(arg[:32])
        subtyp = base, sub, arrlist[:-1]
        subsize = get_size(subtyp)
        # If children are dynamic, use the head/tail mechanism. Fortunately,
        # here the code is simpler since we do not have to worry about
        # mixed dynamic and static children, as we do in the top-level multi-arg
        # case
        if subsize is None:
            assert len(arg) >= 32 + 32 * L, "Not enough data for head"
            start_positions = [big_endian_to_int(arg[32 + 32 * i: 64 + 32 * i])
                               for i in range(L)] + [len(arg)]
            outs = [arg[start_positions[i]: start_positions[i + 1]]
                    for i in range(L)]
            return [dec(subtyp, out) for out in outs]
        # If children are static, then grab the data slice for each one and
        # sequentially decode them manually
        else:
            return [dec(subtyp, arg[32 + subsize * i: 32 + subsize * (i + 1)])
                    for i in range(L)]
    # Static-sized arrays: decode piece-by-piece
    elif len(arrlist):
        L = arrlist[-1][0]
        subtyp = base, sub, arrlist[:-1]
        subsize = get_size(subtyp)
        return [dec(subtyp, arg[subsize * i:subsize * (i + 1)])
                for i in range(L)]
    else:
        return decode_single(typ, arg)
| |
#!/usr/bin/env python
from datetime import datetime, timedelta
import logging
from optparse import OptionParser
import os
import re
from pheme.util.config import Config, configure_logging
from pheme.util.datefile import Datefile
from pheme.util.util import parseDate
from pheme.util.pg_access import AlchemyAccess, DirectAccess
from pheme.longitudinal.tables import Report
from pheme.webAPIclient.archive import document_find, document_store
from pheme.webAPIclient.transfer import PHINMS_client, Distribute_client
# Front end to generation of daily essence reports from database
# OptionParser usage text; %prog is replaced with the script name.
usage = """ %prog [options] database date
Generates a daily report from the requested database for the requested
date. Includes the 24 hour period for that date. All admissions and
any subsequent updates for admissions from that date as best known at
current time. Also includes any updates to previous days admissions
that haven't previously been generated.
database - name of the database supporting the essence view.
date - YYYY-MM-DD or YYYYMMDD format for the date of the report.
Try `%prog --help` for more information.
"""
def strSansNone(item):
    """Don't want the 'None' string, return str(item) or empty"""
    return '' if item is None else str(item)
def raiseValueError(message):
    """Default error callback; halts execution by raising ValueError"""
    raise ValueError(message)
class ReportCriteria(object):
    """Container to house common report criteria

    Essentially a list of properties with minor intel for setting and
    methods to fetch the criteria list for detailing decisions used in
    generating a report.

    """
    def __init__(self):
        # Maintain a dictionary for all criteria
        self._crit = dict()
        # Some attributes shouldn't get reset after report_method is
        # called. Maintain a trivial attr list to enforce
        self._lock_attrs = ()
    def credentials(self, user, password):
        """Set user/password credentials for property validation

        The report criteria shouldn't depend on the db user, but some
        of the properties require database query validation.
        """
        self.user = user
        self.password = password
    @property
    def error_callback(self):
        "Registered error callback or function to raise ValueError"
        if not hasattr(self, '_error_callback'):
            return raiseValueError
        else:
            return self._error_callback
    @error_callback.setter
    def error_callback(self, func):
        """Provide a callback to report errors

        It is expected a call to self.error_callback will halt
        execution via a raised exception.
        """
        if 'error_callback' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        self._error_callback = func
    @property
    def start_date(self):
        return self._crit.get('start_date')
    @start_date.setter
    def start_date(self, value):
        if 'start_date' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        if isinstance(value, basestring):
            value = parseDate(value)
        self._crit['start_date'] = value
    @property
    def end_date(self):
        return self._crit.get('end_date')
    @end_date.setter
    def end_date(self, value):
        if 'end_date' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        if isinstance(value, basestring):
            value = parseDate(value)
        self._crit['end_date'] = value
    @property
    def reportable_region(self):
        return self._crit.get('reportable_region')
    @reportable_region.setter
    def reportable_region(self, value):
        # NB: a second, non-validating definition of this property used
        # to appear earlier in the class and was silently shadowed by
        # this one; the dead duplicate has been removed.
        if 'reportable_region' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        # Confirm the requested region is in the db.
        # NOTE(review): the region name is interpolated directly into
        # SQL; values are operator-supplied today - consider a
        # parameterized query if that ever changes.
        if value:
            connection = DirectAccess(database=self.database,
                                      user=self.user,
                                      password=self.password)
            cursor = connection.raw_query("SELECT count(*) FROM "\
                                          "internal_reportable_region "\
                                          "WHERE region_name = '%s'" %
                                          value)
            if cursor.next()[0] < 1:
                self.error_callback("%s region not found in "\
                                    "internal_reportable_region table" %
                                    value)
            connection.close()
        self._crit['reportable_region'] = value
    @property
    def include_vitals(self):
        return self._crit.get('include_vitals')
    @include_vitals.setter
    def include_vitals(self, value):
        if 'include_vitals' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        self._crit['include_vitals'] = value
    @property
    def include_updates(self):
        return self._crit.get('include_updates')
    @include_updates.setter
    def include_updates(self, value):
        if 'include_updates' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        self._crit['include_updates'] = value
    @property
    def database(self):
        return self._crit.get('database')
    @database.setter
    def database(self, value):
        if 'database' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        self._crit['database'] = value
    @property
    def patient_class(self):
        return self._crit.get('patient_class')
    @patient_class.setter
    def patient_class(self, value):
        if 'patient_class' in self._lock_attrs:
            raise AttributeError("can't set attribute")
        if value and value not in ('E', 'I', 'O'):
            self.error_callback("patient_class limited to one of [E,I,O]")
        self._crit['patient_class'] = value
    @property
    def report_method(self):
        """Persisted in database report.report_method

        Uniquely defines the report, used when including updates for
        noting when last like report was run.
        """
        # Look out for runtime changes, or errors where these
        # are set after using the report_method(). The attributes
        # used here should't change after this method is called, as
        # that would be misleading.
        self._lock_attrs = ('reportable_region',
                            'patient_class', 'include_vitals')
        details = ['essence_report', GenerateReport.__version__]
        for attr in self._lock_attrs:
            # If the value is 'True', store the attr name for the sake
            # of legibility
            # NOTE(review): strSansNone() has already converted the
            # value to a string, so 'detail is True' can never fire and
            # the attr-name substitution never happens; fixing it would
            # change persisted report_method strings - confirm before
            # changing.
            detail = strSansNone(getattr(self, attr))
            if detail is True:
                detail = attr
            details.append(detail)
        return ':'.join(details)
class GenerateReport(object):
    """ Process options and generate the requested report. Optionally
    persists the file to the filesystem, and uploads to the DOH sftp
    server.
    """
    __version__ = '0.2'
    config = Config()
    # Site excluded from non-regional reports; see _build_visit_join_table
    IGNORE_SITE = config.get('longitudinal', 'ignore_site', default='')
    # Order matters, create a tuple of paired values (reportColumn,
    # essenceColumn) - NB, the Diagnosis column is being bastardized.
    # Previously there was an SQL function to do the subselect, but it
    # ran way too slow. Now contains the foreign key to join w/ the
    # diagnosis for the respective visit.
    diagnosis_column_index = 7
    patient_class_column_index = 11
    columns = (('Hosp', 'hospital'),
               ('Reg Date', 'visit_date'),
               ('Time', 'visit_time'),
               ('Sex', 'gender'),
               ('Age', 'age'),
               ('Reason For Visit', 'chief_complaint'),
               ('Zip Code', 'zip'),
               ('Diagnosis', 'visit_pk'),
               ('Admit Status', 'gipse_disposition'),
               ('Medical Record No.', 'patient_id'),
               ('Visit Record No.', 'visit_id'),
               ('Service Area', 'patient_class'),)
    # Guard the index constants against column reordering
    assert(columns[diagnosis_column_index][1] == 'visit_pk')
    assert(columns[patient_class_column_index][1] == 'patient_class')
    def __init__(self, user=None, password=None, report_criteria=None,
                 datefile=None):
        """Initialize report generation.

        :param user: database user
        :param password: database password
        :param report_criteria: ReportCriteria defining specifics
        :param datefile: useful for persistent walks through time
        """
        self.user = user
        self.password = password
        self.criteria = report_criteria
        self.database = self.criteria.database
        if datefile:
            assert((self.criteria.start_date, self.criteria.end_date)
                   == datefile.get_date_range())
            self.datePersistence = datefile
        self._diags = {}
        self._prepare_output_file()
        self._prepare_columns()
        self._set_transport()
    def _prepare_columns(self):
        """Drop the patient_class column when splitting by patient class."""
        # Don't include the patient_class column if splitting out by
        # patient_class
        if self.criteria.patient_class:
            len_b4 = len(self.columns)
            self.columns =\
                self.columns[:self.patient_class_column_index] \
                + self.columns[self.patient_class_column_index + 1:]
            assert(len(self.columns) + 1 == len_b4)
    def _set_transport(self):
        """Plug in the appropriate transport mechanism"""
        # Transport strategies differ for the different reports
        if self.criteria.reportable_region:
            self._transport = Distribute_client(zip_first=True)
        else:
            self._transport = PHINMS_client(zip_first=True)
    def _generate_output_filename(self, start_date=None,
                                  end_date=None):
        """Return the tmp-dir path for the report file.

        Defaults to the criteria's date range; the filename embeds the
        report_method and the date (or date range) covered.
        """
        start_date = self.criteria.start_date if start_date is None\
            else start_date
        end_date = self.criteria.end_date if end_date is None else end_date
        datestr = end_date.strftime('%Y%m%d')
        if start_date != end_date:
            datestr = '-'.join((start_date.strftime('%Y%m%d'),
                                end_date.strftime('%Y%m%d')))
        filename = self.criteria.report_method + '-' + datestr + '.txt'
        config = Config()
        tmp_dir = config.get('general', 'tmp_dir', default='/tmp')
        filepath = os.path.join(tmp_dir, filename)
        return filepath
    def _prepare_output_file(self):
        """Open the local filesystem file for output"""
        filepath = self.\
            _generate_output_filename(start_date=self.criteria.start_date,
                                      end_date=self.criteria.end_date)
        # watch for oversight errors; notify if like report exists -
        # unless it's size zero (from a previous failed run)
        if os.path.exists(filepath) and os.path.getsize(filepath):
            logging.warning("Found requested report file already "\
                            "exists - overwriting: '%s'"\
                            % filepath)
        self.output = open(filepath, 'w')
        self._output_filename = self.output.name
    @property
    def output_filename(self):
        # Set as a side effect of _prepare_output_file()
        if not hasattr(self, '_output_filename'):
            raise RuntimeError("prerequisite call to "\
                               "_prepare_output_file() "\
                               "didn't happen!")
        return self._output_filename
    def _header(self):
        """Return the pipe-delimited header row for the report."""
        if self.criteria.include_vitals:
            columns = [c[0] for c in self.columns]
            columns += ('Measured Temperature', 'O2 Saturation',
                        'Self-Reported Influenza Vaccine',
                        'Self-Reported H1N1 Vaccine')
            return '|'.join(columns)
        else:
            return '|'.join([c[0] for c in self.columns])
    def _build_join_tables(self):
        """ Scope continues to grow, build all join tables necessary
        for the query. Some are only necessary with certain features
        on.
        """
        # Always need the list of reportable visits
        self._build_visit_join_table()
        if self.criteria.include_vitals:
            self._build_vitals_join_table()
    def _build_visit_join_table(self):
        """ Helper in selection of visits for the report - this method
        builds a temporary table and populates it with the visit_pks
        that belong in the report. This should include all visit_pks
        with the matching admit_datetime as well as any that have
        received updates since the last like report was produced.
        """
        # If include_vitals is on, we also need the visit_id to keep
        # the joins managable. vitals don't have a patient class, so
        # you can't join on the same values.
        sql = "CREATE TEMPORARY TABLE reportable_pks (pk "\
            "integer not null unique)"
        selectCols = "fact_visit.pk"
        self._getConn()
        self.access.raw_query(sql)
        # If we're only selecting those facilites in a region, the SQL
        # is more complicated - build up the respective clauses.
        joinClause = regionClause = ""
        if self.criteria.reportable_region:
            joinClause = "JOIN internal_reportable_region ON "\
                "internal_reportable_region.dim_facility_pk = "\
                "fact_visit.dim_facility_pk"
            regionClause = "AND region_name = '%s'" %\
                self.criteria.reportable_region
        # Another HACK! One site is not even wanted by the state DOH,
        # as it's being duplicated from another source, and ESSENCE
        # can't help but count them twice. Remove this one site
        # regardless
        else:
            joinClause = "JOIN internal_reportable_region ON "\
                "internal_reportable_region.dim_facility_pk = "\
                "fact_visit.dim_facility_pk"
            regionClause = "AND region_name = '%s'" % self.IGNORE_SITE
        # Limit by patient_class if requested. Note we may still end
        # up with visit ids that have changed patient classes, so more
        # pruning later is necessary.
        pc_limit = ""
        if self.criteria.patient_class:
            pc_limit = "AND patient_class = '%c'" %\
                self.criteria.patient_class
        # Start with all visits for the requested date range
        sql = "INSERT INTO reportable_pks SELECT %s FROM "\
            "fact_visit %s WHERE admit_datetime BETWEEN '%s' AND "\
            "'%s' %s %s" %\
            (selectCols, joinClause, self.criteria.start_date,
             self.criteria.end_date + timedelta(days=1),
             pc_limit, regionClause)
        self.access.raw_query(sql)
        if self.criteria.include_updates:
            # In this case, add all visits with updates since the
            # last run, but no newer than the requested date (in case
            # we're building reports forward from historical data)
            sql = "SELECT max(processed_datetime) FROM internal_report "\
                "WHERE report_method = '%s'" % self.criteria.report_method
            cursor = self.access.raw_query(sql)
            last_report_generated = cursor.fetchall()[0][0]
            if last_report_generated is None:
                last_report_generated = '2009-01-01'  # our epoch
            logging.debug("including updates, last_report_generated: "\
                          "%s", last_report_generated)
            sql = "INSERT INTO reportable_pks SELECT %(sel_cols)s FROM "\
                "fact_visit %(join_clause)s LEFT JOIN reportable_pks ON "\
                "reportable_pks.pk = fact_visit.pk WHERE "\
                "last_updated > '%(last_report)s' AND admit_datetime "\
                "< '%(date)s' AND reportable_pks.pk IS NULL "\
                "%(pc_limit)s %(region_clause)s" %\
                {'sel_cols': selectCols,
                 'last_report': last_report_generated,
                 'date': self.criteria.end_date + timedelta(days=1),
                 'pc_limit': pc_limit,
                 'join_clause': joinClause,
                 'region_clause': regionClause}
            self.access.raw_query(sql)
        cursor = self.access.raw_query("SELECT COUNT(*) FROM "\
                                       "reportable_pks")
        logging.debug("%d visits to report on", cursor.fetchall()[0][0])
    def _build_vitals_join_table(self):
        """When report is to include vitals - we use an additional
        temporary table (visit_loinc_data) to hold the data for more
        timely queries.

        Like the rest of the report, the list of interesting visits is
        limited to the rows in the reportable_pks - see
        _build_join_table() for details.
        """
        # NOTE: everything below the raise is intentionally dead code
        # until this query is ported to the current schema.
        raise ValueError('not ported yet')
        sql = """
        CREATE TEMPORARY TABLE visit_loinc_data (
        visit_id VARCHAR(255) not null,
        patient_class CHAR(1) default null,
        observation_id VARCHAR(255) not null,
        observation_result VARCHAR(255) not null)
        """
        self._getConn()
        self.access.raw_query(sql)
        sql = """
        INSERT INTO visit_loinc_data (visit_id, patient_class,
        observation_id, observation_result) SELECT visit.visit_id,
        visit.patient_class, observation_id,
        observation_result FROM visit JOIN hl7_visit ON
        visit.visit_id = hl7_visit.visit_id JOIN hl7_obx ON
        hl7_visit.hl7_msh_id = hl7_obx.hl7_msh_id JOIN
        reportable_pks ON reportable_pks.visit_id = visit.visit_id
        AND reportable_pks.patient_class = visit.patient_class
        WHERE
        observation_id in ('8310-5', '20564-1', '46077-4',
        '29544-4')
        """
        self.access.raw_query(sql)
    def _select_from_essence_view(self):
        """Build up the SQL select statement to be used in gathering
        the data for this report.
        """
        stmt = """SELECT %s FROM essence e JOIN reportable_pks ri
        ON e.visit_pk = ri.pk""" %\
            (','.join(['e.' + c[1] for c in self.columns]))
        return stmt
    def _select_diagnosis(self):
        """ Need to pull in all the diagnosis data for this report.
        This is saved in an instance dictionary for use in
        self._diagnosis to generate the list of diagnoses for each
        respective visit.

        A list of unique diagnoses ordered by rank is required.
        """
        # We order descending on dx_datetime as the most recent should
        # be best. Add any others as the persistence mechanism only
        # saves a unique icd9 dx that has changed status.
        stmt = "SELECT fact_visit_pk, rank, icd9 "\
            "FROM assoc_visit_dx JOIN "\
            "dim_dx ON dim_dx_pk = dim_dx.pk JOIN "\
            "reportable_pks ON "\
            "assoc_visit_dx.fact_visit_pk = reportable_pks.pk "\
            "ORDER BY dx_datetime DESC"
        cursor = self.access.raw_query(stmt)
        for row in cursor.fetchall():
            visit_pk = row[0]
            if visit_pk in self._diags:
                self._diags[visit_pk].add(row[0], row[1], row[2])
            else:
                self._diags[visit_pk] = \
                    SortedDiagnosis(row[0], row[1], row[2])
    def _diagnosis(self, visit_pk):
        # Return a single-element list so it concatenates into the
        # output row; empty string when the visit has no diagnoses.
        if visit_pk in self._diags:
            return [self._diags[visit_pk].__repr__(), ]
        else:
            return ['', ]
    def _select_vitals(self):
        """ Need to pull in all the vitals data for this report.
        This is saved in an instance dictionary for use in
        self._vitals_for_visit to generate the list of vitals for each
        respective visit.

        This is an effective NOP when self.criteria.include_vitals = False
        """
        if not self.criteria.include_vitals:
            return None
        self._vitals = {}
        stmt = """SELECT reportable_pks.visit_pk,
        observation_id, observation_result
        FROM visit_loinc_data JOIN reportable_pks ON
        reportable_pks.visit_id = visit_loinc_data.visit_id"""
        cursor = self.access.raw_query(stmt)
        for row in cursor.fetchall():
            visit_pk = row[0]
            if visit_pk in self._vitals:
                self._vitals[visit_pk].add(row[1], row[2])
            else:
                self._vitals[visit_pk] = \
                    Vitals(row[1], row[2])
    def _vitals_for_visit(self, visit_pk):
        """Returns the list of vitals for the visit in question.

        This is an effective NOP when self.criteria.include_vitals = False
        """
        if not self.criteria.include_vitals:
            return []
        if visit_pk in self._vitals:
            return self._vitals[visit_pk].__repr__()
        else:
            return Vitals().__repr__()
    def _write_report(self, save_report=False):
        """ Write out and potentially store the results.

        Generate results via database queries and write the results to
        self.output.

        :param save_report: If set, persist the document and related
        metadata to the mbds archive.

        returns the document ID, the mbds archive key, if saved
        """
        out = self.output
        print >> out, self._header()
        self._build_join_tables()
        self._select_diagnosis()
        self._select_vitals()
        cursor = self.access.raw_query(self._select_from_essence_view())
        for row in cursor.fetchall():
            # Each row is the colums up to the diagnosis + the
            # comma separated diagnosis + the rest of the columns
            # and finally with vitals if configured for such
            visit_pk = row[self.diagnosis_column_index]  # yuck, but true
            print >> out,\
                '|'.join([strSansNone(column) for column in
                          row[:self.diagnosis_column_index]] +
                         self._diagnosis(visit_pk) +
                         [strSansNone(column) for column in
                          row[self.diagnosis_column_index + 1:]] +
                         self._vitals_for_visit(visit_pk))
        # Close the file and persist to the document archive if
        # requested
        self.output.close()
        if save_report:
            metadata = {k: v for k, v in self.criteria._crit.items() if v
                        is not None}
            # At this point, all documents are of 'essence' type
            return document_store(document=self.output.name,
                                  allow_duplicate_filename=True,
                                  document_type='essence', **metadata)
    def _record_report(self, report_oid):
        """Record the details from this report generation in the db"""
        if not report_oid:
            return
        report = Report(processed_datetime=datetime.now(),
                        file_path=report_oid,
                        report_method=self.criteria.report_method)
        alchemy = AlchemyAccess(database=self.database)
        alchemy.session.add(report)
        alchemy.session.commit()
        alchemy.disconnect()
    def _transmit_report(self, report):
        """Transmit report using self._transport()"""
        logging.info("initiate upload of %s", report)
        self._transport.transfer_file(report)
    def _transmit_differences(self, report):
        """Compute differences from yesterday's like report; transport"""
        # This option really only makes sense on date range reports,
        # as updates hit older data than just 'yesterday'.
        if self.criteria.start_date == self.criteria.end_date:
            raise ValueError("difference calculation not supported on "\
                             "single day reports")
        # See if we can find a similar report in the archive from
        # yesterday
        search_criteria = {'report_method':
                           self.criteria.report_method,
                           'start_date': self.criteria.start_date -
                           timedelta(days=1), 'end_date':
                           self.criteria.end_date - timedelta(days=1)}
        old_doc = document_find(search_criteria, limit=1)
        if old_doc is None:
            logging.info("No comparable report found for difference "\
                         "generation")
            self._transmit_report(report)
        else:
            target_filename = self.\
                _generate_output_filename(start_date=self.criteria.start_date,
                                          end_date=self.criteria.end_date)
            # RemoveDuplicates not yet ported!!
            raise ValueError("RemoveDuplicates not ported")
            #from pheme.essence.remove_duplicates import RemoveDuplicates
            #rd = RemoveDuplicates(new_report=report,
            #                      old_report=old_doc,
            #                      out=target_filename)
            #rd.generate_report()
            #logging.info("initiate upload of difference %s", target_filename)
            #self._transport.transfer_file(target_filename)
    def _getConn(self):
        """ Local wrapper to get database connection
        """
        if hasattr(self, 'access'):
            return
        self.access = DirectAccess(database=self.database,
                                   user=self.user,
                                   password=self.password)
    def _closeConn(self):
        """ Local wrapper to close database connection
        """
        if hasattr(self, 'access'):
            self.access.close()
    def tearDown(self):
        "Public interface to clean up internals"
        self._closeConn()
    def execute(self, save_report=False, transmit_report=False,
                transmit_differences=False):
        """Execute the report generation
        """
        logging.info("Initiate ESSENCE report generation [%s-%s] for %s",
                     self.criteria.start_date,
                     self.criteria.end_date,
                     self.criteria.report_method)
        self._getConn()
        report_oid = self._write_report(save_report)
        self._record_report(report_oid)
        if transmit_report:
            self._transmit_report(report_oid)
        if transmit_differences:
            self._transmit_differences(report_oid)
        self._closeConn()
        if hasattr(self, 'datePersistence'):
            self.datePersistence.bump_date()
        logging.info("Completed ESSENCE report generation [%s-%s] for %s",
                     self.criteria.start_date,
                     self.criteria.end_date,
                     self.criteria.report_method)
class SortedDiagnosis(object):
    """ Special class unlikely to have use beyond report generation -
    this is used to build up a list of diagnosis for a visit,
    maintaining order. Capable of spitting it back out in the
    format as required by essence.

    Extending to hide duplicate diagnoses, where a unique diagnosis is
    defined by (icd9). If a duplicate is added, it is ignored.
    """
    def __init__(self, visit_pk, rank, icd9):
        self.visit_pk = visit_pk
        self.ordered_list = [{'rank': rank, 'icd9': icd9}]
        self._contains = set((self._gen_key(icd9=icd9),))
    def _gen_key(self, icd9):
        # Uniqueness is defined on the icd9 code alone
        return icd9.__hash__()
    def add(self, visit_pk, rank, icd9):
        """Insert diagnosis ordered by rank; silently skip duplicates"""
        assert(self.visit_pk == visit_pk)
        key = self._gen_key(icd9=icd9)
        if key in self._contains:
            return
        entry = {'rank': rank, 'icd9': icd9}
        # Insert before the first element of strictly greater rank;
        # equal ranks preserve insertion order.
        position = len(self.ordered_list)
        for idx, existing in enumerate(self.ordered_list):
            if rank < existing['rank']:
                position = idx
                break
        self.ordered_list.insert(position, entry)
        self._contains.add(key)
    def __repr__(self):
        """Return space delimited string of ordered ICD9 codes"""
        return ' '.join(entry['icd9'] for entry in self.ordered_list)
# Pre-compiled pattern extracting the inner text of an HL7 OBX-5.1
# component from its XML-ish serialization, e.g.
# '<OBX.5><OBX.5.1>98.6</OBX.5.1></OBX.5>' -> '98.6'
obx5_5_1 = re.compile("<OBX.5><OBX.5.1>(.*?)</OBX.5.1></OBX.5>")
class Vitals(object):
    """Another helper class (like SortedDiagnosis) unlikely to have a
    utility beyond report creation.

    Manages taking query results and creating a very lightweigh object
    for the vitals currently of interest.

    Expected use case has an instance of this class for every visit_pk
    in the report where vitals were present.
    """
    def __init__(self, observation_id=None, observation_result=None):
        self.coded_data = {}
        if observation_id and observation_result:
            self.add(observation_id, observation_result)
    def add(self, observation_id, observation_result):
        """Record a result; duplicates keep the first value seen"""
        if observation_id not in self.coded_data:
            self.coded_data[observation_id] = \
                self.stripXML(observation_result)
    def stripXML(self, observation_result):
        """Return the OBX-5.1 text, or '' when the pattern doesn't match"""
        match = obx5_5_1.match(observation_result)
        if not match:
            return ''
        text = match.groups()[0]
        # A nested close tag means the simple regex grabbed too little
        if ('</' in text):
            raise ValueError("Smarter XML parser needed for '%s'"
                             % observation_result)
        return text
    def __repr__(self):
        """Returns list representation of all vitals given. The order
        must match that in the header, namely:
            columns += ('Measured Temperature', 'O2 Saturation',
                        'Self-Reported Influenza Vaccine',
                        'Self-Reported H1N1 Vaccine')
        Those map directly to the loinc codes (aka observation_ids):
            ('8310-5', '20564-1', '46077-4', '29544-4')
        Empty strings returned for any non existing values.
        """
        loincs = ('8310-5', '20564-1', '46077-4', '29544-4')
        return [self.coded_data.get(code, '') for code in loincs]
class ReportCommandLineInterface(object):
    """Command line interface to generating reports

    Collects arguments and assembles classes needed to generate any
    report.
    """
    def __init__(self):
        """initializer for CLI"""
        # All criteria used to uniquely define a report
        self.criteria = ReportCriteria()
        # Any additional attributes collected but not necessarily
        # unique to recreating a like report
        self.verbosity = 0
        self.datefile = None
        self.user = None
        self._password = None
        self.save_report = False
        self.transmit_report = False
        self.transmit_differences = False

    @property
    def password(self):
        """Database password, always held as plain text."""
        return self._password

    @password.setter
    def password(self, value):
        """Password may be plain text or a file containing it"""
        # If the password argument is a readable file, fetch the
        # password from within.  Use a context manager so the handle
        # is closed even if the read raises (previously it leaked on
        # error).
        if value and os.path.exists(value):
            with open(value, 'r') as password_file:
                value = password_file.readline().rstrip()
        self._password = value

    def process_args(self):
        """Process any optional arguments and positional parameters

        Using the values provided, assemble ReportCriteria and
        Datefile instances to control report generation.
        """
        parser = OptionParser(usage=usage)
        # Provide the ReportCriteria instance an error callback so any
        # command line errors provoke the standard graceful exit with
        # warning text.
        self.criteria.error_callback = parser.error
        parser.add_option("-u", "--user", dest="user",
                          default=self.user, help="database user")
        parser.add_option("-p", "--password", dest="password",
                          default=self.password,
                          help="database password, or file containing "
                          "just the password")
        parser.add_option("-c", "--countdown", dest="countdown",
                          default=None,
                          help="count {down,up} the start and end dates "
                          "set to 'forwards' or 'backwards' "
                          "if desired")
        parser.add_option("-i", "--include-updates",
                          action='store_true', dest="includeUpdates",
                          default=False, help="include "
                          "visits updated since last similar report")
        parser.add_option("--include-vitals",
                          action='store_true', dest="includeVitals",
                          default=False, help="include "
                          "vitals (measured temperature, O2 "
                          "saturation, influenza and H1N1 vaccine "
                          "data) as additional columns in the "
                          "report")
        parser.add_option("-k", "--patient-class",
                          dest="patient_class",
                          default=None, help="use "
                          "to filter report on a specific patient "
                          "class [E,I,O]")
        parser.add_option("-r", "--region", dest="region",
                          default=None,
                          help="reportable region defining limited set "
                          "of facilities to include, by default "
                          "all facilities are included")
        parser.add_option("-s", "--save-and-upload",
                          action='store_true', dest="save_upload",
                          default=False, help="save file and upload to "
                          "DOH")
        parser.add_option("-x", "--save-without-upload",
                          action='store_true', dest="save_only",
                          default=False, help="save file but don't upload")
        parser.add_option("-d", "--upload-diff",
                          action='store_true', dest="upload_diff",
                          default=False, help="upload differences only "
                          "(from yesterdays like report) to DOH")
        parser.add_option("-t", "--thirty-days",
                          action='store_true', dest="thirty_days",
                          default=False, help="include 30 days up to "
                          "requested date ")
        parser.add_option("-v", "--verbose", dest="verbosity",
                          action="count", default=self.verbosity,
                          help="increase output verbosity")

        (options, args) = parser.parse_args()
        if len(args) != 2:
            parser.error("incorrect number of arguments")

        # Database to query
        self.criteria.database = args[0]
        self.user = options.user
        self.password = options.password
        self.criteria.credentials(user=self.user,
                                  password=self.password)
        # Potential region restriction
        self.criteria.reportable_region = options.region
        # Potential patient class restriction
        self.criteria.patient_class = options.patient_class
        # Potential to include vitals (not tied to gipse format)
        self.criteria.include_vitals = options.includeVitals
        # Potential inclusion of updates
        self.criteria.include_updates = options.includeUpdates
        # Report date(s) and potential step direction.
        # NB - several options affect report_method and must be set
        # first!
        initial_date = parseDate(args[1])
        config = Config()
        ps_file = os.path.join(config.get('general', 'tmp_dir',
                                          default='/tmp'),
                               self.criteria.report_method)
        # Was the dated `and/or` trick; conditional expression is the
        # unambiguous equivalent.
        step = 30 if options.thirty_days else None
        direction = options.countdown
        self.datefile = Datefile(initial_date=initial_date,
                                 persistence_file=ps_file,
                                 direction=direction,
                                 step=step)
        self.criteria.start_date, self.criteria.end_date =\
            self.datefile.get_date_range()

        # What to do once report is completed. Complicated, protect
        # user from themselves!
        self.save_report = options.save_upload or \
            options.save_only or options.upload_diff
        self.transmit_report = options.save_upload
        self.transmit_differences = options.upload_diff
        if options.save_only and options.save_upload:
            parser.error("save-without-upload and save-and-upload "
                         "are mutually exclusive")
        if options.save_only and options.upload_diff:
            parser.error("save-without-upload and upload-diff "
                         "are mutually exclusive")
        if options.upload_diff and options.save_upload:
            # Fixed message: the two literals previously concatenated
            # without a space ("save-and-uploadare").
            parser.error("upload-diff and save-and-upload "
                         "are mutually exclusive")
        # Can't transmit w/o saving
        if options.save_upload or options.upload_diff:
            assert(self.save_report)
        # Sanity check
        if options.save_only:
            assert(self.save_report and not self.transmit_report and
                   not self.transmit_differences)

        # How verbosely to log
        self.verbosity = options.verbosity

    def execute(self):
        """Use the collected info to launch execution"""
        configure_logging(verbosity=self.verbosity,
                          logfile="%s.log" % self.criteria.report_method)
        gr = GenerateReport(user=self.user,
                            password=self.password,
                            report_criteria=self.criteria,
                            datefile=self.datefile)
        gr.execute(save_report=self.save_report,
                   transmit_report=self.transmit_report,
                   transmit_differences=self.transmit_differences)
def main():
    """Entry point: collect CLI arguments, then generate the report."""
    interface = ReportCommandLineInterface()
    interface.process_args()
    interface.execute()
# Allow direct execution as a script.
if __name__ == "__main__":
    main()
| |
# text_type is 'unicode' for py2 and 'str' for py3
from napalm_base.utils.py23_compat import text_type
# Model templates used by the napalm getter test helpers: each dict maps a
# returned field name to the type the driver is expected to produce.
alive = {
    'is_alive': bool
}
facts = {
    'os_version': text_type,
    'uptime': int,
    'interface_list': list,
    'vendor': text_type,
    'serial_number': text_type,
    'model': text_type,
    'hostname': text_type,
    'fqdn': text_type
}
interface = {
    'is_up': bool,
    'is_enabled': bool,
    'description': text_type,
    'last_flapped': float,
    'speed': int,
    'mac_address': text_type,
}
lldp_neighbors = {
    'hostname': text_type,
    'port': text_type,
}
interface_counters = {
    'tx_errors': int,
    'rx_errors': int,
    'tx_discards': int,
    'rx_discards': int,
    'tx_octets': int,
    'rx_octets': int,
    'tx_unicast_packets': int,
    'rx_unicast_packets': int,
    'tx_multicast_packets': int,
    'rx_multicast_packets': int,
    'tx_broadcast_packets': int,
    'rx_broadcast_packets': int,
}
# Environment sub-models (get_environment).
temperature = {
    'is_alert': bool,
    'is_critical': bool,
    'temperature': float,
}
power = {
    'status': bool,
    'output': float,
    'capacity': float
}
memory = {
    'used_ram': int,
    'available_ram': int,
}
fan = {
    'status': bool,
}
cpu = {
    '%usage': float,
}
# BGP neighbor models (get_bgp_neighbors).
peer = {
    'is_enabled': bool,
    'uptime': int,
    'remote_as': int,
    'description': text_type,
    'remote_id': text_type,
    'local_as': int,
    'is_up': bool,
    'address_family': dict,
}
af = {
    'sent_prefixes': int,
    'accepted_prefixes': int,
    'received_prefixes': int
}
# Model for get_lldp_neighbors_detail entries.  The original literal
# listed 'remote_port' twice; duplicate dict keys are silently
# collapsed, so the redundant entry has been removed.
lldp_neighbors_detail = {
    'parent_interface': text_type,
    'remote_port': text_type,
    'remote_chassis_id': text_type,
    'remote_port_description': text_type,
    'remote_system_name': text_type,
    'remote_system_description': text_type,
    'remote_system_capab': text_type,
    'remote_system_enable_capab': text_type
}
# BGP configuration models (get_bgp_config).
bgp_config_group = {
    'type': text_type,
    'description': text_type,
    'apply_groups': list,
    'multihop_ttl': int,
    'multipath': bool,
    'local_address': text_type,
    'local_as': int,
    'remote_as': int,
    'import_policy': text_type,
    'export_policy': text_type,
    'remove_private_as': bool,
    'prefix_limit': dict,
    'neighbors': dict
}
bgp_config_neighbor = {
    'description': text_type,
    'import_policy': text_type,
    'export_policy': text_type,
    'local_address': text_type,
    'authentication_key': text_type,
    'nhs': bool,
    'route_reflector_client': bool,
    'local_as': int,
    'remote_as': int,
    'prefix_limit': dict
}
# Per-neighbor detail model (get_bgp_neighbors_detail).
peer_details = {
    'up': bool,
    'local_as': int,
    'remote_as': int,
    'router_id': text_type,
    'local_address': text_type,
    'routing_table': text_type,
    'local_address_configured': bool,
    'local_port': int,
    'remote_address': text_type,
    'remote_port': int,
    'multihop': bool,
    'multipath': bool,
    'remove_private_as': bool,
    'import_policy': text_type,
    'export_policy': text_type,
    'input_messages': int,
    'output_messages': int,
    'input_updates': int,
    'output_updates': int,
    'messages_queued_out': int,
    'connection_state': text_type,
    'previous_connection_state': text_type,
    'last_event': text_type,
    'suppress_4byte_as': bool,
    'local_as_prepend': bool,
    'holdtime': int,
    'configured_holdtime': int,
    'keepalive': int,
    'configured_keepalive': int,
    'active_prefix_count': int,
    'received_prefix_count': int,
    'accepted_prefix_count': int,
    'suppressed_prefix_count': int,
    'advertised_prefix_count': int,
    'flap_count': int
}
arp_table = {
    'interface': text_type,
    'mac': text_type,
    'ip': text_type,
    'age': float
}
ntp_peer = {
    # will populate it in the future with potential keys
}
ntp_server = {
    # will populate it in the future with potential keys
}
ntp_stats = {
    'remote': text_type,
    'referenceid': text_type,
    'synchronized': bool,
    'stratum': int,
    'type': text_type,
    'when': text_type,
    'hostpoll': int,
    'reachability': int,
    'delay': float,
    'offset': float,
    'jitter': float
}
interfaces_ip = {
    'prefix_length': int,
    'primary_key': bool
}
mac_address_table = {
    'mac': text_type,
    'interface': text_type,
    'vlan': int,
    'static': bool,
    'active': bool,
    'moves': int,
    'last_move': float
}
route = {
    'protocol': text_type,
    'current_active': bool,
    'last_active': bool,
    'age': int,
    'next_hop': text_type,
    'outgoing_interface': text_type,
    'selected_next_hop': bool,
    'preference': int,
    'inactive_reason': text_type,
    'routing_table': text_type,
    'protocol_attributes': dict
}
snmp = {
    'chassis_id': text_type,
    'community': dict,
    'contact': text_type,
    'location': text_type
}
snmp_community = {
    'acl': text_type,
    'mode': text_type,
}
# RPM/SLA probe models (get_probes_config / get_probes_results).
probe_test = {
    'probe_type': text_type,
    'target': text_type,
    'source': text_type,
    'probe_count': int,
    'test_interval': int
}
probe_test_results = {
    'target': text_type,
    'source': text_type,
    'probe_type': text_type,
    'probe_count': int,
    'rtt': float,
    'round_trip_jitter': float,
    'last_test_loss': int,
    'current_test_min_delay': float,
    'current_test_max_delay': float,
    'current_test_avg_delay': float,
    'last_test_min_delay': float,
    'last_test_max_delay': float,
    'last_test_avg_delay': float,
    'global_test_min_delay': float,
    'global_test_max_delay': float,
    'global_test_avg_delay': float
}
ping = {
    'probes_sent': int,
    'packet_loss': int,
    'rtt_min': float,
    'rtt_max': float,
    'rtt_avg': float,
    'rtt_stddev': float,
    'results': list
}
ping_result = {
    'ip_address': text_type,
    'rtt': float
}
traceroute = {
    'rtt': float,
    'ip_address': text_type,
    'host_name': text_type
}
users = {
    'level': int,
    'password': text_type,
    'sshkeys': list
}
optics_state = {
    'instant': float,
    'avg': float,
    'min': float,
    'max': float
}
config = {
    'running': text_type,
    'startup': text_type,
    'candidate': text_type,
}
network_instance = {
    'name': text_type,
    'type': text_type,
    'state': dict,
    'interfaces': dict,
}
network_instance_state = {
    'route_distinguisher': text_type,
}
network_instance_interfaces = {
    'interface': dict,
}
firewall_policies = {
    'position': int,
    'packet_hits': int,
    'byte_hits': int,
    'id': text_type,
    'enabled': bool,
    'schedule': text_type,
    'log': text_type,
    'l3_src': text_type,
    'l3_dst': text_type,
    'service': text_type,
    'src_zone': text_type,
    'dst_zone': text_type,
    'action': text_type
}
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from neutron import manager
from neutron.openstack.common import lockutils # noqa
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb
from gbpservice.neutron.services.grouppolicy.common import constants as g_const
from gbpservice.neutron.services.grouppolicy.common import exceptions as gpexc
from gbpservice.neutron.services.grouppolicy.drivers import (
resource_mapping as api)
from gbpservice.neutron.services.grouppolicy.drivers.odl import odl_manager
LOG = logging.getLogger(__name__)
# Driver-specific "operation not supported" exceptions.  Each maps an
# unsupported GBP operation to an HTTP 400 via GroupPolicyBadRequest.
class ExternalSegmentNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("External Segment currently not supported on ODL GBP "
                "driver.")


class UpdateL3PolicyNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update L3 Policy currently not supported on ODL GBP "
                "driver.")


class UpdateL2PolicyNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update L2 Policy currently not supported on ODL GBP "
                "driver.")


class UpdatePTNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update Policy Target currently not supported on ODL GBP "
                "driver.")


class UpdatePTGNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update Policy Target Group currently not supported on ODL "
                "GBP driver.")


class L2PolicyMultiplePolicyTargetGroupNotSupportedOnOdlDriver(
        gpexc.GroupPolicyBadRequest):
    message = _("An L2 policy can't have multiple policy target groups on "
                "ODL GBP driver.")


class UpdatePolicyActionNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update Policy Action currently not supported on ODL GBP "
                "driver.")


class RedirectActionNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Redirect action is currently not supported for ODL GBP "
                "driver.")


class OnlyAllowActionSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Currently only allow action is supported for ODL GBP "
                "driver.")


class UpdateClassifierNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    message = _("Update Policy Classifier currently not supported on ODL GBP "
                "driver.")
class PolicyRuleUpdateNotSupportedOnOdlDriver(gpexc.GroupPolicyBadRequest):
    # Fixed user-facing message: the original concatenated to
    # "...not supported on for ODL GBPdriver." (stray "on for" and a
    # missing space between the two literals).
    message = _("Policy rule update is not supported on ODL GBP "
                "driver.")
# Constraint-violation exceptions for unsupported rule/classifier shapes.
class ExactlyOneActionPerRuleIsSupportedOnOdlDriver(
        gpexc.GroupPolicyBadRequest):
    message = _("Exactly one action per rule is supported on ODL GBP driver.")


class ClassifierTcpUdpPortRangeNotSupportedOnOdlDriver(
        gpexc.GroupPolicyBadRequest):
    message = _("Tcp or Udp port range is not supported on ODL GBP driver.")


class ClassifierUnknownIPProtocolNotSupportedOnOdlDriver(
        gpexc.GroupPolicyBadRequest):
    message = _("Unknown IP Protocol is not supported on ODL GBP driver.")
class OdlMappingDriver(api.ResourceMappingDriver):
    """ODL Mapping driver for Group Policy plugin.
    This driver implements group policy semantics by mapping group
    policy resources to various other neutron resources, and leverages
    ODL backend for enforcing the policies.
    """
    # Class-level singletons: the initialized driver instance ('me')
    # and the shared OdlManager connection.
    me = None
    manager = None

    @staticmethod
    def get_odl_manager():
        # Lazily build the single OdlManager shared by all instances.
        if not OdlMappingDriver.manager:
            OdlMappingDriver.manager = odl_manager.OdlManager()
        return OdlMappingDriver.manager

    def initialize(self):
        super(OdlMappingDriver, self).initialize()
        self.odl_manager = OdlMappingDriver.get_odl_manager()
        self._gbp_plugin = None
        # Remember this instance so get_initialized_instance() can
        # return it to code outside the normal plugin flow.
        OdlMappingDriver.me = self

    @property
    def gbp_plugin(self):
        # Lazy lookup: the GBP service plugin may not be loaded yet
        # when this driver initializes.
        if not self._gbp_plugin:
            self._gbp_plugin = (manager.NeutronManager.get_service_plugins()
                                .get("GROUP_POLICY"))
        return self._gbp_plugin

    @staticmethod
    def get_initialized_instance():
        return OdlMappingDriver.me

    def create_dhcp_policy_target_if_needed(self, plugin_context, port):
        # Implicitly register an unowned DHCP port as a policy target
        # so ODL learns about the DHCP endpoint.
        session = plugin_context.session
        if (self._port_is_owned(session, port['id'])):
            # Nothing to do
            return
        # Retrieve PTG
        # TODO(ywu): optimize later
        subnets = self._core_plugin._get_subnets_by_network(
            plugin_context, port['network_id']
        )
        # Resolve the port's network to its PTG via the first subnet.
        ptg = (plugin_context.session.query(gpdb.PolicyTargetGroupMapping).
               join(gpdb.PolicyTargetGroupMapping.subnets).
               filter(gpdb.PTGToSubnetAssociation.subnet_id ==
                      subnets[0]['id']).
               first())
        # Create PolicyTarget
        attrs = {'policy_target':
                 {'tenant_id': port['tenant_id'],
                  'name': 'dhcp-%s' % ptg['id'],
                  'description': ("Implicitly created DHCP policy "
                                  "target"),
                  'policy_target_group_id': ptg['id'],
                  'port_id': port['id']}}
        self.gbp_plugin.create_policy_target(plugin_context, attrs)
        # TODO(ODL): security group is not required
        # sg_id = self._ensure_default_security_group(plugin_context,
        #                                             port['tenant_id'])
        # data = {'port': {'security_groups': [sg_id]}}
        # self._core_plugin.update_port(plugin_context, port['id'], data)

    # External segments, external policies and NAT pools are wholly
    # unsupported: veto them in precommit before any state is touched.
    def create_external_segment_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def update_external_segment_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def delete_external_segment_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def create_external_policy_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def update_external_policy_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def delete_external_policy_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def create_nat_pool_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def update_nat_pool_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def delete_nat_pool_precommit(self, context):
        raise ExternalSegmentNotSupportedOnOdlDriver()

    def create_policy_target_postcommit(self, context):
        super(OdlMappingDriver, self).create_policy_target_postcommit(context)
        pt = self._get_pt_detail(context)
        ep = {
            "endpoint-group": pt['ptg_id'],
            "l2-context": pt['l2ctx_id'],
            "l3-address": pt['l3_list'],
            "mac-address": pt['mac_address'],
            "port-name": pt['neutron_port_id'],
            "tenant": pt['tenant_id']
        }
        self.odl_manager.register_endpoints([ep])

    def update_policy_target_precommit(self, context):
        raise UpdatePTNotSupportedOnOdlDriver()

    def delete_policy_target_postcommit(self, context):
        pt = self._get_pt_detail(context)
        ep = {
            "l2": pt['l2_list'],
            "l3": pt['l3_list']
        }
        self.odl_manager.unregister_endpoints([ep])
        # Delete Neutron's port
        super(OdlMappingDriver, self).delete_policy_target_postcommit(context)

    def create_l3_policy_postcommit(self, context):
        # uuid.UUID(x).urn == 'urn:uuid:<hex>'; [9:] strips the
        # 'urn:uuid:' prefix, leaving the bare uuid ODL expects.
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        l3ctx = {
            "id": context.current['id'],
            "name": context.current['name'],
            "description": context.current['description']
        }
        self.odl_manager.create_update_l3_context(tenant_id, l3ctx)

    def update_l3_policy_precommit(self, context):
        raise UpdateL3PolicyNotSupportedOnOdlDriver()

    def delete_l3_policy_postcommit(self, context):
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        l3ctx = {
            "id": context.current['id']
        }
        self.odl_manager.delete_l3_context(tenant_id, l3ctx)

    def create_l2_policy_postcommit(self, context):
        super(OdlMappingDriver, self).create_l2_policy_postcommit(context)
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        # l2_policy mapped to l2_bridge_domain in ODL
        l2bd = {
            "id": context.current['id'],
            "name": context.current['name'],
            "description": context.current['description'],
            "parent": context.current['l3_policy_id']
        }
        self.odl_manager.create_update_l2_bridge_domain(tenant_id, l2bd)
        # Implicit network within l2 policy mapped to l2 FD in ODL
        net_id = context.current['network_id']
        network = self._core_plugin.get_network(context._plugin_context,
                                                net_id)
        l2fd = {
            "id": net_id,
            "name": network['name'],
            "parent": context.current['id']
        }
        self.odl_manager.create_update_l2_flood_domain(tenant_id, l2fd)

    def update_l2_policy_precommit(self, context):
        raise UpdateL2PolicyNotSupportedOnOdlDriver()

    def delete_l2_policy_postcommit(self, context):
        super(OdlMappingDriver, self).delete_l2_policy_postcommit(context)
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        # l2_policy mapped to l2_bridge_domain in ODL
        l2bd = {
            "id": context.current['id']
        }
        self.odl_manager.delete_l2_bridge_domain(tenant_id, l2bd)
        # Implicit network within l2 policy mapped to l2 FD in ODL
        net_id = context.current['network_id']
        l2fd = {
            "id": net_id,
        }
        self.odl_manager.delete_l2_flood_domain(tenant_id, l2fd)
    def create_policy_target_group_postcommit(self, context):
        super(OdlMappingDriver, self).create_policy_target_group_postcommit(
            context)
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        subnets = context.current['subnets']
        # Provided/consumed rule sets become dynamically generated
        # contracts (see _make_odl_contract_and_clause).
        provided_contract = self._make_odl_contract_and_clause(
            context, context.current['provided_policy_rule_sets']
        )
        consumed_contract = self._make_odl_contract_and_clause(
            context, context.current['consumed_policy_rule_sets']
        )
        # PTG mapped to EPG in ODL
        # TODO(ODL); add back description field after PoC
        epg = {
            "id": context.current['id'],
            "name": context.current['name'],
            "network-domain": subnets[0]
        }
        if provided_contract:
            epg['provider-named-selector'] = {
                "name": 'Contract-' + provided_contract['id'],
                "contract": provided_contract['id']
            }
            self.odl_manager.create_update_contract(tenant_id,
                                                    provided_contract)
        if consumed_contract:
            epg['consumer-named-selector'] = {
                "name": 'Contract-' + consumed_contract['id'],
                "contract": consumed_contract['id']
            }
            self.odl_manager.create_update_contract(tenant_id,
                                                    consumed_contract)
        self.odl_manager.create_update_endpoint_group(tenant_id, epg)
        # Implicit subnet within policy target group mapped to subnet in ODL
        for subnet_id in subnets:
            neutron_subnet = self._core_plugin.get_subnet(
                context._plugin_context, subnet_id
            )
            odl_subnet = {
                "id": subnet_id,
                "ip-prefix": neutron_subnet['cidr'],
                "parent": neutron_subnet['network_id'],
                "virtual-router-ip": neutron_subnet['gateway_ip']
            }
            self.odl_manager.create_update_subnet(tenant_id, odl_subnet)

    def _make_odl_contract_and_clause(self, context, rule_sets):
        # As no contract/clause in O.S., they will be generated dynamically
        # when rule sets are associated with PTG:
        # 1. an association is mapped to a contract with single clause
        # 2. rule sets mapped to subjects
        # 3. clause name is concatenation of sorted subject names
        # 4. contract ID is generated based on the clause name
        # 5. As a combination of same rule sets produce same sorted subject
        #    names, consistent clause name and contract ID are guaranteed
        contract = None
        if rule_sets:
            subjects = []
            subject_names = []
            for rule_set_id in rule_sets:
                # a subject is mapped to a rule set
                subject = self._make_subject(context, rule_set_id)
                subjects.append(subject)
                subject_names.append(subject['name'])
            # NOTE(review): encode() yields bytes on py3; this code
            # targets py2 (uuid3 wants a str there) - confirm on port.
            clause_name = "-".join(sorted(subject_names)).encode('ascii',
                                                                 'ignore')
            # Deterministic contract id derived from the clause name.
            contract_id = uuid.uuid3(uuid.NAMESPACE_DNS, clause_name).urn[9:]
            clauses = [
                {
                    "name": clause_name,
                    "subject-refs": subject_names
                }
            ]
            contract = {
                "id": contract_id,
                "clause": clauses,
                "subject": subjects
            }
        return contract

    def _make_subject(self, context, rule_set_id):
        # One GBP rule set -> one ODL subject holding its rules.
        rule_set = context._plugin.get_policy_rule_set(
            context._plugin_context, rule_set_id
        )
        rules = []
        for rule_id in rule_set['policy_rules']:
            rule = self._make_odl_rule(context, rule_id)
            rules.append(rule)
        return {
            "name": rule_set['name'],
            "rule": rules
        }

    def _make_odl_rule(self, context, rule_id):
        rule = context._plugin.get_policy_rule(
            context._plugin_context, rule_id
        )
        stack_classifier = context._plugin.get_policy_classifier(
            context._plugin_context, rule['policy_classifier_id']
        )
        # while openstack supports only one classifier per rule, the classifier
        # may mapped to multi classifier in ODL
        classifier_refs = []
        classifiers = self._make_odl_classifiers(stack_classifier)
        for classifier in classifiers:
            classifier_ref = {
                "name": classifier['name']
            }
            if classifier['direction'] != "bidirectional":
                classifier_ref['direction'] = classifier['direction']
            classifier_refs.append(classifier_ref)
        action_refs = []
        for action_id in rule['policy_actions']:
            action = context._plugin.get_policy_action(
                context._plugin_context, action_id
            )
            action_refs.append(
                {
                    "name": action['name']
                }
            )
        # TODO(ODL): send action_refs later but not for PoC
        return {
            "name": rule['name'],
            "classifier-ref": classifier_refs,
        }

    def update_policy_target_group_precommit(self, context):
        raise UpdatePTGNotSupportedOnOdlDriver()

    def delete_policy_target_group_postcommit(self, context):
        # TODO(ODL): delete contract if no one uses it
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        subnets = context.current['subnets']
        # delete mapped subnets in ODL, and clean them up from neutron
        for subnet_id in subnets:
            self._cleanup_subnet(context._plugin_context, subnet_id, None)
            odl_subnet = {
                "id": subnet_id
            }
            self.odl_manager.delete_subnet(tenant_id, odl_subnet)
        # delete mapped EPG in ODL
        epg = {
            "id": context.current['id'],
        }
        self.odl_manager.delete_endpoint_group(tenant_id, epg)

    def create_policy_action_precommit(self, context):
        # TODO(odl): allow redirect for service chaining
        if context.current['action_type'] == g_const.GP_ACTION_REDIRECT:
            raise RedirectActionNotSupportedOnOdlDriver()

    def create_policy_action_postcommit(self, context):
        super(OdlMappingDriver, self).create_policy_action_postcommit(context)
        # TODO(ODL): remove comment out after PoC
        # tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        # fill in action instance data
        if context.current['action_type'] == g_const.GP_ACTION_ALLOW:
            # TODO(ODL): remove the return and comment out after POC
            return
            # action_definition_id = "f942e8fd-e957-42b7-bd18-f73d11266d17"
            # action_instance = {
            #     "action-definition-id": action_definition_id,
            #     "name": context.current['name'],
            #     "parameter-value": [
            #         {
            #             "name": context.current['name'],
            #             "string-value": context.current['action_type'],
            #         }
            #     ]
            # }
            # self.odl_manager.create_action(tenant_id, action_instance)
        else:
            raise OnlyAllowActionSupportedOnOdlDriver()

    def update_policy_action_precommit(self, context):
        raise UpdatePolicyActionNotSupportedOnOdlDriver()

    def delete_policy_action_postcommit(self, context):
        super(OdlMappingDriver, self).delete_policy_action_postcommit(context)
        # TODO(ODL): remove comment out after PoC
        # tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        #
        # # fill in action instance data
        # action_instance = {
        #     "name": context.current['name']
        # }
        # self.odl_manager.delete_action(tenant_id, action_instance)

    def create_policy_classifier_postcommit(self, context):
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        # One GBP classifier may map to several ODL classifier
        # instances (see _make_odl_classifiers).
        classifiers = self._make_odl_classifiers(context.current)
        for classifier in classifiers:
            classifier_instance = {
                "classifier-definition-id":
                    classifier['classifier-definition-id'],
                "name": classifier['name'],
                "parameter-value": classifier['parameter-value']
            }
            self.odl_manager.create_classifier(tenant_id, classifier_instance)
def _make_odl_classifiers(self, stack_classifier):
classifiers = []
if stack_classifier['protocol'] == constants.ICMP:
direction = stack_classifier['direction']
if direction == 'bi':
direction = "bidirectional"
classifier = {
# Use hard coded value based on current ODL implementation
"classifier-definition-id":
'79c6fdb2-1e1a-4832-af57-c65baf5c2335',
"name": stack_classifier['name'],
"parameter-value": [
{
"name": "proto",
# TODO(yapeng): change the hard code value
"int-value": 1,
}
],
"direction": direction
}
classifiers.append(classifier)
else:
# For TCP and UDP protoocol create two classifier (in and out)
for port in ['sourceport', 'destport']:
if stack_classifier['direction'] == 'in':
if port == 'destport':
direction = 'in'
else:
direction = 'out'
elif stack_classifier['direction'] == 'out':
if port == 'destport':
direction = 'out'
else:
direction = 'in'
else:
direction = 'bidirectional'
classifier = {
# Use hard coded value based on current ODL implementation
"classifier-definition-id":
'4250ab32-e8b8-445a-aebb-e1bd2cdd291f',
"direction": direction,
"name": stack_classifier['name'] + '-' + port,
"parameter-value": [
{
"name": "type",
"string-value": stack_classifier['protocol'],
},
{
"name": port,
"int-value": stack_classifier['port_range'],
}
]
}
classifiers.append(classifier)
return classifiers
    def update_policy_classifier_precommit(self, context):
        raise UpdateClassifierNotSupportedOnOdlDriver()

    def delete_policy_classifier_postcommit(self, context):
        # Mirrors _make_odl_classifiers: ICMP produced one instance,
        # TCP/UDP produced one per port suffix.
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        if context.current['protocol'] == constants.ICMP:
            # fill in classifier instance data
            classifier_instance = {
                "name": context.current['name']
            }
            self.odl_manager.delete_classifier(tenant_id, classifier_instance)
            return
        # fill in classifier instance data
        for port in ['sourceport', 'destport']:
            classifier_instance = {
                "name": context.current['name'] + '-' + port,
            }
            self.odl_manager.delete_classifier(tenant_id, classifier_instance)

    def create_policy_rule_precommit(self, context):
        if ('policy_actions' in context.current and
                len(context.current['policy_actions']) != 1):
            # TODO(odl): to be fixed when redirect is supported
            raise ExactlyOneActionPerRuleIsSupportedOnOdlDriver()

    def update_policy_rule_precommit(self, context):
        # TODO(ivar): add support for action update on policy rules
        raise PolicyRuleUpdateNotSupportedOnOdlDriver()

    def _get_pt_detail(self, context):
        # Gather everything needed to (un)register a policy target's
        # endpoint with ODL: port, tenant, PTG, l2/l3 contexts.
        port_id = context.current['port_id']
        port = self._core_plugin.get_port(context._plugin_context, port_id)
        tenant_id = uuid.UUID(context.current['tenant_id']).urn[9:]
        ptg_id = context.current['policy_target_group_id']
        ptg = self.gbp_plugin.get_policy_target_group(context._plugin_context,
                                                      ptg_id)
        l2ctx_id = ptg['l2_policy_id']
        l2ctx = self.gbp_plugin.get_l2_policy(context._plugin_context,
                                              l2ctx_id)
        l3ctx_id = l2ctx['l3_policy_id']
        mac_address = port['mac_address']
        # Linux tap device name derived from the port uuid prefix.
        neutron_port_id = 'tap' + port_id[:11]
        l3_list = []
        for fixed_ip in port['fixed_ips']:
            l3_list.append(
                {
                    "ip-address": fixed_ip['ip_address'],
                    "l3-context": l3ctx_id
                }
            )
        l2_list = [
            {
                "l2-context": l2ctx_id,
                "mac-address": mac_address
            }
        ]
        return {
            "port_id": port_id,
            "tenant_id": tenant_id,
            "ptg_id": ptg_id,
            "l2ctx_id": l2ctx_id,
            "l3ctx_id": l3ctx_id,
            "mac_address": mac_address,
            "neutron_port_id": neutron_port_id,
            "l3_list": l3_list,
            "l2_list": l2_list,
        }
| |
import asyncio
import logging
import uuid
import os
from functools import partial
from typing import Any, List, Optional, Text, Union, Dict
import rasa.core.utils
from rasa.shared.exceptions import RasaException
import rasa.shared.utils.common
import rasa.utils
import rasa.utils.common
import rasa.utils.io
from rasa import server, telemetry
from rasa.constants import ENV_SANIC_BACKLOG
from rasa.core import agent, channels, constants
from rasa.core.agent import Agent
from rasa.core.channels import console
from rasa.core.channels.channel import InputChannel
from rasa.core.utils import AvailableEndpoints
import rasa.shared.utils.io
from sanic import Sanic
from asyncio import AbstractEventLoop
logger = logging.getLogger()  # root logger (no name): configuration applied to it affects all module loggers
def create_http_input_channels(
    channel: Optional[Text], credentials_file: Optional[Text]
) -> List["InputChannel"]:
    """Instantiate the chosen input channel.

    If no connector was selected explicitly, one channel is created for every
    entry in the credentials file.
    """
    all_credentials = (
        rasa.shared.utils.io.read_config_file(credentials_file)
        if credentials_file
        else {}
    )

    if not channel:
        # No explicit connector: connect to every configured channel.
        return [
            _create_single_channel(name, creds)
            for name, creds in all_credentials.items()
        ]

    if len(all_credentials) > 1:
        logger.info(
            "Connecting to channel '{}' which was specified by the "
            "'--connector' argument. Any other channels will be ignored. "
            "To connect to all given channels, omit the '--connector' "
            "argument.".format(channel)
        )
    return [_create_single_channel(channel, all_credentials.get(channel))]
def _create_single_channel(channel: Text, credentials: Dict[Text, Any]) -> Any:
    """Create one input channel from its name and credentials.

    Built-in channel names resolve directly; anything else is treated as a
    dotted module path to a custom channel class.
    """
    from rasa.core.channels import BUILTIN_CHANNELS

    if channel in BUILTIN_CHANNELS:
        return BUILTIN_CHANNELS[channel].from_credentials(credentials)

    # Not a built-in: try to load the channel class from a module path.
    try:
        custom_channel_class = rasa.shared.utils.common.class_from_module_path(
            channel
        )
        return custom_channel_class.from_credentials(credentials)
    except (AttributeError, ImportError):
        raise RasaException(
            f"Failed to find input channel class for '{channel}'. Unknown "
            f"input channel. Check your credentials configuration to "
            f"make sure the mentioned channel is not misspelled. "
            f"If you are creating your own channel, make sure it "
            f"is a proper name of a class in a module."
        )
def _create_app_without_api(cors: Optional[Union[Text, List[Text]]] = None) -> Sanic:
    """Create a bare Sanic app (root route and CORS only) for when the HTTP API is disabled."""
    app = Sanic(__name__, configure_logging=False)
    server.add_root_route(app)
    server.configure_cors(app, cors)
    return app
def configure_app(
    input_channels: Optional[List["InputChannel"]] = None,
    cors: Optional[Union[Text, List[Text], None]] = None,
    auth_token: Optional[Text] = None,
    enable_api: bool = True,
    response_timeout: int = constants.DEFAULT_RESPONSE_TIMEOUT,
    jwt_secret: Optional[Text] = None,
    jwt_method: Optional[Text] = None,
    route: Optional[Text] = "/webhooks/",
    port: int = constants.DEFAULT_SERVER_PORT,
    endpoints: Optional[AvailableEndpoints] = None,
    log_file: Optional[Text] = None,
    # NOTE(review): this default is evaluated once at import time, so every
    # call relying on it shares a single conversation id per process —
    # confirm that is intended.
    conversation_id: Optional[Text] = uuid.uuid4().hex,
    use_syslog: bool = False,
    syslog_address: Optional[Text] = None,
    syslog_port: Optional[int] = None,
    syslog_protocol: Optional[Text] = None,
) -> Sanic:
    """Build and configure the Sanic app that serves the agent.

    Creates either the full API app or a bare app, registers the given input
    channels under ``route``, and (for the cmdline channel) schedules a console
    loop that stops the server when the user is done.
    """
    # Configure logging first so setup messages are captured.
    rasa.core.utils.configure_file_logging(
        logger, log_file, use_syslog, syslog_address, syslog_port, syslog_protocol,
    )
    if enable_api:
        app = server.create_app(
            cors_origins=cors,
            auth_token=auth_token,
            response_timeout=response_timeout,
            jwt_secret=jwt_secret,
            jwt_method=jwt_method,
            endpoints=endpoints,
        )
    else:
        # API disabled: serve only the root route plus channel webhooks.
        app = _create_app_without_api(cors)
    if input_channels:
        channels.channel.register(input_channels, app, route=route)
    else:
        input_channels = []
    if logger.isEnabledFor(logging.DEBUG):
        rasa.core.utils.list_routes(app)

    async def configure_async_logging() -> None:
        # Enable event-loop debug mode only when debug logging is active.
        if logger.isEnabledFor(logging.DEBUG):
            rasa.utils.io.enable_async_loop_debugging(asyncio.get_event_loop())

    app.add_task(configure_async_logging)
    if "cmdline" in {c.name() for c in input_channels}:

        async def run_cmdline_io(running_app: Sanic) -> None:
            """Small wrapper to shut down the server once cmd io is done."""
            await asyncio.sleep(1)  # allow server to start
            await console.record_messages(
                server_url=constants.DEFAULT_SERVER_FORMAT.format("http", port),
                sender_id=conversation_id,
            )
            logger.info("Killing Sanic server now.")
            running_app.stop()  # kill the sanic server

        app.add_task(run_cmdline_io)
    return app
def serve_application(
    model_path: Optional[Text] = None,
    channel: Optional[Text] = None,
    interface: Optional[Text] = constants.DEFAULT_SERVER_INTERFACE,
    port: int = constants.DEFAULT_SERVER_PORT,
    credentials: Optional[Text] = None,
    cors: Optional[Union[Text, List[Text]]] = None,
    auth_token: Optional[Text] = None,
    enable_api: bool = True,
    response_timeout: int = constants.DEFAULT_RESPONSE_TIMEOUT,
    jwt_secret: Optional[Text] = None,
    jwt_method: Optional[Text] = None,
    endpoints: Optional[AvailableEndpoints] = None,
    remote_storage: Optional[Text] = None,
    log_file: Optional[Text] = None,
    ssl_certificate: Optional[Text] = None,
    ssl_keyfile: Optional[Text] = None,
    ssl_ca_file: Optional[Text] = None,
    ssl_password: Optional[Text] = None,
    # NOTE(review): evaluated once at import time — all callers using the
    # default share one conversation id per process; confirm intended.
    conversation_id: Optional[Text] = uuid.uuid4().hex,
    use_syslog: Optional[bool] = False,
    syslog_address: Optional[Text] = None,
    syslog_port: Optional[int] = None,
    syslog_protocol: Optional[Text] = None,
) -> None:
    """Run the API entrypoint.

    Builds the input channels and the Sanic app, wires up SSL, agent loading
    and shutdown hooks, then blocks in ``app.run`` until the server stops.
    """
    # With neither a connector nor credentials, fall back to console chat.
    if not channel and not credentials:
        channel = "cmdline"
    input_channels = create_http_input_channels(channel, credentials)
    app = configure_app(
        input_channels,
        cors,
        auth_token,
        enable_api,
        response_timeout,
        jwt_secret,
        jwt_method,
        port=port,
        endpoints=endpoints,
        log_file=log_file,
        conversation_id=conversation_id,
        use_syslog=use_syslog,
        syslog_address=syslog_address,
        syslog_port=syslog_port,
        syslog_protocol=syslog_protocol,
    )
    ssl_context = server.create_ssl_context(
        ssl_certificate, ssl_keyfile, ssl_ca_file, ssl_password
    )
    protocol = "https" if ssl_context else "http"
    logger.info(f"Starting Rasa server on {protocol}://{interface}:{port}")
    # Load the agent once the server starts; release resources when it stops.
    app.register_listener(
        partial(load_agent_on_start, model_path, endpoints, remote_storage),
        "before_server_start",
    )
    app.register_listener(close_resources, "after_server_stop")
    number_of_workers = rasa.core.utils.number_of_sanic_workers(
        endpoints.lock_store if endpoints else None
    )
    telemetry.track_server_start(
        input_channels, endpoints, model_path, number_of_workers, enable_api
    )
    rasa.utils.common.update_sanic_log_level(
        log_file, use_syslog, syslog_address, syslog_port, syslog_protocol,
    )
    app.run(
        host=interface,
        port=port,
        ssl=ssl_context,
        backlog=int(os.environ.get(ENV_SANIC_BACKLOG, "100")),
        workers=number_of_workers,
    )
# noinspection PyUnusedLocal
async def load_agent_on_start(
    model_path: Text,
    endpoints: AvailableEndpoints,
    remote_storage: Optional[Text],
    app: Sanic,
    loop: AbstractEventLoop,
) -> Agent:
    """Load an agent.

    Used to be scheduled on server start
    (hence the `app` and `loop` arguments).

    Args:
        model_path: Path of the model for the agent to load.
        endpoints: Endpoint configuration forwarded to `agent.load_agent`.
        remote_storage: Optional remote location to load the model from.
        app: The Sanic application; the loaded agent is stored as `app.agent`.
        loop: Event loop the agent should run on.

    Returns:
        The loaded agent (also attached to `app.agent`).
    """
    app.agent = await agent.load_agent(
        model_path=model_path,
        remote_storage=remote_storage,
        endpoints=endpoints,
        loop=loop,
    )
    logger.info("Rasa server is up and running.")
    return app.agent
async def close_resources(app: Sanic, _: AbstractEventLoop) -> None:
    """Gracefully closes resources when shutting down server.

    Args:
        app: The Sanic application.
        _: The current Sanic worker event loop.
    """
    agent_instance = getattr(app, "agent", None)
    if not agent_instance:
        logger.debug("No agent found when shutting down server.")
        return

    # Only the event broker (if any) needs an explicit async close.
    broker = agent_instance.tracker_store.event_broker
    if broker:
        await broker.close()
| |
#!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Test case for verifying that the operational monitoring related data of
# HTTP GET metadata requests are stored correctly by the operational monitoring daemon.
import os
import sys
import time
sys.path.append('..')
import python_common as common
def _expected_keys_and_values_of_wsdl_query_rec(
security_server_address, security_server_type):
return [
("clientMemberClass", "GOV"),
("clientMemberCode", "00000001"),
("clientSecurityServerAddress", "xtee9.ci.kit"),
("clientXRoadInstance", "XTEE-CI-XM"),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 0),
("requestSoapSize", 1057),
("responseAttachmentCount", 1),
("responseMimeSize", 15800),
("responseSoapSize", 1255),
("securityServerInternalIp", security_server_address),
("securityServerType", security_server_type),
("serviceCode", "getWsdl"),
("serviceMemberClass", "GOV"),
("serviceMemberCode", "00000000"),
("serviceSecurityServerAddress", "xtee8.ci.kit"),
("serviceSubsystemCode", "Center"),
("serviceVersion", "v1"),
("serviceXRoadInstance", "XTEE-CI-XM"),
("succeeded", True),
]
def run(client_security_server_address, producer_security_server_address,
        ssh_user, request_template_dir):
    """Run the metadata-request test scenario against both security servers.

    Sends verificationconf and listClients HTTP GET requests (expected NOT to
    be stored as operational data), then a wsdl GET request (expected to be
    stored), and verifies the operational data records on the client's and
    the producer's security server.
    """
    query_data_client_template_filename = os.path.join(
        request_template_dir, "query_operational_data_client_ss_owner_template.xml")
    query_data_producer_template_filename = os.path.join(
        request_template_dir, "query_operational_data_producer_ss_owner_template.xml")

    ### Metadata and operational data requests and the relevant checks

    client_timestamp_before_requests = common.get_remote_timestamp(
        client_security_server_address, ssh_user)

    print("\n---- Sending a verificationconf request to the client's security server ----\n")

    response = common.make_get_request(
        client_security_server_address + "/verificationconf")
    common.check_status(response)

    print("Received the following status code and response headers: \n")
    common.print_response_status_and_headers(response)

    print("\n---- Sending a listClients request to the client's security server ----\n")

    response = common.make_get_request(
        client_security_server_address + "/listClients")
    common.check_status(response)

    print("Received the following response: \n")
    common.print_response_status_and_headers(response)

    # Give the monitoring daemon time to persist any records before querying.
    common.wait_for_operational_data()

    client_timestamp_after_requests = common.get_remote_timestamp(
        client_security_server_address, ssh_user)
    producer_timestamp_after_requests = common.get_remote_timestamp(
        producer_security_server_address, ssh_user)

    # Now make an operational data request to the client's security server and check the
    # response payload.
    # We expect that neither of the requests sent above have been stored in the
    # operational monitoring database.
    print("\n---- Sending an operational data request to the client's security server ----\n")

    message_id = common.generate_message_id()
    print("Generated message ID %s for query data request" % (message_id, ))

    request_contents = common.format_query_operational_data_request_template(
        query_data_client_template_filename, message_id,
        client_timestamp_before_requests, client_timestamp_after_requests)
    print("Generated the following query data request for the client's security server: \n")
    print(request_contents)

    response = common.post_xml_request(
        client_security_server_address, request_contents,
        get_raw_stream=True)
    mime_parts, raw_response = common.parse_multipart_response(response)

    if mime_parts:
        soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
        common.print_multipart_soap_and_record_count(soap_part, record_count)
        # Neither metadata request above should have produced a record.
        common.check_record_count(record_count, 0)
    else:
        common.parse_and_check_soap_response(raw_response)

    # Wait a second to ensure that the previous operational data request is not included
    # in the operational data that we request below.
    time.sleep(1)

    print("\n---- Sending a wsdl request to the client's security server ----\n")

    client_timestamp_before_requests = common.get_remote_timestamp(
        client_security_server_address, ssh_user)
    producer_timestamp_before_requests = common.get_remote_timestamp(
        producer_security_server_address, ssh_user)

    response = common.make_get_request(
        client_security_server_address + "/wsdl?xRoadInstance=" \
                "XTEE-CI-XM&memberClass=GOV&memberCode=00000000&subsystemCode=" \
                "Center&serviceCode=xroadGetRandom&version=v1")
    common.check_status(response)

    print("Received the following response: \n")
    common.print_response_status_and_headers(response)

    common.wait_for_operational_data()

    client_timestamp_after_requests = common.get_remote_timestamp(
        client_security_server_address, ssh_user)
    producer_timestamp_after_requests = common.get_remote_timestamp(
        producer_security_server_address, ssh_user)

    # Now make operational data requests to both security servers and check the
    # response payloads. We expect that the wsdl request has been stored in the
    # operational monitoring database.
    print("\n---- Sending an operational data request to the client's security server ----\n")

    message_id = common.generate_message_id()
    print("Generated message ID %s for query data request" % (message_id, ))

    request_contents = common.format_query_operational_data_request_template(
        query_data_client_template_filename, message_id,
        client_timestamp_before_requests, client_timestamp_after_requests)
    print("Generated the following query data request for the client's security server: \n")
    print(request_contents)

    response = common.post_xml_request(
        client_security_server_address, request_contents,
        get_raw_stream=True)
    mime_parts, raw_response = common.parse_multipart_response(response)

    if mime_parts:
        soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
        common.print_multipart_soap_and_record_count(soap_part, record_count)
        json_payload = common.get_multipart_json_payload(mime_parts[1])

        # Check the presence of all the required fields in at least one JSON structure.
        common.assert_present_in_json(
            json_payload, _expected_keys_and_values_of_wsdl_query_rec(
                client_security_server_address, "Client"))

        # Check if the timestamps in the response are in the expected range.
        common.assert_expected_timestamp_values(
            json_payload, client_timestamp_before_requests, client_timestamp_after_requests)

        common.print_multipart_query_data_response(json_payload)
    else:
        common.parse_and_check_soap_response(raw_response)

    print("\n---- Sending an operational data request to the producer's " \
            "security server ----\n")

    message_id = common.generate_message_id()
    print("\nGenerated message ID %s for query data request" % (message_id, ))

    request_contents = common.format_query_operational_data_request_template(
        query_data_producer_template_filename, message_id,
        producer_timestamp_before_requests, producer_timestamp_after_requests)
    print("Generated the following query data request for the producer's " \
            "security server: \n")
    print(request_contents)

    response = common.post_xml_request(
        producer_security_server_address, request_contents,
        get_raw_stream=True)
    mime_parts, raw_response = common.parse_multipart_response(response)

    if mime_parts:
        soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
        common.print_multipart_soap_and_record_count(soap_part, record_count, is_client=False)
        json_payload = common.get_multipart_json_payload(mime_parts[1])

        # Check the presence of all the required fields in at least one JSON structure.
        common.assert_present_in_json(
            json_payload, _expected_keys_and_values_of_wsdl_query_rec(
                producer_security_server_address, "Producer"))

        # Check timestamp values
        common.assert_expected_timestamp_values(
            json_payload,
            producer_timestamp_before_requests, producer_timestamp_after_requests)

        common.print_multipart_query_data_response(json_payload)
    else:
        common.parse_and_check_soap_response(raw_response)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUCOSRemarkingPoliciesFetcher
from bambou import NURESTObject
class NUCOSRemarkingPolicyTable(NURESTObject):
    """ Represents a COSRemarkingPolicyTable in the VSD

        Notes:
            Provides the definition of a table that holds multiple FC to Dot1p mappings . Used in Egress QoS policies.
    """

    # REST resource names used by bambou when building request URLs.
    __rest_name__ = "cosremarkingpolicytable"
    __resource_name__ = "cosremarkingpolicytables"

    ## Constants

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a COSRemarkingPolicyTable instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> cosremarkingpolicytable = NUCOSRemarkingPolicyTable(id=u'xxxx-xxx-xxx-xxx', name=u'COSRemarkingPolicyTable')
                >>> cosremarkingpolicytable = NUCOSRemarkingPolicyTable(data=my_dict)
        """

        super(NUCOSRemarkingPolicyTable, self).__init__()

        # Read/Write Attributes

        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._description = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._creation_date = None
        self._owner = None
        self._external_id = None

        # Map each local attribute to its remote (VSD API) name so bambou can
        # serialize/deserialize the object.
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers

        # Fetchers retrieve child objects of this table from the API on demand.
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.cos_remarking_policies = NUCOSRemarkingPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value.

            Notes:
                A unique name of the fc-dot1p mapping table.
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                A unique name of the fc-dot1p mapping table.
        """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                A description of the fc-dot1p mapping table.
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                A description of the fc-dot1p mapping table.
        """
        self._description = value

    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                This attribute is named `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.

                This attribute is named `embeddedMetadata` in VSD API.
        """
        self._embedded_metadata = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value

    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.
        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.
        """
        self._owner = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
| |
# Copyright (c) 2006-2009 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from twisted.internet import reactor
from errno import ECONNRESET, ENOTCONN, ESHUTDOWN, EWOULDBLOCK, ENOBUFS, EAGAIN, \
EINTR
from datetime import datetime
from time import sleep
from threading import Thread, Condition
import socket
import sys, traceback
MAX_WORKERS = 30  # number of AsyncSender worker threads started per Udp_server instance
class AsyncSender(Thread):
    """Daemon worker thread that drains its server's outgoing-datagram queue.

    Pops `(data, address)` items from `userv.wi` (guarded by the
    `userv.wi_available` condition), resolves the destination and sends the
    datagram, retrying briefly on transient socket errors.  A `None` work item
    is a shutdown request; it is re-queued so every worker in the pool sees it.
    """
    userv = None

    def __init__(self, userv):
        Thread.__init__(self)
        self.userv = userv
        # `setDaemon` is deprecated; the attribute assignment is equivalent.
        self.daemon = True
        self.start()

    def run(self):
        while True:
            self.userv.wi_available.acquire()
            while len(self.userv.wi) == 0:
                self.userv.wi_available.wait()
            wi = self.userv.wi.pop(0)
            if wi is None:
                # Shutdown request, relay it further so sibling workers stop too
                self.userv.wi.append(None)
                self.userv.wi_available.notify()
            self.userv.wi_available.release()
            if wi is None:
                break
            data, address = wi
            try:
                ai = socket.getaddrinfo(address[0], None, self.userv.family)
            except Exception:
                # Unresolvable destination: drop this datagram.
                continue
            if self.userv.family == socket.AF_INET:
                address = (ai[0][4][0], address[1])
            else:
                address = (ai[0][4][0], address[1], ai[0][4][2], ai[0][4][3])
            # Retry a short, bounded number of times on transient errors.
            for i in range(0, 20):
                try:
                    if self.userv.skt.sendto(data, address) == len(data):
                        break
                except socket.error as why:
                    # Python 3: socket.error is OSError and is not subscriptable,
                    # so the old `why[0]` raised TypeError here — use .errno.
                    if why.errno not in (EWOULDBLOCK, ENOBUFS, EAGAIN):
                        break
                sleep(0.01)
        self.userv = None
class AsyncReceiver(Thread):
    """Daemon thread that blocks on the server socket and forwards datagrams.

    Each received datagram is dispatched to `userv.handle_read` on the Twisted
    reactor thread.  An empty read or a disconnect-class socket error ends the
    thread.
    """
    userv = None

    def __init__(self, userv):
        Thread.__init__(self)
        self.userv = userv
        # `setDaemon` is deprecated; the attribute assignment is equivalent.
        self.daemon = True
        self.start()

    def run(self):
        while True:
            try:
                data, address = self.userv.skt.recvfrom(8192)
                if not data:
                    break
            except Exception as why:
                # Python 3: socket.error is OSError and is not subscriptable,
                # so the old `why[0]` raised TypeError here — use .errno.
                if isinstance(why, socket.error) and why.errno in (ECONNRESET, ENOTCONN, ESHUTDOWN):
                    break
                if isinstance(why, socket.error) and why.errno in (EINTR,):
                    continue
                else:
                    print(datetime.now(), 'Udp_server: unhandled exception when receiving incoming data')
                    print('-' * 70)
                    traceback.print_exc(file = sys.stdout)
                    print('-' * 70)
                    sys.stdout.flush()
                    sleep(1)
                    continue
            if self.userv.family == socket.AF_INET6:
                # Normalize IPv6 source addresses to the bracketed form.
                address = ('[%s]' % address[0], address[1])
            reactor.callFromThread(self.userv.handle_read, data, address)
        self.userv = None
class Udp_server(object):
    """Threaded UDP transport.

    Incoming datagrams are read by an AsyncReceiver thread and dispatched to
    `data_callback` on the Twisted reactor thread; outgoing datagrams are
    queued in `self.wi` and drained by a pool of MAX_WORKERS AsyncSender
    threads.
    """
    skt = None
    family = None
    data_callback = None
    laddress = None
    sendqueue = None
    stats = None
    wi_available = None
    wi = None

    def __init__(self, address, data_callback, family = None):
        """Create the socket, optionally bind it, and start the worker threads.

        address       -- (host, port) tuple to bind to, or None for an unbound
                         socket; a bracketed host ('[::1]') implies IPv6.
        data_callback -- callable(data, address, udp_server) invoked per datagram.
        family        -- socket family; inferred from the address when None.
        """
        self.laddress = address
        if family is None:
            if address is not None and address[0].startswith('['):
                family = socket.AF_INET6
                address = (address[0][1:-1], address[1])
            else:
                family = socket.AF_INET
        self.family = family
        self.skt = socket.socket(family, socket.SOCK_DGRAM)
        if address is not None:
            # Resolve to a numeric sockaddr before binding.
            ai = socket.getaddrinfo(address[0], None, family)
            if family == socket.AF_INET:
                address = (ai[0][4][0], address[1])
            else:
                address = (ai[0][4][0], address[1], ai[0][4][2], ai[0][4][3])
            self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            if hasattr(socket, 'SO_REUSEPORT'):
                self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            self.skt.bind(address)
        self.data_callback = data_callback
        self.sendqueue = []
        self.stats = [0, 0, 0]
        self.wi_available = Condition()
        self.wi = []
        for i in range(0, MAX_WORKERS):
            AsyncSender(self)
        AsyncReceiver(self)

    def send_to(self, data, address):
        """Queue one datagram for delivery to `address` ('[x]' form for IPv6)."""
        if self.family == socket.AF_INET6:
            if not address[0].startswith('['):
                raise Exception('Invalid IPv6 address: %s' % address[0])
            address = (address[0][1:-1], address[1])
        self.wi_available.acquire()
        self.wi.append((data, address))
        self.wi_available.notify()
        self.wi_available.release()

    def handle_read(self, data, address):
        """Dispatch one received datagram to the user callback (reactor thread)."""
        self.stats[2] += 1
        try:
            self.data_callback(data, address, self)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed; callback errors are logged, not fatal.
            print(datetime.now(), 'Udp_server: unhandled exception when processing incoming data')
            print('-' * 70)
            traceback.print_exc(file = sys.stdout)
            print('-' * 70)
            sys.stdout.flush()

    def shutdown(self):
        """Stop the receiver and signal all sender workers to exit."""
        # NOTE(review): SHUT_RDWR on an unconnected UDP socket raises ENOTCONN
        # on some platforms — confirm this is acceptable for all callers.
        self.skt.shutdown(socket.SHUT_RDWR)
        self.wi_available.acquire()
        self.wi.append(None)
        self.wi_available.notify()
        self.wi_available.release()
        self.data_callback = None
if __name__ == '__main__':
    # Self-test: two IPv4 servers and two IPv6 servers exchange ping/pong.
    # UDP payloads are bytes under Python 3, so all payloads/comparisons use
    # bytes literals (the old str literals could never match received data
    # and sendto() would reject str outright).
    from sys import exit
    npongs = 2
    def ping_received(data, address, udp_server):
        print('ping_received')
        if not (data == b'ping!' and address == ('127.0.0.1', 54321)):
            exit(1)
        udp_server.send_to(b'pong!', address)
    def pong_received(data, address, udp_server):
        print('pong_received')
        if not (data == b'pong!' and address == ('127.0.0.1', 12345)):
            exit(1)
        global npongs
        npongs -= 1
        if npongs == 0:
            reactor.stop()
    def ping_received6(data, address, udp_server):
        print('ping_received6', address)
        if not (data == b'ping!' and address == ('[::1]', 54321)):
            exit(1)
        udp_server.send_to(b'pong!', address)
    def pong_received6(data, address, udp_server):
        print('pong_received6', address)
        if not (data == b'pong!' and address == ('[::1]', 12345)):
            exit(1)
        global npongs
        npongs -= 1
        if npongs == 0:
            reactor.stop()
    udp_server_ping = Udp_server(('127.0.0.1', 12345), ping_received)
    udp_server_pong = Udp_server(('127.0.0.1', 54321), pong_received)
    udp_server_pong.send_to(b'ping!', ('127.0.0.1', 12345))
    udp_server_ping6 = Udp_server(('[::1]', 12345), ping_received6)
    udp_server_pong6 = Udp_server(('::1', 54321), pong_received6, socket.AF_INET6)
    udp_server_pong6.send_to(b'ping!', ('[::1]', 12345))
    reactor.run()
| |
from xml.etree import ElementTree
from vistas.core.color import RGBColor
from vistas.core.graphics.feature import FeatureFactory
from vistas.core.graphics.terrain import TerrainTileFactory
from vistas.core.legend import StretchedLegend, CategoricalLegend
from vistas.core.plugins.data import DataPlugin
from vistas.core.plugins.option import Option, OptionGroup
from vistas.core.plugins.visualization import VisualizationPlugin3D
from vistas.core.timeline import Timeline
from vistas.ui.utils import *
GREY = RGBColor(0.5, 0.5, 0.5)  # neutral 50% grey color constant
class EnvisionVisualization(VisualizationPlugin3D):
id = 'envision_tiles_viz'
name = 'Envision'
description = 'Terrain visualization with features'
author = 'Conservation Biology Institute'
version = '1.0'
visualization_name = 'Envision'
    def __init__(self):
        """Initialize renderable state, render flags and the user-configurable options."""
        super().__init__()
        self._scene = None

        # Renderable objects for this scene
        self.tile_layer = None
        self.feature_layer = None
        self.feature_data = None
        self.delta_data = None

        # Flags for rendering
        self.needs_mesh = False
        self.needs_color = False
        self.current_attribute = None   # currently selected attribute (from the Attributes option)
        self.legend = None
        self.envision_style = None      # optional per-attribute style/category mapping

        # Options
        self._attributes = Option(self, Option.CHOICE, 'Attributes', 0)
        self._zoom = Option(self, Option.SLIDER, 'Zoom Level', 9, 5, 11, 1)
        self._transparency = Option(self, Option.SLIDER, 'Transparency', 0.75, 0.0, 1.0, 0.1)
        self._height = Option(self, Option.SLIDER, 'Height Multiplier', 1.0, 0.01, 5.0, 0.01)
        self._offset = Option(self, Option.FLOAT, 'Height Offset', 5, 0, 10)
        self._delta_toggle = Option(self, Option.CHECKBOX, 'Use Deltas', False)
        self._options = OptionGroup()
        self._options.items = [
            self._attributes, self._zoom, self._transparency, self._height, self._offset, self._delta_toggle
        ]
@property
def use_deltas(self):
return self._delta_toggle.value
def get_options(self):
return self._options
def update_option(self, option=None):
if option.plugin is not self:
return
# Update zoom layer for map
if option.name == self._zoom.name:
zoom = int(self._zoom.value)
if self.tile_layer and zoom != self.tile_layer.zoom:
self.tile_layer.zoom = zoom
if self.feature_layer and zoom != self.feature_layer.zoom:
self.feature_layer.zoom = zoom
elif option.name == self._transparency.name:
if self.feature_layer:
self.feature_layer.shader.alpha = self._transparency.value
elif option.name == self._attributes.name:
if self._attributes.selected != self.current_attribute:
self.current_attribute = self._attributes.selected
self.needs_color = True
elif option.name == self._height.name:
multiplier = self._height.value
if self.tile_layer:
self.tile_layer.shader.height_factor = multiplier
if self.feature_layer:
self.feature_layer.shader.height_factor = multiplier
elif option.name == self._offset.name:
offset = self._offset.value
if self.feature_layer:
self.feature_layer.shader.height_offset = offset
elif option.name == self._delta_toggle.name:
if self.delta_data and self.delta_data.time_info.is_temporal and \
self.is_delta_attribute(self.current_attribute):
self.needs_color = True
self.refresh()
def update_colors(self, build=True):
if self.feature_layer is None:
return
# Here we determine what type and how we are going to render the viz. Then we're going to send a render request
sample_feature = next(self.feature_data.get_features())
props = sample_feature.get('properties')
if self.envision_style is not None:
value = props.get(self.envision_style[self.current_attribute].get('column'))
if value is not None:
self.legend = CategoricalLegend(self.envision_style[self.current_attribute].get('categories'))
# Decide which color function to use
if self.delta_data and self.is_delta_attribute(self.current_attribute) and self.use_deltas:
self.feature_layer.set_color_function(self.color_deltas)
else:
self.feature_layer.set_color_function(self.color_shapes)
else: # Nothing to be done, color it grey
self.legend = None
self.feature_layer.set_color_function(None)
else: # Envision styling is not active
stats = self.feature_data.variable_stats(self.current_attribute)
value = props.get(self.current_attribute)
self.feature_layer.set_color_function(self.color_shapes)
if isinstance(value, (int, float)):
min_value = stats.min_value
max_value = stats.max_value
min_color = RGBColor(0, 0, 1)
max_color = RGBColor(1, 0, 0)
self.legend = StretchedLegend(min_value, max_value, min_color, max_color)
elif isinstance(value, str):
categories = [(RGBColor.random(), label) for label in stats.misc['unique_values']]
self.legend = CategoricalLegend(categories)
else:
self.legend = None
post_new_legend()
if self.needs_color:
self.feature_layer.needs_color = True
if build:
self.feature_layer.build()
def is_delta_attribute(self, variable):
if self.envision_style is not None:
return self.envision_style[variable].get('column') in self.delta_data.variables
return False
def timeline_changed(self):
if self.feature_data and self.delta_data:
self.needs_color = self.use_deltas and \
self.delta_data.time_info.is_temporal and \
self.is_delta_attribute(self.current_attribute)
self.refresh()
def get_legend(self, width, height):
if self.legend is not None:
return self.legend.render(width, height)
else:
return None
def has_legend(self):
return self.legend is not None
@property
def can_visualize(self):
return self.feature_data is not None
@property
def data_roles(self):
return [
(DataPlugin.FEATURE, 'Shapefile'),
(DataPlugin.ARRAY, 'Delta Array')
]
def parse_envision_style(self):
document = ElementTree.parse(self.feature_data.path.replace('.shp', '.xml'))
root = document.getroot()
# Parse the envision xml style one time into a dictionary
data = {}
for submenu in root:
for field in submenu:
field_data = dict(field.items())
col = field_data.get('col')
label = field_data.get('label')
field_data = {'column': col, 'label': label}
for piece in field:
if piece.tag != 'attributes':
continue
attr_data = [dict(attr.items()) for attr in piece]
field_data.update({'legend': attr_data})
data[label] = field_data
self.envision_style = data
# Make colors by category
empties = []
for column in self.envision_style:
legend = self.envision_style[column].get('legend')
if not legend: # If legend doesn't exist or the length is 0, discard from the style
empties.append(column)
continue
categories = []
for data in legend:
color = RGBColor(*[int(x) / 255 for x in data.get('color')[1:-1].split(',')])
label = data.get('label')
categories.append((color, label))
self.envision_style[column]['categories'] = categories
if 'minVal' in legend[0]:
minmax = []
for data in legend:
minmax.append((float(data.get('minVal')), float(data.get('maxVal'))))
self.envision_style[column]['minmax'] = minmax
for column in empties:
self.envision_style.pop(column)
# Remove variables from style that are not in the shapefile
variables = self.feature_data.variables
keys = list(self.envision_style.keys())
for key in keys:
column = self.envision_style[key].get('column')
if column not in variables:
self.envision_style.pop(key)
def set_data(self, data: DataPlugin, role):
if role == 0:
self.feature_data = data
self.needs_mesh = True
self.needs_color = True
if self.feature_data is not None:
try:
self.parse_envision_style()
self._attributes.labels = list(self.envision_style.keys())
self.current_attribute = self._attributes.labels[0]
except (ElementTree.ParseError, ValueError, FileNotFoundError):
post_message('XML parsing failed, defaulting to feature schema.', 1)
# Use shapefile colors instead
self.envision_style = None
self._attributes.labels = self.feature_data.variables
self.current_attribute = self._attributes.labels[0]
else:
self._attributes.labels = []
self.current_attribute = None
self.envision_style = None
elif role == 1:
if not isinstance(data, (type(None), VisualizationPlugin3D.by_name('envision_delta_reader'))):
raise ValueError('Delta Array role must use the Envision Delta Array Data Plugin.')
self.delta_data = data
def get_data(self, role):
if role == 0:
return self.feature_data
elif role == 1:
return self.delta_data
return None
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, scene):
if self._scene is not None:
if self.tile_layer:
self._scene.remove_object(self.tile_layer)
self._scene = scene
if self.tile_layer and self._scene is not None:
self._scene.add_object(self.tile_layer)
def refresh(self):
if self.needs_mesh:
self.create_terrain_mesh()
self.needs_mesh = False
if self.needs_color:
self.update_colors()
self.needs_color = False
post_redisplay()
def create_terrain_mesh(self):
if self.feature_data is not None:
zoom = int(self._zoom.value)
self.tile_layer = TerrainTileFactory(self.feature_data.extent, initial_zoom=zoom, plugin=self)
self.scene.add_object(self.tile_layer)
self.feature_layer = FeatureFactory(
self.feature_data.extent, self.feature_data, initial_zoom=zoom, plugin=self
)
self.update_colors(build=False)
self.scene.add_object(self.feature_layer)
self.needs_color = False
else:
self.scene.remove_all_objects()
if self.tile_layer is not None:
self.tile_layer.dispose()
self.tile_layer = None
if self.feature_layer is not None:
self.feature_layer.dispose()
self.feature_layer = None
def color_shapes(self, feature, data):
"""
Color features based either on Envision XML style or on a generic color scheme derived from the feature schema.
"""
if self.current_attribute is None or self.legend is None:
return GREY
if self.envision_style is not None:
envision_attribute = self.envision_style[self.current_attribute]
shp_attribute = envision_attribute.get('column')
legend = envision_attribute.get('legend')
value = feature.get('properties').get(shp_attribute)
minmax = envision_attribute.get('minmax', None)
string_value = ''
if minmax is not None:
for i, pair in enumerate(minmax):
if pair[0] <= value <= pair[1]:
string_value = legend[i].get('label')
break
else:
for entry in legend:
try:
val = float(entry.get('value'))
except ValueError:
continue
if val == value:
string_value = entry.get('label')
break
color = self.legend.get_color(string_value)
if color is None:
color = GREY
return color
# Fallback to coloring based on shapefile schema
else:
value = feature.get('properties').get(self.current_attribute)
return self.legend.get_color(value)
def color_deltas(self, feature, data):
"""
Color features based on presence in the Envision Delta Array. Features absent from delta array are colored grey.
"""
if self.legend is None:
return GREY
envision_attribute = self.envision_style[self.current_attribute]
shp_attribute = envision_attribute.get('column')
legend = envision_attribute.get('legend')
value = feature.get('properties').get(shp_attribute)
idu = int(feature.get('id'))
minmax = envision_attribute.get('minmax', None)
string_value = ''
try:
if 'delta_array' not in data:
data['delta_array'] = self.delta_data.get_data(shp_attribute, Timeline.app().current)
data['index'] = 0
if data.get('delta_array') is None:
self.feature_layer.set_color_function(self.color_shapes)
post_message('Could not retrieve deltas, defaulting to base value.', 1)
return self.color_shapes(feature, None)
darray = data.get('delta_array')
index = data.get('index')
if darray[index].idu == idu:
value += darray[index].new_value
data['index'] += 1
else:
if darray[index].idu < idu:
while darray[index].idu <= idu:
index += 1
data['index'] = index
return GREY
# Handle if we reached the end of the delta array
except IndexError:
return GREY
if minmax is not None:
for i, pair in enumerate(minmax):
if pair[0] <= value <= pair[1]:
string_value = legend[i].get('label')
break
color = self.legend.get_color(string_value)
if color is None:
color = GREY
return color
| |
from six import BytesIO
from vcr.filters import (
remove_headers, replace_headers,
remove_query_parameters, replace_query_parameters,
remove_post_data_parameters, replace_post_data_parameters,
decode_response
)
from vcr.compat import mock
from vcr.request import Request
import gzip
import json
import zlib
def test_replace_headers():
    """replace_headers: keep, drop, replace (statically and via callables)."""
    # This tests all of:
    # 1. keeping a header
    # 2. removing a header
    # 3. replacing a header
    # 4. replacing a header using a callable
    # 5. removing a header using a callable
    # 6. replacing a header that doesn't exist
    headers = {
        'one': ['keep'],
        'two': ['lose'],
        'three': ['change'],
        'four': ['shout'],
        'five': ['whisper'],
    }
    request = Request('GET', 'http://google.com', '', headers)
    replace_headers(request, [
        ('two', None),
        ('three', 'tada'),
        ('four', lambda key, value, request: value.upper()),
        ('five', lambda key, value, request: None),
        ('six', 'doesntexist'),
    ])
    # Note: header values are flattened from single-item lists to scalars.
    assert request.headers == {
        'one': 'keep',
        'three': 'tada',
        'four': 'SHOUT',
    }
def test_replace_headers_empty():
    """An empty replacement list leaves the headers untouched."""
    headers = {'hello': 'goodbye', 'secret': 'header'}
    request = Request('GET', 'http://google.com', '', headers)
    replace_headers(request, [])
    assert request.headers == headers
def test_replace_headers_callable():
    """Replacement callables receive key/value/request as keyword arguments."""
    # This goes beyond test_replace_headers() to ensure that the callable
    # receives the expected arguments.
    headers = {'hey': 'there'}
    request = Request('GET', 'http://google.com', '', headers)
    callme = mock.Mock(return_value='ho')
    replace_headers(request, [('hey', callme)])
    assert request.headers == {'hey': 'ho'}
    assert callme.call_args == ((), {'request': request,
                                     'key': 'hey',
                                     'value': 'there'})
def test_remove_headers():
    """remove_headers drops the named headers (backward-compatible wrapper)."""
    # Test the backward-compatible API wrapper.
    headers = {'hello': ['goodbye'], 'secret': ['header']}
    request = Request('GET', 'http://google.com', '', headers)
    remove_headers(request, ['secret'])
    assert request.headers == {'hello': 'goodbye'}
def test_replace_query_parameters():
    """replace_query_parameters: keep, drop, replace (statically and via callables)."""
    # This tests all of:
    # 1. keeping a parameter
    # 2. removing a parameter
    # 3. replacing a parameter
    # 4. replacing a parameter using a callable
    # 5. removing a parameter using a callable
    # 6. replacing a parameter that doesn't exist
    uri = 'http://g.com/?one=keep&two=lose&three=change&four=shout&five=whisper'
    request = Request('GET', uri, '', {})
    replace_query_parameters(request, [
        ('two', None),
        ('three', 'tada'),
        ('four', lambda key, value, request: value.upper()),
        ('five', lambda key, value, request: None),
        ('six', 'doesntexist'),
    ])
    # Surviving parameters come back sorted by key.
    assert request.query == [
        ('four', 'SHOUT'),
        ('one', 'keep'),
        ('three', 'tada'),
    ]
def test_remove_all_query_parameters():
    """Removing every parameter also strips the trailing '?' from the URI."""
    uri = 'http://g.com/?q=cowboys&w=1'
    request = Request('GET', uri, '', {})
    replace_query_parameters(request, [('w', None), ('q', None)])
    assert request.uri == 'http://g.com/'
def test_replace_query_parameters_callable():
    """Replacement callables receive key/value/request as keyword arguments."""
    # This goes beyond test_replace_query_parameters() to ensure that the
    # callable receives the expected arguments.
    uri = 'http://g.com/?hey=there'
    request = Request('GET', uri, '', {})
    callme = mock.Mock(return_value='ho')
    replace_query_parameters(request, [('hey', callme)])
    assert request.uri == 'http://g.com/?hey=ho'
    assert callme.call_args == ((), {'request': request,
                                     'key': 'hey',
                                     'value': 'there'})
def test_remove_query_parameters():
    """remove_query_parameters drops the named params (backward-compatible wrapper)."""
    # Test the backward-compatible API wrapper.
    uri = 'http://g.com/?q=cowboys&w=1'
    request = Request('GET', uri, '', {})
    remove_query_parameters(request, ['w'])
    assert request.uri == 'http://g.com/?q=cowboys'
def test_replace_post_data_parameters():
    """replace_post_data_parameters on a urlencoded body: keep/drop/replace."""
    # This tests all of:
    # 1. keeping a parameter
    # 2. removing a parameter
    # 3. replacing a parameter
    # 4. replacing a parameter using a callable
    # 5. removing a parameter using a callable
    # 6. replacing a parameter that doesn't exist
    body = b'one=keep&two=lose&three=change&four=shout&five=whisper'
    request = Request('POST', 'http://google.com', body, {})
    replace_post_data_parameters(request, [
        ('two', None),
        ('three', 'tada'),
        ('four', lambda key, value, request: value.upper()),
        ('five', lambda key, value, request: None),
        ('six', 'doesntexist'),
    ])
    assert request.body == b'one=keep&three=tada&four=SHOUT'
def test_remove_post_data_parameters():
    """remove_post_data_parameters drops named body params (backward-compatible wrapper)."""
    # Test the backward-compatible API wrapper.
    body = b'id=secret&foo=bar'
    request = Request('POST', 'http://google.com', body, {})
    remove_post_data_parameters(request, ['id'])
    assert request.body == b'foo=bar'
def test_preserve_multiple_post_data_parameters():
    """Repeated keys in the body survive filtering of an unrelated key."""
    body = b'id=secret&foo=bar&foo=baz'
    request = Request('POST', 'http://google.com', body, {})
    replace_post_data_parameters(request, [('id', None)])
    assert request.body == b'foo=bar&foo=baz'
def test_remove_all_post_data_parameters():
    """Removing every body parameter yields an empty body."""
    body = b'id=secret&foo=bar'
    request = Request('POST', 'http://google.com', body, {})
    replace_post_data_parameters(request, [('id', None), ('foo', None)])
    assert request.body == b''
def test_replace_json_post_data_parameters():
    """replace_post_data_parameters on an application/json body: keep/drop/replace."""
    # This tests all of:
    # 1. keeping a parameter
    # 2. removing a parameter
    # 3. replacing a parameter
    # 4. replacing a parameter using a callable
    # 5. removing a parameter using a callable
    # 6. replacing a parameter that doesn't exist
    body = b'{"one": "keep", "two": "lose", "three": "change", "four": "shout", "five": "whisper"}'
    request = Request('POST', 'http://google.com', body, {})
    request.headers['Content-Type'] = 'application/json'
    replace_post_data_parameters(request, [
        ('two', None),
        ('three', 'tada'),
        ('four', lambda key, value, request: value.upper()),
        ('five', lambda key, value, request: None),
        ('six', 'doesntexist'),
    ])
    # Compare parsed JSON, not raw bytes, so key ordering doesn't matter.
    request_data = json.loads(request.body.decode('utf-8'))
    expected_data = json.loads('{"one": "keep", "three": "tada", "four": "SHOUT"}')
    assert request_data == expected_data
def test_remove_json_post_data_parameters():
    """remove_post_data_parameters drops keys from a JSON body (backward-compatible wrapper)."""
    # Test the backward-compatible API wrapper.
    body = b'{"id": "secret", "foo": "bar", "baz": "qux"}'
    request = Request('POST', 'http://google.com', body, {})
    request.headers['Content-Type'] = 'application/json'
    remove_post_data_parameters(request, ['id'])
    request_body_json = json.loads(request.body.decode('utf-8'))
    expected_json = json.loads(b'{"foo": "bar", "baz": "qux"}'.decode('utf-8'))
    assert request_body_json == expected_json
def test_remove_all_json_post_data_parameters():
    """Removing every key from a JSON body leaves an empty JSON object."""
    body = b'{"id": "secret", "foo": "bar"}'
    request = Request('POST', 'http://google.com', body, {})
    request.headers['Content-Type'] = 'application/json'
    replace_post_data_parameters(request, [('id', None), ('foo', None)])
    assert request.body == b'{}'
def test_decode_response_uncompressed():
    """decode_response passes a response with no content-encoding through unchanged."""
    recorded_response = {
        "status": {
            "message": "OK",
            "code": 200
        },
        "headers": {
            "content-length": ["10806"],
            "date": ["Fri, 24 Oct 2014 18:35:37 GMT"],
            "content-type": ["text/html; charset=utf-8"],
        },
        "body": {
            "string": b""
        }
    }
    assert decode_response(recorded_response) == recorded_response
def test_decode_response_deflate():
    """decode_response inflates a deflate body and fixes content-length."""
    body = b'deflate message'
    deflate_response = {
        'body': {'string': zlib.compress(body)},
        'headers': {
            'access-control-allow-credentials': ['true'],
            'access-control-allow-origin': ['*'],
            'connection': ['keep-alive'],
            'content-encoding': ['deflate'],
            'content-length': ['177'],  # deliberately wrong; must be recomputed
            'content-type': ['application/json'],
            'date': ['Wed, 02 Dec 2015 19:44:32 GMT'],
            'server': ['nginx']
        },
        'status': {'code': 200, 'message': 'OK'}
    }
    decoded_response = decode_response(deflate_response)
    assert decoded_response['body']['string'] == body
    assert decoded_response['headers']['content-length'] == [str(len(body))]
def test_decode_response_gzip():
    """decode_response gunzips a gzip body and fixes content-length."""
    body = b'gzip message'
    # Build a gzip-compressed body in memory.
    buf = BytesIO()
    f = gzip.GzipFile('a', fileobj=buf, mode='wb')
    f.write(body)
    f.close()
    compressed_body = buf.getvalue()
    buf.close()
    gzip_response = {
        'body': {'string': compressed_body},
        'headers': {
            'access-control-allow-credentials': ['true'],
            'access-control-allow-origin': ['*'],
            'connection': ['keep-alive'],
            'content-encoding': ['gzip'],
            'content-length': ['177'],  # deliberately wrong; must be recomputed
            'content-type': ['application/json'],
            'date': ['Wed, 02 Dec 2015 19:44:32 GMT'],
            'server': ['nginx']
        },
        'status': {'code': 200, 'message': 'OK'}
    }
    decoded_response = decode_response(gzip_response)
    assert decoded_response['body']['string'] == body
    assert decoded_response['headers']['content-length'] == [str(len(body))]
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8791")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8791")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Killercoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Killercoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
import re
from builtins import object, range
from . import exceptions
class Purl(object):
    """Parse, manipulate, and re-serialize URLs.

    Supports query-string editing (`add_query` / `delete_query`) and path
    parameter interpolation, e.g. '/users/:id' -> '/users/42' via `param`.
    Accessors (`protocol`, `hostname`, `port`, `path`) act as getters when
    called without a value and as chainable setters otherwise.
    """

    def __init__(self, url, **options):
        """Parse `url` ('proto://host[:port][/path][?query]').

        options:
            interpolate: str or 2-sequence customizing the path-parameter
                markers (default start ':', end '').

        Raises exceptions.InvalidUrlError on a malformed URL.
        """
        super(Purl, self).__init__()
        # Markers wrapped around path-parameter names, e.g. '/users/:id'.
        self.param_interpolate = {
            'start': ':',
            'end': ''
        }
        for opt in options:
            if opt == 'interpolate' and len(options[opt]):
                self.setup_interpolate_settings(options[opt])
        split_url = url.split('?')
        baseurl_split = split_url[0].split('://')
        # protocol and host are required
        if len(baseurl_split) != 2:
            raise exceptions.InvalidUrlError
        host_port_split = Purl.__split_hostname_and_port(baseurl_split[1])
        self._protocol = baseurl_split[0] + '://'
        self._hostname = host_port_split[0]
        self._port = None
        self._path = None
        self._params = {}      # path-parameter name -> value
        self.query = None      # query-string dict (public attribute)
        self._path_compiled = None  # path with params substituted, if any
        # port + (path)
        if len(host_port_split) == 2:
            port_path = Purl.__split_once(host_port_split[1], '/')
            self._port = ':' + port_path[0]
            if len(port_path) == 2:
                self._path = port_path[1]
            # check port format
            if not Purl.__is_valid_port(self._port):
                raise exceptions.InvalidUrlError
        # hostname + (path)
        else:
            hostname_path = Purl.__split_once(host_port_split[0], '/')
            if len(hostname_path) == 2:
                self._hostname = hostname_path[0]
                self._path = hostname_path[1]
        if len(self._hostname) < 1:
            # BUG FIX: was `raise purl_exc.InvalidUrlError` — `purl_exc` is
            # undefined, so an empty hostname raised NameError instead.
            raise exceptions.InvalidUrlError
        # (query)
        try:
            self.query = Purl.__parse_querystring(split_url[1])
        except IndexError:
            self.query = {}

    @staticmethod
    def create_with(options):
        """Build a Purl from a dict with 'protocol', 'hostname', and
        optionally 'port' (':NNNN') and 'path'."""
        try:
            url = options['protocol'] + options['hostname']
        except KeyError:
            raise exceptions.InvalidUrlError
        if 'port' in options:
            if not Purl.__is_valid_port(options['port']):
                raise exceptions.InvalidUrlError
            url += options['port']
        if 'path' in options:
            url += options['path']
        return Purl(url)

    # check port format (':' followed by digits)
    @staticmethod
    def __is_valid_port(port):
        return not not re.match(r':\d+', port)

    ## url splitting helper
    @staticmethod
    def __split_once(s, target):
        """Split at the first occurrence of `target`, keeping the separator
        attached to the remainder: 'host/path' -> ['host', '/path'].
        Returns [s] when `target` is absent."""
        # BUG FIX: removed unreachable dead code that followed the returns.
        idx = s.find(target)
        if idx >= 0:
            return [s[:idx], s[idx:]]
        return [s]

    ## generate list containing hostname (and port if available)
    @staticmethod
    def __split_hostname_and_port(host_port):
        # NOTE: requires a letter immediately before ':' — a purely numeric
        # host such as '127.0.0.1:8080' will not be split here.
        host_port_split = None
        host_port_match = re.search('[a-zA-Z](:)', host_port)
        if host_port_match:
            border = host_port_match.start(1)
            host_port_split = [
                host_port[0:border],
                host_port[border + 1:]
            ]
        else:
            host_port_split = [host_port]
        return host_port_split

    def setup_interpolate_settings(self, options):
        """Set the path-parameter markers from a string (start only) or a
        1/2-sequence (start[, end])."""
        if isinstance(options, str):
            self.param_interpolate['start'] = options
        else:
            self.param_interpolate['start'] = options[0]
            if len(options) == 2:
                self.param_interpolate['end'] = options[1]

    ## update query
    def add_query(self, query, value=None):
        """Add one key/value, or merge a dict of pairs when `value` is None.
        Returns self for chaining."""
        if value is None:
            for k in query:
                self.add_query(k, query[k])
        else:
            self.query[query] = value
        return self

    ## delete keys from query
    def delete_query(self, query):
        """Delete one key or a list of keys (missing keys are ignored).
        Returns self for chaining."""
        if isinstance(query, list):
            for k in query:
                Purl.__del_dict(self.query, k)
        else:
            Purl.__del_dict(self.query, query)
        return self

    ## delete query helper
    @staticmethod
    def __del_dict(d, k):
        try:
            del d[k]
        except KeyError:
            pass

    ## update path params
    def param(self, param, value=None):
        """Set one path parameter, or a dict of them when `value` is None,
        then recompile the path. Returns self for chaining."""
        if value is None:
            for k in param:
                self._params[k] = param[k]
        else:
            self._params[param] = value
        self._path_compiled = self.path_with_params()
        return self

    def path_with_params(self):
        """Return the path with every stored parameter substituted in."""
        split_path = self._path.split('/')
        for param in self._params:
            value = self._params[param]
            param = self.__to_param_key(param)
            for i in range(0, len(split_path)):
                if (param == re.sub(' ', '', split_path[i])):
                    split_path[i] = Purl.__encode_string(value)
        path = '/'.join(split_path)
        return path

    def __to_param_key(self, param):
        # Wrap a parameter name with the interpolation markers, e.g. 'id' -> ':id'.
        return self.param_interpolate['start'] + str(param) + self.param_interpolate['end']

    ## generate querystring
    def querystring(self):
        """Serialize self.query as 'k=v&k2=v2' (no leading '?')."""
        qs = ''
        for key in self.query:
            # BUG FIX: look the value up with the raw key *before* encoding.
            # Previously the key was encoded first and then used for the
            # lookup, raising KeyError for non-string keys (e.g. True).
            v = Purl.__encode_string(self.query[key])
            k = Purl.__encode_string(key)
            qs += k + '=' + v + '&'
        # remove trailing ampersand
        if self.query:
            qs = qs[:-1]
        return qs

    ## convert querystring into a dict
    @staticmethod
    def __parse_querystring(qs):
        # Pairs without '=' (or with more than one) are silently skipped.
        query = {}
        split_qs = qs.split('&')
        for qs_pair in split_qs:
            qs_pair = qs_pair.split('=')
            if len(qs_pair) == 2:
                query[qs_pair[0]] = qs_pair[1]
        return query

    ## encode/decode stubs
    @staticmethod
    def __encode_string(s):
        # Booleans serialize as lowercase 'true'/'false'; everything else via str().
        if isinstance(s, bool):
            s = 'true' if s else 'false'
        s = str(s)
        return s

    @staticmethod
    def __decode_string(s):
        if s == 'true':
            s = True
        elif s == 'false':
            s = False
        return s

    ## generate url string
    def __str__(self):
        url = self._protocol + self._hostname
        qs = self.querystring()
        if self._port:
            url += self._port
        if self._path:
            # Prefer the parameter-substituted path when one was compiled.
            if self._path_compiled:
                url += self._path_compiled
            else:
                url += self._path
        if qs:
            url += '?' + qs
        return url

    def __repr__(self):
        s = '<Purl: url="' + str(self) + '">'
        return s

    ## attribute getters and chainable setters
    def protocol(self, value=None):
        if value is None:
            return self._protocol
        else:
            if not re.match('[a-zA-Z]+://', value):
                raise exceptions.InvalidUrlError
            self._protocol = value
            return self

    def hostname(self, value=None):
        if value is None:
            return self._hostname
        else:
            if len(value) < 1 or re.search(r'/|:', value):
                raise exceptions.InvalidUrlError
            self._hostname = value
            return self

    def port(self, value=None):
        if value is None:
            return self._port
        else:
            if len(value) and not Purl.__is_valid_port(value):
                raise exceptions.InvalidUrlError
            elif len(value) == 0:
                value = None
            self._port = value
            return self

    def path(self, value=None):
        if value is None:
            return self._path
        else:
            if len(value) and value[0] != '/':
                raise exceptions.InvalidUrlError
            elif len(value) == 0:
                value = None
            self._path = value
            return self
| |
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript parsing classes."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
import re
from closure_linter import javascripttokens
from closure_linter.common import matcher
from closure_linter.common import tokenizer
# Shorthand
Type = javascripttokens.JavaScriptTokenType
Matcher = matcher.Matcher
class JavaScriptModes(object):
  """Enumeration of the different matcher modes used for JavaScript."""

  # Each constant names a tokenizer state; JavaScriptTokenizer.BuildMatchers
  # maps every mode to the list of matchers active while in that mode.
  TEXT_MODE = 'text'
  SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
  DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
  BLOCK_COMMENT_MODE = 'block_comment'
  DOC_COMMENT_MODE = 'doc_comment'
  DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
  LINE_COMMENT_MODE = 'line_comment'
  PARAMETER_MODE = 'parameter'
  FUNCTION_MODE = 'function'
class JavaScriptTokenizer(tokenizer.Tokenizer):
  """JavaScript tokenizer.

  Convert JavaScript code in to an array of tokens.
  """

  # Useful patterns for JavaScript parsing.
  IDENTIFIER_CHAR = r'A-Za-z0-9_$'

  # Number patterns based on:
  # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
  MANTISSA = r"""
             (\d+(?!\.)) | # Matches '10'
             (\d+\.(?!\d)) | # Matches '10.'
             (\d*\.\d+) # Matches '.5' or '10.5'
             """
  DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
  HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
  NUMBER = re.compile(r"""
                      ((%s)|(%s))
                      """ % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)

  # Strings come in three parts - first we match the start of the string, then
  # the contents, then the end. The contents consist of any character except a
  # backslash or end of string, or a backslash followed by any character, or a
  # backslash followed by end of line to support correct parsing of multi-line
  # strings.
  SINGLE_QUOTE = re.compile(r"'")
  SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
  DOUBLE_QUOTE = re.compile(r'"')
  DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')

  START_SINGLE_LINE_COMMENT = re.compile(r'//')
  END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')

  START_DOC_COMMENT = re.compile(r'/\*\*')
  START_BLOCK_COMMENT = re.compile(r'/\*')
  END_BLOCK_COMMENT = re.compile(r'\*/')
  BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')

  # Comment text is anything that we are not going to parse into another special
  # token like (inline) flags or end comments. Complicated regex to match
  # most normal characters, and '*', '{', '}', and '@' when we are sure that
  # it is safe. Expression [^*{\s]@ must come first, or the other options will
  # match everything before @, and we won't match @'s that aren't part of flags
  # like in email addresses in the @author tag.
  DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
  DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')

  # Match anything that is allowed in a type definition, except for tokens
  # needed to parse it (and the lookahead assertion for "*/").
  DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')

  # Match the prefix ' * ' that starts every line of jsdoc. Want to include
  # spaces after the '*', but nothing else that occurs after a '*', and don't
  # want to match the '*' in '*/'.
  DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')

  START_BLOCK = re.compile('{')
  END_BLOCK = re.compile('}')

  REGEX_CHARACTER_CLASS = r"""
                          \[ # Opening bracket
                          ([^\]\\]|\\.)* # Anything but a ] or \,
                                         # or a backslash followed by anything
                          \] # Closing bracket
                          """

  # We ensure the regex is followed by one of the above tokens to avoid
  # incorrectly parsing something like x / y / z as x REGEX(/ y /) z
  POST_REGEX_LIST = [
      ';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']

  REGEX = re.compile(r"""
                     / # opening slash
                     (?!\*) # not the start of a comment
                     (\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
                                           # or anything but a / or [ or \,
                                           # or a character class
                     / # closing slash
                     [gimsx]* # optional modifiers
                     (?=\s*(%s))
                     """ % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
                     re.VERBOSE)

  ANYTHING = re.compile(r'.*')
  PARAMETERS = re.compile(r'[^\)]+')
  CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')

  FUNCTION_DECLARATION = re.compile(r'\bfunction\b')

  OPENING_PAREN = re.compile(r'\(')
  CLOSING_PAREN = re.compile(r'\)')

  OPENING_BRACKET = re.compile(r'\[')
  CLOSING_BRACKET = re.compile(r'\]')

  # We omit these JS keywords from the list:
  #   function - covered by FUNCTION_DECLARATION.
  #   delete, in, instanceof, new, typeof - included as operators.
  #   this - included in identifiers.
  #   null, undefined - not included, should go in some "special constant" list.
  KEYWORD_LIST = [
      'break',
      'case',
      'catch',
      'continue',
      'default',
      'do',
      'else',
      'finally',
      'for',
      'if',
      'return',
      'switch',
      'throw',
      'try',
      'var',
      'while',
      'with',
  ]

  # List of regular expressions to match as operators. Some notes: for our
  # purposes, the comma behaves similarly enough to a normal operator that we
  # include it here. r'\bin\b' actually matches 'in' surrounded by boundary
  # characters - this may not match some very esoteric uses of the in operator.
  # Operators that are subsets of larger operators must come later in this list
  # for proper matching, e.g., '>>' must come AFTER '>>>'.
  OPERATOR_LIST = [
      ',',
      r'\+\+',
      '===',
      '!==',
      '>>>=',
      '>>>',
      '==',
      '>=',
      '<=',
      '!=',
      '<<=',
      '>>=',
      '<<',
      '>>',
      '=>',
      '>',
      '<',
      r'\+=',
      r'\+',
      '--',
      r'\^=',
      '-=',
      '-',
      '/=',
      '/',
      r'\*=',
      r'\*',
      '%=',
      '%',
      '&&',
      r'\|\|',
      '&=',
      '&',
      r'\|=',
      r'\|',
      '=',
      '!',
      ':',
      r'\?',
      r'\^',
      r'\bdelete\b',
      r'\bin\b',
      r'\binstanceof\b',
      r'\bnew\b',
      r'\btypeof\b',
      r'\bvoid\b',
      r'\.',
  ]
  OPERATOR = re.compile('|'.join(OPERATOR_LIST))

  WHITESPACE = re.compile(r'\s+')
  SEMICOLON = re.compile(r';')

  # Technically JavaScript identifiers can't contain '.', but we treat a set of
  # nested identifiers as a single identifier, except for trailing dots.
  NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
  IDENTIFIER = re.compile(NESTED_IDENTIFIER)

  SIMPLE_LVALUE = re.compile(r"""
                             (?P<identifier>%s) # a valid identifier
                             (?=\s* # optional whitespace
                             \= # look ahead to equal sign
                             (?!=)) # not follwed by equal
                             """ % NESTED_IDENTIFIER, re.VERBOSE)

  # A doc flag is a @ sign followed by non-space characters that appears at the
  # beginning of the line, after whitespace, or after a '{'. The look-behind
  # check is necessary to not match someone@google.com as a flag.
  DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
  # To properly parse parameter names and complex doctypes containing
  # whitespace, we need to tokenize whitespace into a token after certain
  # doctags. All statetracker.HAS_TYPE that are not listed here must not contain
  # any whitespace in their types.
  DOC_FLAG_LEX_SPACES = re.compile(
      r'(^|(?<=\s))@(?P<name>%s)\b' %
      '|'.join([
          'const',
          'enum',
          'extends',
          'final',
          'implements',
          'param',
          'private',
          'protected',
          'public',
          'return',
          'type',
          'typedef'
      ]))

  DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')

  DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
  DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
  DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')

  # Star followed by non-slash, i.e a star that does not end a comment.
  # This is used for TYPE_GROUP below.
  SAFE_STAR = r'(\*(?!/))'

  # Matchers shared by both doc-comment modes; order matters — the first
  # matcher to match wins (see BuildMatchers).
  COMMON_DOC_MATCHERS = [
      # Find the end of the comment.
      Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
              JavaScriptModes.TEXT_MODE),

      # Tokenize documented flags like @private.
      Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
      Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
              JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),

      # Encountering a doc flag should leave lex spaces mode.
      Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),

      # Tokenize braces so we can find types.
      Matcher(START_BLOCK, Type.DOC_START_BRACE),
      Matcher(END_BLOCK, Type.DOC_END_BRACE),

      # And some more to parse types.
      Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
      Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
      Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
      Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),

      Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]

  # When text is not matched, it is given this default type based on mode.
  # If unspecified in this map, the default default is Type.NORMAL.
  JAVASCRIPT_DEFAULT_TYPES = {
      JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
      JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
  }

  @classmethod
  def BuildMatchers(cls):
    """Builds the token matcher group.

    The token matcher groups work as follows: it is a list of Matcher objects.
    The matchers will be tried in this order, and the first to match will be
    returned. Hence the order is important because the matchers that come first
    overrule the matchers that come later.

    Returns:
      The completed token matcher group.
    """
    # Match a keyword string followed by a non-identifier character in order to
    # not match something like doSomething as do + Something.
    keyword = re.compile('(%s)((?=[^%s])|$)' % (
        '|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
    return {

        # Matchers for basic text mode.
        JavaScriptModes.TEXT_MODE: [
            # Check a big group - strings, starting comments, and regexes - all
            # of which could be intertwined. 'string with /regex/',
            # /regex with 'string'/, /* comment with /regex/ and string */ (and
            # so on)
            Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
                    JavaScriptModes.DOC_COMMENT_MODE),
            Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
                    JavaScriptModes.BLOCK_COMMENT_MODE),
            Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
                    Type.START_SINGLE_LINE_COMMENT),
            Matcher(cls.START_SINGLE_LINE_COMMENT,
                    Type.START_SINGLE_LINE_COMMENT,
                    JavaScriptModes.LINE_COMMENT_MODE),
            Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
                    JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
            Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
                    JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
            Matcher(cls.REGEX, Type.REGEX),

            # Next we check for start blocks appearing outside any of the items
            # above.
            Matcher(cls.START_BLOCK, Type.START_BLOCK),
            Matcher(cls.END_BLOCK, Type.END_BLOCK),

            # Then we search for function declarations.
            Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
                    JavaScriptModes.FUNCTION_MODE),

            # Next, we convert non-function related parens to tokens.
            Matcher(cls.OPENING_PAREN, Type.START_PAREN),
            Matcher(cls.CLOSING_PAREN, Type.END_PAREN),

            # Next, we convert brackets to tokens.
            Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
            Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),

            # Find numbers. This has to happen before operators because
            # scientific notation numbers can have + and - in them.
            Matcher(cls.NUMBER, Type.NUMBER),

            # Find operators and simple assignments
            Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
            Matcher(cls.OPERATOR, Type.OPERATOR),

            # Find key words and whitespace.
            Matcher(keyword, Type.KEYWORD),
            Matcher(cls.WHITESPACE, Type.WHITESPACE),

            # Find identifiers.
            Matcher(cls.IDENTIFIER, Type.IDENTIFIER),

            # Finally, we convert semicolons to tokens.
            Matcher(cls.SEMICOLON, Type.SEMICOLON)],

        # Matchers for single quote strings.
        JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
            Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
            Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
                    JavaScriptModes.TEXT_MODE)],

        # Matchers for double quote strings.
        JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
            Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
            Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
                    JavaScriptModes.TEXT_MODE)],

        # Matchers for block comments.
        JavaScriptModes.BLOCK_COMMENT_MODE: [
            # First we check for exiting a block comment.
            Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
                    JavaScriptModes.TEXT_MODE),

            # Match non-comment-ending text..
            Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],

        # Matchers for doc comments.
        JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
            Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],

        JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
            Matcher(cls.WHITESPACE, Type.COMMENT),
            Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],

        # Matchers for single line comments.
        JavaScriptModes.LINE_COMMENT_MODE: [
            # We greedy match until the end of the line in line comment mode.
            Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],

        # Matchers for code after the function keyword.
        JavaScriptModes.FUNCTION_MODE: [
            # Must match open paren before anything else and move into parameter
            # mode, otherwise everything inside the parameter list is parsed
            # incorrectly.
            Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
                    JavaScriptModes.PARAMETER_MODE),
            Matcher(cls.WHITESPACE, Type.WHITESPACE),
            Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],

        # Matchers for function parameters
        JavaScriptModes.PARAMETER_MODE: [
            # When in function parameter mode, a closing paren is treated
            # specially. Everything else is treated as lines of parameters.
            Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
                    JavaScriptModes.TEXT_MODE),
            Matcher(cls.PARAMETERS, Type.PARAMETERS,
                    JavaScriptModes.PARAMETER_MODE)]}

  def __init__(self, parse_js_doc=True):
    """Create a tokenizer object.

    Args:
      parse_js_doc: Whether to do detailed parsing of javascript doc comments,
          or simply treat them as normal comments. Defaults to parsing JsDoc.
    """
    matchers = self.BuildMatchers()
    if not parse_js_doc:
      # Make a copy so the original doesn't get modified.
      matchers = copy.deepcopy(matchers)
      # Treat doc comments exactly like plain block comments.
      matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
          JavaScriptModes.BLOCK_COMMENT_MODE]

    tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
                                 self.JAVASCRIPT_DEFAULT_TYPES)

  def _CreateToken(self, string, token_type, line, line_number, values=None):
    """Creates a new JavaScriptToken object.

    Args:
      string: The string of input the token contains.
      token_type: The type of token.
      line: The text of the line this token is in.
      line_number: The line number of the token.
      values: A dict of named values within the token. For instance, a
        function declaration may have a value called 'name' which captures the
        name of the function.

    Returns:
      The newly created JavaScriptToken.
    """
    # NOTE(review): line_number is passed twice; the trailing argument appears
    # to be the token's original line number — confirm against the
    # JavaScriptToken constructor signature.
    return javascripttokens.JavaScriptToken(string, token_type, line,
                                            line_number, values, line_number)
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import pkg_resources
import six
from neutron_lbaas.openstack.common._i18n import _
from neutron_lbaas.openstack.common import log as logging
# Module-level logger; the deprecated decorator below reports via
# LOG.deprecated().
LOG = logging.getLogger(__name__)
class deprecated(object):
    """A decorator to mark callables as deprecated.

    This decorator logs a deprecation message when the callable it decorates is
    used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an optional
    replacement.

    Examples:

    1. Specifying the required deprecated release

    >>> @deprecated(as_of=deprecated.ICEHOUSE)
    ... def a(): pass

    2. Specifying a replacement:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
    ... def b(): pass

    3. Specifying the release where the functionality may be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
    ... def c(): pass

    4. Specifying the deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
    ... def d(): pass

    5. Specifying a replacement, deprecated functionality will not be removed:

    >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
    ... def e(): pass

    """

    # Single-letter release codes; _get_safe_to_remove_release computes the
    # removal release by advancing `remove_in` letters from the `as_of` code.
    # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
    # expected we maintain a gap between Bexar and Folsom in this list.
    BEXAR = 'B'
    FOLSOM = 'F'
    GRIZZLY = 'G'
    HAVANA = 'H'
    ICEHOUSE = 'I'
    JUNO = 'J'
    KILO = 'K'

    _RELEASES = {
        # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
        # expected we maintain a gap between Bexar and Folsom in this list.
        'B': 'Bexar',
        'F': 'Folsom',
        'G': 'Grizzly',
        'H': 'Havana',
        'I': 'Icehouse',
        'J': 'Juno',
        'K': 'Kilo',
    }

    # Message templates: with/without a replacement, with/without a planned
    # removal release. Selected in _build_message.
    _deprecated_msg_with_alternative = _(
        '%(what)s is deprecated as of %(as_of)s in favor of '
        '%(in_favor_of)s and may be removed in %(remove_in)s.')

    _deprecated_msg_no_alternative = _(
        '%(what)s is deprecated as of %(as_of)s and may be '
        'removed in %(remove_in)s. It will not be superseded.')

    _deprecated_msg_with_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')

    _deprecated_msg_with_no_alternative_no_removal = _(
        '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')

    def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
        """Initialize decorator

        :param as_of: the release deprecating the callable. Constants
            are define in this class for convenience.
        :param in_favor_of: the replacement for the callable (optional)
        :param remove_in: an integer specifying how many releases to wait
            before removing (default: 2)
        :param what: name of the thing being deprecated (default: the
            callable's name)

        """
        self.as_of = as_of
        self.in_favor_of = in_favor_of
        self.remove_in = remove_in
        self.what = what

    def __call__(self, func_or_cls):
        # Default `what` to the callable's name, e.g. "my_func()".
        if not self.what:
            self.what = func_or_cls.__name__ + '()'
        msg, details = self._build_message()

        if inspect.isfunction(func_or_cls):

            @six.wraps(func_or_cls)
            def wrapped(*args, **kwargs):
                LOG.deprecated(msg, details)
                return func_or_cls(*args, **kwargs)
            return wrapped
        elif inspect.isclass(func_or_cls):
            # For classes, patch __init__ so instantiation logs the warning.
            orig_init = func_or_cls.__init__

            # TODO(tsufiev): change `functools` module to `six` as
            # soon as six 1.7.4 (with fix for passing `assigned`
            # argument to underlying `functools.wraps`) is released
            # and added to the neutron_lbaas-incubator requrements
            @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
            def new_init(self, *args, **kwargs):
                LOG.deprecated(msg, details)
                orig_init(self, *args, **kwargs)
            func_or_cls.__init__ = new_init
            return func_or_cls
        else:
            raise TypeError('deprecated can be used only with functions or '
                            'classes')

    def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once
        # when we get to the X release because once we get to the Y
        # release, what is Y+2?
        # Advance `remove_in` letters; fall back to the raw letter when the
        # computed code has no known release name.
        new_release = chr(ord(release) + self.remove_in)
        if new_release in self._RELEASES:
            return self._RELEASES[new_release]
        else:
            return new_release

    def _build_message(self):
        """Pick the right message template and its substitution dict."""
        details = dict(what=self.what,
                       as_of=self._RELEASES[self.as_of],
                       remove_in=self._get_safe_to_remove_release(self.as_of))

        if self.in_favor_of:
            details['in_favor_of'] = self.in_favor_of
            if self.remove_in > 0:
                msg = self._deprecated_msg_with_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_alternative_no_removal
        else:
            if self.remove_in > 0:
                msg = self._deprecated_msg_no_alternative
            else:
                # There are no plans to remove this function, but it is
                # now deprecated.
                msg = self._deprecated_msg_with_no_alternative_no_removal
        return msg, details
def is_compatible(requested_version, current_version, same_major=True):
    """Determine whether `requested_version` is satisfied by
    `current_version`; in other words, `current_version` is >=
    `requested_version`.

    :param requested_version: version to check for compatibility
    :param current_version: version to check against
    :param same_major: if True, the major version must be identical between
        `requested_version` and `current_version`. This is used when a
        major-version difference indicates incompatibility between the two
        versions. Since this is the common-case in practice, the default is
        True.
    :returns: True if compatible, False if not
    """
    if same_major:
        # BUG FIX: the original indexed the object returned by
        # pkg_resources.parse_version() (`parsed[0]`). Newer setuptools
        # releases return a Version object that is not indexable, which made
        # that comparison raise TypeError. Compare the textual major
        # component instead; this works with both old and new setuptools.
        requested_major = requested_version.split('.')[0]
        current_major = current_version.split('.')[0]
        if requested_major != current_major:
            return False

    requested_parts = pkg_resources.parse_version(requested_version)
    current_parts = pkg_resources.parse_version(current_version)
    # parse_version results compare with full version-ordering semantics.
    return current_parts >= requested_parts
| |
# -*- encoding: utf-8 -*-
from google.appengine.ext import ndb
from google.appengine.ext.blobstore import BlobInfo
class DuplicatedContainerName(Exception):
    """Raised when saving a new Container whose name already exists."""
    pass
class Container(ndb.Model):
    """A navigation container (menu node); nests via the ``container`` key."""

    name = ndb.StringProperty(required = True)
    help = ndb.StringProperty(required = True, indexed = False)
    order = ndb.IntegerProperty(required = True, indexed = False)
    # Optional parent container key; None for top-level containers.
    container = ndb.KeyProperty(kind = 'Container')

    def _pre_put_hook(self):
        # Enforce unique names, but only for brand-new entities (no id yet),
        # so updates to an existing container do not trip the check.
        if not self.key.id():
            if Container.query(Container.name == self.name).count() > 0:
                raise DuplicatedContainerName('name already exists')

    def get_containerkey(self, name):
        """Return the key of the Container with the given name, or None."""
        thecontainer = ndb.gql("SELECT __key__ FROM Container WHERE name = :1", name).get()
        return thecontainer
class DuplicatedItemName(Exception):
    """Raised when saving a new Item whose route already exists."""
    pass
class Item(ndb.Model):
    """A navigation leaf entry (a routable page) hanging off a Container."""

    name = ndb.StringProperty(required = True, indexed = False)
    help = ndb.StringProperty(required = True, indexed = False)
    # URL route; used as the uniqueness key in _pre_put_hook.
    route = ndb.StringProperty(required = True)
    order = ndb.IntegerProperty(required = True, indexed = False)
    container = ndb.KeyProperty(kind = Container)

    def _pre_put_hook(self):
        # Enforce unique routes, but only for brand-new entities (no id yet).
        if not self.key.id():
            if Item.query(Item.route == self.route).count() > 0:
                raise DuplicatedItemName('route already exists')

    def get_item_route(self, route):
        """Return the Item with the given route, or None."""
        theitem = ndb.gql("SELECT * FROM Item WHERE route = :1", route).get()
        return theitem

    def get_nav(self):
        """Build a flat navigation list covering every Item and its ancestors.

        Each row is a list:
        [id, parent_id, is_item, name, help, route, order, grade, level]
        where grade counts upward from the item (0) to the root, and level
        counts downward from the root (root == 0). Containers already present
        in the accumulated nav are not repeated.
        """
        nav = []
        items = ndb.gql("SELECT * FROM Item").fetch()
        for i in items:
            navitem = []
            grade = 0
            # Row 0 is the item itself (is_item == True, route set).
            navitem.append([int(i.key.id()), int(i.container.id()), True, i.name, i.help, i.route, i.order, grade])
            # Read the leaf container
            grade += 1
            leafcontainer = ndb.gql("SELECT * FROM Container WHERE __key__ = :1", i.container).get()
            father_id = leafcontainer.container
            if father_id:
                father_id = int(leafcontainer.container.id())
            navitem.append([int(leafcontainer.key.id()), father_id, False, leafcontainer.name, leafcontainer.help, None, leafcontainer.order, grade])
            # Walk up the ancestor chain until a container has no parent.
            father = leafcontainer.container
            while father:
                grade += 1
                fathercontainer = ndb.gql("SELECT * FROM Container WHERE __key__ = :1", father).get()
                father_id = fathercontainer.container
                if father_id:
                    father_id = int(fathercontainer.container.id())
                navitem.append([int(fathercontainer.key.id()), father_id, False, fathercontainer.name, fathercontainer.help, None, fathercontainer.order, grade])
                father = fathercontainer.container
            # Append the level (root == 0) as a 9th element of every row.
            maxlevel = len(navitem)
            level = maxlevel
            for j in range(0, maxlevel):
                navitem[j].append(level - 1)
                level -= 1
            # Identify the first container already in nav and store its grade
            grade = 0
            isinnav = False
            for ni in navitem:
                if grade > 0:
                    for n in nav:
                        if ni[0] == n[0]:
                            isinnav = True
                if isinnav:
                    break
                grade += 1
            # Erase containers already in nav according to its grade
            # (rows are ordered by ascending grade, so pop from the tail while
            # the tail's grade — index 7 — is at or above the found grade; the
            # item row at grade 0 always survives).
            while True:
                if navitem[-1][7] >= grade:
                    navitem.pop()
                else:
                    break
            # Emit remaining rows root-first (reverse of navitem order).
            while len(navitem):
                nav.append(navitem.pop())
        return nav
class DuplicatedIngredientName(Exception):
    """Raised when saving a new Ingredient whose name already exists."""
    pass
class Ingredient(ndb.Model):
    """A recipe ingredient, identified by its unique name."""

    name = ndb.StringProperty(required = True)

    def _pre_put_hook(self):
        # Enforce unique names, but only for brand-new entities (no id yet).
        if not self.key.id():
            if Ingredient.query(Ingredient.name == self.name).count() > 0:
                raise DuplicatedIngredientName('name already exists')

    def get_ingredientkey(self, name):
        """Return the key of the Ingredient with the given name, or None."""
        theingredient = ndb.gql("SELECT __key__ FROM Ingredient WHERE name = :1", name).get()
        return theingredient
class DuplicatedSpecialityName(Exception):
    """Raised when saving a new Speciality whose name already exists."""
    pass
class Speciality(ndb.Model):
    """A cuisine speciality used to categorize dishes."""

    name = ndb.StringProperty(required = True)

    def _pre_put_hook(self):
        # Enforce unique names, but only for brand-new entities (no id yet).
        if not self.key.id():
            if Speciality.query(Speciality.name == self.name).count() > 0:
                raise DuplicatedSpecialityName('name already exists')

    def get_specialitykey(self, name):
        """Return the key of the Speciality with the given name, or None."""
        thespeciality = ndb.gql("SELECT __key__ FROM Speciality WHERE name = :1", name).get()
        return thespeciality

    def get_specialities(self):
        """Return (id, name) choice tuples sorted by name, with a placeholder
        (0, ...) entry first — shaped for a select-widget choices list."""
        specialities = ndb.gql("SELECT * FROM Speciality").order(Speciality.name).fetch()
        s = []
        s.append((0, 'Select an speciality'))
        for speciality in specialities:
            s.append((speciality.key.id(), speciality.name))
        return s
class DuplicatedDishName(Exception):
    """Raised when saving a new Dish whose name already exists."""
    pass
class Dish(ndb.Model):
    """A recipe: text fields plus blobstore-backed media attachments."""

    name = ndb.StringProperty(required = True)
    description = ndb.StringProperty(required = True, indexed = False)
    servings = ndb.IntegerProperty(required = True, indexed = False)
    directions = ndb.StringProperty(required = True, indexed = False)
    # Media stored in the blobstore; properties hold BlobKeys only.
    photo = ndb.BlobKeyProperty()
    video = ndb.BlobKeyProperty()
    audio = ndb.BlobKeyProperty()
    photogallery = ndb.BlobKeyProperty(repeated=True)
    speciality = ndb.KeyProperty(kind = Speciality)

    def _pre_put_hook(self):
        # Enforce unique names, but only for brand-new entities (no id yet).
        if not self.key.id():
            if Dish.query(Dish.name == self.name).count() > 0:
                raise DuplicatedDishName('name already exists')

    def get_dishkey(self, name):
        """Return the key of the Dish with the given name, or None."""
        thedish = ndb.gql("SELECT __key__ FROM Dish WHERE name = :1", name).get()
        return thedish

    def get_dishes(self, speciality_id):
        """Return (id, name) choice tuples for dishes of a speciality,
        sorted by name, with a placeholder (0, ...) entry first."""
        thespeciality = ndb.Key('Speciality', speciality_id)
        dishes = ndb.gql("SELECT * FROM Dish WHERE speciality = :1", thespeciality).order(Dish.name).fetch()
        d = []
        d.append((0, 'Select a dish'))
        for dish in dishes:
            d.append((dish.key.id(), dish.name))
        return d

    def getphoto(self, dish_id):
        """Return the BlobInfo for the dish's photo."""
        dish = Dish.get_by_id(int(dish_id))
        docinfo = BlobInfo.get(dish.photo)
        return docinfo

    def getaudio(self, dish_id):
        """Return the BlobInfo for the dish's audio clip."""
        dish = Dish.get_by_id(int(dish_id))
        docinfo = BlobInfo.get(dish.audio)
        return docinfo

    def getvideo(self, dish_id):
        """Return the BlobInfo for the dish's video."""
        dish = Dish.get_by_id(int(dish_id))
        docinfo = BlobInfo.get(dish.video)
        return docinfo

    def getphotogallery(self, dish_id):
        """Return BlobInfo data for the dish's photo gallery."""
        dish = Dish.get_by_id(int(dish_id))
        # photogallery is a repeated property, so BlobInfo.get receives a
        # list of BlobKeys here.
        docinfo = BlobInfo.get(dish.photogallery)
        return docinfo

    def update(self, dish_id, description, servings, directions):
        """Update the text fields; put() only if something actually changed."""
        dish = Dish.get_by_id(int(dish_id))
        actualizar = False
        if dish.description != description:
            dish.description = description
            actualizar = True
        if dish.servings != servings:
            dish.servings = servings
            actualizar = True
        if dish.directions != directions:
            dish.directions = directions
            actualizar = True
        if actualizar:
            dish.put()

    def updatephoto(self, dish_id, photo):
        """Replace the photo, deleting the previous blob if one exists."""
        dish = Dish.get_by_id(int(dish_id))
        if dish.photo:
            blob_info = BlobInfo.get(dish.photo)
            blob_info.delete()
        dish.photo = photo
        dish.put()

    def updateaudio(self, dish_id, audio):
        """Replace the audio clip, deleting the previous blob if one exists."""
        dish = Dish.get_by_id(int(dish_id))
        if dish.audio:
            blob_info = BlobInfo.get(dish.audio)
            blob_info.delete()
        dish.audio = audio
        dish.put()

    def updatevideo(self, dish_id, video):
        """Replace the video, deleting the previous blob if one exists."""
        dish = Dish.get_by_id(int(dish_id))
        if dish.video:
            blob_info = BlobInfo.get(dish.video)
            blob_info.delete()
        dish.video = video
        dish.put()

    def updatephotogallery(self, dish_id, photogallery):
        """Replace the whole gallery, deleting every previous blob first."""
        dish = Dish.get_by_id(int(dish_id))
        for photo in dish.photogallery:
            blob_info = BlobInfo.get(photo)
            blob_info.delete()
        dish.photogallery = []
        for photo in photogallery:
            dish.photogallery.append(photo)
        dish.put()
class DuplicatedUnitName(Exception):
    """Raised when saving a new Unit whose name already exists."""
    pass
class Unit(ndb.Model):
    """A measurement unit for recipe ingredients (e.g. grams, cups)."""

    name = ndb.StringProperty(required = True)

    def _pre_put_hook(self):
        # Enforce unique names, but only for brand-new entities (no id yet).
        if not self.key.id():
            if Unit.query(Unit.name == self.name).count() > 0:
                raise DuplicatedUnitName('name already exists')

    def get_unitkey(self, name):
        """Return the key of the Unit with the given name, or None."""
        theunit = ndb.gql("SELECT __key__ FROM Unit WHERE name = :1", name).get()
        return theunit
class DuplicatedIngredientdish(Exception):
    """Raised when saving a new Ingredientdish duplicating an existing link."""
    pass
class Ingredientdish(ndb.Model):
    """Join entity linking an Ingredient to a Dish with a quantity and unit."""

    quantity = ndb.FloatProperty(required = True, indexed = False)
    unit = ndb.KeyProperty(kind = Unit)
    dish = ndb.KeyProperty(kind = Dish)
    ingredient = ndb.KeyProperty(kind = Ingredient)
    # Position of the ingredient within the dish's recipe listing.
    order = ndb.IntegerProperty(required = True)

    def _pre_put_hook(self):
        # Enforce uniqueness of the (unit, dish, ingredient) triple for
        # brand-new entities (no id yet).
        if not self.key.id():
            # BUG FIX: the original queried the wrong model (Unit.query) and
            # combined the conditions with the Python 'and' operator, which
            # evaluates to only the LAST filter node — so the check neither
            # targeted this kind nor tested the full triple. Query this model
            # and pass all three filters so they are ANDed by the datastore.
            existing = Ingredientdish.query(
                Ingredientdish.unit == self.unit,
                Ingredientdish.dish == self.dish,
                Ingredientdish.ingredient == self.ingredient)
            if existing.count() > 0:
                raise DuplicatedIngredientdish('Ingredient Dish already exists')

    def get_ingredientsofadish(self, dish_id):
        """Return [ingredient_name, quantity, unit_name] rows for a dish,
        sorted by recipe order."""
        thedish = ndb.Key('Dish', dish_id)
        ing = ndb.gql("SELECT * FROM Ingredientdish WHERE dish = :1", thedish).order(Ingredientdish.order).fetch()
        ingredients = []
        for i in ing:
            # Dereference the keys to fetch display names.
            ingredient = i.ingredient.get()
            unit = i.unit.get()
            ingredients.append([ingredient.name, i.quantity, unit.name])
        return ingredients
| |
#!/usr/bin/env python
"""This file contains common grr jobs."""
import gc
import logging
import pdb
import threading
import time
import traceback
import psutil
from grr.client import client_utils
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
# Our first response in the session is this: response ids start at 1 and are
# incremented by ActionPlugin.SendReply after every reply sent.
INITIAL_RESPONSE_ID = 1
class Error(Exception):
    """Base class for exceptions raised by this module."""
class CPUExceededError(Error):
    """Raised when a client action exceeds its allowed CPU usage."""
    # NOTE(review): raise site is outside this chunk; semantics inferred from
    # the name and the cpu_limit bookkeeping in ActionPlugin — confirm there.
    pass
class NetworkBytesExceededError(Error):
    """Exceeded the maximum number of bytes allowed to be sent for this action."""
    # Caught in ActionPlugin.Execute, where it is mapped to the
    # NETWORK_LIMIT_EXCEEDED status returned to the server.
class ThreadNotFoundError(Error):
    """A suspended thread was requested that doesn't exist on the client."""
    # Raised by thread-management client actions (raise site not in this view).
class ActionPlugin(object):
"""Baseclass for plugins.
An action is a plugin abstraction which receives an rdfvalue and
sends another rdfvalue in response.
The code is specified in the Run() method, while the data is
specified in the in_rdfvalue and out_rdfvalues classes.
Windows and OS X client actions cannot be imported on the linux server since
they require OS-specific libraries. If you are adding a client action that
doesn't have a linux implementation, you will need to register it in
libs/server_stubs.py
Windows and OS X implementations of client actions with the same name (e.g.
EnumerateInterfaces) as linux actions must accept and return the same rdfvalue
types as their linux counterparts.
"""
# The rdfvalue used to encode this message.
in_rdfvalue = None
# TODO(user): The RDFValue instance for the output protobufs. This is
# required temporarily until the client sends RDFValue instances instead of
# protobufs.
out_rdfvalues = [None]
# Authentication Required for this Action:
_authentication_required = True
__metaclass__ = registry.MetaclassRegistry
__abstract = True # pylint: disable=invalid-name
priority = rdf_flows.GrrMessage.Priority.MEDIUM_PRIORITY
require_fastpoll = True
last_progress_time = 0
def __init__(self, grr_worker=None):
"""Initializes the action plugin.
Args:
grr_worker: The grr client worker object which may be used to
e.g. send new actions on.
"""
self.grr_worker = grr_worker
self.response_id = INITIAL_RESPONSE_ID
self.cpu_used = None
self.nanny_controller = None
self.status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.OK)
self._last_gc_run = rdfvalue.RDFDatetime.Now()
self._gc_frequency = config_lib.CONFIG["Client.gc_frequency"]
self.proc = psutil.Process()
self.cpu_start = self.proc.cpu_times()
self.cpu_limit = rdf_flows.GrrMessage().cpu_limit
def Execute(self, message):
"""This function parses the RDFValue from the server.
The Run method will be called with the specified RDFValue.
Args:
message: The GrrMessage that we are called to process.
Returns:
Upon return a callback will be called on the server to register
the end of the function and pass back exceptions.
Raises:
RuntimeError: The arguments from the server do not match the expected
rdf type.
"""
self.message = message
if message:
self.priority = message.priority
self.require_fastpoll = message.require_fastpoll
args = None
try:
if self.message.args_rdf_name:
if not self.in_rdfvalue:
raise RuntimeError("Did not expect arguments, got %s." %
self.message.args_rdf_name)
if self.in_rdfvalue.__name__ != self.message.args_rdf_name:
raise RuntimeError("Unexpected arg type %s != %s." % (
self.message.args_rdf_name, self.in_rdfvalue.__name__))
args = self.message.payload
# Only allow authenticated messages in the client
if self._authentication_required and (
self.message.auth_state !=
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
raise RuntimeError("Message for %s was not Authenticated." %
self.message.name)
self.cpu_start = self.proc.cpu_times()
self.cpu_limit = self.message.cpu_limit
if getattr(flags.FLAGS, "debug_client_actions", False):
pdb.set_trace()
try:
self.Run(args)
# Ensure we always add CPU usage even if an exception occurred.
finally:
used = self.proc.cpu_times()
self.cpu_used = (used.user - self.cpu_start.user,
used.system - self.cpu_start.system)
except NetworkBytesExceededError as e:
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED,
"%r: %s" % (e, e), traceback.format_exc())
# We want to report back all errors and map Python exceptions to
# Grr Errors.
except Exception as e: # pylint: disable=broad-except
self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
"%r: %s" % (e, e), traceback.format_exc())
if flags.FLAGS.debug:
self.DisableNanny()
pdb.post_mortem()
if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK:
logging.info("Job Error (%s): %s", self.__class__.__name__,
self.status.error_message)
if self.status.backtrace:
logging.debug(self.status.backtrace)
if self.cpu_used:
self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]
self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]
# This returns the error status of the Actions to the flow.
self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)
self._RunGC()
def _RunGC(self):
    """Opportunistically garbage-collect after an action has run.

    Collection is rate limited to at most once per ``self._gc_frequency``
    because a full gc pass is comparatively expensive.
    """
    current_time = rdfvalue.RDFDatetime.Now()
    if current_time - self._last_gc_run <= self._gc_frequency:
        return
    gc.collect()
    self._last_gc_run = current_time
def ForceGC(self):
    """Force an immediate GC pass by resetting the last-run timestamp to 0."""
    self._last_gc_run = rdfvalue.RDFDatetime(0)
    self._RunGC()
def Run(self, unused_args):
    """Main plugin entry point.

    This function will always be overridden by real plugins.

    Args:
      unused_args: An already initialised protobuf object.

    Raises:
      KeyError: if not implemented.
    """
    # NOTE(review): KeyError (not NotImplementedError) appears deliberate
    # here -- confirm the dispatch code catches KeyError for missing actions.
    raise KeyError("Action %s not available on this platform." %
                   self.message.name)
def SetStatus(self, status, message="", backtrace=None):
    """Set a status to report back to the server.

    Args:
      status: A rdf_flows.GrrStatus.ReturnedStatus value.
      message: Human readable error message stored on the status.
      backtrace: Optional formatted traceback string.
    """
    self.status.status = status
    self.status.error_message = utils.SmartUnicode(message)
    if backtrace:
        self.status.backtrace = utils.SmartUnicode(backtrace)
def SendReply(self,
              rdf_value=None,
              message_type=rdf_flows.GrrMessage.Type.MESSAGE,
              **kw):
    """Send response back to the server."""
    if rdf_value is None:
        # The only client actions with multiple out_rdfvalues have them for
        # server-side checks that allow for backwards compatibility. In the
        # future if an action genuinely returns multiple rdfvalues it should
        # pass them in using the rdf_value keyword.
        rdf_value = self.out_rdfvalues[0](**kw)  # pylint: disable=not-callable
    # Routing metadata mirrored from the request message; not strictly
    # necessary but adds context to this response.
    routing = {
        "name": self.__class__.__name__,
        "session_id": self.message.session_id,
        "response_id": self.response_id,
        "request_id": self.message.request_id,
        "message_type": message_type,
        "task_id": self.message.task_id,
        "priority": self.priority,
        "require_fastpoll": self.require_fastpoll,
    }
    self.grr_worker.SendReply(rdf_value, **routing)
    self.response_id += 1
def Progress(self):
    """Indicate progress of the client action.

    This function should be called periodically during client actions that do
    not finish instantly. It will notify the nanny that the action is not
    stuck and avoid the timeout and it will also check if the action has
    reached its cpu limit.

    Raises:
      CPUExceededError: CPU limit exceeded.
    """
    # Throttle: do the (comparatively expensive) bookkeeping at most every
    # two seconds of wall clock time.
    current = time.time()
    if current - self.last_progress_time <= 2:
        return
    self.last_progress_time = current

    # Prevent the machine from sleeping while the action is running.
    client_utils.KeepAlive()

    if self.nanny_controller is None:
        self.nanny_controller = client_utils.NannyController()
    self.nanny_controller.Heartbeat()

    # Total (user + system) CPU seconds consumed since the action started.
    cpu_now = self.proc.cpu_times()
    total_used = ((cpu_now.user - self.cpu_start.user) +
                  (cpu_now.system - self.cpu_start.system))
    if total_used > self.cpu_limit:
        self.grr_worker.SendClientAlert("Cpu limit exceeded.")
        raise CPUExceededError("Action exceeded cpu limit.")
def SyncTransactionLog(self):
    """This flushes the transaction log.

    This function should be called by the client before performing
    potential dangerous actions so the server can get notified in case
    the whole machine crashes.
    """
    # Lazily create the controller; it is also created on demand in
    # Progress() for the same reason.
    if self.nanny_controller is None:
        self.nanny_controller = client_utils.NannyController()
    self.nanny_controller.SyncTransactionLog()
def ChargeBytesToSession(self, length):
    """Account *length* transmitted bytes against this session's byte limit."""
    self.grr_worker.ChargeBytesToSession(
        self.message.session_id, length, limit=self.network_bytes_limit)
def DisableNanny(self):
    """Stop the nanny watchdog process (best effort)."""
    try:
        # nanny_controller may be None, or the platform's controller may have
        # no stoppable nanny -- either case surfaces as AttributeError.
        self.nanny_controller.nanny.Stop()
    except AttributeError:
        logging.info("Can't disable Nanny on this OS.")
@property
def session_id(self):
    """Session id of the message being processed, or None when unavailable."""
    message = getattr(self, "message", None)
    return getattr(message, "session_id", None)
@property
def network_bytes_limit(self):
    """Network byte limit from the request message, or None when unavailable."""
    message = getattr(self, "message", None)
    return getattr(message, "network_bytes_limit", None)
class IteratedAction(ActionPlugin):
    """An action which can restore its state from an iterator.

    Implement iterating actions by extending this class and overriding the
    Iterate() method.
    """

    __abstract = True  # pylint: disable=invalid-name

    def Run(self, request):
        """Munge the iterator to the server and abstract it away."""
        # Hand the client_state to the action as a plain dict -- often more
        # efficient than manipulating the protobuf directly.
        state = request.iterator.client_state.ToDict()

        # Derived classes implement Iterate() and mutate `state` in place.
        self.Iterate(request, state)

        # Persist the (possibly updated) state back into the iterator and
        # return the iterator to the server for the next round.
        request.iterator.client_state = state
        self.SendReply(
            request.iterator, message_type=rdf_flows.GrrMessage.Type.ITERATOR)

    def Iterate(self, request, client_state):
        """Actions should override this."""
class ClientActionWorker(threading.Thread):
    """A worker thread for the suspendable client action.

    A single condition variable implements a ping-pong handoff between the
    main thread and this worker: each side calls notify() to wake the peer
    and then wait()s, so exactly one of the two threads runs at any time.
    """

    # Daemon thread: a stuck action must not keep the client process alive.
    daemon = True

    def __init__(self, action=None, *args, **kw):
        super(ClientActionWorker, self).__init__(*args, **kw)
        self.cond = threading.Condition(lock=threading.RLock())
        self.id = None
        # The SuspendableAction instance whose Iterate() we execute.
        self.action_obj = action
        # Formatted traceback if Iterate() raised; the main thread re-raises.
        self.exception_status = None

    def Resume(self):
        # Wake the peer thread, then block until control is handed back.
        with self.cond:
            self.cond.notify()
            self.cond.wait()

    def Suspend(self):
        # Same handoff as Resume(): notify the peer, then wait our turn.
        with self.cond:
            self.cond.notify()
            self.cond.wait()

    def run(self):
        # Suspend right after starting.
        self.Suspend()
        try:
            # Do the actual work.
            self.action_obj.Iterate()
        except Exception:  # pylint: disable=broad-except
            if flags.FLAGS.debug:
                pdb.post_mortem()
            # Record the exception status so the main thread can propagate it
            # to the server.
            self.exception_status = traceback.format_exc()
            raise
        finally:
            # Notify the action that we are about to exit. This always has to
            # happen or the main thread will stop.
            self.action_obj.Done()
            with self.cond:
                self.cond.notify()
class SuspendableAction(ActionPlugin):
    """An action that can be suspended on the client.

    How do suspended client actions work?

    The GRRClientWorker maintains a store of suspended client actions. A
    suspended client action is one where the thread of execution can be
    suspended by the client at any time, and control is passed back to the
    server flow. The server flow then can resume the client action.

    Since only a single thread can run on the client worker at the same time,
    the suspendable client action and the worker thread are exclusively
    blocked.

    1) Initially the server issues a regular request to this suspendable
       action.

    2) Since the Iterator field in the request is initially empty, the
       GRRClientWorker() will instantiate a new ActionPlugin() instance.

    3) The SuspendableAction() instance is then added to the GRRClientWorker's
       suspended_actions store using a unique ID. The unique ID is also copied
       to the request's Iterator.client_state dict.

    4) We then call the client action's Execute method.

    5) The suspendable client action does all its work in a secondary thread,
       in order to allow it to be suspended arbitrarily. We therefore create a
       ClientActionWorker() thread, and pass control to it - while the main
       thread is waiting for it.

    6) The ClientActionWorker() thread calls back into the Iterate() method of
       the SuspendableAction() - this is where all the work is done.

    7) When the client action wants to suspend it calls its Suspend()
       method. This will block the ClientActionWorker() thread and release the
       main GRRClientWorker() thread. The request is then completed by sending
       the server an ITERATOR and a STATUS message. The corresponding thread
       is now allowed to process all replies so far. The SuspendableAction()
       is blocked until further notice.

    8) The server processes the responses, and then sends a new request,
       containing the same Iterator object that the client gave it. The
       Iterator contains an opaque client_state dict.

    9) The client finds the unique key in the Iterator.client_state dict,
       which allows it to retrieve the SuspendableAction() from the
       GRRClientWorker()'s suspended_actions store. We then call the Run
       method, which switches execution to the ClientActionWorker() thread.
    """

    __abstract = True  # pylint: disable=invalid-name

    def __init__(self, *args, **kw):
        # We allow specifying the worker class.
        self.worker_cls = kw.pop("action_worker_cls", None)
        super(SuspendableAction, self).__init__(*args, **kw)
        self.exceptions = []
        # A SuspendableAction does all its main work in a subthread controlled
        # through a condition variable.
        self.worker = None

    def Run(self, request):
        """Process a server request."""
        # This method will be called multiple times for each new client
        # request, therefore we need to reset the response_id each time.
        self.response_id = INITIAL_RESPONSE_ID
        self.request = request

        # We need to start a new worker thread.
        if not self.worker:
            worker_cls = self.worker_cls or ClientActionWorker
            self.worker = worker_cls(action=self)
            # Grab the lock before the thread is started.
            self.worker.cond.acquire()
            self.worker.start()
            # The worker will be blocked trying to get the lock that we are
            # already holding it so we call Resume() and enter a state where
            # the main thread waits for the condition variable and, at the
            # same time, releases the lock. Next the worker will notify the
            # condition variable and suspend itself. This guarantees that we
            # are now in a defined state where the worker is suspended and
            # waiting on the condition variable and the main thread is
            # running. After the next call to Resume() below, the worker will
            # wake up and actually begin running the client action.
            self.worker.Resume()

            # Store ourselves in the worker thread's suspended_actions store.
            self.grr_worker.suspended_actions[id(self)] = self

            # Mark our ID in the iterator's client_state area. The server will
            # return this (opaque) client_state to us on subsequent calls. The
            # client worker thread will be able to retrieve us in this case.
            self.request.iterator.suspended_action = id(self)

        # We stop running, and let the worker run instead.
        self.worker.Resume()

        # An exception occurred in the worker thread and it was terminated. We
        # re-raise it here.
        if self.worker.exception_status:
            raise RuntimeError("Exception in child thread: %s" %
                               (self.worker.exception_status))

        # Return the iterator
        self.SendReply(
            self.request.iterator,
            message_type=rdf_flows.GrrMessage.Type.ITERATOR)

    def Done(self):
        # Let the server know we finished.
        self.request.iterator.state = self.request.iterator.State.FINISHED

        # Remove the action from the suspended_actions store.
        del self.grr_worker.suspended_actions[id(self)]

    def Suspend(self):
        self.worker.Suspend()

    def Iterate(self):
        """Actions should override this."""
| |
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=0:smarttab
# Usage text printed by main() when no operation is given on the command line.
DOC = """
Master builder (custom script).
This top-level setup script helps with dealing with all sub-packages at
once. It also provides an installer for a simplify setting up developer mode.
Invoke it like a standard setup.py script. However, Any names after the
operation name are taken as sub-package names that are operated on. If no
names are given then all packages are operated on.
Commands:
list -- List available subpackages. These are the names you may
optionally supply.
publish -- Put source distribution on pypi.
build -- Run setuptools build phase on named sub-packages
(or all of them).
install -- Run setuptools install phase.
install_scripts -- Only install scripts (files in bin) with a direct copy.
eggs -- Build distributable egg package.
rpms -- Build RPMs on platforms that support building RPMs.
msis -- Build Microsoft .msi on Windows.
wininst -- Build .exe installer on Windows.
develop -- Developer mode, installing .pth and script files in
user's directory.
clean -- Run setuptools clean phase.
squash -- Squash (flatten) all named sub-packages into single tree
in $PYCOPIA_SQUASH, or user site-directory if no
$PYCOPIA_SQUASH defined. This also removes the setuptools
runtime dependency.
Most regular setuptools commands also work. They are passed through by
default.
NOTE: The install operation requires that the sudo command be configured for
you.
"""
import sys
import os
import site
# setuptools is a hard requirement; fail loudly with a hint when missing.
try:
    import setuptools
except ImportError:
    print("Pycopia requires the package named 'setuptools' to be installed.", file=sys.stderr)
    raise
# os.WEXITSTATUS only exists on POSIX. On Windows os.system() already returns
# the exit code directly, so an identity function is sufficient there.
try:
    WEXITSTATUS = os.WEXITSTATUS
except AttributeError:  # running on Windows
    def WEXITSTATUS(arg):
        return arg
    # Windows has no HOME; the build helpers below expect one.
    os.environ["HOME"] = os.environ["USERPROFILE"]
    RSYNCCHECK = "rsync --version >nul"
    SCRIPT_DIR = os.path.join(sys.prefix, "Scripts")
else:
    RSYNCCHECK = "rsync --version >/dev/null"
    SCRIPT_DIR = "/usr/local/bin"
# sub-packages are listed in dependency order. A subpackage may import modules
# from other subpackages that appear earlier in this list, but not later.
PACKAGES = [
    "aid",
    "utils",
    "core",
    "CLI",
    "debugger",
    "process",
    "net",
    #"SMI",
    #"mibs",
    #"SNMP",
    #"audio",
    #"XML",
    "WWW",
    "QA",
    #"doc",
]
# Where to put "squashed", or flattened target where all subpackages are
# installed into one directory, and removing "package namespace" support.
# Defaults to the per-user site-packages directory.
PYCOPIA_SQUASH = os.environ.get("PYCOPIA_SQUASH", site.USER_SITE)
# Where top-level scripts will be installed to when install_scripts command is
# used. Defaults to $HOME/bin.
PYCOPIA_BIN = os.environ.get("PYCOPIA_BIN", os.path.join(os.path.expandvars("$HOME"), "bin"))
def _do_commands(name, cmds, root):
    """Run ``setup.py <cmds>`` inside sub-package directory *name*.

    When *root* is true the command is prefixed with sudo on POSIX; on
    Windows/IronPython it is assumed you already run as Administrator.
    Returns True if the subprocess exited with status 0.
    """
    sudo = "sudo " if (root and sys.platform not in ("win32", "cli")) else ""
    cmd = "%s%s setup.py %s" % (sudo, sys.executable, " ".join(cmds))
    print("========", name, "==", cmd)
    rv = False
    os.chdir(name)
    try:
        rv = WEXITSTATUS(os.system(cmd)) == 0
    finally:
        # Always return to the parent directory, even if os.system blew up.
        os.chdir("..")
    print("====================== END", name, "\n")
    return rv
def do_eggs(name):
    """Build a distributable egg package for sub-package *name*."""
    return _do_commands(name, ["bdist_egg"], False)
def do_rpms(name):
    """Build an RPM for *name* (platforms supporting bdist_rpm only)."""
    return _do_commands(name, ["bdist_rpm", "--python", sys.executable], False)
def do_msis(name):
    """Build a Microsoft .msi installer for *name* (Windows only)."""
    return _do_commands(name, ["bdist_msi"], False)
def do_wininst(name):
    """Build a Windows .exe installer for *name*."""
    return _do_commands(name, ["bdist_wininst"], False)
# "scripts", the files in bin/, may require special interpreter flags such as
# -S, which prevents setuptools' generated wrappers from functioning. Since
# Pycopia scripts are written generically there is no reason not to install
# them as-is with a direct copy.
# Only works on Linux (and Darwin) for now.
def _do_scripts(name, scriptdir, root=False):
    """Copy the files from *name*/bin into *scriptdir* verbatim.

    Uses sudo when *root* is true on POSIX platforms. Returns True on
    success (also when the sub-package has no bin/ directory).
    """
    sudo = "sudo " if (root and sys.platform not in ("win32", "cli")) else ""
    os.chdir(name)
    rv = True
    try:
        if os.path.isdir("bin"):
            # cp flags differ: BSD cp on macOS has -a but not -d/--preserve.
            if sys.platform == "darwin":
                cmd = "%scp -a bin/* %s" % (sudo, scriptdir)
            else:
                cmd = "%scp -dR --preserve=mode bin/* %s" % (sudo, scriptdir)
            print("======== SCRIPTS", name, "==", cmd)
            rv = WEXITSTATUS(os.system(cmd)) == 0
    finally:
        os.chdir("..")
    print("====================== END SCRIPTS", name)
    return rv
def do_install_scripts(name):
    """Copy *name*'s bin/ scripts into the user's PYCOPIA_BIN directory."""
    return _do_scripts(name, PYCOPIA_BIN)
def do_develop(name):
    """Set up developer mode: .pth files in user site-dir, scripts in ~/bin."""
    if not os.path.isdir(site.USER_SITE):
        os.makedirs(site.USER_SITE)
    rv = _do_commands(name, ["develop", "--install-dir", site.USER_SITE, "--script-dir", PYCOPIA_BIN, "-l -N"], False)
    # Overwrite the setuptools wrappers with plain copies of the scripts.
    rvs = _do_scripts(name, PYCOPIA_BIN)
    return rv and rvs
def do_publish(name):
    """Register and upload a source distribution of *name* to PyPI."""
    return _do_commands(name, ['egg_info -RDb ""', "sdist", "register", "upload"], False)
def do_egg_info(name):
    """Regenerate the egg-info metadata for *name*."""
    return _do_commands(name, ['egg_info'], False)
def do_install(name):
    """System-wide install of *name* (needs sudo), then re-copy the scripts."""
    rv1 = _do_commands(name, ["install -O2", "--install-scripts", SCRIPT_DIR], True)
    # Don't use the setuptools script wrapper for Pycopia scripts. This
    # will overwrite the installed scripts with a direct copy.
    rv2 = _do_scripts(name, SCRIPT_DIR, True)
    return rv1 and rv2
def do_clean(name):
    """Run the setuptools clean phase for *name*."""
    return _do_commands(name, ["clean"], False)
def do_list(name):
    """Print *name* on one line (used by the 'list' command)."""
    print(name, end=" ")
    return True
# "squash" selected sub packages to a single package. Also removes
# setuptools dependency when tarballed.
def do_squash(name):
    """Flatten sub-package *name* into the PYCOPIA_SQUASH directory.

    Builds the sub-package, then rsyncs its build/lib trees into
    PYCOPIA_SQUASH so all subpackages share one plain directory (no
    namespace-package machinery). Requires rsync and a POSIX platform
    (os.uname). Returns True on success.
    """
    if not _check_rsync():
        print("Squash requires rsync tool to be installed.")
        return False
    if not os.path.isdir(PYCOPIA_SQUASH):
        os.makedirs(PYCOPIA_SQUASH)
    os.chdir(name)
    uname = os.uname()
    # BUGFIX: sys.version[:3] yields "3.1" for Python 3.10+; build the
    # version tag from sys.version_info instead so the build dir matches
    # what distutils/setuptools actually produce.
    pyver = "%d.%d" % sys.version_info[:2]
    bin_dir = os.path.join(
        "build", "lib.%s-%s-%s" % (uname[0].lower(), uname[4], pyver))
    # e.g: build/lib.linux-x86_64-2.5/pycopia
    print("======== SQUASH", name, "to", PYCOPIA_SQUASH)
    try:
        if WEXITSTATUS(os.system("%s setup.py build" % (sys.executable,))) != 0:
            return False
        for pydir in ("build/lib", bin_dir):
            if os.path.isdir(pydir):
                cmd = "rsync -azvu %s/ %s" % (pydir, PYCOPIA_SQUASH)
                if WEXITSTATUS(os.system(cmd)) != 0:
                    return False
    finally:
        os.chdir("..")
    # Squashed tree needs a plain __init__.py instead of a namespace package.
    _null_init(PYCOPIA_SQUASH)
    print("====================== END", name, "squashed into", PYCOPIA_SQUASH, "\n")
    return True
def _null_init(directory):
open(os.path.join(directory, "pycopia", "__init__.py"), "w").close()
def _check_rsync():
    """Return True if the rsync tool is available on this system."""
    return WEXITSTATUS(os.system(RSYNCCHECK)) == 0
def do_generic(name):
    """Intentional no-op handler.

    BUGFIX: previously returned None (falsy), which made main()'s
    ``if not method(name): break`` abort the package loop after the first
    package. Every do_* handler signals success with a truthy value, so
    return True here as well.
    """
    return True
def get_svn_revision():
    """Return the working copy's revision as reported by ``svn info --xml``."""
    import subprocess
    from xml.etree import ElementTree
    xml_output = subprocess.check_output("svn info --xml".split())
    info = ElementTree.fromstring(xml_output)
    return int(info.find("entry").attrib["revision"])
def main(argv):
    """Dispatch the operation named in argv[1] over the named sub-packages.

    Unknown operations are passed straight through to each sub-package's
    setup.py. With no operation, prints DOC and returns 1; otherwise
    returns 0. The loop stops at the first sub-package that fails.
    """
    try:
        cmd = argv[1]
    except IndexError:
        print(DOC)
        return 1
    # mainrev = get_svn_revision()
    # os.environ["PYCOPIA_REVISION"] = str(mainrev)
    try:
        method = globals()["do_" + cmd]
    except KeyError:
        # No dedicated handler: pass the command through to setup.py.
        def method(name):
            return _do_commands(name, [cmd], False)
    for name in (argv[2:] or PACKAGES):
        if not method(name):
            break
    print()
    return 0


# BUGFIX: guard the entry point so that importing this module (e.g. from
# tests or tooling) no longer runs the build and exits the interpreter.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| |
'''
Inspector
=========
.. versionadded:: 1.0.9
.. warning::
This module is highly experimental, use it with care.
The Inspector is a tool for finding a widget in the widget tree by clicking or
tapping on it.
Some keyboard shortcuts are activated:
* "Ctrl + e": activate / deactivate the inspector view
* "Escape": cancel widget lookup first, then hide the inspector view
Available inspector interactions:
* tap once on a widget to select it without leaving inspect mode
* double tap on a widget to select and leave inspect mode (then you can
manipulate the widget again)
Some properties can be edited live. However, due to the delayed usage of
some properties, it might crash if you don't handle all the cases.
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation.
The Inspector, however, can also be imported and used just like a normal
python module. This has the added advantage of being able to activate and
deactivate the module programmatically::
from kivy.core.window import Window
from kivy.app import App
from kivy.uix.button import Button
from kivy.modules import inspector
class Demo(App):
def build(self):
button = Button(text="Test")
inspector.create_inspector(Window, button)
return button
Demo().run()
To remove the Inspector, you can do the following::
inspector.stop(Window, button)
'''
__all__ = ('start', 'stop', 'create_inspector')
import kivy
kivy.require('1.0.9')
import weakref
from functools import partial
from itertools import chain
from kivy.animation import Animation
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.textinput import TextInput
from kivy.uix.image import Image
from kivy.uix.treeview import TreeViewNode, TreeView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.modalview import ModalView
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix
from kivy.graphics.context_instructions import Transform
from kivy.graphics.transformation import Matrix
from kivy.properties import ObjectProperty, BooleanProperty, ListProperty, \
NumericProperty, StringProperty, OptionProperty, \
ReferenceListProperty, AliasProperty, VariableListProperty
from kivy.graphics.texture import Texture
from kivy.clock import Clock
from kivy.lang import Builder
Builder.load_string('''
<Inspector>:
layout: layout
widgettree: widgettree
treeview: treeview
content: content
BoxLayout:
orientation: 'vertical'
id: layout
size_hint_y: None
height: 250
padding: 5
spacing: 5
top: 0
canvas:
Color:
rgb: .4, .4, .4
Rectangle:
pos: self.x, self.top
size: self.width, 1
Color:
rgba: .185, .18, .18, .95
Rectangle:
pos: self.pos
size: self.size
# Top Bar
BoxLayout:
size_hint_y: None
height: 50
spacing: 5
Button:
text: 'Move to Top'
on_release: root.toggle_position(args[0])
size_hint_x: None
width: 120
ToggleButton:
text: 'Inspect'
on_state: root.inspect_enabled = args[1] == 'down'
size_hint_x: None
state: 'down' if root.inspect_enabled else 'normal'
width: 80
Button:
text: 'Parent'
on_release:
root.highlight_widget(root.widget.parent) if root.widget \
else None
size_hint_x: None
width: 80
Button:
text: '%r' % root.widget
on_release: root.show_widget_info()
Button:
text: 'X'
size_hint_x: None
width: 50
on_release: root.activated = False
# Bottom Bar
BoxLayout:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: 10
size_hint_x: 0.0001
WidgetTree:
id: widgettree
hide_root: True
size_hint: None, None
height: self.minimum_height
width: max(self.parent.width, self.minimum_width)
selected_widget: root.widget
on_select_widget: root.highlight_widget(args[1])
Splitter:
sizeable_from: 'left'
min_size: self.parent.width / 2
max_size: self.parent.width
BoxLayout:
ScrollView:
scroll_type: ['bars', 'content']
bar_width: 10
TreeView:
id: treeview
size_hint_y: None
hide_root: True
height: self.minimum_height
Splitter:
sizeable_from: 'left'
keep_within_parent: True
rescale_with_parent: True
max_size: self.parent.width / 2
min_size: 0
ScrollView:
id: content
<TreeViewProperty>:
height: max(lkey.texture_size[1], ltext.texture_size[1])
Label:
id: lkey
text: root.key
text_size: (self.width, None)
width: 150
size_hint_x: None
Label:
id: ltext
text: [repr(getattr(root.widget, root.key, '')), root.refresh][0]\
if root.widget else ''
text_size: (self.width, None)
<-TreeViewWidget>:
height: self.texture_size[1] + sp(4)
size_hint_x: None
width: self.texture_size[0] + sp(4)
canvas.before:
Color:
rgba: self.color_selected if self.is_selected else (0, 0, 0, 0)
Rectangle:
pos: self.pos
size: self.size
Color:
rgba: 1, 1, 1, int(not self.is_leaf)
Rectangle:
source: 'atlas://data/images/defaulttheme/tree_%s' % ('opened' if self.is_open else 'closed')
size: 16, 16
pos: self.x - 20, self.center_y - 8
canvas:
Color:
rgba: self.disabled_color if self.disabled else (self.color if not self.markup else (1, 1, 1, 1))
Rectangle:
texture: self.texture
size: self.texture_size
pos: int(self.center_x - self.texture_size[0] / 2.), int(self.center_y - self.texture_size[1] / 2.)
''')
class TreeViewProperty(BoxLayout, TreeViewNode):
    """Tree row displaying one property (key + live value) of a widget.

    Only a weak reference to the inspected widget is kept so the inspector
    does not keep otherwise-dead widgets alive.
    """

    # weakref.ref to the inspected widget; reset to None once the ref dies.
    widget_ref = ObjectProperty(None, allownone=True)

    def _get_widget(self):
        # Dereference widget_ref; clear it and return None if the target died.
        wr = self.widget_ref
        if wr is None:
            return None
        wr = wr()
        if wr is None:
            self.widget_ref = None
            return None
        return wr

    # Read-only alias, recomputed whenever widget_ref changes.
    widget = AliasProperty(_get_widget, None, bind=('widget_ref', ))
    # Name of the property shown on this row.
    key = ObjectProperty(None, allownone=True)
    inspector = ObjectProperty(None)
    # Toggled True->False to force the kv value label to re-evaluate.
    refresh = BooleanProperty(False)
class TreeViewWidget(Label, TreeViewNode):
    """Widget-tree node: a label bound to one widget of the inspected app."""

    # The application widget this node represents (typically a proxy_ref).
    widget = ObjectProperty(None)
class WidgetTree(TreeView):
    """TreeView mirroring the application's widget hierarchy.

    Dispatches `on_select_widget` when the user picks a node, and can
    highlight/scroll to the node matching an externally selected widget.
    """

    # Widget currently selected elsewhere (e.g. by tapping in inspect mode).
    selected_widget = ObjectProperty(None, allownone=True)

    __events__ = ('on_select_widget',)

    def __init__(self, **kwargs):
        super(WidgetTree, self).__init__(**kwargs)
        # Trigger coalesces repeated scroll requests into one per frame.
        self.update_scroll = Clock.create_trigger(self._update_scroll)

    def find_node_by_widget(self, widget):
        """Return the tree node whose .widget is *widget*, or None."""
        for node in self.iterate_all_nodes():
            if not node.parent_node:
                continue
            try:
                if node.widget == widget:
                    return node
            except ReferenceError:
                # node.widget is a dead proxy_ref; skip it.
                pass
        return None

    def update_selected_widget(self, widget):
        """Select the node for *widget* and open all its ancestors."""
        if widget:
            node = self.find_node_by_widget(widget)
            if node:
                # select_widget=False: avoid re-dispatching on_select_widget.
                self.select_node(node, False)
                while node and isinstance(node, TreeViewWidget):
                    if not node.is_open:
                        self.toggle_node(node)
                    node = node.parent_node

    def on_selected_widget(self, inst, widget):
        if widget:
            self.update_selected_widget(widget)
            self.update_scroll()

    def select_node(self, node, select_widget=True):
        """Select *node*; optionally notify listeners of the picked widget."""
        super(WidgetTree, self).select_node(node)
        if select_widget:
            try:
                self.dispatch('on_select_widget', node.widget.__self__)
            except ReferenceError:
                pass

    def on_select_widget(self, widget):
        # Default event handler (listeners bind to this event).
        pass

    def _update_scroll(self, *args):
        # Scroll the enclosing ScrollView so the selected node is visible.
        node = self._selected_node
        if not node:
            return
        self.parent.scroll_to(node)
class Inspector(FloatLayout):
widget = ObjectProperty(None, allownone=True)
layout = ObjectProperty(None)
widgettree = ObjectProperty(None)
treeview = ObjectProperty(None)
inspect_enabled = BooleanProperty(False)
activated = BooleanProperty(False)
widget_info = BooleanProperty(False)
content = ObjectProperty(None)
at_bottom = BooleanProperty(True)
def __init__(self, **kwargs):
super(Inspector, self).__init__(**kwargs)
self.avoid_bring_to_top = False
self.win = kwargs.get('win')
with self.canvas.before:
self.gcolor = Color(1, 0, 0, .25)
PushMatrix()
self.gtransform = Transform(Matrix())
self.grect = Rectangle(size=(0, 0))
PopMatrix()
Clock.schedule_interval(self.update_widget_graphics, 0)
def on_touch_down(self, touch):
ret = super(Inspector, self).on_touch_down(touch)
if (('button' not in touch.profile or touch.button == 'left')
and not ret and self.inspect_enabled):
self.highlight_at(*touch.pos)
if touch.is_double_tap:
self.inspect_enabled = False
self.show_widget_info()
ret = True
return ret
def on_touch_move(self, touch):
ret = super(Inspector, self).on_touch_move(touch)
if not ret and self.inspect_enabled:
self.highlight_at(*touch.pos)
ret = True
return ret
def on_touch_up(self, touch):
ret = super(Inspector, self).on_touch_up(touch)
if not ret and self.inspect_enabled:
ret = True
return ret
def on_window_children(self, win, children):
if self.avoid_bring_to_top:
return
self.avoid_bring_to_top = True
win.remove_widget(self)
win.add_widget(self)
self.avoid_bring_to_top = False
def highlight_at(self, x, y):
widget = None
# reverse the loop - look at children on top first and
# modalviews before others
win_children = self.win.children
children = chain(
(c for c in reversed(win_children) if isinstance(c, ModalView)),
(c for c in reversed(win_children) if not isinstance(c, ModalView))
)
for child in children:
if child is self:
continue
widget = self.pick(child, x, y)
if widget:
break
self.highlight_widget(widget)
def highlight_widget(self, widget, info=True, *largs):
# no widget to highlight, reduce rectangle to 0, 0
self.widget = widget
if not widget:
self.grect.size = 0, 0
if self.widget_info and info:
self.show_widget_info()
def update_widget_graphics(self, *l):
if not self.activated:
return
if self.widget is None:
self.grect.size = 0, 0
return
self.grect.size = self.widget.size
matrix = self.widget.get_window_matrix()
if self.gtransform.matrix.get() != matrix.get():
self.gtransform.matrix = matrix
def toggle_position(self, button):
to_bottom = button.text == 'Move to Bottom'
if to_bottom:
button.text = 'Move to Top'
if self.widget_info:
Animation(top=250, t='out_quad', d=.3).start(self.layout)
else:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
bottom_bar = self.layout.children[1]
self.layout.remove_widget(bottom_bar)
self.layout.add_widget(bottom_bar)
else:
button.text = 'Move to Bottom'
if self.widget_info:
Animation(top=self.height, t='out_quad', d=.3).start(
self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
bottom_bar = self.layout.children[1]
self.layout.remove_widget(bottom_bar)
self.layout.add_widget(bottom_bar)
self.at_bottom = to_bottom
def pick(self, widget, x, y):
ret = None
# try to filter widgets that are not visible (invalid inspect target)
if (hasattr(widget, 'visible') and not widget.visible):
return ret
if widget.collide_point(x, y):
ret = widget
x2, y2 = widget.to_local(x, y)
# reverse the loop - look at children on top first
for child in reversed(widget.children):
ret = self.pick(child, x2, y2) or ret
return ret
def on_activated(self, instance, activated):
if not activated:
self.grect.size = 0, 0
if self.at_bottom:
anim = Animation(top=0, t='out_quad', d=.3)
else:
anim = Animation(y=self.height, t='out_quad', d=.3)
anim.bind(on_complete=self.animation_close)
anim.start(self.layout)
self.widget = None
self.widget_info = False
else:
self.win.add_widget(self)
Logger.info('Inspector: inspector activated')
if self.at_bottom:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
Clock.schedule_interval(self.update_widget_tree, 1)
self.update_widget_tree()
def animation_close(self, instance, value):
if self.activated is False:
self.inspect_enabled = False
self.win.remove_widget(self)
self.content.clear_widgets()
treeview = self.treeview
for node in list(treeview.iterate_all_nodes()):
node.widget_ref = None
treeview.remove_node(node)
self._window_node = None
Clock.unschedule(self.update_widget_tree)
widgettree = self.widgettree
for node in list(widgettree.iterate_all_nodes()):
widgettree.remove_node(node)
Logger.info('Inspector: inspector deactivated')
def show_widget_info(self):
self.content.clear_widgets()
widget = self.widget
treeview = self.treeview
for node in list(treeview.iterate_all_nodes())[:]:
node.widget_ref = None
treeview.remove_node(node)
if not widget:
if self.at_bottom:
Animation(top=60, t='out_quad', d=.3).start(self.layout)
else:
Animation(y=self.height - 60, t='out_quad', d=.3).start(
self.layout)
self.widget_info = False
return
self.widget_info = True
if self.at_bottom:
Animation(top=250, t='out_quad', d=.3).start(self.layout)
else:
Animation(top=self.height, t='out_quad', d=.3).start(self.layout)
for node in list(treeview.iterate_all_nodes())[:]:
treeview.remove_node(node)
keys = list(widget.properties().keys())
keys.sort()
node = None
wk_widget = weakref.ref(widget)
for key in keys:
text = '%s' % key
node = TreeViewProperty(text=text, key=key, widget_ref=wk_widget)
node.bind(is_selected=self.show_property)
try:
widget.bind(**{key: partial(
self.update_node_content, weakref.ref(node))})
except:
pass
treeview.add_node(node)
def update_node_content(self, node, *l):
node = node()
if node is None:
return
node.refresh = True
node.refresh = False
def keyboard_shortcut(self, win, scancode, *largs):
modifiers = largs[-1]
if scancode == 101 and modifiers == ['ctrl']:
self.activated = not self.activated
if self.activated:
self.inspect_enabled = True
return True
elif scancode == 27:
if self.inspect_enabled:
self.inspect_enabled = False
return True
if self.activated:
self.activated = False
return True
def show_property(self, instance, value, key=None, index=-1, *l):
# normal call: (tree node, focus, )
# nested call: (widget, prop value, prop key, index in dict/list)
if value is False:
return
content = None
if key is None:
# normal call
nested = False
widget = instance.widget
key = instance.key
prop = widget.property(key)
value = getattr(widget, key)
else:
# nested call, we might edit subvalue
nested = True
widget = instance
prop = None
dtype = None
if isinstance(prop, AliasProperty) or nested:
# trying to resolve type dynamicly
if type(value) in (str, str):
dtype = 'string'
elif type(value) in (int, float):
dtype = 'numeric'
elif type(value) in (tuple, list):
dtype = 'list'
if isinstance(prop, NumericProperty) or dtype == 'numeric':
content = TextInput(text=str(value) or '', multiline=False)
content.bind(text=partial(
self.save_property_numeric, widget, key, index))
elif isinstance(prop, StringProperty) or dtype == 'string':
content = TextInput(text=value or '', multiline=True)
content.bind(text=partial(
self.save_property_text, widget, key, index))
elif (isinstance(prop, ListProperty) or
isinstance(prop, ReferenceListProperty) or
isinstance(prop, VariableListProperty) or
dtype == 'list'):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for i, item in enumerate(value):
button = Button(text=repr(item), size_hint_y=None, height=44)
if isinstance(item, Widget):
button.bind(on_release=partial(self.highlight_widget, item,
False))
else:
button.bind(on_release=partial(self.show_property, widget,
item, key, i))
content.add_widget(button)
elif isinstance(prop, OptionProperty):
content = GridLayout(cols=1, size_hint_y=None)
content.bind(minimum_height=content.setter('height'))
for option in prop.options:
button = ToggleButton(
text=option,
state='down' if option == value else 'normal',
group=repr(content.uid), size_hint_y=None,
height=44)
button.bind(on_press=partial(
self.save_property_option, widget, key))
content.add_widget(button)
elif isinstance(prop, ObjectProperty):
if isinstance(value, Widget):
content = Button(text=repr(value))
content.bind(on_release=partial(self.highlight_widget, value))
elif isinstance(value, Texture):
content = Image(texture=value)
else:
content = Label(text=repr(value))
elif isinstance(prop, BooleanProperty):
state = 'down' if value else 'normal'
content = ToggleButton(text=key, state=state)
content.bind(on_release=partial(self.save_property_boolean, widget,
key, index))
self.content.clear_widgets()
if content:
self.content.add_widget(content)
def save_property_numeric(self, widget, key, index, instance, value):
    """Write the TextInput's text back to ``widget.<key>`` as a float.

    Bound to a TextInput ``text`` event.  When *index* is >= 0 the
    target property holds a sequence and only that element is updated;
    otherwise the attribute itself is replaced.  Invalid input (text
    that does not parse as a float, a bad index, ...) is silently
    ignored so the inspector never crashes while the user is typing.
    """
    try:
        if index >= 0:
            getattr(widget, key)[index] = float(instance.text)
        else:
            setattr(widget, key, float(instance.text))
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.  ``Exception`` keeps the tolerant
        # behaviour without masking interpreter-level signals.
        pass
def save_property_text(self, widget, key, index, instance, value):
    """Write the TextInput's text back to ``widget.<key>`` as a string.

    Bound to a TextInput ``text`` event.  When *index* is >= 0 only
    that element of the sequence property is updated; otherwise the
    attribute itself is replaced.  Failures are ignored so live
    editing in the inspector cannot crash the application.
    """
    try:
        if index >= 0:
            getattr(widget, key)[index] = instance.text
        else:
            setattr(widget, key, instance.text)
    except Exception:
        # Narrowed from a bare ``except:`` which also caught
        # SystemExit/KeyboardInterrupt.
        pass
def save_property_boolean(self, widget, key, index, instance):
    """Write a ToggleButton's state back to ``widget.<key>`` as a bool.

    Bound to a ToggleButton ``on_release`` event; the button's
    ``state`` is 'down' for True.  When *index* is >= 0 only that
    element of the sequence property is updated.  Errors are ignored
    to keep the inspector non-fatal.
    """
    # Note: the original signature ended in a stray trailing comma
    # (``instance, ):``); removed, the parameter list is unchanged.
    try:
        value = instance.state == 'down'
        if index >= 0:
            getattr(widget, key)[index] = value
        else:
            setattr(widget, key, value)
    except Exception:
        # Narrowed from a bare ``except:``.
        pass
def save_property_option(self, widget, key, instance, *l):
    """Write a pressed option button's text to ``widget.<key>``.

    Bound to the ``on_press`` of the ToggleButtons generated for an
    OptionProperty; the pressed button's text is the chosen option.
    Failures are ignored (e.g. the property rejects the value).
    """
    try:
        setattr(widget, key, instance.text)
    except Exception:
        # Narrowed from a bare ``except:``.
        pass
def _update_widget_tree_node(self, node, widget, is_open=False):
    """Synchronise one tree node with *widget*'s current children.

    All existing child nodes of *node* are removed and re-added (or
    recreated) so the tree mirrors ``widget.children``.  Returns a list
    of ``(tree_node, child_widget)`` pairs that the caller must recurse
    into (only freshly created nodes are returned).
    """
    tree = self.widgettree
    update_nodes = []
    nodes = {}
    # Index the existing nodes by the widget they display so they can
    # be reused below; then detach them all from the tree.
    for cnode in node.nodes[:]:
        try:
            nodes[cnode.widget] = cnode
        except ReferenceError:
            # widget no longer exists, just remove it
            pass
        tree.remove_node(cnode)
    for child in widget.children:
        # Never show the inspector itself inside its own tree.
        if child is self:
            continue
        if child in nodes:
            # Reattach the previously built node for this widget.
            cnode = tree.add_node(nodes[child], node)
        else:
            # New widget: build a node holding a weak proxy so the tree
            # does not keep the widget alive.
            cnode = tree.add_node(TreeViewWidget(text=child.__class__.__name__, widget=child.proxy_ref, is_open=is_open), node)
            update_nodes.append((cnode, child))
    return update_nodes
def update_widget_tree(self, *args):
    """Rebuild the widget tree view starting from the root Window.

    Performs a breadth-first expansion: `_update_widget_tree_node`
    returns the newly created (node, widget) pairs, which are then
    expanded level by level until no new nodes appear.
    """
    # Lazily create the root node for the Window on first use.
    if not hasattr(self, '_window_node') or not self._window_node:
        self._window_node = self.widgettree.add_node(
            TreeViewWidget(text='Window', widget=self.win, is_open=True))
    nodes = self._update_widget_tree_node(self._window_node, self.win, is_open=True)
    while nodes:
        ntmp = nodes[:]
        nodes = []
        for node in ntmp:
            nodes += self._update_widget_tree_node(*node)
    # Re-highlight whichever widget is currently selected.
    self.widgettree.update_selected_widget(self.widget)
def create_inspector(win, ctx, *l):
    """Attach a new :class:`Inspector` to *ctx* and wire it to *win*.

    The instance is stored as ``ctx.inspector`` and listens to the
    Window's ``children`` and ``on_keyboard`` events so the keyboard
    shortcut keeps working.  Meant to run as a Clock callback (see
    :func:`start`): building the Inspector directly inside ``start()``
    would, for unknown reasons, skip the kv lang rules.
    """
    inspector = Inspector(win=win)
    ctx.inspector = inspector
    win.bind(on_keyboard=inspector.keyboard_shortcut,
             children=inspector.on_window_children)
def start(win, ctx):
    """Schedule creation of an Inspector for *ctx* on the next frame."""
    Clock.schedule_once(partial(create_inspector, win, ctx))
def stop(win, ctx):
    '''Stop and unload any active Inspectors for the given *ctx*.'''
    # Only tear down if start()/create_inspector actually ran.
    if hasattr(ctx, 'inspector'):
        win.unbind(children=ctx.inspector.on_window_children,
                   on_keyboard=ctx.inspector.keyboard_shortcut)
        win.remove_widget(ctx.inspector)
        del ctx.inspector
| |
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide commands for generating report files."""
from __future__ import absolute_import
import logging
import os
import sys
from builtins import open
from functools import partial
from multiprocessing import Pool, cpu_count
import click
import git
from libsbml import SBMLError
from sqlalchemy.exc import ArgumentError
import memote.suite.api as api
import memote.suite.cli.callbacks as callbacks
import memote.suite.results as managers
import memote.utils as utils
from memote.suite.cli import CONTEXT_SETTINGS
from memote.suite.reporting import ReportConfiguration
LOGGER = logging.getLogger(__name__)
@click.group()
@click.help_option("--help", "-h")
def report():
    """Generate one of three different types of reports."""
    # The docstring doubles as the click help text; no body is needed
    # for a command group, so the redundant ``pass`` was dropped.
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.argument(
    "model", type=click.Path(exists=True, dir_okay=False), envvar="MEMOTE_MODEL"
)
@click.option(
    "--filename",
    type=click.Path(exists=False, writable=True),
    default="index.html",
    show_default=True,
    help="Path for the HTML report output.",
)
@click.option(
    "--pytest-args",
    "-a",
    callback=callbacks.validate_pytest_args,
    help="Any additional arguments you want to pass to pytest. "
    "Should be given as one continuous string.",
)
@click.option(
    "--exclusive",
    type=str,
    multiple=True,
    metavar="TEST",
    help="The name of a test or test module to be run exclusively. "
    "All other tests are skipped. This option can be used "
    "multiple times and takes precedence over '--skip'.",
)
@click.option(
    "--skip",
    type=str,
    multiple=True,
    metavar="TEST",
    help="The name of a test or test module to be skipped. This "
    "option can be used multiple times.",
)
@click.option(
    "--solver",
    type=click.Choice(["cplex", "glpk", "gurobi", "glpk_exact"]),
    default="glpk",
    show_default=True,
    help="Set the solver to be used.",
)
@click.option(
    "--solver-timeout",
    type=int,
    default=10,
    help="Timeout in seconds to set on the mathematical optimization solver.",
)
@click.option(
    "--experimental",
    type=click.Path(exists=True, dir_okay=False),
    default=None,
    callback=callbacks.validate_experimental,
    help="Define additional tests using experimental data.",
)
@click.option(
    "--custom-tests",
    type=click.Path(exists=True, file_okay=False),
    multiple=True,
    help="A path to a directory containing custom test "
    "modules. Please refer to the documentation for more "
    "information on how to write custom tests. May be "
    "specified multiple times.",
)
@click.option(
    "--custom-config",
    type=click.Path(exists=True, dir_okay=False),
    multiple=True,
    help="A path to a report configuration file that will be merged "
    "into the default configuration. It's primary use is to "
    "configure the placement and scoring of custom tests but "
    "it can also alter the default behavior. Please refer to "
    "the documentation for the expected YAML format used. This "
    "option can be specified multiple times.",
)
def snapshot(
    model,
    filename,
    pytest_args,
    exclusive,
    skip,
    solver,
    solver_timeout,
    experimental,
    custom_tests,
    custom_config,
):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    model_obj, sbml_ver, notifications = api.validate_model(model)
    if model_obj is None:
        # The model is not loadable SBML: emit a validation report in
        # place of the snapshot report and exit non-zero.
        LOGGER.critical(
            "The model could not be loaded due to the following SBML errors."
        )
        utils.stdout_notifications(notifications)
        api.validation_report(model, notifications, filename)
        sys.exit(1)
    # Suppress pytest tracebacks unless the caller asked for them.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model_obj.solver = solver
    _, results = api.test_model(
        model_obj,
        sbml_version=sbml_ver,
        results=True,
        pytest_args=pytest_args,
        skip=skip,
        exclusive=exclusive,
        experimental=experimental,
        solver_timeout=solver_timeout,
    )
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.option(
    "--location",
    envvar="MEMOTE_LOCATION",
    help="Location of test results. Can either by a directory or an "
    "rfc1738 compatible database URL.",
)
@click.option(
    "--model",
    envvar="MEMOTE_MODEL",
    help="The path of the model file. Used to check if it was " "modified.",
)
@click.option(
    "--filename",
    type=click.Path(exists=False, writable=True),
    default="index.html",
    show_default=True,
    help="Path for the HTML report output.",
)
@click.option(
    "--deployment",
    default="gh-pages",
    show_default=True,
    help="Results will be read from and committed to the given " "branch.",
)
@click.option(
    "--custom-config",
    type=click.Path(exists=True, dir_okay=False),
    multiple=True,
    help="A path to a report configuration file that will be merged "
    "into the default configuration. It's primary use is to "
    "configure the placement and scoring of custom tests but "
    "it can also alter the default behavior. Please refer to "
    "the documentation for the expected YAML format used. This "
    "option can be specified multiple times.",
)
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history."""
    callbacks.git_installed()
    LOGGER.info("Initialising history report generation.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history."
        )
        sys.exit(1)
    LOGGER.info(
        "Obtaining history of results from "
        "the deployment branch {}.".format(deployment)
    )
    # NOTE(review): this checks out the deployment branch in the user's
    # working copy and never switches back — confirm that is intended.
    repo.git.checkout(deployment)
    try:
        # Prefer a database-backed manager; fall back to plain files in
        # the repository when `location` is not a valid database URL.
        manager = managers.SQLResultManager(repository=repo, location=location)
    except (AttributeError, ArgumentError):
        manager = managers.RepoResultManager(repository=repo, location=location)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    LOGGER.info("Tracing the commit history.")
    history = managers.HistoryManager(repository=repo, manager=manager)
    history.load_history(model, skip={deployment})
    LOGGER.info("Composing the history report.")
    report = api.history_report(history, config=config)
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
def _test_diff(
    model_and_model_ver_tuple,
    pytest_args,
    skip,
    exclusive,
    experimental,
    solver_timeout,
):
    """Run the memote test suite on one ``(model, sbml_version)`` pair.

    Executed in a worker process by ``diff`` so that individual pytest
    runs cannot interfere with each other.  Only the result object of
    ``api.test_model`` is returned.
    """
    model, sbml_ver = model_and_model_ver_tuple
    return api.test_model(
        model,
        sbml_version=sbml_ver,
        results=True,
        pytest_args=pytest_args,
        skip=skip,
        exclusive=exclusive,
        experimental=experimental,
        solver_timeout=solver_timeout,
    )[1]
@report.command(context_settings=CONTEXT_SETTINGS)
@click.help_option("--help", "-h")
@click.argument("models", type=click.Path(exists=True, dir_okay=False), nargs=-1)
@click.option(
    "--filename",
    type=click.Path(exists=False, writable=True),
    default="index.html",
    show_default=True,
    help="Path for the HTML report output.",
)
@click.option(
    "--pytest-args",
    "-a",
    callback=callbacks.validate_pytest_args,
    help="Any additional arguments you want to pass to pytest. "
    "Should be given as one continuous string.",
)
@click.option(
    "--exclusive",
    type=str,
    multiple=True,
    metavar="TEST",
    help="The name of a test or test module to be run exclusively. "
    "All other tests are skipped. This option can be used "
    "multiple times and takes precedence over '--skip'.",
)
@click.option(
    "--skip",
    type=str,
    multiple=True,
    metavar="TEST",
    help="The name of a test or test module to be skipped. This "
    "option can be used multiple times.",
)
@click.option(
    "--solver",
    type=click.Choice(["cplex", "glpk", "gurobi"]),
    default="glpk",
    show_default=True,
    help="Set the solver to be used.",
)
@click.option(
    "--solver-timeout",
    type=int,
    default=10,
    help="Timeout in seconds to set on the mathematical optimization solver.",
)
@click.option(
    "--experimental",
    type=click.Path(exists=True, dir_okay=False),
    default=None,
    callback=callbacks.validate_experimental,
    help="Define additional tests using experimental data.",
)
@click.option(
    "--custom-tests",
    type=click.Path(exists=True, file_okay=False),
    multiple=True,
    help="A path to a directory containing custom test "
    "modules. Please refer to the documentation for more "
    "information on how to write custom tests "
    "(memote.readthedocs.io). This option can be specified "
    "multiple times.",
)
@click.option(
    "--custom-config",
    type=click.Path(exists=True, dir_okay=False),
    multiple=True,
    help="A path to a report configuration file that will be merged "
    "into the default configuration. It's primary use is to "
    "configure the placement and scoring of custom tests but "
    "it can also alter the default behavior. Please refer to "
    "the documentation for the expected YAML format used "
    "(memote.readthedocs.io). This option can be specified "
    "multiple times.",
)
def diff(
    models,
    filename,
    pytest_args,
    exclusive,
    skip,
    solver,
    solver_timeout,
    experimental,
    custom_tests,
    custom_config,
):
    """
    Take a snapshot of all the supplied models and generate a diff report.

    MODELS: List of paths to two or more model files.
    """
    # Suppress pytest tracebacks unless the caller asked for them.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure.  Models that fail
    # to load keep their initial empty result dict.
    diff_results = dict()
    model_and_model_ver_tuple = list()
    # Filenames of the models that loaded successfully, parallel to
    # `model_and_model_ver_tuple`.  Results must be matched against
    # this list rather than `models`: zipping against all of `models`
    # (as before) attributed results to the wrong file as soon as one
    # model failed to load.
    loaded_filenames = list()
    for model_path in models:
        model_filename = os.path.basename(model_path)
        diff_results.setdefault(model_filename, dict())
        try:
            model, model_ver, notifications = api.validate_model(model_path)
            if model is None:
                # Invalid SBML: write a structural report next to the
                # requested output and skip this model.
                head, tail = os.path.split(filename)
                report_path = os.path.join(
                    head, "{}_structural_report.html".format(model_filename)
                )
                api.validation_report(model_path, notifications, report_path)
                LOGGER.critical(
                    "The model {} could not be loaded due to SBML errors "
                    "reported in {}.".format(model_filename, report_path)
                )
                continue
            model.solver = solver
            model_and_model_ver_tuple.append((model, model_ver))
            loaded_filenames.append(model_filename)
        except (IOError, SBMLError):
            # Bug fix: `LOGGER.debug(exc_info=True)` lacked the required
            # message argument and raised TypeError inside this handler.
            LOGGER.debug("Reading model '%s' failed.", model_path, exc_info=True)
            LOGGER.warning(
                "An error occurred while loading the model '%s'. " "Skipping.",
                model_filename,
            )
    # Abort the diff report unless at least two models can be loaded
    # successfully.
    if len(model_and_model_ver_tuple) < 2:
        LOGGER.critical(
            "Out of the %d provided models only %d could be loaded. Please, "
            "check if the models that could not be loaded are valid SBML. "
            "Aborting.",
            len(models),
            len(model_and_model_ver_tuple),
        )
        sys.exit(1)
    # Running pytest in individual processes to avoid interference.
    partial_test_diff = partial(
        _test_diff,
        pytest_args=pytest_args,
        skip=skip,
        exclusive=exclusive,
        experimental=experimental,
        solver_timeout=solver_timeout,
    )
    pool = Pool(min(len(models), cpu_count()))
    try:
        results = pool.map(partial_test_diff, model_and_model_ver_tuple)
    finally:
        # Release the worker processes even if a worker raised.
        pool.close()
        pool.join()
    for model_filename, result in zip(loaded_filenames, results):
        diff_results[model_filename] = result
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))
| |
import sys, py
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib import jit
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.rstring import StringBuilder
class TestLLtype(LLJitMixin):
    """Tests for tracing-time optimisations of the RPython meta-interpreter.

    Each test interprets a small RPython function with
    ``interp_operations`` and then asserts, via
    ``check_operations_history``, how many of certain operations
    (guards, getfields, getarrayitems, ...) were recorded in the trace.
    The exact operation counts are the point of these tests, so the
    bodies are deliberately left untouched.
    """
    def test_dont_record_repeated_guard_class(self):
        class A:
            pass
        class B(A):
            pass
        @jit.dont_look_inside
        def extern(n):
            if n == -7:
                return None
            elif n:
                return A()
            else:
                return B()
        def fn(n):
            obj = extern(n)
            return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
        res = self.interp_operations(fn, [0])
        assert res == 4
        # Four isinstance checks, but only one guard_class is recorded.
        self.check_operations_history(guard_class=1, guard_nonnull=1)
        res = self.interp_operations(fn, [1])
        assert not res
    def test_dont_record_guard_class_after_new(self):
        class A:
            pass
        class B(A):
            pass
        def fn(n):
            if n == -7:
                obj = None
            elif n:
                obj = A()
            else:
                obj = B()
            return isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B) + isinstance(obj, B)
        res = self.interp_operations(fn, [0])
        assert res == 4
        # The class is known from the allocation; no guards needed.
        self.check_operations_history(guard_class=0, guard_nonnull=0)
        res = self.interp_operations(fn, [1])
        assert not res
    def test_guard_isnull_nullifies(self):
        class A:
            pass
        a = A()
        a.x = None
        def fn(n):
            if n == -7:
                a.x = ""
            obj = a.x
            res = 0
            if not obj:
                res += 1
            if obj:
                res += 1
            if obj is None:
                res += 1
            if obj is not None:
                res += 1
            return res
        res = self.interp_operations(fn, [0])
        assert res == 2
        # One null-check suffices for all four truth tests.
        self.check_operations_history(guard_isnull=1)
    def test_heap_caching_while_tracing(self):
        class A:
            pass
        a1 = A()
        a2 = A()
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a.x = n
            return a.x
        res = self.interp_operations(fn, [7])
        assert res == 7
        self.check_operations_history(getfield_gc_i=0)
        res = self.interp_operations(fn, [-7])
        assert res == -7
        self.check_operations_history(getfield_gc_i=0)
        def fn(n, ca, cb):
            a1.x = n
            a2.x = n
            a = a1
            if ca:
                a = a2
            b = a1
            if cb:
                b = a
            return a.x + b.x
        res = self.interp_operations(fn, [7, 0, 1])
        assert res == 7 * 2
        self.check_operations_history(getfield_gc_i=1)
        res = self.interp_operations(fn, [-7, 1, 1])
        assert res == -7 * 2
        self.check_operations_history(getfield_gc_i=1)
    def test_heap_caching_nonnull(self):
        class A:
            def __init__(self, x=None):
                self.next = x
        a0 = A()
        a1 = A()
        a2 = A(a1)
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            if a.next:
                a = A(a.next)
                result = a.next is not None
                a0.next = a
                return result
            return False
        res = self.interp_operations(fn, [-7])
        assert res == True
        self.check_operations_history(guard_nonnull=1)
    def test_heap_caching_while_tracing_invalidation(self):
        class A:
            pass
        a1 = A()
        a2 = A()
        @jit.dont_look_inside
        def f(a):
            a.x = 5
        l = [1]
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a.x = n
            x1 = a.x
            # The residual call to f() invalidates the heap cache.
            f(a)
            x2 = a.x
            l[0] = x2
            return a.x + x1 + x2
        res = self.interp_operations(fn, [7])
        assert res == 5 * 2 + 7
        self.check_operations_history(getfield_gc_i=1)
    def test_heap_caching_dont_store_same(self):
        class A:
            pass
        a1 = A()
        a2 = A()
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a.x = n
            a.x = n
            return a.x
        res = self.interp_operations(fn, [7])
        assert res == 7
        # The duplicate store of the same value is elided.
        self.check_operations_history(getfield_gc_i=0, setfield_gc=1)
        res = self.interp_operations(fn, [-7])
        assert res == -7
        self.check_operations_history(getfield_gc_i=0)
    def test_array_caching(self):
        a1 = [0, 0]
        a2 = [0, 0]
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a[0] = n
            x1 = a[0]
            a[n - n] = n + 1
            return a[0] + x1
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1
        self.check_operations_history(getarrayitem_gc_i=1)
        res = self.interp_operations(fn, [-7])
        assert res == -7 - 7 + 1
        self.check_operations_history(getarrayitem_gc_i=1)
        def fn(n, ca, cb):
            a1[0] = n
            a2[0] = n
            a = a1
            if ca:
                a = a2
            b = a1
            if cb:
                b = a
            return a[0] + b[0]
        res = self.interp_operations(fn, [7, 0, 1])
        assert res == 7 * 2
        self.check_operations_history(getarrayitem_gc_i=1)
        res = self.interp_operations(fn, [-7, 1, 1])
        assert res == -7 * 2
        self.check_operations_history(getarrayitem_gc_i=1)
    def test_array_caching_while_tracing_invalidation(self):
        a1 = [0, 0]
        a2 = [0, 0]
        @jit.dont_look_inside
        def f(a):
            a[0] = 5
        class A: pass
        l = A()
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a[0] = n
            x1 = a[0]
            f(a)
            x2 = a[0]
            l.x = x2
            return a[0] + x1 + x2
        res = self.interp_operations(fn, [7])
        assert res == 5 * 2 + 7
        self.check_operations_history(getarrayitem_gc_i=1)
    def test_array_and_getfield_interaction(self):
        class A: pass
        a1 = A()
        a2 = A()
        a1.l = a2.l = [0, 0]
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a.l = [0, 0]
            a.x = 0
            a.l[a.x] = n
            a.x += 1
            a.l[a.x] = n + 1
            x1 = a.l[a.x]
            a.x -= 1
            x2 = a.l[a.x]
            return x1 + x2
        res = self.interp_operations(fn, [7])
        assert res == 7 * 2 + 1
        self.check_operations_history(setarrayitem_gc=2, setfield_gc=3,
                                      getarrayitem_gc_i=0, getfield_gc_r=1)
    def test_promote_changes_heap_cache(self):
        class A: pass
        a1 = A()
        a2 = A()
        a1.l = a2.l = [0, 0]
        a1.x = a2.x = 0
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a.l = [0, 0]
            jit.promote(a.x)
            a.l[a.x] = n
            a.x += 1
            a.l[a.x] = n + 1
            x1 = a.l[a.x]
            a.x -= 1
            x2 = a.l[a.x]
            return x1 + x2
        res = self.interp_operations(fn, [7])
        assert res == 7 * 2 + 1
        self.check_operations_history(setarrayitem_gc=2, setfield_gc=2,
                                      getarrayitem_gc_i=0, getfield_gc_i=1,
                                      getfield_gc_r=1)
    def test_promote_changes_array_cache(self):
        a1 = [0, 0]
        a2 = [0, 0]
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            a[0] = n
            jit.hint(n, promote=True)
            x1 = a[0]
            jit.hint(x1, promote=True)
            a[n - n] = n + 1
            return a[0] + x1
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1
        self.check_operations_history(getarrayitem_gc_i=0, guard_value=1)
        res = self.interp_operations(fn, [-7])
        assert res == -7 - 7 + 1
        self.check_operations_history(getarrayitem_gc_i=0, guard_value=1)
    def test_list_caching(self):
        a1 = [0, 0]
        a2 = [0, 0]
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = a2
            if n < -1000:
                # Never taken; forces the list to be treated as
                # resizable.
                a.append(5)
            a[0] = n
            x1 = a[0]
            a[n - n] = n + 1
            return a[0] + x1
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1
        self.check_operations_history(getarrayitem_gc_i=1,
                                      getfield_gc_r=1)
        res = self.interp_operations(fn, [-7])
        assert res == -7 - 7 + 1
        self.check_operations_history(getarrayitem_gc_i=1,
                                      getfield_gc_r=1)
        def fn(n, ca, cb):
            a1[0] = n
            a2[0] = n
            a = a1
            if ca:
                a = a2
            if n < -100:
                a.append(5)
            b = a1
            if cb:
                b = a
            return a[0] + b[0]
        res = self.interp_operations(fn, [7, 0, 1])
        assert res == 7 * 2
        self.check_operations_history(getarrayitem_gc_i=1,
                                      getfield_gc_r=3)
        res = self.interp_operations(fn, [-7, 1, 1])
        assert res == -7 * 2
        self.check_operations_history(getarrayitem_gc_i=1,
                                      getfield_gc_r=3)
    def test_list_caching_negative(self):
        def fn(n):
            a = [0] * n
            if n > 1000:
                a.append(0)
            a[-1] = n
            x1 = a[-1]
            a[n - n - 1] = n + 1
            return a[-1] + x1 + 1000 * a[2]
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1
        self.check_operations_history(setarrayitem_gc=2,
                setfield_gc=2, call_n=0, call_i=0, call_r=0)
    def test_list_caching_negative_nonzero_init(self):
        def fn(n):
            a = [42] * n
            if n > 1000:
                a.append(0)
            a[-1] = n
            x1 = a[-1]
            a[n - n - 1] = n + 1
            return a[-1] + x1 + 1000 * a[2]
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1 + 42000
        self.check_operations_history(setarrayitem_gc=2,
                setfield_gc=0, call_r=1)
    def test_virtualizable_with_array_heap_cache(self):
        myjitdriver = jit.JitDriver(greens = [], reds = ['n', 'x', 'i', 'frame'],
                                    virtualizables = ['frame'])
        class Frame(object):
            _virtualizable_ = ['l[*]', 's']
            def __init__(self, a, s):
                self = jit.hint(self, access_directly=True, fresh_virtualizable=True)
                self.l = [0] * (4 + a)
                self.s = s
        def f(n, a, i):
            frame = Frame(a, 0)
            frame.l[0] = a
            frame.l[1] = a + 1
            frame.l[2] = a + 2
            frame.l[3] = a + 3
            if not i:
                return frame.l[0] + len(frame.l)
            x = 0
            while n > 0:
                myjitdriver.can_enter_jit(frame=frame, n=n, x=x, i=i)
                myjitdriver.jit_merge_point(frame=frame, n=n, x=x, i=i)
                frame.s = jit.promote(frame.s)
                n -= 1
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                frame.s += 1
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                x += len(frame.l)
                x += f(n, n, 0)
                frame.s -= 1
            return x
        res = self.meta_interp(f, [10, 1, 1], listops=True)
        assert res == f(10, 1, 1)
        self.check_history(getarrayitem_gc_i=0, getfield_gc_i=0,
                           getfield_gc_r=0)
    def test_heap_caching_array_pure(self):
        class A(object):
            pass
        p1 = A()
        p2 = A()
        def fn(n):
            if n >= 0:
                a = (n, n + 1)
                p = p1
            else:
                a = (n + 1, n)
                p = p2
            p.x = a
            return p.x[0] + p.x[1]
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7 + 1
        self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0)
        res = self.interp_operations(fn, [-7])
        assert res == -7 - 7 + 1
        self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0)
    def test_heap_caching_and_elidable_function(self):
        class A:
            pass
        class B: pass
        a1 = A()
        a1.y = 6
        a2 = A()
        a2.y = 13
        @jit.elidable
        def f(b):
            return b + 1
        def fn(n):
            if n > 0:
                a = a1
            else:
                a = A()
            a.x = n
            z = f(6)
            return z + a.x
        res = self.interp_operations(fn, [7])
        assert res == 7 + 7
        self.check_operations_history(getfield_gc_i=0)
        res = self.interp_operations(fn, [-7])
        assert res == -7 + 7
        self.check_operations_history(getfield_gc_i=0)
        # NOTE(review): trailing bare return at the end of the test;
        # it is a no-op and kept as-is.
        return
    def test_heap_caching_multiple_objects(self):
        class Gbl(object):
            pass
        g = Gbl()
        class A(object):
            pass
        a1 = A()
        g.a1 = a1
        a1.x = 7
        a2 = A()
        g.a2 = a2
        a2.x = 7
        def gn(a1, a2):
            return a1.x + a2.x
        def fn(n):
            if n < 0:
                a1 = A()
                g.a1 = a1
                a1.x = n
                a2 = A()
                g.a2 = a2
                a2.x = n - 1
            else:
                a1 = g.a1
                a2 = g.a2
            return a1.x + a2.x + gn(a1, a2)
        res = self.interp_operations(fn, [-7])
        assert res == 2 * -7 + 2 * -8
        self.check_operations_history(setfield_gc=4, getfield_gc_i=0,
                                      getfield_gc_r=0)
        res = self.interp_operations(fn, [7])
        assert res == 4 * 7
        self.check_operations_history(getfield_gc_i=2, getfield_gc_r=2)
    def test_heap_caching_multiple_tuples(self):
        class Gbl(object):
            pass
        g = Gbl()
        def gn(a1, a2):
            return a1[0] + a2[0]
        def fn(n):
            a1 = (n, )
            g.a = a1
            a2 = (n - 1, )
            g.a = a2
            jit.promote(n)
            return a1[0] + a2[0] + gn(a1, a2)
        res = self.interp_operations(fn, [7])
        assert res == 2 * 7 + 2 * 6
        self.check_operations_history(getfield_gc_pure_i=0,
                                      getfield_gc_pure_r=0)
        res = self.interp_operations(fn, [-7])
        assert res == 2 * -7 + 2 * -8
        self.check_operations_history(getfield_gc_pure_i=0,
                                      getfield_gc_pure_r=0)
    def test_heap_caching_multiple_arrays(self):
        class Gbl(object):
            pass
        g = Gbl()
        def fn(n):
            a1 = [n, n, n]
            g.a = a1
            a1[0] = n
            a2 = [n, n, n]
            g.a = a2
            a2[0] = n - 1
            return a1[0] + a2[0] + a1[0] + a2[0]
        res = self.interp_operations(fn, [7])
        assert res == 2 * 7 + 2 * 6
        self.check_operations_history(getarrayitem_gc_i=0)
        res = self.interp_operations(fn, [-7])
        assert res == 2 * -7 + 2 * -8
        self.check_operations_history(getarrayitem_gc_i=0)
    def test_heap_caching_multiple_arrays_getarrayitem(self):
        class Gbl(object):
            pass
        g = Gbl()
        g.a1 = [7, 8, 9]
        g.a2 = [8, 9, 10, 11]
        def fn(i):
            if i < 0:
                g.a1 = [7, 8, 9]
                g.a2 = [7, 8, 9, 10]
            jit.promote(i)
            a1 = g.a1
            a1[i + 1] = 15 # make lists mutable
            a2 = g.a2
            a2[i + 1] = 19
            return a1[i] + a2[i] + a1[i] + a2[i]
        res = self.interp_operations(fn, [0])
        assert res == 2 * 7 + 2 * 8
        self.check_operations_history(getarrayitem_gc_i=2)
    def test_heap_caching_multiple_lists(self):
        class Gbl(object):
            pass
        g = Gbl()
        g.l = []
        def fn(n):
            if n < -100:
                g.l.append(1)
            a1 = [n, n, n]
            g.l = a1
            a1[0] = n
            a2 = [n, n, n]
            g.l = a2
            a2[0] = n - 1
            return a1[0] + a2[0] + a1[0] + a2[0]
        res = self.interp_operations(fn, [7])
        assert res == 2 * 7 + 2 * 6
        self.check_operations_history(getarrayitem_gc_i=0, getfield_gc_i=0,
                                      getfield_gc_r=0)
        res = self.interp_operations(fn, [-7])
        assert res == 2 * -7 + 2 * -8
        self.check_operations_history(getarrayitem_gc_i=0, getfield_gc_i=0,
                                      getfield_gc_r=0)
    def test_length_caching(self):
        class Gbl(object):
            pass
        g = Gbl()
        g.a = [0] * 7
        def fn(n):
            a = g.a
            res = len(a) + len(a)
            a1 = [0] * n
            g.a = a1
            return len(a1) + res
        res = self.interp_operations(fn, [7], backendopt=True)
        assert res == 7 * 3
        self.check_operations_history(arraylen_gc=1)
    def test_arraycopy(self):
        class Gbl(object):
            pass
        g = Gbl()
        g.a = [0] * 7
        def fn(n):
            assert n >= 0
            a = g.a
            x = [0] * n
            x[2] = 21
            return len(a[:n]) + x[2]
        res = self.interp_operations(fn, [3], backendopt=True)
        assert res == 24
        self.check_operations_history(getarrayitem_gc_i=0)
    def test_fold_int_add_ovf(self):
        def fn(n):
            jit.promote(n)
            try:
                n = ovfcheck(n + 1)
            except OverflowError:
                return 12
            else:
                return n
        res = self.interp_operations(fn, [3])
        assert res == 4
        # The overflow-checked add is constant-folded after the promote.
        self.check_operations_history(int_add_ovf=0)
        res = self.interp_operations(fn, [sys.maxint])
        assert res == 12
    def test_opaque_list(self):
        from rpython.rlib.rerased import new_erasing_pair
        erase, unerase = new_erasing_pair("test_opaque_list")
        def fn(n, ca, cb):
            l1 = [n]
            l2 = [n]
            # NOTE(review): a2 erases l1 (not l2), so a1 and a2 alias
            # the same list — confirm this is intended; l2 is unused.
            a1 = erase(l1)
            a2 = erase(l1)
            a = a1
            if ca:
                a = a2
            if n < -100:
                unerase(a).append(5)
            b = a1
            if cb:
                b = a
            return unerase(a)[0] + unerase(b)[0]
        res = self.interp_operations(fn, [7, 0, 1])
        assert res == 7 * 2
        self.check_operations_history(getarrayitem_gc_i=0,
                getfield_gc_i=0, getfield_gc_r=0)
        res = self.interp_operations(fn, [-7, 1, 1])
        assert res == -7 * 2
        self.check_operations_history(getarrayitem_gc_i=0,
                getfield_gc_i=0, getfield_gc_r=0)
    def test_copy_str_content(self):
        def fn(n):
            a = StringBuilder()
            x = [1]
            a.append("hello world")
            return x[0]
        res = self.interp_operations(fn, [0])
        assert res == 1
        self.check_operations_history(getarrayitem_gc_i=0,
                                      getarrayitem_gc_pure_i=0)
    def test_raise_known_class_no_guard_class(self):
        def raise_exc(cls):
            raise cls
        def fn(n):
            if n:
                cls = ValueError
            else:
                cls = TypeError
            try:
                raise_exc(cls)
            except ValueError:
                return -1
            return n
        res = self.interp_operations(fn, [1])
        assert res == -1
        self.check_operations_history(guard_class=0)
    def test_dont_record_setfield_gc_zeros(self):
        py.test.skip("see test_unescaped_write_zero in test_ajit")
        class A(object):
            pass
        def make_a():
            return A()
        make_a._dont_inline_ = True
        def fn(n):
            a = make_a()
            a.x = jit.promote(n)
            return a.x
        res = self.interp_operations(fn, [0])
        assert res == 0
        self.check_operations_history(setfield_gc=0)
| |
import datetime
from fjord.base.tests import eq_, TestCase
from fjord.feedback.config import TRUNCATE_LENGTH
from fjord.feedback.models import (
Product,
Response,
ResponseEmail,
ResponseContext,
ResponsePI,
ResponseMappingType,
purge_data
)
from fjord.feedback.tests import (
ResponseFactory,
ResponseEmailFactory,
ResponseContextFactory,
ResponsePIFactory
)
from fjord.feedback.utils import compute_grams
from fjord.journal.models import Record
from fjord.search.tests import ElasticTestCase
class TestResponseModel(TestCase):
    def test_description_truncate_on_save(self):
        """Descriptions longer than TRUNCATE_LENGTH get cut on save."""
        overlong = 'a' * (TRUNCATE_LENGTH + 10)
        response = ResponseFactory(description=overlong)
        eq_(response.description, 'a' * TRUNCATE_LENGTH)

    def test_description_strip_on_save(self):
        """Leading and trailing whitespace is stripped on save."""
        response = ResponseFactory(description=u' \n\tou812\t\n ')
        eq_(response.description, u'ou812')

    def test_url_domain(self):
        # A conventional ascii domain...
        response = ResponseFactory(url=u'http://foo.example.com.br/blah')
        eq_(response.url_domain, u'example.com.br')
        assert isinstance(response.url_domain, unicode)
        # ...and an internationalized one.
        response = ResponseFactory(
            url=u'http://\u30c9\u30e9\u30af\u30a810.jp/dq10_skillpoint.html')
        eq_(response.url_domain, u'\u30c9\u30e9\u30af\u30a810.jp')
        assert isinstance(response.url_domain, unicode)

    def test_rating_to_happy(self):
        """Test that we do populate happy from rating"""
        expected_by_rating = {
            1: False,
            2: False,
            3: False,
            4: True,
            5: True,
        }
        for rating, expected in sorted(expected_by_rating.items()):
            # Build (not create) so the save() hook runs exactly once.
            response = ResponseFactory.build(happy=None, rating=rating)
            response.save()
            eq_(response.happy, expected)

    def test_happy_to_rating(self):
        """Test we don't populate rating from happy"""
        for happy_value in (True, False):
            response = ResponseFactory.build(happy=happy_value, rating=None)
            response.save()
            eq_(response.rating, None)
class TestAutoTranslation(TestCase):
    def setUp(self):
        # Start every test with no product configured for translation.
        # FIXME - it might be better to save the state and restore it in
        # tearDown instead of stomping, but stomping works for now.
        Product.objects.update(translation_system=u'')
        super(TestAutoTranslation, self).setUp()

    def tearDown(self):
        # Leave no translation system configured behind.
        Product.objects.update(translation_system=u'')
        super(TestAutoTranslation, self).tearDown()

    def test_auto_translation(self):
        firefox = Product.objects.get(db_name='firefox')
        firefox.translation_system = u'dennis'
        firefox.save()
        created = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola'
        )
        # Reload from the db to pick up the translated field.
        saved = Response.objects.get(id=created.id)
        eq_(saved.translated_description, u'\xabHOLA\xbb')
class TestGenerateTranslationJobs(TestCase):
def setUp(self):
    # Disable the translation system on every product so each test
    # configures exactly what it needs.
    # FIXME - it might be better to save the state and restore it in
    # tearDown instead of stomping, but stomping works for now.
    Product.objects.update(translation_system=u'')
    super(TestGenerateTranslationJobs, self).setUp()
def tearDown(self):
    # Make sure no translation system configuration outlives the test.
    Product.objects.update(translation_system=u'')
    super(TestGenerateTranslationJobs, self).tearDown()
def test_english_no_translation(self):
"""English descriptions should get copied over"""
resp = ResponseFactory(
locale=u'en-US',
description=u'hello',
translated_description=u''
)
# No new jobs should be generated
eq_(len(resp.generate_translation_jobs()), 0)
# Re-fetch from the db and make sure the description was copied over
resp = Response.objects.get(id=resp.id)
eq_(resp.description, resp.translated_description)
def test_english_gb_no_translation(self):
"""en-GB descriptions should get copied over"""
resp = ResponseFactory(
locale=u'en-GB',
description=u'hello',
translated_description=u''
)
# No new jobs should be generated
eq_(len(resp.generate_translation_jobs()), 0)
# Re-fetch from the db and make sure the description was copied over
resp = Response.objects.get(id=resp.id)
eq_(resp.description, resp.translated_description)
def test_english_with_dennis(self):
"""English descriptions should get copied over"""
resp = ResponseFactory(
locale=u'en-US',
product=u'firefox',
description=u'hello',
translated_description=u''
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.objects.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# No new jobs should be generated
eq_(len(resp.generate_translation_jobs()), 0)
# Re-fetch from the db and make sure the description was copied over
resp = Response.objects.get(id=resp.id)
eq_(resp.description, resp.translated_description)
def test_spanish_no_translation(self):
"""Spanish should not get translated"""
resp = ResponseFactory(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u''
)
# No jobs should be translated
eq_(len(resp.generate_translation_jobs()), 0)
# Nothing should be translated
eq_(resp.translated_description, u'')
def test_spanish_with_dennis(self):
"""Spanish should get translated"""
resp = ResponseFactory(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u''
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.objects.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# One job should be generated
jobs = resp.generate_translation_jobs()
eq_(len(jobs), 1)
job = jobs[0]
eq_(job[1:], (u'dennis', u'es', u'description',
u'en', 'translated_description'))
eq_(resp.translated_description, u'')
def test_spanish_with_dennis_and_existing_translations(self):
"""Response should pick up existing translation"""
existing_resp = ResponseFactory(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u'DUDE!'
)
resp = ResponseFactory(
locale=u'es',
product=u'firefox',
description=u'hola',
translated_description=u''
)
# Set the product up for translation *after* creating the response
# so that it doesn't get auto-translated because Response is set up
# for auto-translation.
prod = Product.objects.get(db_name='firefox')
prod.translation_system = u'dennis'
prod.save()
# No jobs should be translated
eq_(len(resp.generate_translation_jobs()), 0)
eq_(resp.translated_description, existing_resp.translated_description)
class TestComputeGrams(TestCase):
    """Tests for the compute_grams() bigram extractor."""

    def test_empty(self):
        eq_(compute_grams(u''), [])

    def test_parsing(self):
        # Stop words are removed, capital letters don't matter, and
        # punctuation gets nixed -- all three inputs normalize to nothing.
        for text in (u'i me him her', u'I ME HIM HER', u'i, me, him, her'):
            eq_(compute_grams(text), [])

    def test_bigrams(self):
        # Note: Tokens look weird after being analyzed probably due to
        # the stemmer. We could write a bunch of code to "undo" some
        # of the excessive stemming, but it's probably an exercise in
        # futility. Ergo the tests look a little odd. e.g. "youtub"
        cases = [
            # One word a bigram does not make
            (u'youtube', []),
            # Two words is the minimum number to create a bigram
            (u'youtube crash', ['crash youtube']),
            # Three words creates two bigrams
            (u'youtube crash flash', ['crash flash', 'crash youtube']),
            # Four words creates three bigrams
            (u'youtube crash flash bridge',
             ['bridge flash', 'crash flash', 'crash youtube']),
            # Nix duplicate bigrams
            (u'youtube crash youtube flash',
             ['crash youtube', 'flash youtube']),
        ]
        for text, expected in cases:
            eq_(sorted(compute_grams(text)), expected)
class TestParseData(ElasticTestCase):
    """Tests that purge_data() removes old PII rows but keeps Responses."""

    def test_purge(self):
        now = datetime.datetime.now()
        # NOTE: the original computed an unused ``cutoff`` here and then
        # recomputed the identical value below; the dead assignment is
        # removed.

        # Create 10 objs of each type--one for each day for the last
        # 10 days.
        for i in range(10):
            created = now - datetime.timedelta(days=i)
            ResponseEmailFactory(opinion__created=created)
            ResponseContextFactory(opinion__created=created)
            ResponsePIFactory(opinion__created=created)

        # Note that this creates 30 Response objects.

        # Since creating the objects and indexing them happens very
        # quickly in tests, we hit a race condition and the has_email
        # column ends up being false. So instead we just drop the
        # index and rebuild it.
        self.setup_indexes()

        # Make sure everything is in the db
        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 10)
        eq_(ResponseContext.objects.count(), 10)
        eq_(ResponsePI.objects.count(), 10)

        # Make sure everything is in the index
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 10)

        # Now purge everything older than 5 days and make sure things
        # got removed that should have gotten removed. Also check if
        # there is a journal entry for the purge operation.
        cutoff = now - datetime.timedelta(days=5)
        purge_data(cutoff=cutoff)

        self.refresh()

        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 5)
        eq_(ResponseEmail.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponseContext.objects.count(), 5)
        eq_(ResponseContext.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponsePI.objects.count(), 5)
        eq_(ResponsePI.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(1,
            Record.objects.filter(action='purge_data').count())
        expected_msg = ('feedback_responseemail: 5, '
                        'feedback_responsecontext: 5, '
                        'feedback_responsepi: 5')
        eq_(expected_msg,
            Record.objects.get(action='purge_data').msg)

        # Everything should still be in the index, but the number of
        # things with has_email=True should go down
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 5)
| |
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2012 Olle Lundberg <geek@nerd.sh>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Configuration file (aka ``ssh_config``) support.
"""
import fnmatch
import os
import re
import shlex
import socket
SSH_PORT = 22
class SSHConfig (object):
    """
    Representation of config information as stored in the format used by
    OpenSSH. Queries can be made via `lookup`. The format is described in
    OpenSSH's ``ssh_config`` man page. This class is provided primarily as a
    convenience to posix users (since the OpenSSH format is a de-facto
    standard on posix) but should work fine on Windows too.

    .. versionadded:: 1.6
    """

    # Matches both "Keyword Value" and "Keyword=Value" forms; group 1 is
    # the keyword, group 2 the remainder of the line.
    SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')

    def __init__(self):
        """
        Create a new OpenSSH config object.
        """
        # Parsed sections, in file order. Each entry is a dict of the
        # form {'host': [patterns...], 'config': {lowercased_key: value}}.
        self._config = []

    def parse(self, file_obj):
        """
        Read an OpenSSH config from the given file object.

        :param file_obj: a file-like object to read the config file from
        """
        # Options seen before the first "Host" keyword belong to this
        # implicit section, which matches every host.
        host = {"host": ['*'], "config": {}}
        for line in file_obj:
            # Strip any leading or trailing whitespace from the line.
            # See https://github.com/paramiko/paramiko/issues/499 for more info.
            line = line.strip()
            if not line or line.startswith('#'):
                continue

            match = re.match(self.SETTINGS_REGEX, line)
            if not match:
                raise Exception("Unparsable line %s" % line)
            key = match.group(1).lower()
            value = match.group(2)

            if key == 'host':
                # A new Host section starts: flush the one we were
                # accumulating first.
                self._config.append(host)
                host = {
                    'host': self._get_hosts(value),
                    'config': {}
                }
            elif key == 'proxycommand' and value.lower() == 'none':
                # Store 'none' as None; prior to 3.x, it will get stripped out
                # at the end (for compatibility with issue #415). After 3.x, it
                # will simply not get stripped, leaving a nice explicit marker.
                host['config'][key] = None
            else:
                if value.startswith('"') and value.endswith('"'):
                    # Remove surrounding double quotes.
                    value = value[1:-1]

                # identityfile, localforward, remoteforward keys are special
                # cases, since they are allowed to be specified multiple times
                # and they should be tried in order of specification.
                if key in ['identityfile', 'localforward', 'remoteforward']:
                    if key in host['config']:
                        host['config'][key].append(value)
                    else:
                        host['config'][key] = [value]
                elif key not in host['config']:
                    # For ordinary keys, the first occurrence wins.
                    host['config'][key] = value
        # Flush the final (or only) section.
        self._config.append(host)

    def lookup(self, hostname):
        """
        Return a dict of config options for a given hostname.

        The host-matching rules of OpenSSH's ``ssh_config`` man page are used:
        For each parameter, the first obtained value will be used. The
        configuration files contain sections separated by ``Host``
        specifications, and that section is only applied for hosts that match
        one of the patterns given in the specification.

        Since the first obtained value for each parameter is used, more host-
        specific declarations should be given near the beginning of the file,
        and general defaults at the end.

        The keys in the returned dict are all normalized to lowercase (look
        for ``"port"``, not ``"Port"``). The values are processed according
        to the rules for substitution variable expansion in ``ssh_config``.

        :param str hostname: the hostname to lookup
        """
        matches = [
            config for config in self._config
            if self._allowed(config['host'], hostname)
        ]

        ret = {}
        for match in matches:
            for key, value in match['config'].items():
                if key not in ret:
                    # Create a copy of the original value,
                    # else it will reference the original list
                    # in self._config and update that value too
                    # when the extend() is being called.
                    ret[key] = value[:] if value is not None else value
                elif key == 'identityfile':
                    # identityfile accumulates across matching sections.
                    ret[key].extend(value)
        ret = self._expand_variables(ret, hostname)
        # TODO: remove in 3.x re #670
        if 'proxycommand' in ret and ret['proxycommand'] is None:
            del ret['proxycommand']
        return ret

    def get_hostnames(self):
        """
        Return the set of literal hostnames defined in the SSH config (both
        explicit hostnames and wildcard entries).
        """
        hosts = set()
        for entry in self._config:
            hosts.update(entry['host'])
        return hosts

    def _allowed(self, hosts, hostname):
        # True when hostname matches at least one pattern in ``hosts`` and
        # no negated ('!'-prefixed) pattern; a negated match wins outright.
        match = False
        for host in hosts:
            if host.startswith('!') and fnmatch.fnmatch(hostname, host[1:]):
                return False
            elif fnmatch.fnmatch(hostname, host):
                match = True
        return match

    def _expand_variables(self, config, hostname):
        """
        Return a dict of config options with expanded substitutions
        for a given hostname.

        Please refer to man ``ssh_config`` for the parameters that
        are replaced.

        :param dict config: the config for the hostname
        :param str hostname: the hostname that the config belongs to
        """
        if 'hostname' in config:
            config['hostname'] = config['hostname'].replace('%h', hostname)
        else:
            config['hostname'] = hostname

        if 'port' in config:
            port = config['port']
        else:
            port = SSH_PORT

        user = os.getenv('USER')
        if 'user' in config:
            remoteuser = config['user']
        else:
            remoteuser = user

        # Short local host name (everything before the first dot).
        host = socket.gethostname().split('.')[0]
        # FQDN resolution is deferred via LazyFqdn; only computed if one
        # of the '%l' substitutions below is actually applied.
        fqdn = LazyFqdn(config, host)
        homedir = os.path.expanduser('~')
        # Per-key token replacement tables, modeled on the TOKENS section
        # of ssh_config(5).
        replacements = {'controlpath':
                        [
                            ('%h', config['hostname']),
                            ('%l', fqdn),
                            ('%L', host),
                            ('%n', hostname),
                            ('%p', port),
                            ('%r', remoteuser),
                            ('%u', user)
                        ],
                        'identityfile':
                        [
                            ('~', homedir),
                            ('%d', homedir),
                            ('%h', config['hostname']),
                            ('%l', fqdn),
                            ('%u', user),
                            ('%r', remoteuser)
                        ],
                        'proxycommand':
                        [
                            ('~', homedir),
                            ('%h', config['hostname']),
                            ('%p', port),
                            ('%r', remoteuser)
                        ]
                        }
        for k in config:
            if config[k] is None:
                # ProxyCommand stored as the explicit 'none' marker;
                # nothing to expand.
                continue
            if k in replacements:
                for find, replace in replacements[k]:
                    if isinstance(config[k], list):
                        # Multi-valued keys (e.g. identityfile): expand
                        # each entry in place.
                        for item in range(len(config[k])):
                            if find in config[k][item]:
                                config[k][item] = config[k][item].\
                                    replace(find, str(replace))
                    else:
                        if find in config[k]:
                            config[k] = config[k].replace(find, str(replace))
        return config

    def _get_hosts(self, host):
        """
        Return a list of host_names from host value.
        """
        try:
            return shlex.split(host)
        except ValueError:
            raise Exception("Unparsable host %s" % host)
class LazyFqdn(object):
    """
    Returns the host's fqdn on request as string.
    """

    def __init__(self, config, host=None):
        # Resolution is deferred until __str__ is first called; the
        # result is then cached in self.fqdn.
        self.fqdn = None
        self.config = config
        self.host = host

    def __str__(self):
        if self.fqdn is not None:
            return self.fqdn

        #
        # If the SSH config contains AddressFamily, use that when
        # determining the local host's FQDN. Using socket.getfqdn() from
        # the standard library is the most general solution, but can
        # result in noticeable delays on some platforms when IPv6 is
        # misconfigured or not available, as it calls getaddrinfo with no
        # address family specified, so both IPv4 and IPv6 are checked.
        #

        # Handle a specific AddressFamily option first.
        fqdn = None
        address_family = self.config.get('addressfamily', 'any').lower()
        if address_family != 'any':
            if address_family == 'inet':
                family = socket.AF_INET
            else:
                family = socket.AF_INET6
            try:
                results = socket.getaddrinfo(
                    self.host,
                    None,
                    family,
                    socket.SOCK_DGRAM,
                    socket.IPPROTO_IP,
                    socket.AI_CANONNAME
                )
                for af, socktype, proto, canonname, sa in results:
                    if canonname and '.' in canonname:
                        fqdn = canonname
                        break
            # gaierror -> socket.getaddrinfo() can't resolve self.host
            # (which is from socket.gethostname()). Fall back to the
            # getfqdn() call below.
            except socket.gaierror:
                pass

        # Handle 'any' / unspecified, or a failed lookup above.
        if fqdn is None:
            fqdn = socket.getfqdn()

        # Cache for subsequent calls.
        self.fqdn = fqdn
        return self.fqdn
| |
import pytest
import capnp
import os
import math
this_dir = os.path.dirname(__file__)
@pytest.fixture
def addressbook():
    """Load and return the compiled addressbook.capnp schema module."""
    return capnp.load(os.path.join(this_dir, 'addressbook.capnp'))
def test_addressbook_message_classes(addressbook):
    """Round-trip an AddressBook through the low-level message classes.

    Fixes from the original: the reader actually uses the ``fd`` it is
    handed (it previously reached back out to the global file object),
    files are opened in binary mode (packed messages are binary data),
    and the write handle is closed before re-opening for reading.
    """
    def writeAddressBook(fd):
        # Build with the explicit MallocMessageBuilder API rather than
        # the convenience new_message() helper.
        message = capnp._MallocMessageBuilder()
        addressBook = message.init_root(addressbook.AddressBook)

        people = addressBook.init('people', 2)

        alice = people[0]
        alice.id = 123
        alice.name = 'Alice'
        alice.email = 'alice@example.com'
        alicePhones = alice.init('phones', 1)
        alicePhones[0].number = "555-1212"
        alicePhones[0].type = 'mobile'
        alice.employment.school = "MIT"

        bob = people[1]
        bob.id = 456
        bob.name = 'Bob'
        bob.email = 'bob@example.com'
        bobPhones = bob.init('phones', 2)
        bobPhones[0].number = "555-4567"
        bobPhones[0].type = 'home'
        bobPhones[1].number = "555-7654"
        bobPhones[1].type = 'work'
        bob.employment.unemployed = None

        capnp._write_packed_message_to_fd(fd, message)

    def printAddressBook(fd):
        # BUG FIX: read from the fd parameter instead of the global f.
        message = capnp._PackedFdMessageReader(fd)
        addressBook = message.get_root(addressbook.AddressBook)

        people = addressBook.people

        alice = people[0]
        assert alice.id == 123
        assert alice.name == 'Alice'
        assert alice.email == 'alice@example.com'
        alicePhones = alice.phones
        assert alicePhones[0].number == "555-1212"
        assert alicePhones[0].type == 'mobile'
        assert alice.employment.school == "MIT"

        bob = people[1]
        assert bob.id == 456
        assert bob.name == 'Bob'
        assert bob.email == 'bob@example.com'
        bobPhones = bob.phones
        assert bobPhones[0].number == "555-4567"
        assert bobPhones[0].type == 'home'
        assert bobPhones[1].number == "555-7654"
        assert bobPhones[1].type == 'work'
        assert bob.employment.unemployed == None

    with open('example', 'wb') as f:
        writeAddressBook(f.fileno())

    with open('example', 'rb') as f:
        printAddressBook(f.fileno())
def test_addressbook(addressbook):
    """Round-trip an AddressBook via the high-level new_message()/read() API.

    Fixes from the original: binary file modes for the binary serialized
    message, and context managers so the write handle is closed (and
    flushed) before the file is re-opened for reading.
    """
    def writeAddressBook(file):
        addresses = addressbook.AddressBook.new_message()
        people = addresses.init('people', 2)

        alice = people[0]
        alice.id = 123
        alice.name = 'Alice'
        alice.email = 'alice@example.com'
        alicePhones = alice.init('phones', 1)
        alicePhones[0].number = "555-1212"
        alicePhones[0].type = 'mobile'
        alice.employment.school = "MIT"

        bob = people[1]
        bob.id = 456
        bob.name = 'Bob'
        bob.email = 'bob@example.com'
        bobPhones = bob.init('phones', 2)
        bobPhones[0].number = "555-4567"
        bobPhones[0].type = 'home'
        bobPhones[1].number = "555-7654"
        bobPhones[1].type = 'work'
        bob.employment.unemployed = None

        addresses.write(file)

    def printAddressBook(file):
        addresses = addressbook.AddressBook.read(file)

        people = addresses.people

        alice = people[0]
        assert alice.id == 123
        assert alice.name == 'Alice'
        assert alice.email == 'alice@example.com'
        alicePhones = alice.phones
        assert alicePhones[0].number == "555-1212"
        assert alicePhones[0].type == 'mobile'
        assert alice.employment.school == "MIT"

        bob = people[1]
        assert bob.id == 456
        assert bob.name == 'Bob'
        assert bob.email == 'bob@example.com'
        bobPhones = bob.phones
        assert bobPhones[0].number == "555-4567"
        assert bobPhones[0].type == 'home'
        assert bobPhones[1].number == "555-7654"
        assert bobPhones[1].type == 'work'
        assert bob.employment.unemployed == None

    with open('example', 'wb') as f:
        writeAddressBook(f)

    with open('example', 'rb') as f:
        printAddressBook(f)
def test_addressbook_resizable(addressbook):
    """Round-trip an AddressBook built with init_resizable_list().

    Fixes from the original: binary file modes for the binary serialized
    message, and context managers so the write handle is closed before
    the file is re-opened for reading.
    """
    def writeAddressBook(file):
        addresses = addressbook.AddressBook.new_message()
        # Resizable lists are grown with add() and sealed with finish().
        people = addresses.init_resizable_list('people')

        alice = people.add()
        alice.id = 123
        alice.name = 'Alice'
        alice.email = 'alice@example.com'
        alicePhones = alice.init('phones', 1)
        alicePhones[0].number = "555-1212"
        alicePhones[0].type = 'mobile'
        alice.employment.school = "MIT"

        bob = people.add()
        bob.id = 456
        bob.name = 'Bob'
        bob.email = 'bob@example.com'
        bobPhones = bob.init('phones', 2)
        bobPhones[0].number = "555-4567"
        bobPhones[0].type = 'home'
        bobPhones[1].number = "555-7654"
        bobPhones[1].type = 'work'
        bob.employment.unemployed = None

        people.finish()
        addresses.write(file)

    def printAddressBook(file):
        addresses = addressbook.AddressBook.read(file)

        people = addresses.people

        alice = people[0]
        assert alice.id == 123
        assert alice.name == 'Alice'
        assert alice.email == 'alice@example.com'
        alicePhones = alice.phones
        assert alicePhones[0].number == "555-1212"
        assert alicePhones[0].type == 'mobile'
        assert alice.employment.school == "MIT"

        bob = people[1]
        assert bob.id == 456
        assert bob.name == 'Bob'
        assert bob.email == 'bob@example.com'
        bobPhones = bob.phones
        assert bobPhones[0].number == "555-4567"
        assert bobPhones[0].type == 'home'
        assert bobPhones[1].number == "555-7654"
        assert bobPhones[1].type == 'work'
        assert bob.employment.unemployed == None

    with open('example', 'wb') as f:
        writeAddressBook(f)

    with open('example', 'rb') as f:
        printAddressBook(f)
def test_addressbook_explicit_fields(addressbook):
    """Round-trip an AddressBook via the _set/_get/_init_by_field API.

    Fixes from the original: the two ``employment`` checks in the reader
    were bare comparisons with no ``assert`` (no-op expressions); binary
    file modes are used and the write handle is closed before reading.
    """
    def writeAddressBook(file):
        addresses = addressbook.AddressBook.new_message()
        address_fields = addressbook.AddressBook.schema.fields
        person_fields = addressbook.Person.schema.fields
        phone_fields = addressbook.Person.PhoneNumber.schema.fields
        people = addresses._init_by_field(address_fields['people'], 2)

        alice = people[0]
        alice._set_by_field(person_fields['id'], 123)
        alice._set_by_field(person_fields['name'], 'Alice')
        alice._set_by_field(person_fields['email'], 'alice@example.com')
        alicePhones = alice._init_by_field(person_fields['phones'], 1)
        alicePhones[0]._set_by_field(phone_fields['number'], "555-1212")
        alicePhones[0]._set_by_field(phone_fields['type'], 'mobile')
        employment = alice._get_by_field(person_fields['employment'])
        employment._set_by_field(
            addressbook.Person.Employment.schema.fields['school'], "MIT")

        bob = people[1]
        bob._set_by_field(person_fields['id'], 456)
        bob._set_by_field(person_fields['name'], 'Bob')
        bob._set_by_field(person_fields['email'], 'bob@example.com')
        bobPhones = bob._init_by_field(person_fields['phones'], 2)
        bobPhones[0]._set_by_field(phone_fields['number'], "555-4567")
        bobPhones[0]._set_by_field(phone_fields['type'], 'home')
        bobPhones[1]._set_by_field(phone_fields['number'], "555-7654")
        bobPhones[1]._set_by_field(phone_fields['type'], 'work')
        employment = bob._get_by_field(person_fields['employment'])
        employment._set_by_field(
            addressbook.Person.Employment.schema.fields['unemployed'], None)

        addresses.write(file)

    def printAddressBook(file):
        addresses = addressbook.AddressBook.read(file)
        address_fields = addressbook.AddressBook.schema.fields
        person_fields = addressbook.Person.schema.fields
        phone_fields = addressbook.Person.PhoneNumber.schema.fields
        people = addresses._get_by_field(address_fields['people'])

        alice = people[0]
        assert alice._get_by_field(person_fields['id']) == 123
        assert alice._get_by_field(person_fields['name']) == 'Alice'
        assert alice._get_by_field(person_fields['email']) == 'alice@example.com'
        alicePhones = alice._get_by_field(person_fields['phones'])
        assert alicePhones[0]._get_by_field(phone_fields['number']) == "555-1212"
        assert alicePhones[0]._get_by_field(phone_fields['type']) == 'mobile'
        employment = alice._get_by_field(person_fields['employment'])
        # BUG FIX: this comparison was missing its assert.
        assert employment._get_by_field(
            addressbook.Person.Employment.schema.fields['school']) == "MIT"

        bob = people[1]
        assert bob._get_by_field(person_fields['id']) == 456
        assert bob._get_by_field(person_fields['name']) == 'Bob'
        assert bob._get_by_field(person_fields['email']) == 'bob@example.com'
        bobPhones = bob._get_by_field(person_fields['phones'])
        assert bobPhones[0]._get_by_field(phone_fields['number']) == "555-4567"
        assert bobPhones[0]._get_by_field(phone_fields['type']) == 'home'
        assert bobPhones[1]._get_by_field(phone_fields['number']) == "555-7654"
        assert bobPhones[1]._get_by_field(phone_fields['type']) == 'work'
        employment = bob._get_by_field(person_fields['employment'])
        # BUG FIX: this comparison was missing its assert.
        assert employment._get_by_field(
            addressbook.Person.Employment.schema.fields['unemployed']) == None

    with open('example', 'wb') as f:
        writeAddressBook(f)

    with open('example', 'rb') as f:
        printAddressBook(f)
@pytest.fixture
def all_types():
    """Load and return the compiled all_types.capnp schema module."""
    return capnp.load(os.path.join(this_dir, 'all_types.capnp'))
# TODO: These tests should be extended to:
# - Read each field in Python and assert that it is equal to the expected value.
# - Build an identical message using Python code and compare it to the golden.
#
def init_all_types(builder):
    """Populate a TestAllTypes builder with one known value per field.

    The exact values here must stay in sync with check_all_types() and
    with the golden files (all-types.txt / .binary / .packed).
    """
    # Top-level scalar fields.
    builder.voidField = None
    builder.boolField = True
    builder.int8Field = -123
    builder.int16Field = -12345
    builder.int32Field = -12345678
    builder.int64Field = -123456789012345
    builder.uInt8Field = 234
    builder.uInt16Field = 45678
    builder.uInt32Field = 3456789012
    builder.uInt64Field = 12345678901234567890
    builder.float32Field = 1234.5
    builder.float64Field = -123e45
    builder.textField = "foo"
    builder.dataField = b"bar"

    # Nested struct field, with its own full set of values.
    subBuilder = builder.structField
    subBuilder.voidField = None
    subBuilder.boolField = True
    subBuilder.int8Field = -12
    subBuilder.int16Field = 3456
    subBuilder.int32Field = -78901234
    subBuilder.int64Field = 56789012345678
    subBuilder.uInt8Field = 90
    subBuilder.uInt16Field = 1234
    subBuilder.uInt32Field = 56789012
    subBuilder.uInt64Field = 345678901234567890
    subBuilder.float32Field = -1.25e-10
    subBuilder.float64Field = 345
    subBuilder.textField = "baz"
    subBuilder.dataField = b"qux"
    # Two levels of further nesting.
    subSubBuilder = subBuilder.structField
    subSubBuilder.textField = "nested"
    subSubBuilder.structField.textField = "really nested"
    subBuilder.enumField = "baz"

    # List fields on the nested struct, including extreme values at the
    # edges of each integer type's range.
    subBuilder.voidList = [None, None, None]
    subBuilder.boolList = [False, True, False, True, True]
    subBuilder.int8List = [12, -34, -0x80, 0x7f]
    subBuilder.int16List = [1234, -5678, -0x8000, 0x7fff]
    subBuilder.int32List = [12345678, -90123456, -0x80000000, 0x7fffffff]
    subBuilder.int64List = [123456789012345, -678901234567890, -0x8000000000000000, 0x7fffffffffffffff]
    subBuilder.uInt8List = [12, 34, 0, 0xff]
    subBuilder.uInt16List = [1234, 5678, 0, 0xffff]
    subBuilder.uInt32List = [12345678, 90123456, 0, 0xffffffff]
    subBuilder.uInt64List = [123456789012345, 678901234567890, 0, 0xffffffffffffffff]
    subBuilder.float32List = [0, 1234567, 1e37, -1e37, 1e-37, -1e-37]
    subBuilder.float64List = [0, 123456789012345, 1e306, -1e306, 1e-306, -1e-306]
    subBuilder.textList = ["quux", "corge", "grault"]
    subBuilder.dataList = [b"garply", b"waldo", b"fred"]
    listBuilder = subBuilder.init('structList', 3)
    listBuilder[0].textField = "x structlist 1"
    listBuilder[1].textField = "x structlist 2"
    listBuilder[2].textField = "x structlist 3"
    subBuilder.enumList = ["qux", "bar", "grault"]

    # Top-level list fields, including infinities and NaN for floats.
    builder.enumField = "corge"
    builder.init("voidList", 6)
    builder.boolList = [True, False, False, True]
    builder.int8List = [111, -111]
    builder.int16List = [11111, -11111]
    builder.int32List = [111111111, -111111111]
    builder.int64List = [1111111111111111111, -1111111111111111111]
    builder.uInt8List = [111, 222]
    builder.uInt16List = [33333, 44444]
    builder.uInt32List = [3333333333]
    builder.uInt64List = [11111111111111111111]
    builder.float32List = [5555.5, float("inf"), float("-inf"), float("nan")]
    builder.float64List = [7777.75, float("inf"), float("-inf"), float("nan")]
    builder.textList = ["plugh", "xyzzy", "thud"]
    builder.dataList = [b"oops", b"exhausted", b"rfc3092"]
    listBuilder = builder.init('structList', 3)
    listBuilder[0].textField = "structlist 1"
    listBuilder[1].textField = "structlist 2"
    listBuilder[2].textField = "structlist 3"
    builder.enumList = ["foo", "garply"]
def assert_almost(float1, float2):
    """Assert that two floats are equal to within ~0.001% relative error.

    BUG FIX: the original divided by ``float1``, raising
    ZeroDivisionError (instead of AssertionError) whenever float1 was 0
    and the values differed. Scale by the larger magnitude instead, which
    is also symmetric in the two arguments.
    """
    if float1 != float2:
        scale = max(abs(float1), abs(float2))
        assert abs(float1 - float2) / scale < 0.00001
def check_list(reader, expected):
    """Assert a capnp list reader equals ``expected`` element-wise.

    Float elements are compared with assert_almost(); everything else
    must compare exactly equal.
    """
    assert len(reader) == len(expected)
    for index, want in enumerate(expected):
        got = reader[index]
        if type(want) is float:
            assert_almost(got, want)
        else:
            assert got == want
def check_all_types(reader):
    """Assert that ``reader`` holds exactly the values init_all_types() sets.

    BUG FIX: two ``len(listReader) == N`` lines were bare comparisons
    with no ``assert`` (no-op expressions); the asserts are restored.
    (``== None`` / ``== True`` comparisons are kept as-is since capnp
    field getters may return equal-but-not-identical objects.)
    """
    # Top-level scalar fields.
    assert reader.voidField == None
    assert reader.boolField == True
    assert reader.int8Field == -123
    assert reader.int16Field == -12345
    assert reader.int32Field == -12345678
    assert reader.int64Field == -123456789012345
    assert reader.uInt8Field == 234
    assert reader.uInt16Field == 45678
    assert reader.uInt32Field == 3456789012
    assert reader.uInt64Field == 12345678901234567890
    assert reader.float32Field == 1234.5
    assert_almost(reader.float64Field, -123e45)
    assert reader.textField == "foo"
    assert reader.dataField == b"bar"

    # Nested struct field.
    subReader = reader.structField
    assert subReader.voidField == None
    assert subReader.boolField == True
    assert subReader.int8Field == -12
    assert subReader.int16Field == 3456
    assert subReader.int32Field == -78901234
    assert subReader.int64Field == 56789012345678
    assert subReader.uInt8Field == 90
    assert subReader.uInt16Field == 1234
    assert subReader.uInt32Field == 56789012
    assert subReader.uInt64Field == 345678901234567890
    assert_almost(subReader.float32Field, -1.25e-10)
    assert subReader.float64Field == 345
    assert subReader.textField == "baz"
    assert subReader.dataField == b"qux"
    subSubReader = subReader.structField
    assert subSubReader.textField == "nested"
    assert subSubReader.structField.textField == "really nested"
    assert subReader.enumField == "baz"
    check_list(subReader.voidList, [None, None, None])
    check_list(subReader.boolList, [False, True, False, True, True])
    check_list(subReader.int8List, [12, -34, -0x80, 0x7f])
    check_list(subReader.int16List, [1234, -5678, -0x8000, 0x7fff])
    check_list(subReader.int32List, [12345678, -90123456, -0x80000000, 0x7fffffff])
    check_list(subReader.int64List, [123456789012345, -678901234567890, -0x8000000000000000, 0x7fffffffffffffff])
    check_list(subReader.uInt8List, [12, 34, 0, 0xff])
    check_list(subReader.uInt16List, [1234, 5678, 0, 0xffff])
    check_list(subReader.uInt32List, [12345678, 90123456, 0, 0xffffffff])
    check_list(subReader.uInt64List, [123456789012345, 678901234567890, 0, 0xffffffffffffffff])
    check_list(subReader.float32List, [0.0, 1234567.0, 1e37, -1e37, 1e-37, -1e-37])
    check_list(subReader.float64List, [0.0, 123456789012345.0, 1e306, -1e306, 1e-306, -1e-306])
    check_list(subReader.textList, ["quux", "corge", "grault"])
    check_list(subReader.dataList, [b"garply", b"waldo", b"fred"])
    listReader = subReader.structList
    assert len(listReader) == 3
    assert listReader[0].textField == "x structlist 1"
    assert listReader[1].textField == "x structlist 2"
    assert listReader[2].textField == "x structlist 3"
    check_list(subReader.enumList, ["qux", "bar", "grault"])

    # Top-level list fields.
    assert reader.enumField == "corge"
    assert len(reader.voidList) == 6
    check_list(reader.boolList, [True, False, False, True])
    check_list(reader.int8List, [111, -111])
    check_list(reader.int16List, [11111, -11111])
    check_list(reader.int32List, [111111111, -111111111])
    check_list(reader.int64List, [1111111111111111111, -1111111111111111111])
    check_list(reader.uInt8List, [111, 222])
    check_list(reader.uInt16List, [33333, 44444])
    check_list(reader.uInt32List, [3333333333])
    check_list(reader.uInt64List, [11111111111111111111])
    # Float lists are checked element-by-element since NaN != NaN.
    listReader = reader.float32List
    assert len(listReader) == 4
    assert listReader[0] == 5555.5
    assert listReader[1] == float("inf")
    assert listReader[2] == -float("inf")
    assert math.isnan(listReader[3])
    listReader = reader.float64List
    # BUG FIX: this length check was missing its assert.
    assert len(listReader) == 4
    assert listReader[0] == 7777.75
    assert listReader[1] == float("inf")
    assert listReader[2] == -float("inf")
    assert math.isnan(listReader[3])
    check_list(reader.textList, ["plugh", "xyzzy", "thud"])
    check_list(reader.dataList, [b"oops", b"exhausted", b"rfc3092"])
    listReader = reader.structList
    # BUG FIX: this length check was missing its assert.
    assert len(listReader) == 3
    assert listReader[0].textField == "structlist 1"
    assert listReader[1].textField == "structlist 2"
    assert listReader[2].textField == "structlist 3"
    check_list(reader.enumList, ["foo", "garply"])
def test_build(all_types):
    """Building a message and stringifying it matches the golden text."""
    root = all_types.TestAllTypes.new_message()
    init_all_types(root)

    # Close the golden file instead of leaking the handle.
    with open(os.path.join(this_dir, 'all-types.txt'), 'r') as f:
        expectedText = f.read()
    assert str(root) + '\n' == expectedText
def test_build_first_segment_size(all_types):
    """An explicit first-segment size must not change the message contents."""
    # Read the golden text once (the original re-read it per case and
    # leaked every handle).
    with open(os.path.join(this_dir, 'all-types.txt'), 'r') as f:
        expectedText = f.read()

    # Try a tiny and a huge first-segment size; output must be identical.
    for first_segment_words in (1, 1024 * 1024):
        root = all_types.TestAllTypes.new_message(first_segment_words)
        init_all_types(root)
        assert str(root) + '\n' == expectedText
def test_binary_read(all_types):
    """Reading the golden binary file yields the expected message."""
    # The serialized message is binary data: open in binary mode (text
    # mode corrupts it on Python 3) and close the handle when done.
    with open(os.path.join(this_dir, 'all-types.binary'), 'rb') as f:
        root = all_types.TestAllTypes.read(f)
        check_all_types(root)

        with open(os.path.join(this_dir, 'all-types.txt'), 'r') as golden:
            expectedText = golden.read()
        assert str(root) + '\n' == expectedText

        # Test set_root().
        builder = capnp._MallocMessageBuilder()
        builder.set_root(root)
        check_all_types(builder.get_root(all_types.TestAllTypes))

        builder2 = capnp._MallocMessageBuilder()
        builder2.set_root(builder.get_root(all_types.TestAllTypes))
        check_all_types(builder2.get_root(all_types.TestAllTypes))
def test_packed_read(all_types):
    """Reading the golden packed file yields the expected message."""
    # Packed messages are binary data: open in binary mode and close the
    # handle when done.
    with open(os.path.join(this_dir, 'all-types.packed'), 'rb') as f:
        root = all_types.TestAllTypes.read_packed(f)
        check_all_types(root)

        with open(os.path.join(this_dir, 'all-types.txt'), 'r') as golden:
            expectedText = golden.read()
        assert str(root) + '\n' == expectedText
def test_binary_write(all_types):
    """A written message reads back with identical contents."""
    root = all_types.TestAllTypes.new_message()
    init_all_types(root)

    # Serialized messages are binary; use binary modes, and close the
    # write handle (flushing it) before reading the file back.
    with open('example', 'wb') as f:
        root.write(f)
    with open('example', 'rb') as f:
        check_all_types(all_types.TestAllTypes.read(f))
def test_packed_write(all_types):
    """Round-trip: packed-serialize a message to disk and read it back intact."""
    root = all_types.TestAllTypes.new_message()
    init_all_types(root)
    # Packed output is binary: use 'wb'/'rb' (text mode corrupts the stream)
    # and context managers so the handles are closed deterministically.
    with open('example', 'wb') as f:
        root.write_packed(f)
    with open('example', 'rb') as f:
        check_all_types(all_types.TestAllTypes.read_packed(f))
| |
# This is a small chunk of code from the skimage package. It is reproduced
# here because all we need is a couple of color conversion routines, and
# adding all of skimage as a dependency is really heavy.
# Copyright (C) 2019, the scikit-image team
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name of skimage nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# skimage/_shared/version_requirements.py:_check_version
# Copyright (c) 2013 The IPython Development Team
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# skimage/_shared/version_requirements.py:is_installed:
# Original Copyright (C) 2009-2011 Pierre Raybaut
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# skimage/external/tifffile:
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from scipy import linalg
from warnings import warn
def rgb2xyz(rgb):
    """Convert an image from sRGB to CIE XYZ.

    Parameters
    ----------
    rgb : (..., 3) array_like
        Image in RGB format; the last axis holds the color channels.

    Returns
    -------
    out : (..., 3) ndarray
        Image in XYZ format, same shape as the input.

    Raises
    ------
    ValueError
        If `rgb` is not at least 2-D with a final axis of length 3.

    Notes
    -----
    Follows the easyrgb.com algorithm, except values are not multiplied or
    divided by 100. Input is assumed to be sRGB-encoded.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
    """
    srgb = _prepare_colorarray(rgb).copy()
    # Undo the sRGB transfer function: gamma curve above the threshold,
    # linear segment below it.
    linear_region = srgb <= 0.04045
    srgb[~linear_region] = np.power((srgb[~linear_region] + 0.055) / 1.055, 2.4)
    srgb[linear_region] /= 12.92
    return srgb @ xyz_from_rgb.T.astype(srgb.dtype)
def lab2xyz(lab, illuminant="D65", observer="2"):
    """Convert an image from CIE-LAB to CIE XYZ.

    Parameters
    ----------
    lab : array_like
        Image in Lab format, a 3-D array of shape ``(.., .., 3)``.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        Name of the illuminant (case insensitive).
    observer : {"2", "10"}, optional
        Aperture angle of the observer.

    Returns
    -------
    out : ndarray
        Image in XYZ format, a 3-D array of shape ``(.., .., 3)``.

    Raises
    ------
    ValueError
        If `lab` is not a 3-D array of shape ``(.., .., 3)``, or the
        illuminant/observer combination is unsupported.

    Warns
    -----
    UserWarning
        If any pixel is invalid (Z < 0); such pixels are clamped to 0.

    References
    ----------
    .. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
    .. [2] https://en.wikipedia.org/wiki/Lab_color_space
    """
    arr = _prepare_colorarray(lab).copy()
    L = arr[:, :, 0]
    a = arr[:, :, 1]
    b = arr[:, :, 2]
    # Invert the Lab encoding to the intermediate f(X), f(Y), f(Z) values.
    y = (L + 16.) / 116.
    x = y + (a / 500.)
    z = y - (b / 200.)
    out_of_range = z < 0
    if out_of_range.any():
        invalid = np.nonzero(out_of_range)
        warn('Color data out of range: Z < 0 in %s pixels' % invalid[0].size,
             stacklevel=2)
        z[invalid] = 0
    out = np.dstack([x, y, z])
    # Invert the cube-root compression (linear segment below the threshold).
    cube_region = out > 0.2068966
    out[cube_region] = np.power(out[cube_region], 3.)
    out[~cube_region] = (out[~cube_region] - 16.0 / 116.) / 7.787
    # Rescale to the reference white of the requested illuminant.
    out *= get_xyz_coords(illuminant, observer)
    return out
def xyz2lab(xyz, illuminant="D65", observer="2"):
    """Convert an image from CIE XYZ to CIE-LAB.

    Parameters
    ----------
    xyz : array_like
        Image in XYZ format, a 3- or 4-D array of shape
        ``(.., ..,[ ..,] 3)``.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        Name of the illuminant (case insensitive).
    observer : {"2", "10"}, optional
        Aperture angle of the observer.

    Returns
    -------
    out : ndarray
        Image in CIE-LAB format, same shape as the input.

    Raises
    ------
    ValueError
        If `xyz` has an unsupported shape, or the illuminant/observer
        combination is unknown.

    References
    ----------
    .. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
    .. [2] https://en.wikipedia.org/wiki/Lab_color_space

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import rgb2xyz, xyz2lab
    >>> img = data.astronaut()
    >>> img_lab = xyz2lab(rgb2xyz(img))
    """
    arr = _prepare_colorarray(xyz)
    # Normalize by the reference white of the requested illuminant/observer.
    arr = arr / get_xyz_coords(illuminant, observer)
    # Nonlinear compression: cube root above threshold, linear below.
    cube_region = arr > 0.008856
    arr[cube_region] = np.cbrt(arr[cube_region])
    arr[~cube_region] = 7.787 * arr[~cube_region] + 16. / 116.
    x = arr[..., 0]
    y = arr[..., 1]
    z = arr[..., 2]
    # Linear combination into the L, a, b channels.
    L = 116. * y - 16.
    a = 500.0 * (x - y)
    b = 200.0 * (y - z)
    return np.concatenate([ch[..., np.newaxis] for ch in (L, a, b)], axis=-1)
def lab2rgb(lab, illuminant="D65", observer="2"):
    """Convert an image from CIE-LAB to sRGB, via XYZ.

    Parameters
    ----------
    lab : array_like
        Image in Lab format, a 3-D array of shape ``(.., .., 3)``.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        Name of the illuminant (case insensitive).
    observer : {"2", "10"}, optional
        Aperture angle of the observer.

    Returns
    -------
    out : ndarray
        Image in RGB format, a 3-D array of shape ``(.., .., 3)``.

    Raises
    ------
    ValueError
        If `lab` is not a 3-D array of shape ``(.., .., 3)``.

    Notes
    -----
    Composed from :func:`lab2xyz` and :func:`xyz2rgb`.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
    """
    xyz = lab2xyz(lab, illuminant, observer)
    return xyz2rgb(xyz)
def rgb2lab(rgb, illuminant="D65", observer="2"):
    """Convert an image from sRGB to CIE-LAB, via XYZ.

    Parameters
    ----------
    rgb : array_like
        Image in RGB format, a 3- or 4-D array of shape
        ``(.., ..,[ ..,] 3)``.
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        Name of the illuminant (case insensitive).
    observer : {"2", "10"}, optional
        Aperture angle of the observer.

    Returns
    -------
    out : ndarray
        Image in Lab format, same shape as the input.

    Raises
    ------
    ValueError
        If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.

    Notes
    -----
    Composed from :func:`rgb2xyz` and :func:`xyz2lab`.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
    """
    xyz = rgb2xyz(rgb)
    return xyz2lab(xyz, illuminant, observer)
def lch2lab(lch):
    """Convert an image from CIE-LCH to CIE-LAB.

    LCH is the cylindrical representation of the Cartesian LAB colorspace.

    Parameters
    ----------
    lch : array_like
        N-D image in CIE-LCH format. The last dimension must hold at least
        3 elements (``L``, ``C``, ``H``); any further elements are copied
        through unchanged.

    Returns
    -------
    out : ndarray
        Image in LAB format, same shape as the input.

    Raises
    ------
    ValueError
        If `lch` has fewer than 3 color channels.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import rgb2lab, lch2lab
    >>> img = data.astronaut()
    >>> img_lab = rgb2lab(img)
    >>> img_lch = lab2lch(img_lab)
    >>> img_lab2 = lch2lab(img_lch)
    """
    out = _prepare_lab_array(lch)
    chroma = out[..., 1]
    hue = out[..., 2]
    # Polar -> Cartesian. Compute both channels before writing: `chroma`
    # and `hue` are views into `out`, so in-place writes must come last.
    a_channel = chroma * np.cos(hue)
    b_channel = chroma * np.sin(hue)
    out[..., 1] = a_channel
    out[..., 2] = b_channel
    return out
def _prepare_lab_array(arr):
    """Validate input for lab2lch / lch2lab and return a float copy.

    The last dimension must hold at least 3 elements; the returned array
    is always a fresh floating-point copy.
    """
    arr = np.asarray(arr)
    if arr.shape[-1] < 3:
        raise ValueError('Input array has less than 3 color channels')
    return img_as_float(arr, force_copy=True)
def get_xyz_coords(illuminant, observer):
    """Get the XYZ coordinates of the given illuminant and observer [1]_.

    Parameters
    ----------
    illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
        The name of the illuminant (the function is NOT case sensitive).
    observer : {"2", "10"}, optional
        The aperture angle of the observer.

    Returns
    -------
    (x, y, z) : tuple
        A tuple with 3 elements containing the XYZ coordinates of the given
        illuminant.

    Raises
    ------
    ValueError
        If either the illuminant or the observer angle are not supported or
        unknown.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
    """
    illuminant = illuminant.upper()
    try:
        return illuminants[illuminant][observer]
    except KeyError:
        # The original built this message with a backslash continuation
        # *inside* the string literal, embedding source indentation into the
        # user-facing text. Use implicit concatenation instead, and suppress
        # the KeyError context since the message already says everything.
        raise ValueError("Unknown illuminant/observer combination "
                         "('{0}', '{1}')".format(illuminant,
                                                 observer)) from None
def _prepare_colorarray(arr):
    """Check the shape of the array and convert it to floating point.

    Parameters
    ----------
    arr : array_like
        Input image. Must be 3-D or 4-D with a final dimension of size 3.

    Returns
    -------
    ndarray of float
        The validated image as a floating-point array.

    Raises
    ------
    ValueError
        If `arr` is not 3-D or 4-D, or its last dimension is not 3.
    """
    arr = np.asanyarray(arr)
    if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
        # Fixed wording of the original message, which read "must be have a
        # shape == (.., ..,[ ..,] 3))" — a grammar slip plus an unbalanced
        # closing parenthesis.
        msg = ("the input array must have a shape == (.., ..,[ ..,] 3), "
               "got (" + (", ".join(map(str, arr.shape))) + ")")
        raise ValueError(msg)
    return img_as_float(arr)
def xyz2rgb(xyz):
    """Convert an image from CIE XYZ to sRGB.

    Parameters
    ----------
    xyz : array_like
        Image in XYZ format, a 3-D array of shape ``(.., .., 3)``.

    Returns
    -------
    out : ndarray
        Image in RGB format, a 3-D array of shape ``(.., .., 3)``, with
        values clipped to [0, 1].

    Raises
    ------
    ValueError
        If `xyz` is not a 3-D array of shape ``(.., .., 3)``.

    Notes
    -----
    Follows the easyrgb.com algorithm, except values are not multiplied or
    divided by 100. Output is sRGB-encoded.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.color import rgb2xyz, xyz2rgb
    >>> img = data.astronaut()
    >>> img_rgb = xyz2rgb(rgb2xyz(img))
    """
    arr = _convert(rgb_from_xyz, xyz)
    # Apply the sRGB transfer function: gamma companding above the
    # threshold, linear segment below it.
    gamma_region = arr > 0.0031308
    arr[gamma_region] = 1.055 * np.power(arr[gamma_region], 1 / 2.4) - 0.055
    arr[~gamma_region] *= 12.92
    return np.clip(arr, 0, 1, out=arr)
def _convert(matrix, arr):
    """Apply a 3x3 color-conversion matrix to an image array.

    Parameters
    ----------
    matrix : array_like
        The 3x3 conversion matrix.
    arr : array_like
        The input image; last axis holds the 3 color channels.

    Returns
    -------
    out : ndarray, dtype=float
        The converted image.
    """
    image = _prepare_colorarray(arr)
    coeffs = matrix.T.copy()
    return image @ coeffs
# ---------------------------------------------------------------
# Primaries for the coordinate systems
# ---------------------------------------------------------------
# Wavelengths (nm) of the CIE RGB primaries.
cie_primaries = np.array([700, 546.1, 435.8])
# Stiles-Burch primaries — presumably derived from wavenumbers
# (1e5/155 ≈ 645 nm etc.); TODO confirm against the original source.
sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5
# ---------------------------------------------------------------
# Matrices that define conversion between different color spaces
# ---------------------------------------------------------------
# From sRGB specification
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
                         [0.212671, 0.715160, 0.072169],
                         [0.019334, 0.119193, 0.950227]])
# Inverse transform: XYZ -> linear sRGB.
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From https://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
                            [0.17697, 0.81240, 0.01063],
                            [0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = rgbcie_from_xyz @ xyz_from_rgb
rgb_from_rgbcie = rgb_from_xyz @ xyz_from_rgbcie
# Luma weights in the first row; the zero rows make a matrix multiply
# produce a grayscale image with empty chroma channels.
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
                          [0, 0, 0],
                          [0, 0, 0]])
yuv_from_rgb = np.array([[ 0.299     ,  0.587     ,  0.114      ],
                         [-0.14714119, -0.28886916,  0.43601035 ],
                         [ 0.61497538, -0.51496512, -0.10001026 ]])
rgb_from_yuv = linalg.inv(yuv_from_rgb)
yiq_from_rgb = np.array([[0.299     ,  0.587     ,  0.114      ],
                         [0.59590059, -0.27455667, -0.32134392],
                         [0.21153661, -0.52273617,  0.31119955]])
rgb_from_yiq = linalg.inv(yiq_from_rgb)
ypbpr_from_rgb = np.array([[ 0.299   ,  0.587   ,  0.114   ],
                           [-0.168736, -0.331264,  0.5     ],
                           [ 0.5     , -0.418688, -0.081312]])
rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)
ycbcr_from_rgb = np.array([[ 65.481, 128.553,  24.966],
                           [-37.797, -74.203, 112.0  ],
                           [112.0  , -93.786, -18.214]])
rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)
ydbdr_from_rgb = np.array([[ 0.299,  0.587,  0.114],
                           [-0.45 , -0.883,  1.333],
                           [-1.333,  1.116,  0.217]])
rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb)
# CIE LAB constants for Observer=2A, Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1., 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
#   illuminant[I]['2'] corresponds to the XYZ coordinates for the 2 degree
#   field of view.
#
#   illuminant[I]['10'] corresponds to the XYZ coordinates for the 10 degree
#   field of view.
#
# The XYZ coordinates are calculated from [1], using the formula:
#
#   X = x * ( Y / y )
#   Y = Y
#   Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
illuminants = \
    {"A": {'2': (1.098466069456375, 1, 0.3558228003436005),
           '10': (1.111420406956693, 1, 0.3519978321919493)},
     "D50": {'2': (0.9642119944211994, 1, 0.8251882845188288),
             '10': (0.9672062750333777, 1, 0.8142801513128616)},
     "D55": {'2': (0.956797052643698, 1, 0.9214805860173273),
             '10': (0.9579665682254781, 1, 0.9092525159847462)},
     "D65": {'2': (0.95047, 1., 1.08883),  # This was: `lab_ref_white`
             '10': (0.94809667673716, 1, 1.0730513595166162)},
     "D75": {'2': (0.9497220898840717, 1, 1.226393520724154),
             '10': (0.9441713925645873, 1, 1.2064272211720228)},
     "E": {'2': (1.0, 1.0, 1.0),
           '10': (1.0, 1.0, 1.0)}}
# Public names exported from this vendored dtype-conversion code.
__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float',
           #'img_as_int', 'img_as_uint', 'img_as_ubyte',
           #'img_as_bool',
           'dtype_limits']
# For integers Numpy uses `_integer_types` basis internally, and builds a leaky
# `np.XintYY` abstraction on top of it. This leads to situations when, for
# example, there are two np.Xint64 dtypes with the same attributes but
# different object references. In order to avoid any potential issues,
# we use the basis dtypes here. For more information, see:
# - https://github.com/scikit-image/scikit-image/issues/3043
# For convenience, for these dtypes we indicate also the possible bit depths
# (some of them are platform specific). For the details, see:
# http://www.unix.org/whitepapers/64bit.html
_integer_types = (np.byte, np.ubyte,          # 8 bits
                  np.short, np.ushort,        # 16 bits
                  np.intc, np.uintc,          # 16 or 32 or 64 bits
                  np.int_, np.uint,           # 32 or 64 bits
                  np.longlong, np.ulonglong)  # 64 bits
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max)
                   for t in _integer_types}
# Intensity range per supported dtype; floats are assumed normalized to
# [-1, 1]. NOTE: the deprecated alias `np.bool8` was removed here — it is
# the same object as `np.bool_` (so the dict entry was a duplicate key) and
# the alias no longer exists in NumPy >= 2.0, where it raised at import.
dtype_range = {np.bool_: (False, True),
               np.float16: (-1, 1),
               np.float32: (-1, 1),
               np.float64: (-1, 1)}
dtype_range.update(_integer_ranges)
_supported_types = list(dtype_range.keys())
def dtype_limits(image, clip_negative=False):
    """Return the (min, max) intensity limits of ``image``'s dtype.

    Parameters
    ----------
    image : ndarray
        Input image.
    clip_negative : bool, optional
        When True, report 0 as the minimum even for dtypes whose range
        extends below zero.

    Returns
    -------
    imin, imax : tuple
        Lower and upper intensity limits.
    """
    lower, upper = dtype_range[image.dtype.type]
    if clip_negative:
        return 0, upper
    return lower, upper
def _dtype_itemsize(itemsize, *dtypes):
    """Return the first of `dtypes` whose itemsize is >= `itemsize`.

    Parameters
    ----------
    itemsize : int
        Minimum element size, in bytes.
    *dtypes :
        Candidates accepted by `np.dtype`, tried in order.

    Returns
    -------
    dtype : data type object
        First of `dtypes` with itemsize greater than or equal to `itemsize`.

    Raises
    ------
    StopIteration
        If no candidate is large enough.
    """
    # NOTE: the original docstring said "greater than", but the comparison
    # is (and must be) >= so an exact-size candidate is accepted.
    return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
def _dtype_bits(kind, bits, itemsize=1):
    """Return the smallest dtype of `kind` that can store a `bits`-wide
    unsigned int.

    Parameters
    ----------
    kind : str
        Data type kind (e.g. 'u' or 'i').
    bits : int
        Desired number of bits.
    itemsize : int
        Smallest element size (bytes) to consider.

    Returns
    -------
    dtype : data type object
        Data type of `kind` wide enough for a `bits`-bit unsigned value.
    """
    # A signed type needs one extra bit for the sign, hence the strict '<'
    # unless the kind is unsigned and the width matches exactly.
    candidates = (itemsize, 2, 4, 8)
    width = next(size for size in candidates
                 if bits < size * 8 or (bits == size * 8 and kind == 'u'))
    return np.dtype('{}{}'.format(kind, width))
def _scale(a, n, m, copy=True):
    """Scale an array of unsigned/positive integers from `n` to `m` bits.
    Numbers can be represented exactly only if `m` is a multiple of `n`.
    Parameters
    ----------
    a : ndarray
        Input image array.
    n : int
        Number of bits currently used to encode the values in `a`.
    m : int
        Desired number of bits to encode the values in `out`.
    copy : bool, optional
        If True, allocates and returns new array. Otherwise, modifies
        `a` in place.
    Returns
    -------
    out : array
        Output image array. Has the same kind as `a`.
    """
    kind = a.dtype.kind
    # Case 1: target is narrower but all values already fit — downcast
    # without rescaling, warning the caller about it.
    if n > m and a.max() < 2 ** m:
        # Bit counts rounded up to even only for the warning text;
        # NOTE(review): this can name widths like 12 that are not real
        # dtype widths — presumably acceptable for a diagnostic.
        mnew = int(np.ceil(m / 2) * 2)
        if mnew > m:
            dtype = "int{}".format(mnew)
        else:
            dtype = "uint{}".format(mnew)
        n = int(np.ceil(n / 2) * 2)
        warn("Downcasting {} to {} without scaling because max "
             "value {} fits in {}".format(a.dtype, dtype, a.max(), dtype),
             stacklevel=3)
        return a.astype(_dtype_bits(kind, m))
    elif n == m:
        # Case 2: same width — nothing to do beyond honoring `copy`.
        return a.copy() if copy else a
    elif n > m:
        # Case 3: downscale with precision loss (floor division by the
        # ratio of the two ranges).
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
                            casting='unsafe')
            return b
        else:
            a //= 2**(n - m)
            return a
    elif m % n == 0:
        # Case 4: exact upscale to a multiple of `n` bits — multiply by the
        # exact integer ratio (2**m - 1) / (2**n - 1).
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, m))
            np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
            return b
        else:
            a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
            a *= (2**m - 1) // (2**n - 1)
            return a
    else:
        # Case 5: upscale to `o`, the next multiple of `n` bits above `m`,
        # then downscale to `m` with precision loss.
        o = (m // n + 1) * n
        if copy:
            b = np.empty(a.shape, _dtype_bits(kind, o))
            np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
            b //= 2**(o - m)
            return b
        else:
            a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
            a *= (2**o - 1) // (2**n - 1)
            a //= 2**(o - m)
            return a
def convert(image, dtype, force_copy=False, uniform=False):
    """
    Convert an image to the requested data-type.
    Warnings are issued in case of precision loss, or when negative values
    are clipped during conversion to unsigned integer types (sign loss).
    Floating point values are expected to be normalized and will be clipped
    to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
    signed integers respectively.
    Numbers are not shifted to the negative side when converting from
    unsigned to signed integer types. Negative values will be clipped when
    converting to unsigned integers.
    Parameters
    ----------
    image : ndarray
        Input image.
    dtype : dtype
        Target data-type.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.
    uniform : bool, optional
        Uniformly quantize the floating point range to the integer range.
        By default (uniform=False) floating point values are scaled and
        rounded to the nearest integers, which minimizes back and forth
        conversion errors.
    .. versionchanged :: 0.15
        ``convert`` no longer warns about possible precision or sign
        information loss. See discussions on these warnings at:
        https://github.com/scikit-image/scikit-image/issues/2602
        https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
        https://github.com/scikit-image/scikit-image/pull/3575
    References
    ----------
    .. [1] DirectX data conversion rules.
           https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
    .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
           pp 7-8. Khronos Group, 2010.
    .. [3] Proper treatment of pixels as integers. A.W. Paeth.
           In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
    .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
           pp 47-57. Morgan Kaufmann, 1998.
    """
    image = np.asarray(image)
    dtypeobj_in = image.dtype
    # `np.floating` as the target means "any float": standardize on float64.
    if dtype is np.floating:
        dtypeobj_out = np.dtype("float64")
    else:
        dtypeobj_out = np.dtype(dtype)
    dtype_in = dtypeobj_in.type
    dtype_out = dtypeobj_out.type
    kind_in = dtypeobj_in.kind
    kind_out = dtypeobj_out.kind
    itemsize_in = dtypeobj_in.itemsize
    itemsize_out = dtypeobj_out.itemsize
    # Below, we do an `issubdtype` check.  Its purpose is to find out
    # whether we can get away without doing any image conversion.  This happens
    # when:
    #
    # - the output and input dtypes are the same or
    # - when the output is specified as a type, and the input dtype
    #   is a subclass of that type (e.g. `np.floating` will allow
    #   `float32` and `float64` arrays through)
    # NOTE(review): `np.obj2sctype` was removed in NumPy 2.0 — confirm the
    # supported NumPy range for this vendored code.
    if np.issubdtype(dtype_in, np.obj2sctype(dtype)):
        if force_copy:
            image = image.copy()
        return image
    if not (dtype_in in _supported_types and dtype_out in _supported_types):
        raise ValueError("Can not convert from {} to {}."
                         .format(dtypeobj_in, dtypeobj_out))
    if kind_in in 'ui':
        imin_in = np.iinfo(dtype_in).min
        imax_in = np.iinfo(dtype_in).max
    if kind_out in 'ui':
        imin_out = np.iinfo(dtype_out).min
        imax_out = np.iinfo(dtype_out).max
    # any -> binary: threshold at the midpoint of the input range.
    if kind_out == 'b':
        return image > dtype_in(dtype_range[dtype_in][1] / 2)
    # binary -> any: True maps to the dtype's maximum (1.0 for floats).
    if kind_in == 'b':
        result = image.astype(dtype_out)
        if kind_out != 'f':
            result *= dtype_out(dtype_range[dtype_out][1])
        return result
    # float -> any
    if kind_in == 'f':
        if kind_out == 'f':
            # float -> float: direct cast, no rescaling.
            return image.astype(dtype_out)
        if np.min(image) < -1.0 or np.max(image) > 1.0:
            raise ValueError("Images of type float must be between -1 and 1.")
        # floating point -> integer
        # use float type that can represent output integer type
        computation_type = _dtype_itemsize(itemsize_out, dtype_in,
                                           np.float32, np.float64)
        if not uniform:
            # Round-to-nearest scaling (minimizes round-trip error).
            if kind_out == 'u':
                image_out = np.multiply(image, imax_out,
                                        dtype=computation_type)
            else:
                image_out = np.multiply(image, (imax_out - imin_out) / 2,
                                        dtype=computation_type)
                image_out -= 1.0 / 2.
            np.rint(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        elif kind_out == 'u':
            # Uniform quantization into [0, imax_out].
            image_out = np.multiply(image, imax_out + 1,
                                    dtype=computation_type)
            np.clip(image_out, 0, imax_out, out=image_out)
        else:
            # Uniform quantization into the signed range.
            image_out = np.multiply(image, (imax_out - imin_out + 1.0) / 2.0,
                                    dtype=computation_type)
            np.floor(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        return image_out.astype(dtype_out)
    # signed/unsigned int -> float
    if kind_out == 'f':
        # use float type that can exactly represent input integers
        computation_type = _dtype_itemsize(itemsize_in, dtype_out,
                                           np.float32, np.float64)
        if kind_in == 'u':
            # using np.divide or np.multiply doesn't copy the data
            # until the computation time
            image = np.multiply(image, 1. / imax_in,
                                dtype=computation_type)
            # DirectX uses this conversion also for signed ints
            # if imin_in:
            #     np.maximum(image, -1.0, out=image)
        else:
            # Shift by half a step so the signed range maps onto [-1, 1].
            image = np.add(image, 0.5, dtype=computation_type)
            image *= 2 / (imax_in - imin_in)
        return np.asarray(image, dtype_out)
    # unsigned int -> signed/unsigned int
    if kind_in == 'u':
        if kind_out == 'i':
            # unsigned int -> signed int: scale into one fewer bit, then
            # reinterpret the buffer as signed.
            image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
            return image.view(dtype_out)
        else:
            # unsigned int -> unsigned int
            return _scale(image, 8 * itemsize_in, 8 * itemsize_out)
    # signed int -> unsigned int: scale, then clamp negatives to 0.
    if kind_out == 'u':
        image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
        result = np.empty(image.shape, dtype_out)
        np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
        return result
    # signed int -> signed int
    if itemsize_in > itemsize_out:
        return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)
    # Widening signed conversion: shift to unsigned, scale, shift back.
    image = image.astype(_dtype_bits('i', itemsize_out * 8))
    image -= imin_in
    image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
    image += imin_out
    return image.astype(dtype_out)
def img_as_float32(image, force_copy=False):
    """Convert an image to single-precision (32-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float32
        Output image.

    Notes
    -----
    Unsigned inputs are mapped into [0.0, 1.0] and signed inputs into
    [-1.0, 1.0]. Inputs that are already floating point are passed through
    with their values unmodified, even outside those ranges.
    """
    return convert(image, np.float32, force_copy)
def img_as_float64(image, force_copy=False):
    """Convert an image to double-precision (64-bit) floating point format.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float64
        Output image.

    Notes
    -----
    Unsigned inputs are mapped into [0.0, 1.0] and signed inputs into
    [-1.0, 1.0]. Inputs that are already floating point are passed through
    with their values unmodified, even outside those ranges.
    """
    return convert(image, np.float64, force_copy)
def img_as_float(image, force_copy=False):
    """Convert an image to floating point format.

    Like `img_as_float64`, except lower-precision floating point inputs
    keep their precision instead of being widened to float64.

    Parameters
    ----------
    image : ndarray
        Input image.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.

    Returns
    -------
    out : ndarray of float
        Output image.

    Notes
    -----
    Unsigned inputs are mapped into [0.0, 1.0] and signed inputs into
    [-1.0, 1.0]. Inputs that are already floating point are passed through
    with their values unmodified, even outside those ranges.
    """
    # np.floating lets convert() accept any float dtype unchanged.
    return convert(image, np.floating, force_copy)
| |
"""
Statistical functions and tests, following scipy.stats.
Some differences
- We don't handle missing values at all
"""
# This is lightly adapted from scipy.stats 0.19
# https://github.com/scipy/scipy/blob/v0.19.0/scipy/stats/stats.py
# The original copyright notice follows:
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
import math
import numpy as np
import dask.array as da
from dask import delayed
from dask.array.ufunc import wrap_elemwise
from dask.utils import derived_from
try:
import scipy.stats
except ImportError as e:
raise ImportError("`dask.array.stats` requires `scipy` to be installed.") from e
from scipy import special
from scipy.stats import distributions
from scipy.stats.stats import (
F_onewayResult,
KurtosistestResult,
NormaltestResult,
Power_divergenceResult,
SkewtestResult,
Ttest_1sampResult,
Ttest_indResult,
Ttest_relResult,
)
# Public API: dask-backed counterparts of the matching scipy.stats tests.
__all__ = [
    "ttest_ind",
    "ttest_1samp",
    "ttest_rel",
    "chisquare",
    "power_divergence",
    "skew",
    "skewtest",
    "kurtosis",
    "kurtosistest",
    "normaltest",
    "f_oneway",
    "moment",
]
# -----------------
# Statistical Tests
# -----------------
@derived_from(scipy.stats)
def ttest_ind(a, b, axis=0, equal_var=True):
    """Two-sample t-test on dask arrays (no NaN handling)."""
    # Sample sizes along the test axis and unbiased variances, computed
    # lazily with dask (XXX: np -> da relative to the scipy original).
    size_a = a.shape[axis]
    size_b = b.shape[axis]
    var_a = da.var(a, axis, ddof=1)
    var_b = da.var(b, axis, ddof=1)
    # Pooled (Student) or Welch denominator, depending on equal_var.
    denom_fn = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_fn(var_a, size_a, var_b, size_b)
    t, prob = _ttest_ind_from_stats(da.mean(a, axis), da.mean(b, axis), denom, df)
    # Wrap lazily in the scipy result namedtuple.
    return delayed(Ttest_indResult, nout=2)(t, prob)
@derived_from(scipy.stats)
def ttest_1samp(a, popmean, axis=0, nan_policy="propagate"):
    """One-sample t-test on a dask array; only nan_policy='propagate' works."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    num = a.shape[axis]
    df = num - 1
    # Difference of the sample mean from the hypothesized mean, over the
    # standard error of the mean.
    diff = da.mean(a, axis) - popmean
    stderr = da.sqrt(da.var(a, axis, ddof=1) / float(num))
    with np.errstate(divide="ignore", invalid="ignore"):
        t = da.divide(diff, stderr)
    t, prob = _ttest_finish(df, t)
    return delayed(Ttest_1sampResult, nout=2)(t, prob)
@derived_from(scipy.stats)
def ttest_rel(a, b, axis=0, nan_policy="propagate"):
    """Paired t-test on dask arrays; only nan_policy='propagate' works."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    num = a.shape[axis]
    df = float(num - 1)
    # Work on the per-pair differences, promoted to float64 for stability.
    diff = (a - b).astype(np.float64)
    mean_diff = da.mean(diff, axis)
    denom = da.sqrt(da.var(diff, axis, ddof=1) / float(num))
    with np.errstate(divide="ignore", invalid="ignore"):
        t = da.divide(mean_diff, denom)
    t, prob = _ttest_finish(df, t)
    return delayed(Ttest_relResult, nout=2)(t, prob)
@derived_from(scipy.stats)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """Chi-square test: power divergence with the Pearson lambda."""
    return power_divergence(
        f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson"
    )
@derived_from(scipy.stats)
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    # Resolve `lambda_`: strings are translated via scipy's private
    # name -> exponent table; None defaults to 1 (Pearson).
    if isinstance(lambda_, str):
        # TODO: public api
        if lambda_ not in scipy.stats.stats._power_div_lambda_names:
            names = repr(list(scipy.stats.stats._power_div_lambda_names.keys()))[1:-1]
            raise ValueError(
                f"invalid string for lambda_: {lambda_!r}. "
                f"Valid strings are {names}"
            )
        lambda_ = scipy.stats.stats._power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1
    # Default expected frequencies: the mean of the observed counts along
    # `axis` (the commented-out line is scipy's eager coercion, skipped
    # here to keep everything lazy).
    if f_exp is not None:
        # f_exp = np.atleast_1d(np.asanyarray(f_exp))
        pass
    else:
        f_exp = f_obs.mean(axis=axis, keepdims=True)
    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic. We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp) ** 2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * _xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * _xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp) ** lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)
    stat = terms.sum(axis=axis)
    # p-value from the chi-squared survival function with
    # (num_obs - 1 - ddof) degrees of freedom, evaluated lazily.
    num_obs = _count(terms, axis=axis)
    # ddof = asarray(ddof)
    p = delayed(distributions.chi2.sf)(stat, num_obs - 1 - ddof)
    return delayed(Power_divergenceResult, nout=2)(stat, p)
@derived_from(scipy.stats)
def skew(a, axis=0, bias=True, nan_policy="propagate"):
    # Only scipy's default NaN handling is supported.
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = a.shape[axis]  # noqa; for bias
    # Sample skewness g1 = m3 / m2**1.5 from the 2nd and 3rd central moments.
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    zero = m2 == 0
    # Zero-variance slices get skewness 0 instead of dividing by zero.
    vals = da.where(~zero, m3 / m2**1.5, 0.0)
    # vals = da.where(~zero, (m2, m3),
    #                 lambda m2, m3: m3 / m2**1.5,
    #                 0.)
    if not bias:
        # Need a version of np.place
        raise NotImplementedError("bias=False is not implemented.")
    if vals.ndim == 0:
        # TODO: scalar, min is a workaround
        return vals.min()
    return vals
@derived_from(scipy.stats)
def skewtest(a, axis=0, nan_policy="propagate"):
    # Only scipy's default NaN handling is supported.
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    b2 = skew(a, axis)
    n = float(a.shape[axis])
    # The normal approximation used below is not valid for small samples.
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n)
        )
    # Transform the sample skewness into an approximately standard normal
    # statistic (same constants as the scipy implementation).
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (
        3.0
        * (n**2 + 27 * n - 70)
        * (n + 1)
        * (n + 3)
        / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))
    )
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # Guard against log(0) when the skewness is exactly zero.
    y = np.where(y == 0, 1, y)
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha) ** 2 + 1))
    # Two-sided p-value from the standard normal survival function.
    return delayed(SkewtestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))
@derived_from(scipy.stats)
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy="propagate"):
    # Only scipy's default NaN handling is supported.
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = a.shape[axis]  # noqa; for bias
    # Sample kurtosis m4 / m2**2 from the 2nd and 4th central moments.
    m2 = moment(a, 2, axis)
    m4 = moment(a, 4, axis)
    zero = m2 == 0
    # Silence divide-by-zero warnings; zero-variance slices map to 0.
    olderr = np.seterr(all="ignore")
    try:
        vals = da.where(zero, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # need a version of np.place
        raise NotImplementedError("bias=False is not implemented.")
    if fisher:
        # Fisher's definition subtracts 3 so a normal distribution scores 0.
        return vals - 3
    else:
        if vals.ndim == 0:
            # TODO: scalar, min is a workaround
            return vals.min()
        return vals
@derived_from(scipy.stats)
def kurtosistest(a, axis=0, nan_policy="propagate"):
    # Only scipy's default NaN handling is supported.
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    n = float(a.shape[axis])
    b2 = kurtosis(a, axis, fisher=False)
    # Expected value and variance of b2 under the null hypothesis.
    E = 3.0 * (n - 1) / (n + 1)
    varb2 = (
        24.0 * n * (n - 2) * (n - 3) / ((n + 1) * (n + 1.0) * (n + 3) * (n + 5))
    )  # [1]_ Eq. 1
    x = (b2 - E) / np.sqrt(varb2)  # [1]_ Eq. 4
    # [1]_ Eq. 2:
    sqrtbeta1 = (
        6.0
        * (n * n - 5 * n + 2)
        / ((n + 7) * (n + 9))
        * np.sqrt((6.0 * (n + 3) * (n + 5)) / (n * (n - 2) * (n - 3)))
    )
    # [1]_ Eq. 3:
    A = 6.0 + 8.0 / sqrtbeta1 * (2.0 / sqrtbeta1 + np.sqrt(1 + 4.0 / (sqrtbeta1**2)))
    term1 = 1 - 2 / (9.0 * A)
    denom = 1 + x * np.sqrt(2 / (A - 4.0))
    # Negative denominators are replaced by the sentinel 99 so the cube root
    # below stays real; those positions are zeroed out via `denom == 99`.
    # NOTE(review): after this substitution the `denom < 0` test on the next
    # line can never be true, so its `term1` branch is dead code - confirm
    # against the scipy original before changing.
    denom = np.where(denom < 0, 99, denom)
    term2 = np.where(denom < 0, term1, np.power((1 - 2.0 / A) / denom, 1 / 3.0))
    Z = (term1 - term2) / np.sqrt(2 / (9.0 * A))  # [1]_ Eq. 5
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        # Unwrap 0-d arrays to plain scalars.
        Z = Z[()]
    # zprob uses upper tail, so Z needs to be positive
    return delayed(KurtosistestResult, nout=2)(Z, 2 * distributions.norm.sf(np.abs(Z)))
@derived_from(scipy.stats)
def normaltest(a, axis=0, nan_policy="propagate"):
    """Omnibus normality test combining skewtest and kurtosistest."""
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    # The statistic is the sum of squared z-scores of the two sub-tests,
    # chi-squared distributed with 2 degrees of freedom under the null.
    skew_z, _ = skewtest(a, axis)
    kurt_z, _ = kurtosistest(a, axis)
    stat = skew_z * skew_z + kurt_z * kurt_z
    pval = delayed(distributions.chi2.sf)(stat, 2)
    return delayed(NormaltestResult, nout=2)(stat, pval)
@derived_from(scipy.stats)
def f_oneway(*args):
    # One-way ANOVA: each positional argument is one group's data.
    # args = [np.asarray(arg, dtype=float) for arg in args]
    # ANOVA on N groups, each in its own array
    num_groups = len(args)
    alldata = da.concatenate(args)
    bign = len(alldata)
    # Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariance
    # to a shift in location, and centering all data around zero vastly
    # improves numerical stability.
    offset = alldata.mean()
    alldata -= offset
    # Total sum of squares about the grand mean.
    sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
    ssbn = 0
    for a in args:
        ssbn += _square_of_sums(a - offset) / float(len(a))
    # Naming: variables ending in bn/b are for "between treatments", wn/w are
    # for "within treatments"
    ssbn -= _square_of_sums(alldata) / float(bign)
    sswn = sstot - ssbn
    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = _fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return delayed(F_onewayResult, nout=2)(f, prob)
@derived_from(scipy.stats)
def moment(a, moment=1, axis=0, nan_policy="propagate"):
    # Only scipy's default NaN handling is supported.
    if nan_policy != "propagate":
        raise NotImplementedError(
            "`nan_policy` other than 'propagate' have not been implemented."
        )
    # Central moments are implemented directly by dask.array.
    return da.moment(a, moment, axis=axis)
# -------
# Helpers
# -------
# Elementwise dask wrappers for the two scipy.special functions used above
# (xlogy for the G-test terms, fdtrc for the F survival function).
# Don't really want to do all of scipy.special (or do we?)
_xlogy = wrap_elemwise(special.xlogy, source=special)
_fdtrc = wrap_elemwise(special.fdtrc, source=special)
def _equal_var_ttest_denom(v1, n1, v2, n2):
    """Pooled-variance (Student) t-test denominator and degrees of freedom."""
    df = n1 + n2 - 2.0
    # Pooled sample variance, weighting each group by its own dof.
    pooled = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
    denom = da.sqrt(pooled * (1.0 / n1 + 1.0 / n2))  # XXX: np -> da
    return df, denom
def _unequal_var_ttest_denom(v1, n1, v2, n2):
    """Welch t-test denominator and Welch-Satterthwaite degrees of freedom."""
    vn1 = v1 / n1
    vn2 = v2 / n2
    with np.errstate(divide="ignore", invalid="ignore"):
        df = (vn1 + vn2) ** 2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
    # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
    # Hence it doesn't matter what df is as long as it's not NaN.
    df = da.where(da.isnan(df), 1, df)  # XXX: np -> da
    denom = da.sqrt(vn1 + vn2)
    return df, denom
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """t statistic and two-sided p-value from precomputed means/denominator."""
    with np.errstate(divide="ignore", invalid="ignore"):
        t = da.divide(mean1 - mean2, denom)
    return _ttest_finish(df, t)
def _ttest_finish(df, t):
    """Common code between all 3 t-test functions."""
    # Two-sided p-value: twice the upper tail of the t distribution,
    # evaluated lazily on the dask statistic.
    # XXX: np.abs -> da.absolute
    # XXX: delayed(distributions.t.sf)
    prob = (
        delayed(distributions.t.sf)(da.absolute(t), df) * 2
    )  # use np.abs to get upper tail
    if t.ndim == 0:
        # Unwrap 0-d arrays to plain scalars for parity with scipy.
        t = t[()]
    return t, prob
def _count(x, axis=None):
    """Number of elements of ``x``, overall or along a single ``axis``."""
    return x.size if axis is None else x.shape[axis]
def _sum_of_squares(a, axis=0):
    """
    Squares each element of the input array, and returns the sum(s) of that.
    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.
    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).
    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    squared = a * a
    return da.sum(squared, axis)
def _square_of_sums(a, axis=0):
    """
    Sums elements of the input array, and returns the square(s) of that sum.
    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.
    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.
    See also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
    """
    total = da.sum(a, axis)
    return total * total
| |
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.log import Logger, LogLevel
from txweb2.dav.http import ErrorResponse
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.failure import Failure
from txweb2 import responsecode
from txweb2.http import HTTPError
from twistedcaldav.caldavxml import caldav_namespace
from twistedcaldav.config import config
from txdav.base.propertystore.base import PropertyName
from txdav.caldav.datastore.scheduling.delivery import DeliveryService
from txdav.caldav.datastore.scheduling.freebusy import FreebusyQuery
from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
from txdav.caldav.datastore.scheduling.processing import ImplicitProcessor, ImplicitProcessorException
from txdav.caldav.datastore.scheduling.utils import extractEmailDomain
from txdav.caldav.icalendarstore import ComponentUpdateState
import hashlib
import uuid
"""
Handles the sending of scheduling messages to the server itself. This will cause
actual processing of the delivery of the message to the recipient's inbox, via the
L{ImplicitProcessor} class.
"""
__all__ = [
"ScheduleViaCalDAV",
]
log = Logger()
class ScheduleViaCalDAV(DeliveryService):
    """
    Delivery service that handles scheduling messages addressed to users
    hosted on this server, by running them through L{ImplicitProcessor}
    (or a free-busy query) and recording per-recipient status.
    """
    def __init__(self, scheduler, recipients, responses, freebusy):
        # scheduler: the scheduling operation being processed
        # recipients: the calendar users to deliver to
        # responses: accumulator for per-recipient schedule-response status
        # freebusy: True when this is a free-busy request, not an iTIP delivery
        self.scheduler = scheduler
        self.recipients = recipients
        self.responses = responses
        self.freebusy = freebusy
    @classmethod
    def serviceType(cls):
        """Return the delivery-service type constant for CalDAV delivery."""
        return DeliveryService.serviceType_caldav
    @classmethod
    def matchCalendarUserAddress(cls, cuaddr):
        """
        Test whether the calendar user address refers to a user hosted on
        this server; falls back to the superclass match.
        """
        # Check for local address matches first
        if cuaddr.startswith("mailto:") and config.Scheduling[cls.serviceType()]["EmailDomain"]:
            addrDomain = extractEmailDomain(cuaddr)
            domain = config.Scheduling[cls.serviceType()]["EmailDomain"]
            if addrDomain == domain:
                return succeed(True)
        elif (cuaddr.startswith("http://") or cuaddr.startswith("https://")) and config.Scheduling[cls.serviceType()]["HTTPDomain"]:
            # NOTE(review): for "http://host/...", split(":")[0] yields the
            # scheme ("http"/"https"), so splits[0] is "tp"/"tps" - this
            # looks like it was meant to parse the host portion instead.
            # Confirm intended behavior before relying on HTTPDomain matching.
            splits = cuaddr.split(":")[0][2:].split("?")
            domain = config.Scheduling[cls.serviceType()]["HTTPDomain"]
            if splits[0].endswith(domain):
                return succeed(True)
        elif cuaddr.startswith("/"):
            # Assume relative HTTP URL - i.e. on this server
            return succeed(True)
        # Do default match
        return super(ScheduleViaCalDAV, cls).matchCalendarUserAddress(cuaddr)
    @inlineCallbacks
    def generateSchedulingResponses(self):
        """
        Produce one response per recipient: free-busy results when this is
        a free-busy request, otherwise iTIP delivery responses.
        """
        # Extract the ORGANIZER property and UID value from the calendar data for use later
        organizerProp = self.scheduler.calendar.getOrganizerProperty()
        uid = self.scheduler.calendar.resourceUID()
        # Freebusy needs to be optimized by doing multiple attendee in parallel if possible
        if self.freebusy:
            # Look for special delegate extended free-busy request
            event_details = [] if self.scheduler.calendar.getExtendedFreeBusy() else None
            for recipient in self.recipients:
                # Check access controls - we do not do this right now. But if we ever implement access controls to
                # determine which users can schedule with other users, here is where we would do that test.
                yield self.generateFreeBusyResponse(recipient, self.responses, organizerProp, uid, event_details)
        else:
            for recipient in self.recipients:
                # Check access controls - we do not do this right now. But if we ever implement access controls to
                # determine which users can schedule with other users, here is where we would do that test.
                yield self.generateResponse(recipient, self.responses)
    @inlineCallbacks
    def generateResponse(self, recipient, responses):
        """
        Run implicit scheduling processing for one recipient and, when
        requested by the processor, store the message in the recipient's
        inbox. Records the outcome in C{responses}; returns True on success.
        """
        # Hash the iCalendar data for use as the last path element of the URI path
        name = "{hash}-{r}.ics".format(hash=hashlib.md5(self.scheduler.calendar.resourceUID()).hexdigest(), r=str(uuid.uuid4())[:8],)
        # Do implicit scheduling message processing.
        try:
            processor = ImplicitProcessor()
            _ignore_processed, autoprocessed, store_inbox, changes = (yield processor.doImplicitProcessing(
                self.scheduler.txn,
                self.scheduler.calendar,
                self.scheduler.originator,
                recipient,
                noAttendeeRefresh=self.scheduler.noAttendeeRefresh,
            ))
        except ImplicitProcessorException as e:
            # The processor rejected the message: report its iTIP status.
            log.failure(
                "Could not store data in inbox {inbox}",
                inbox=recipient.inbox, level=LogLevel.debug
            )
            log.error(
                "Could not store data in inbox {inbox}",
                inbox=recipient.inbox
            )
            err = HTTPError(ErrorResponse(
                responsecode.FORBIDDEN,
                (caldav_namespace, "recipient-permissions"),
                "Could not store data in inbox",
            ))
            responses.add(recipient.cuaddr, Failure(exc_value=err), reqstatus=e.msg)
            returnValue(False)
        except Exception as e:
            # Unexpected failure: treat the message as a bad iTIP request.
            log.failure(
                "Could not process iTIP message",
                level=LogLevel.debug
            )
            log.error(
                "Could not process iTIP message",
            )
            err = HTTPError(ErrorResponse(
                responsecode.FORBIDDEN,
                (caldav_namespace, "recipient-permissions"),
                "Could not process iTIP message",
            ))
            responses.add(recipient.cuaddr, Failure(exc_value=err), reqstatus=iTIPRequestStatus.BAD_REQUEST)
            returnValue(False)
        if store_inbox:
            # Copy calendar to inbox
            try:
                child = yield recipient.inbox._createCalendarObjectWithNameInternal(name, self.scheduler.calendar, ComponentUpdateState.INBOX)
            except Exception as e:
                log.failure(
                    "Could not store data in inbox {inbox}: {error}",
                    inbox=recipient.inbox, error=e, level=LogLevel.debug
                )
                log.error(
                    "Could not store data in inbox {inbox}: {error}",
                    inbox=recipient.inbox, error=e
                )
                err = HTTPError(ErrorResponse(
                    responsecode.FORBIDDEN,
                    (caldav_namespace, "recipient-permissions"),
                    "Could not store data in inbox",
                ))
                responses.add(recipient.cuaddr, Failure(exc_value=err), reqstatus=iTIPRequestStatus.NO_AUTHORITY)
                returnValue(False)
            else:
                # Store CS:schedule-changes property if present
                if changes is not None:
                    props = child.properties()
                    props[PropertyName.fromElement(changes)] = changes
        responses.add(recipient.cuaddr, responsecode.OK, reqstatus=iTIPRequestStatus.MESSAGE_DELIVERED)
        if autoprocessed:
            # Count auto-processed deliveries in the per-request log items.
            if self.scheduler.logItems is not None:
                self.scheduler.logItems["itip.auto"] = self.scheduler.logItems.get("itip.auto", 0) + 1
        returnValue(True)
    @inlineCallbacks
    def generateFreeBusyResponse(self, recipient, responses, organizerProp, uid, event_details):
        """
        Run a free-busy query for one recipient and record the result (or
        an error status) in C{responses}; returns True on success.
        """
        # Extract the ATTENDEE property matching current recipient from the calendar data
        cuas = recipient.record.calendarUserAddresses
        attendeeProp = self.scheduler.calendar.getAttendeeProperty(cuas)
        try:
            fbresult = yield FreebusyQuery(
                organizer=self.scheduler.organizer,
                organizerProp=organizerProp,
                recipient=recipient,
                attendeeProp=attendeeProp,
                uid=uid,
                timerange=self.scheduler.timeRange,
                excludeUID=self.scheduler.excludeUID,
                logItems=self.scheduler.logItems,
                event_details=event_details,
            ).generateAttendeeFreeBusyResponse()
        except Exception as e:
            log.failure(
                "Could not determine free busy information for recipient {cuaddr}",
                cuaddr=recipient.cuaddr, level=LogLevel.debug
            )
            log.error(
                "Could not determine free busy information for recipient {cuaddr}: {ex}",
                cuaddr=recipient.cuaddr, ex=e
            )
            err = HTTPError(ErrorResponse(
                responsecode.FORBIDDEN,
                (caldav_namespace, "recipient-permissions"),
                "Could not determine free busy information",
            ))
            responses.add(
                recipient.cuaddr,
                Failure(exc_value=err),
                reqstatus=iTIPRequestStatus.NO_AUTHORITY
            )
            returnValue(False)
        else:
            responses.add(
                recipient.cuaddr,
                responsecode.OK,
                reqstatus=iTIPRequestStatus.SUCCESS,
                calendar=fbresult
            )
            returnValue(True)
| |
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
"""\
RamSymbols.py -- a tool for poking and peeking ram symbols on motes.
To be used in conjunction with tinyos-1.x/contrib/nestfe/nesc/lib/RamSymbols
"""
import sys, string, time, types
from xml.dom import minidom
import pytos.util.nescDecls as nescDecls
import pytos.util.RoutingMessages as RoutingMessages
import pytos.Comm as Comm
import re
from copy import deepcopy
class RamSymbol( RoutingMessages.RoutingMessage ) :
    """A pokeable/peekable RAM symbol on a mote, built from its entry in
    nescDecls.xml (Python 2 code)."""
    #this is the variable the determines, on a blocking rpc call, how
    #many messages will be queued up. Should perhaps be bigger for large
    #networks, but will generally be the same number for all rpc
    #functions that use the same send and receive comm stacks.
    msgQueueSize = 10
    def __init__(self, xmlDefinition=None, parent=None) :
        """Parse address/length/type from `xmlDefinition` and initialize the
        underlying RoutingMessage. With no arguments, build an empty shell
        (used by __deepcopy__)."""
        if xmlDefinition==None :
            return
        self.pokeResponseMsg = None
        self.memAddress = int(xmlDefinition.getAttribute("address"))
        length = int(xmlDefinition.getAttribute("length"))
        typeDef = xmlDefinition.getElementsByTagName("type")[0]
        self.isPointer = typeDef.getAttribute("typeClass") == "pointer"
        self.isArray = xmlDefinition.hasAttribute("array")
        symbolType = parent.app.types[typeDef.getAttribute("typeName")]
        #if symbolType.size > parent.app.enums.MAX_RAM_SYMBOL_SIZE :
        if self.isPointer :
            symbolType = nescDecls.nescPointer(parent.app, symbolType)
        if self.isArray :
            # The declared byte length must be a whole multiple of the
            # element size, otherwise the array shape cannot be recovered.
            if length % symbolType.size == 0 :
                numElements = length // symbolType.size
            else :
                raise Exception("Could not discern ram symbol array length")
            symbolType = nescDecls.nescArray(numElements, symbolType)
        structArgs = []
        if type(symbolType) == nescDecls.nescStruct :
            self.isStruct = True
            structArgs.append(symbolType)
        else :
            structArgs.append(xmlDefinition.getAttribute("name"))
            structArgs.append( ("value", symbolType) )
        #now initialize this command as a TosMsg object (which is really a nescStruct)
        RoutingMessages.RoutingMessage.__init__(self, parent, 0, *structArgs)
        if self.isStruct :
            self.__dict__["name"] = xmlDefinition.getAttribute("name")
        if length != self.size :
            raise Exception("Ram symbol size incorrect")
        self.pokeResponseMsg = nescDecls.TosMsg(self.memAddress, "PokeResponseMsg",
                                                ("value", parent.app.types.result_t))
    def poke(self, value=None, arrayIndex = None, dereference=False, **nameArgs) :
        """Write `value` to the symbol on the mote(s), optionally at an
        array index and/or through a pointer dereference."""
        if not self.parent.app.__dict__.has_key("RamSymbolsM") :
            raise Exception("You must include the contrib/hood/tos/lib/RamSymbols/RamSymbolsM module in your nesc application in order to poke or peek at ram symbols")
        func = self.parent.app.RamSymbolsM.poke
        if arrayIndex != None :
            if self.isArray :
                if dereference == True :
                    if self.isPointer:
                        # Poke the value pointed to by element arrayIndex.
                        ptr = self.value["value"].elementType
                        newValue = deepcopy(ptr.value)
                        func.symbol.memAddress = self.memAddress + ptr.size * arrayIndex
                        func.symbol.length = newValue.size
                    else :
                        raise Exception("Dereferencing is only allowed for pointer types")
                else :
                    # Poke a single array element in place.
                    newValue = deepcopy(self.value["value"].elementType)
                    func.symbol.memAddress = self.memAddress + newValue.size * arrayIndex
                    func.symbol.length = newValue.size
            else :
                raise Exception("Indexing a poke is only supported for arrays")
        elif dereference == True :
            if self.isPointer and self.isArray :
                raise Exception("Poke cannot be used to dereference an entire array of pointers")
            elif not self.isPointer :
                raise Exception("Dereferencing is only allowed for pointer types")
            newValue = deepcopy(self.value["value"].value)
            func.symbol.memAddress = self.memAddress
            func.symbol.length = newValue.size
        else :
            # NOTE(review): this size check reads `app.ramSymbol_t` while the
            # check below uses `app.types.ramSymbol_t` - confirm which path
            # is correct; they look inconsistent.
            if self.isArray and self.size > self.parent.app.ramSymbol_t.data.size :
                raise Exception("Array is too large for poking. You must index the poke")
            if self.isStruct :
                newValue = deepcopy(self)
            elif self.isPointer :
                # Poking the pointer itself: treat it as a plain integer.
                newValue = self.parent.app.types["unsigned int"]
            else :
                newValue = deepcopy(self.value["value"])
            func.symbol.memAddress = self.memAddress
            func.symbol.length = newValue.size
        if func.symbol.length > self.parent.app.types.ramSymbol_t.data.size :
            raise Exception("Ram symbol size too large for msg buffer")
        if value != None :
            self._assignParam(newValue, value, "value")
        # Splice the new value's bytes into the outgoing symbol buffer.
        newBytes = newValue.getBytes()
        oldBytes = func.symbol.data.getBytes()
        newBytes = oldBytes.replace(oldBytes[:func.symbol.length], newBytes, 1)
        func.symbol.data.setBytes(newBytes)
        func.symbol.dereference = dereference
        result = func(**nameArgs)
        if result != None:
            return map(self.parsePokeResponse, result)
    def parsePokeResponse(self, msg) :
        """Convert a raw poke reply (or RPC error) into a PokeResponseMsg
        with value 1 on success, 0 on error."""
        response = deepcopy(self.pokeResponseMsg)
        if msg.nescType == "RpcResponseMsg":
            response.value=0
            addr = msg.sourceAddress
        else :
            # Sanity-check that the reply's address lies on this symbol.
            # NOTE(review): `self.len` is not defined in this class - confirm
            # it is provided by a base class.
            if msg.value["value"].value != self.memAddress and (not self.isArray or
                   (msg.value["value"].value -self.memAddress) % self.value["value"].elementType.size !=0 or
                   msg.memAddress >= self.memAddress + self.len * self.value["value"].elementType.size):
                raise Exception("Memory address mismatch in poke response")
            response.value = 1
            addr = msg.parentMsg.sourceAddress
        response.parentMsg = msg
        response.nescType = "".join( [self.nescType, ".poke(), nodeID=%d"%addr] )
        return response
    def peek(self, arrayIndex = None, dereference=False, **nameArgs) :
        """Read the symbol's current value from the mote(s), optionally at
        an array index and/or through a pointer dereference."""
        if not self.parent.app.__dict__.has_key("RamSymbolsM") :
            raise Exception("You must include the contrib/hood/tos/lib/RamSymbols/RamSymbolsM module in your nesc application in order to poke or peek at ram symbols")
        func = self.parent.app.RamSymbolsM.peek
        if arrayIndex != None :
            #change memaddress to memAddres + array index
            if self.isArray :
                if dereference :
                    if self.isPointer :
                        func.memAddress = self.memAddress + self.value["value"].elementType.size * arrayIndex
                        #set length of memcpy to ptr dereferenced value
                        func.length = self.value["value"].elementType.value.size
                    else :
                        raise Exception("Dereferencing a peek is only allowed for pointers")
                else :
                    func.memAddress = self.memAddress + self.value["value"].elementType.size * arrayIndex
                    func.length = self.value["value"].elementType.size
            else :
                raise Exception("Indexing a peek is only allowed for arrays")
        elif dereference :
            #if this is an array or ptrs, fail
            if self.isArray :
                raise Exception("peek cannot be used to dereference an array of pointers")
            func.memAddress = self.memAddress
            func.length = self.value["value"].size
        else :
            #if this is an array check if the whole thing will fit in the return msg
            if self.isArray and self.size > self.parent.app.types.ramSymbol_t.data.size :
                raise Exception("Array is too large for peeking. You must index the peek")
            func.memAddress = self.memAddress
            func.length = self.size
        if func.length > self.parent.app.types.ramSymbol_t.data.size :
            raise Exception("Ram symbol size too large for msg buffer")
        func.dereference = dereference
        result = func(**nameArgs)
        if result != None :
            return map(self.parsePeekResponse, result)
    def parsePeekResponse(self, msg) :
        """Convert a raw peek reply (or RPC error) into a TosMsg holding the
        decoded value."""
        #create the response message depending on if was rpc error or not
        if msg.nescType == "RpcResponseMsg":
            response = nescDecls.TosMsg(self.memAddress, "PeekErrorMsg",
                                        ("value", self.parent.app.types.result_t))
            response.value=0
            addr = msg.sourceAddress
        else:
            #choose the type depending on if the ramSymbol is the entire symbol or element of array
            if msg.length == self.size and msg.memAddress == self.memAddress :
                if self.isStruct :
                    value = deepcopy(self)
                else :
                    value = deepcopy(self.value["value"])
            elif (self.isArray and msg.length == self.value["value"].elementType.size and
                  (msg.memAddress -self.memAddress) % self.value["value"].elementType.size ==0 and
                  msg.memAddress < self.memAddress + self.len * self.value["value"].elementType.size):
                value = deepcopy(self.value["value"].elementType)
            elif (self.isArray and self.isPointer
                  and msg.length == self.value["value"].elementType.value.size and
                  (msg.memAddress -self.memAddress) % self.value["value"].elementType.value.size ==0 and
                  msg.memAddress < self.memAddress + self.len * self.value["value"].elementType.size):
                value = deepcopy(self.value["value"].elementType.value)
            elif (self.isPointer
                  and msg.length == self.value["value"].value.size and
                  msg.memAddress == self.memAddress ) :
                value = deepcopy(self.value["value"].value)
            else :
                raise Exception("Memory address mismatch in peek response")
            #choose the type depending on whether calling func was peek or ptrPeek
            if self.isPointer :
                if msg.dereference :
                    value = value.value
                else :
                    value = self.parent.app.types["unsigned int"]
            #create the return message from type depending if it is a struct already or must be created
            if issubclass(type(value), nescDecls.nescStruct) :
                response = nescDecls.TosMsg(self.memAddress, value)
            else :
                response = nescDecls.TosMsg(self.memAddress, value.nescType,
                                            ("value", value))
            bytes = msg.data.getBytes()
            response.setBytes(bytes[:response.size])
            addr = msg.parentMsg.sourceAddress
        response.parentMsg = msg
        response.nescType = "".join( [self.nescType, ".peek(), nodeID=%d"%addr])
        return response
    def __str__(self) :
        """One-line 'type : name' rendering of the symbol."""
        if self.isStruct :
            return "%20s : %s\n" % (self.nescType,self.name)
        else:
            return "%20s : %s\n" % (self.value["value"].nescType,self.nescType)
    def __deepcopy__(self, memo={}) :
        """Copy via the empty-shell constructor, sharing `parent` but deep
        copying the per-call parameters and message state."""
        result = self.__class__()
        memo[id(self)] = result
        result.parent = self.parent
        for (callParam, defaultVal) in self.parent.defaultCallParams :
            result.__dict__[callParam] = deepcopy(self.__dict__[callParam], memo)
        nescDecls.TosMsg.__init__(result, self.amType, self)
        return result
    def registerPeek(self, listener, comm=()) :
        """Register `listener` for parsed peek responses."""
        self.parent.app.RamSymbolsM.peek.register(SymbolResponseListener(listener, self.parsePeekResponse), *comm)
    def unregisterPeek(self, listener, comm=()) :
        """Unregister a peek listener.
        NOTE(review): uses app.RamSymbols here but app.RamSymbolsM in
        registerPeek - confirm which attribute is correct."""
        self.parent.app.RamSymbols.peek.unregister(SymbolResponseListener(listener, self.parsePeekResponse), *comm)
    def registerPoke(self, listener, comm=()) :
        """Register `listener` for parsed poke responses."""
        self.parent.app.RamSymbolsM.poke.register(SymbolResponseListener(listener, self.parsePokeResponse), *comm)
    def unregisterPoke(self, listener, comm=()) :
        """Unregister a poke listener.
        NOTE(review): uses app.RamSymbols here but app.RamSymbolsM in
        registerPoke - confirm which attribute is correct."""
        self.parent.app.RamSymbols.poke.unregister(SymbolResponseListener(listener, self.parsePokeResponse), *comm)
class SymbolResponseListener( Comm.MessageListener ):
    """Message listener that runs each incoming message through
    `parseFunction` before invoking the user callback (Python 2 code)."""
    def __init__(self, callback, parseFunction ):
        self.parseFunction = parseFunction
        Comm.MessageListener.__init__(self, callback)
        # Fold the parse function into the listener's hash so listeners
        # wrapping the same callback with different parsers stay distinct.
        self._firstHashFunction = self._hashFunction
        self._hashFunction = self._combinedHash
    def _combinedHash(self):
        return self._firstHashFunction() + self.parseFunction.__hash__()
    def messageReceived( self , addr , msg ) :
        """Parse the message and forward the result to the callback."""
        try:
            msg = self.parseFunction(msg)
            self.callback(addr, msg)
        except Exception, e:
            # NOTE(review): parse/callback errors are silently dropped here.
            pass
class RamSymbols( RoutingMessages.RoutingMessages) :
    """A container class from which to find all ram symbols.
    """
    def __init__(self, app, sendComm=None, receiveComm=None, tosbase=True, xmlFileDOM=None, **callParams) :
        """ Find function defs in nescDecls.xml file and create function objects."""
        if not "ramSymbol_t" in app.types._types :
            print "The RamSymbolsM module was not compiled in. No ram symbols will be imported."
        RoutingMessages.RoutingMessages.__init__(self, app)
        self.defaultCallParams = ( ("address", None), ("returnAddress", None),
                                   ("timeout", 1), ("blocking", True), ("responseDesired", True) )
        self.initializeCallParams(callParams)
        # Buckets for symbols skipped during import, keyed by failure mode.
        self.tooLarge = []
        self.sizeIncorrect = []
        self.noType = []
        self.arraySizeIncorrect = []
        if xmlFileDOM == None:
            # NOTE(review): `buildDir` is not defined anywhere in this scope
            # or module chunk - this branch would raise NameError; confirm
            # where it is expected to come from.
            xmlFileDOM = minidom.parse(nescDecls.findBuildFile(buildDir, "nescDecls.xml"))
        symbols, = xmlFileDOM.childNodes[0].getElementsByTagName("ramSymbols")
        # Keep only element nodes (nodeType 1), dropping text/whitespace.
        symbols = [node for node in symbols.childNodes if node.nodeType == 1]
        regexp = re.compile("\w+\.\w+")
        for symbolDef in symbols:
            try :
                if (not regexp.match(symbolDef.getAttribute("name"))):
                    # If the identifier is not of form Module.variable, then it is defined in a .c/.h file and not in a
                    # nesc module. Add it to the "Globals" pseudo-module.
                    symbolDef.setAttribute("name", "Globals."+symbolDef.getAttribute("name"))
                self._messages[symbolDef.getAttribute("name")] = RamSymbol(symbolDef, self)
            except Exception, e:
                # Sort expected construction failures into the skip buckets
                # (matched by exception message prefix); re-raise anything else.
                if len(e.args) > 0 and e.args[0].find("No type") == 0:
                    self.noType.append(symbolDef)
                elif len(e.args) > 0 and e.args[0].find("Could not discern") == 0:
                    self.arraySizeIncorrect.append(symbolDef)
                elif len(e.args) > 0 and e.args[0].find("Ram symbol size too large") == 0:
                    self.tooLarge.append(symbolDef)
                elif len(e.args) > 0 and e.args[0].find("Ram symbol size incorrect") == 0:
                    self.sizeIncorrect.append(symbolDef)
                else :
                    raise
        #self.printSkippedSymbols()
    def printSkippedSymbols(self) :
        """Print a warning summary of all symbols skipped during import."""
        err = ""
        if len(self.tooLarge) >0 :
            err += "\nWarning: %d ram symbols were too large for %d byte packet.\n" % (len(self.tooLarge), self.app.types.ramSymbol_t.data.size )
            for symbol in self.tooLarge :
                err += "\t%s\n" % symbol.getAttribute("name")
        if len(self.sizeIncorrect) >0 :
            err += "\nWarning: The size of the following ram symbols does not match the size of the discovered type:\n"
            for symbol in self.sizeIncorrect :
                err += "\t%s\n" % symbol.getAttribute("name")
        if len(self.noType) >0 :
            err += "\nWarning: No type was found for the following ram symbols:\n"
            for symbol in self.noType :
                err += "\t%s\n" % symbol.getAttribute("name")
        if len(self.arraySizeIncorrect) >0 :
            err += "\nWarning: The following ram symbols are arrays with length not a multiple of the type size:\n"
            for symbol in self.arraySizeIncorrect :
                err += "\t%s\n" % symbol.getAttribute("name")
        if len(err) > 0 : print err
| |
# Copyright (c) 2005-2010 Canonical
#
# Author: Michael Vogt <michael.vogt@ubuntu.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""Classes for working with locally available Debian packages."""
from __future__ import print_function
import gzip
import io
import os
import sys
from io import StringIO

import apt
import apt_inst
import apt_pkg
from apt_pkg import gettext as _
class NoDebArchiveException(IOError):
    """Exception which is raised if a file is no Debian archive."""
    # Subclasses IOError so callers that catch IOError keep working.
class DebPackage(object):
    """A Debian Package (.deb file)."""
    # Constants for comparing the local package file with the version
    # in the cache
    (VERSION_NONE,
     VERSION_OUTDATED,
     VERSION_SAME,
     VERSION_NEWER) = range(4)
    # verbosity gate for _dbg(); messages with level <= debug go to stderr
    debug = 0
def __init__(self, filename=None, cache=None):
if cache is None:
cache = apt.Cache()
self._cache = cache
self._debfile = None
self.pkgname = ""
self._sections = {}
self._need_pkgs = []
self._check_was_run = False
self._failure_string = ""
self._multiarch = None
if filename:
self.open(filename)
    def open(self, filename):
        """ open given debfile """
        self._dbg(3, "open '%s'" % filename)
        # reset any state left over from a previously opened deb
        self._need_pkgs = []
        self._installed_conflicts = set()
        self._failure_string = ""
        self.filename = filename
        self._debfile = apt_inst.DebFile(self.filename)
        # parse the control member for field access via __getitem__
        control = self._debfile.control.extractdata("control")
        self._sections = apt_pkg.TagSection(control)
        self.pkgname = self._sections["Package"]
        self._check_was_run = False
    def __getitem__(self, key):
        # dict-style read access to the deb's control fields, e.g. pkg["Version"]
        return self._sections[key]
    def __contains__(self, key):
        # membership test against the control fields
        return key in self._sections
    @property
    def filelist(self):
        """return the list of files in the deb."""
        files = []
        try:
            # go() walks the data member, invoking the callback per entry
            self._debfile.data.go(lambda item, data: files.append(item.name))
        except SystemError:
            # unreadable archive: report it in-band as a one-item list
            return [_("List of files for '%s' could not be read") %
                    self.filename]
        return files
    @property
    def control_filelist(self):
        """ return the list of files in control.tar.gz """
        control = []
        try:
            self._debfile.control.go(
                lambda item, data: control.append(item.name))
        except SystemError:
            # unreadable archive: report it in-band as a one-item list
            return [_("List of control files for '%s' could not be read") %
                    self.filename]
        return sorted(control)
    # helper that will return a pkgname with a multiarch suffix if needed
    def _maybe_append_multiarch_suffix(self, pkgname,
                                       in_conflict_checking=False):
        """Return *pkgname*, arch-qualified with self._multiarch if needed."""
        # trivial cases: already qualified, native-arch deb, virtual pkg,
        # or an Architecture: all candidate
        if ":" in pkgname:
            return pkgname
        if not self._multiarch:
            return pkgname
        elif self._cache.is_virtual_package(pkgname):
            return pkgname
        elif (pkgname in self._cache and
              self._cache[pkgname].candidate and
              self._cache[pkgname].candidate.architecture == "all"):
            return pkgname
        # now do the real multiarch checking
        multiarch_pkgname = "%s:%s" % (pkgname, self._multiarch)
        # the upper layers will handle this
        if multiarch_pkgname not in self._cache:
            return multiarch_pkgname
        # now check the multiarch state
        cand = self._cache[multiarch_pkgname].candidate._cand
        # the default is to add the suffix, unless its a pkg that can satisfy
        # foreign dependencies
        if cand.multi_arch & cand.MULTI_ARCH_FOREIGN:
            return pkgname
        # for conflicts we need a special case here, any not multiarch enabled
        # package has an implicit conflict
        if (in_conflict_checking and
                not (cand.multi_arch & cand.MULTI_ARCH_SAME)):
            return pkgname
        return multiarch_pkgname
    def _is_or_group_satisfied(self, or_group):
        """Return True if at least one dependency of the or-group is satisfied.
        This method gets an 'or_group' and analyzes if at least one dependency
        of this group is already satisfied.
        """
        self._dbg(2, "_checkOrGroup(): %s " % (or_group))
        for dep in or_group:
            depname = dep[0]
            ver = dep[1]
            oper = dep[2]
            # multiarch
            depname = self._maybe_append_multiarch_suffix(depname)
            # check for virtual pkgs
            if depname not in self._cache:
                if self._cache.is_virtual_package(depname):
                    self._dbg(
                        3, "_is_or_group_satisfied(): %s is virtual dep" %
                        depname)
                    for pkg in self._cache.get_providing_packages(depname):
                        if pkg.is_installed:
                            return True
                continue
            # check real dependency
            inst = self._cache[depname].installed
            if inst is not None and apt_pkg.check_dep(inst.version, oper, ver):
                return True
            # if no real dependency is installed, check if there is
            # a package installed that provides this dependency
            # (e.g. scrollkeeper dependencies are provided by rarian-compat)
            # but only do that if there is no version required in the
            # dependency (we do not support versionized dependencies)
            if not oper:
                for ppkg in self._cache.get_providing_packages(
                        depname, include_nonvirtual=True):
                    if ppkg.is_installed:
                        self._dbg(
                            3, "found installed '%s' that provides '%s'" % (
                                ppkg.name, depname))
                        return True
        return False
def _satisfy_or_group(self, or_group):
"""Try to satisfy the or_group."""
for dep in or_group:
depname, ver, oper = dep
# multiarch
depname = self._maybe_append_multiarch_suffix(depname)
# if we don't have it in the cache, it may be virtual
if depname not in self._cache:
if not self._cache.is_virtual_package(depname):
continue
providers = self._cache.get_providing_packages(depname)
# if a package just has a single virtual provider, we
# just pick that (just like apt)
if len(providers) != 1:
continue
depname = providers[0].name
# now check if we can satisfy the deps with the candidate(s)
# in the cache
pkg = self._cache[depname]
cand = self._cache._depcache.get_candidate_ver(pkg._pkg)
if not cand:
continue
if not apt_pkg.check_dep(cand.ver_str, oper, ver):
continue
# check if we need to install it
self._dbg(2, "Need to get: %s" % depname)
self._need_pkgs.append(depname)
return True
# if we reach this point, we failed
or_str = ""
for dep in or_group:
or_str += dep[0]
if ver and oper:
or_str += " (%s %s)" % (dep[2], dep[1])
if dep != or_group[len(or_group) - 1]:
or_str += "|"
self._failure_string += _(
"Dependency is not satisfiable: %s\n") % or_str
return False
    def _check_single_pkg_conflict(self, pkgname, ver, oper):
        """Return True if a pkg conflicts with a real installed/marked pkg."""
        # FIXME: deal with conflicts against its own provides
        # (e.g. Provides: ftp-server, Conflicts: ftp-server)
        self._dbg(
            3, "_check_single_pkg_conflict() pkg='%s' ver='%s' oper='%s'" % (
                pkgname, ver, oper))
        pkg = self._cache[pkgname]
        # only the installed version, or one already marked for install,
        # can actually conflict
        if pkg.is_installed:
            pkgver = pkg.installed.version
        elif pkg.marked_install:
            pkgver = pkg.candidate.version
        else:
            return False
        # a matching Replaces entry in our own deb neutralises the conflict
        if (apt_pkg.check_dep(pkgver, oper, ver) and not
                self.replaces_real_pkg(pkgname, oper, ver)):
            self._failure_string += _("Conflicts with the installed package "
                                      "'%s'") % pkg.name
            self._dbg(3, "conflicts with installed pkg '%s'" % pkg.name)
            return True
        return False
    def _check_conflicts_or_group(self, or_group):
        """Check the or-group for conflicts with installed pkgs.

        Accumulates offending package names in self._installed_conflicts and
        returns True when that set is non-empty (including hits from
        previous calls).
        """
        self._dbg(2, "_check_conflicts_or_group(): %s " % (or_group))
        for dep in or_group:
            depname = dep[0]
            ver = dep[1]
            oper = dep[2]
            # FIXME: is this good enough? i.e. will apt always populate
            # the cache with conflicting pkgnames for our arch?
            depname = self._maybe_append_multiarch_suffix(
                depname, in_conflict_checking=True)
            # check conflicts with virtual pkgs
            if depname not in self._cache:
                # FIXME: we have to check for virtual replaces here as
                # well (to pass tests/gdebi-test8.deb)
                if self._cache.is_virtual_package(depname):
                    for pkg in self._cache.get_providing_packages(depname):
                        self._dbg(3, "conflicts virtual check: %s" % pkg.name)
                        # P/C/R on virtual pkg, e.g. ftpd
                        if self.pkgname == pkg.name:
                            self._dbg(3, "conflict on self, ignoring")
                            continue
                        if self._check_single_pkg_conflict(
                                pkg.name, ver, oper):
                            self._installed_conflicts.add(pkg.name)
                continue
            if self._check_single_pkg_conflict(depname, ver, oper):
                self._installed_conflicts.add(depname)
        return bool(self._installed_conflicts)
@property
def conflicts(self):
"""List of package names conflicting with this package."""
key = "Conflicts"
try:
return apt_pkg.parse_depends(self._sections[key], False)
except KeyError:
return []
@property
def depends(self):
"""List of package names on which this package depends on."""
depends = []
# find depends
for key in "Depends", "Pre-Depends":
try:
depends.extend(
apt_pkg.parse_depends(self._sections[key], False))
except KeyError:
pass
return depends
@property
def provides(self):
"""List of virtual packages which are provided by this package."""
key = "Provides"
try:
return apt_pkg.parse_depends(self._sections[key], False)
except KeyError:
return []
@property
def replaces(self):
"""List of packages which are replaced by this package."""
key = "Replaces"
try:
return apt_pkg.parse_depends(self._sections[key], False)
except KeyError:
return []
    def replaces_real_pkg(self, pkgname, oper, ver):
        """Return True if a given non-virtual package is replaced.
        Return True if the deb packages replaces a real (not virtual)
        packages named (pkgname, oper, ver).
        """
        self._dbg(3, "replaces_real_pkg() %s %s %s" % (pkgname, oper, ver))
        pkg = self._cache[pkgname]
        if pkg.is_installed:
            pkgver = pkg.installed.version
        elif pkg.marked_install:
            pkgver = pkg.candidate.version
        else:
            pkgver = None
        for or_group in self.replaces:
            # NOTE(review): the loop rebinds the ver/oper parameters to each
            # Replaces entry's values before check_dep — looks intentional
            # (match against our own Replaces relation), but verify.
            for (name, ver, oper) in or_group:
                if (name == pkgname and apt_pkg.check_dep(pkgver, oper, ver)):
                    self._dbg(3, "we have a replaces in our package for the "
                              "conflict against '%s'" % (pkgname))
                    return True
        return False
def check_conflicts(self):
"""Check if there are conflicts with existing or selected packages.
Check if the package conflicts with a existing or to be installed
package. Return True if the pkg is OK.
"""
res = True
for or_group in self.conflicts:
if self._check_conflicts_or_group(or_group):
#print "Conflicts with a exisiting pkg!"
#self._failure_string = "Conflicts with a exisiting pkg!"
res = False
return res
    def check_breaks_existing_packages(self):
        """
        check if installing the package would break existing
        package on the system, e.g. system has:
        smc depends on smc-data (= 1.4)
        and user tries to install smc-data 1.6
        """
        # show progress information as this step may take some time
        size = float(len(self._cache))
        steps = max(int(size / 50), 1)
        debver = self._sections["Version"]
        debarch = self._sections["Architecture"]
        # store what we provide so that we can later check against that
        provides = [x[0][0] for x in self.provides]
        for (i, pkg) in enumerate(self._cache):
            if i % steps == 0:
                self._cache.op_progress.update(float(i) / size * 100.0)
            if not pkg.is_installed:
                continue
            # check if the existing dependencies are still satisfied
            # with the package
            ver = pkg._pkg.current_ver
            for dep_or in pkg.installed.dependencies:
                for dep in dep_or.or_dependencies:
                    if dep.name == self.pkgname:
                        if not apt_pkg.check_dep(
                                debver, dep.relation, dep.version):
                            self._dbg(2, "would break (depends) %s" % pkg.name)
                            # TRANSLATORS: the first '%s' is the package that
                            # breaks, the second the dependency that makes it
                            # break, the third the relation (e.g. >=) and the
                            # latest the version for the releation
                            self._failure_string += _(
                                "Breaks existing package '%(pkgname)s' "
                                "dependency %(depname)s "
                                "(%(deprelation)s %(depversion)s)") % {
                                    'pkgname': pkg.name,
                                    'depname': dep.name,
                                    'deprelation': dep.relation,
                                    'depversion': dep.version}
                            self._cache.op_progress.done()
                            return False
            # now check if there are conflicts against this package on
            # the existing system
            if "Conflicts" in ver.depends_list:
                for conflicts_ver_list in ver.depends_list["Conflicts"]:
                    for c_or in conflicts_ver_list:
                        if (c_or.target_pkg.name == self.pkgname and
                                c_or.target_pkg.architecture == debarch):
                            if apt_pkg.check_dep(
                                    debver, c_or.comp_type, c_or.target_ver):
                                self._dbg(
                                    2, "would break (conflicts) %s" % pkg.name)
                                # TRANSLATORS: the first '%s' is the package
                                # that conflicts, the second the packagename
                                # that it conflicts with (so the name of the
                                # deb the user tries to install), the third is
                                # the relation (e.g. >=) and the last is the
                                # version for the relation
                                self._failure_string += _(
                                    "Breaks existing package '%(pkgname)s' "
                                    "conflict: %(targetpkg)s "
                                    "(%(comptype)s %(targetver)s)") % {
                                        'pkgname': pkg.name,
                                        'targetpkg': c_or.target_pkg.name,
                                        'comptype': c_or.comp_type,
                                        'targetver': c_or.target_ver}
                                self._cache.op_progress.done()
                                return False
                        # an installed pkg conflicting with something we
                        # provide (and that is not ourselves) also breaks
                        if (c_or.target_pkg.name in provides and
                                self.pkgname != pkg.name):
                            self._dbg(
                                2, "would break (conflicts) %s" % provides)
                            self._failure_string += _(
                                "Breaks existing package '%(pkgname)s' "
                                "that conflict: '%(targetpkg)s'. But the "
                                "'%(debfile)s' provides it via: "
                                "'%(provides)s'") % {
                                    'provides': ",".join(provides),
                                    'debfile': self.filename,
                                    'targetpkg': c_or.target_pkg.name,
                                    'pkgname': pkg.name}
                            self._cache.op_progress.done()
                            return False
        self._cache.op_progress.done()
        return True
    def compare_to_version_in_cache(self, use_installed=True):
        """Compare the package to the version available in the cache.
        Checks if the package is already installed or available in the cache
        and if so in what version, returns one of (VERSION_NONE,
        VERSION_OUTDATED, VERSION_SAME, VERSION_NEWER).
        """
        self._dbg(3, "compare_to_version_in_cache")
        pkgname = self._sections["Package"]
        architecture = self._sections["Architecture"]
        # Architecture all gets mapped to the native architecture internally
        if architecture == 'all':
            architecture = apt_pkg.config.find("APT::Architecture")
        # Arch qualify the package name
        pkgname = ":".join([pkgname, architecture])
        debver = self._sections["Version"]
        self._dbg(1, "debver: %s" % debver)
        if pkgname in self._cache:
            if use_installed and self._cache[pkgname].installed:
                cachever = self._cache[pkgname].installed.version
            elif not use_installed and self._cache[pkgname].candidate:
                cachever = self._cache[pkgname].candidate.version
            else:
                return self.VERSION_NONE
            if cachever is not None:
                # NOTE: local `cmp` shadows the (py2) builtin; kept as-is
                cmp = apt_pkg.version_compare(cachever, debver)
                self._dbg(1, "CompareVersion(debver,instver): %s" % cmp)
                if cmp == 0:
                    return self.VERSION_SAME
                elif cmp < 0:
                    return self.VERSION_NEWER
                elif cmp > 0:
                    return self.VERSION_OUTDATED
        return self.VERSION_NONE
    def check(self):
        """Check if the package is installable.

        Runs the full gauntlet: architecture, version, conflicts, breakage
        of installed packages and dependency satisfaction. On failure the
        reason is left in self._failure_string.
        """
        self._dbg(3, "check")
        self._check_was_run = True
        # check arch
        if "Architecture" not in self._sections:
            self._dbg(1, "ERROR: no architecture field")
            self._failure_string = _("No Architecture field in the package")
            return False
        arch = self._sections["Architecture"]
        if arch != "all" and arch != apt_pkg.config.find("APT::Architecture"):
            if arch in apt_pkg.get_architectures():
                # foreign but enabled arch: remember it and arch-qualify
                self._multiarch = arch
                self.pkgname = "%s:%s" % (self.pkgname, self._multiarch)
                self._dbg(1, "Found multiarch arch: '%s'" % arch)
            else:
                self._dbg(1, "ERROR: Wrong architecture dude!")
                self._failure_string = _("Wrong architecture '%s'") % arch
                return False
        # check version
        if self.compare_to_version_in_cache() == self.VERSION_OUTDATED:
            if self._cache[self.pkgname].installed:
                # the deb is older than the installed
                self._failure_string = _(
                    "A later version is already installed")
                return False
        # FIXME: this sort of error handling sux
        self._failure_string = ""
        # check conflicts
        if not self.check_conflicts():
            return False
        # check if installing it would break anything on the
        # current system
        if not self.check_breaks_existing_packages():
            return False
        # try to satisfy the dependencies
        if not self._satisfy_depends(self.depends):
            return False
        # check for conflicts again (this time with the packages that are
        # marked for install)
        if not self.check_conflicts():
            return False
        if self._cache._depcache.broken_count > 0:
            self._failure_string = _("Failed to satisfy all dependencies "
                                     "(broken cache)")
            # clean the cache again
            self._cache.clear()
            return False
        return True
def satisfy_depends_str(self, dependsstr):
"""Satisfy the dependencies in the given string."""
return self._satisfy_depends(apt_pkg.parse_depends(dependsstr, False))
    def _satisfy_depends(self, depends):
        """Satisfy the dependencies.

        Marks any missing packages (collected in _need_pkgs) for install;
        returns False (and records the reason) on the first unsatisfiable
        group or failed mark_install.
        """
        # turn off MarkAndSweep via a action group (if available)
        try:
            _actiongroup = apt_pkg.ActionGroup(self._cache._depcache)
            _actiongroup  # pyflakes
        except AttributeError:
            pass
        # check depends
        for or_group in depends:
            if not self._is_or_group_satisfied(or_group):
                if not self._satisfy_or_group(or_group):
                    return False
        # now try it out in the cache
        for pkg in self._need_pkgs:
            try:
                self._cache[pkg].mark_install(from_user=False)
            except SystemError:
                self._failure_string = _("Cannot install '%s'") % pkg
                self._cache.clear()
                return False
        return True
@property
def missing_deps(self):
"""Return missing dependencies."""
self._dbg(1, "Installing: %s" % self._need_pkgs)
if not self._check_was_run:
raise AttributeError(
"property only available after check() was run")
return self._need_pkgs
    @property
    def required_changes(self):
        """Get the changes required to satisfy the dependencies.
        Returns: a tuple with (install, remove, unauthenticated)
        """
        install = []
        remove = []
        unauthenticated = []
        if not self._check_was_run:
            raise AttributeError(
                "property only available after check() was run")
        for pkg in self._cache:
            if pkg.marked_install or pkg.marked_upgrade:
                install.append(pkg.name)
                # check authentication, one authenticated origin is enough
                # libapt will skip non-authenticated origins then
                authenticated = False
                for origin in pkg.candidate.origins:
                    authenticated |= origin.trusted
                if not authenticated:
                    unauthenticated.append(pkg.name)
            if pkg.marked_delete:
                remove.append(pkg.name)
        return (install, remove, unauthenticated)
@staticmethod
def to_hex(in_data):
hex = ""
for (i, c) in enumerate(in_data):
if i % 80 == 0:
hex += "\n"
hex += "%2.2x " % ord(c)
return hex
@staticmethod
def to_strish(in_data):
s = ""
# py2 compat, in_data is type string
if type(in_data) == str:
for c in in_data:
if ord(c) < 10 or ord(c) > 127:
s += " "
else:
s += c
# py3 compat, in_data is type bytes
else:
for b in in_data:
if b < 10 or b > 127:
s += " "
else:
s += chr(b)
return s
def _get_content(self, part, name, auto_decompress=True, auto_hex=True):
if name.startswith("./"):
name = name[2:]
data = part.extractdata(name)
# check for zip content
if name.endswith(".gz") and auto_decompress:
io = StringIO(data)
gz = gzip.GzipFile(fileobj=io)
data = _("Automatically decompressed:\n\n")
data += gz.read()
# auto-convert to hex
try:
data = data.decode("utf-8")
except Exception:
new_data = _("Automatically converted to printable ascii:\n")
new_data += self.to_strish(data)
return new_data
return data
    def control_content(self, name):
        """ return the content of a specific control.tar.gz file """
        try:
            return self._get_content(self._debfile.control, name)
        except LookupError:
            # member not present in the archive
            return ""
    def data_content(self, name):
        """ return the content of a specific file of the data.tar member """
        try:
            return self._get_content(self._debfile.data, name)
        except LookupError:
            # member not present in the archive
            return ""
    def _dbg(self, level, msg):
        """Write debugging output to sys.stderr."""
        # class attribute `debug` (default 0) gates verbosity
        if level <= self.debug:
            print(msg, file=sys.stderr)
    def install(self, install_progress=None):
        """Install the package.

        Without a progress object dpkg is spawned directly and its exit
        status returned; otherwise the progress object drives the install.
        """
        if install_progress is None:
            return os.spawnlp(os.P_WAIT, "dpkg", "dpkg", "-i", self.filename)
        else:
            try:
                install_progress.start_update()
            except AttributeError:
                # fall back to the old camelCase progress API
                install_progress.startUpdate()
            res = install_progress.run(self.filename)
            try:
                install_progress.finish_update()
            except AttributeError:
                install_progress.finishUpdate()
            return res
class DscSrcPackage(DebPackage):
    """A locally available source package."""
    def __init__(self, filename=None, cache=None):
        # DebPackage.__init__ is called with filename=None on purpose so
        # that the .dsc-specific open() below (not DebPackage.open) parses
        # the file.
        DebPackage.__init__(self, None, cache)
        self.filename = filename
        self._depends = []
        self._conflicts = []
        self._installed_conflicts = set()
        self.pkgname = ""
        self.binaries = []
        if self.filename is not None:
            self.open(self.filename)
    @property
    def depends(self):
        """Return the build dependencies of the package"""
        return self._depends
    @property
    def conflicts(self):
        """Return the build conflicts of the package"""
        return self._conflicts
    @property
    def filelist(self):
        """Return the list of files associated with this dsc file"""
        # Files stanza looks like (hash, size, filename, ...)
        return self._sections['Files'].split()[2::3]
    def open(self, file):
        """Open the package."""
        # NOTE: the parameter shadows the builtin `file` (py2); the bare
        # open() call below still resolves to the builtin, not this method.
        depends_tags = ["Build-Depends", "Build-Depends-Indep"]
        conflicts_tags = ["Build-Conflicts", "Build-Conflicts-Indep"]
        fobj = open(file)
        tagfile = apt_pkg.TagFile(fobj)
        try:
            for sec in tagfile:
                # we only care about the stanza with the "Format:" tag, the
                # rest is gpg signature noise. we should probably have
                # bindings for apts OpenMaybeClearsignedFile()
                if "Format" not in sec:
                    continue
                for tag in depends_tags:
                    if tag not in sec:
                        continue
                    self._depends.extend(apt_pkg.parse_src_depends(sec[tag]))
                for tag in conflicts_tags:
                    if tag not in sec:
                        continue
                    self._conflicts.extend(apt_pkg.parse_src_depends(sec[tag]))
                if 'Source' in sec:
                    self.pkgname = sec['Source']
                if 'Binary' in sec:
                    self.binaries = [b.strip() for b in
                                     sec['Binary'].split(',')]
                # copy every tag of the stanza into our own sections dict
                for tag in sec.keys():
                    if tag in sec:
                        self._sections[tag] = sec[tag]
        finally:
            del tagfile
            fobj.close()
        s = _("Install Build-Dependencies for "
              "source package '%s' that builds %s\n") % (self.pkgname,
              " ".join(self.binaries))
        self._sections["Description"] = s
        self._check_was_run = False
    def check(self):
        """Check if the package is installable."""
        # remove installed build-conflicts first (refusing only if an
        # essential package would have to go)
        if not self.check_conflicts():
            for pkgname in self._installed_conflicts:
                if self._cache[pkgname]._pkg.essential:
                    raise Exception(_("An essential package would be removed"))
                self._cache[pkgname].mark_delete()
        # properties are ok now
        self._check_was_run = True
        # FIXME: a additional run of the check_conflicts()
        # after _satisfy_depends() should probably be done
        return self._satisfy_depends(self.depends)
def _test():
    """Test function"""
    from apt.cache import Cache
    from apt.progress.base import InstallProgress
    cache = Cache()
    # exercise virtual-package helpers on a well-known virtual package
    vp = "www-browser"
    print("%s virtual: %s" % (vp, cache.is_virtual_package(vp)))
    providers = cache.get_providing_packages(vp)
    print("Providers for %s :" % vp)
    for pkg in providers:
        print(" %s" % pkg.name)
    # exercise DebPackage with a .deb supplied on the command line
    d = DebPackage(sys.argv[1], cache)
    print("Deb: %s" % d.pkgname)
    if not d.check():
        print("can't be satified")
        print(d._failure_string)
    print("missing deps: %s" % d.missing_deps)
    print(d.required_changes)
    print(d.filelist)
    print("Installing ...")
    ret = d.install(InstallProgress())
    print(ret)
    #s = DscSrcPackage(cache, "../tests/3ddesktop_0.2.9-6.dsc")
    #s.check_dep()
    #print "Missing deps: ",s.missingDeps
    #print "Print required changes: ", s.requiredChanges
    # exercise dependency satisfaction on a bare DscSrcPackage
    s = DscSrcPackage(cache=cache)
    d = "libc6 (>= 2.3.2), libaio (>= 0.3.96) | libaio1 (>= 0.3.96)"
    print(s._satisfy_depends(apt_pkg.parse_depends(d, False)))
if __name__ == "__main__":
    _test()
| |
import pprint
import pickle
import socket
import exceptions
import time
import uuid
import numpy as np
import threading
import os
import sys
import cStringIO
import io
import base64
import traceback
import gc
import json
import obspy
import datetime
from obspy.core import read,UTCDateTime,Stream,Trace
import multiprocessing
import copy
from subprocess import *
from time import gmtime, strftime
from array import array
#from prov.model import ProvDocument, Namespace, Literal, PROV, Identifier
def sampleWorker(input,pip,queue):
pip[0].input=copy.deepcopy(input)
exec("output=pip[0].process()")
pip[1].input=copy.deepcopy(output)
exec("output=pip[1].process()")
queue.put([output,[output["metadata"]]])
queue.close()
def commandChain(commands,envhpc,queue=None):
    # Run each shell command in sequence; only the LAST command's
    # stdout/stderr survive the loop (earlier outputs are discarded) and
    # are either pushed onto *queue* or returned.
    # NOTE(review): shell=True with string commands is an injection risk if
    # any command text comes from untrusted input — confirm callers.
    for cmd in commands:
        print cmd
        process = Popen(cmd, stdout=PIPE,stderr=PIPE,env=envhpc,shell=True)
        stdoutdata, stderrdata=process.communicate()
    if queue!=None:
        queue.put([stdoutdata,stderrdata])
        queue.close()
    else:
        return stdoutdata, stderrdata
def launchPipelines(inputs,pipelines):
    # Fan each pipeline out to its own process (consuming one input per
    # pipeline from the front of *inputs*). Results are drained from the
    # shared queue BEFORE join() — this avoids deadlocking on a full queue.
    output = None
    bulkprov= []
    jobs =[]
    queues = []
    result_queue = multiprocessing.Queue()
    #pool = multiprocessing.Pool(processes=8)
    for pip in pipelines:
        p = multiprocessing.Process(target=pipelineWorker, args=(inputs.pop(0),pip,result_queue))
        # pool.apply_async(pipelineWorker,(inputs.pop(0),pip,result_queue))
        jobs.append(p)
        # queues.append(result_queue)
        print p
    for j in jobs:
        j.start()
    results=[]
    # one result per pipeline; order is completion order, not submit order
    for pip in pipelines:
        results.append(result_queue.get())
    for j in jobs:
        j.join()
    return results
def pipelineWorker(input,PEs,queue):
output = None
data=input
bulkprov= []
for pe in PEs:
pe.input=data
output=pe.process()
bulkprov.append(output["metadata"])
data=output
queue.put([output,bulkprov])
queue.close()
def num(s):
    """Parse *s* as an int, falling back to float for non-integer text.

    Raises ValueError when *s* is neither.
    """
    try:
        return int(s)
    except ValueError:
        # FIX: catch the builtin ValueError directly instead of the
        # py2-only `exceptions` module attribute (works on py2 and py3).
        return float(s)
class SeismoPreprocessingActivity(object):
"""
Initiates class' properties by reading them from the hashmap map previously populated by the workflow engine
"""
def __init__ (self,name="SeismoActivity",input=None,vercejson=None,params=None,stdoutredirect=True,caller=None,provon=True,iterationIndex=None,runId=None, username=None,iterationId=None, instanceId=None,mapping="simple"):
try:
self.mapping=mapping
self.iterationIndex=0
self.name=name
self.metadata={};
self.w3c_prov={};
self._={};
self.annotations={};
self.stateful=False;
self.inMetaStreams=None
self.provon=provon;
if provon!=False:
if iterationId is None:
self.iterationId = name+self.__getUniqueId()
if instanceId is None:
self.instanceId = 'Invoker-instance-'+socket.gethostname()
#self.iterationIndex=iterationIndex
self.runId=vercejson["runId"]
self.username=vercejson["username"]
self.outputstreams=list();
self.outputstreamsbulk=list();
self.output=list()
self.streams=list();
self.derivationIds=list()
self.streamItemsLocations={}
self.streamItemsFormat={}
self.streamItemsMeta={}
self.streamItemsControl={}
self.streamItemsAnnotations={}
self.streamItemsPorts={}
self.caller=caller;
' list of joinable subprocess '
self.processes=[]
self.error="";
if name!=None:
self.name=name;
"""
Read the system-parameters JSON
"""
if vercejson!=None:
self.verce=vercejson
elif stdoutredirect and len(sys.argv) >= 2:
self.verce=json.loads(sys.argv[1])
else:
self.verce={}
"""
Reads the user-parameters JSON
"""
if params!=None:
self.parameters=params;
elif stdoutredirect and len(sys.argv) >= 3:
self.parameters=json.loads(sys.argv[2])
else:
self.parameters={}
"""
Read the input JSON
"""
if stdoutredirect:
try:
self.input = json.loads(sys.stdin.readline().strip())
except Exception, err:
self.input = input;
else:
self.input = input;
self.stdoutredirect=stdoutredirect;
self.outputdest=self.verce["outputdest"]
self.rootpath=self.verce["inputrootpath"];
self.outputid=self.verce["outputid"];
if self.rootpath==None:
self.rootpath="None"
if self.outputdest==None:
self.outputdest="None"
except Exception, err:
self.error="";
self.error+= self.name+" Initialisation Error: "+traceback.format_exc();
# sys.stderr.write('ERROR: '+ self.name+' Initialisation Error: %s\n' % str(err))
traceback.print_exc(file=sys.stderr)
def __toW3Cprov_api(self):
g = ProvDocument()
ve = Namespace('ve', "http://verce.eu")
g.add_namespace("ve", "http://verce.eu")
g.agent('_ag:'+self.username, {'prov:type': PROV["Person"]})
g.activity('_ac:'+self.iterationId, startTime=str(self.startTime), endTime=str(self.endTime), other_attributes={
've:iterationIndex':self.iterationIndex,
've:instanceId':self.instanceId,
've:stateful':self.stateful,
've:site':socket.gethostname(),
've:errors':self.error[:500],
've:pid':'%s' % (os.getpid()),
've:parameters':str(self.parameters),
've:name':self.name,
've:type':'lineage'}
)
for x in self.metadata["streams"]:
i=0
print x
g.entity('_e:'+x["id"],other_attributes={
"ve:annotations": str(x["annotations"]),
"ve:location": str(x["location"]),
"ve:format": str(x["format"])}
)
g.wasGeneratedBy("_:wgb-"+self.iterationId+str(i),'_e:'+x["id"],self.endTime)
i=i+1
return json.loads(g.serialize())
def __toW3Cprov(self):
W3CDic={}
W3CDic.update({"prefix": {
"ve": "http://verce.eu",
"prov":"http://w3c-prov.org"
}
})
W3CDic.update({"agent":{self.username:{}}})
W3CDic.update({"activity":{self.iterationId:{
've:iterationIndex':self.iterationIndex,
've:instanceId':self.instanceId,
've:annotations':self.dicToKeyVal(self.annotations,True),
've:stateful':self.stateful,
've:site':socket.gethostname(),
've:parameters':self.dicToKeyVal(self.parameters),
've:errors':self.error[:500],
've:pid':'%s' % (os.getpid()),
've:name':self.name,
'prov:startTime':str(self.startTime),
'prov:endTime':str(self.endTime),
've:type':'lineage'}
}})
W3CDic.update({"entity":{}})
W3CDic.update({"wasGeneratedBy":{}})
i=0
for x in self.metadata["streams"]:
i=0
W3CDic["entity"].update({x["id"]:{"ve:content":x["content"]}})
W3CDic["entity"][x["id"]].update({
"ve:annotations": x["annotations"],
"ve:location": x["location"],
"ve:format": x["format"]}
)
W3CDic["wasGeneratedBy"].update({"_:wgb-"+self.iterationId+x["id"]:{"prov:entity":x["id"],
"prov:activity": self.iterationId}})
W3CDic.update({"wasDerivedFrom":{}})
i=0
for d in self.derivationIds:
W3CDic["wasDerivedFrom"].update({"_:wdf-"+self.iterationId :{"prov:entity":x["id"],
"prov:activity": self.iterationId}})
W3CDic.update({"wasAssociatedWith":{}})
return W3CDic
def __getUniqueId(self):
return socket.gethostname()+"-"+str(os.getpid())+"-"+str(uuid.uuid1())
"""
Imports Input metadata if available, the metadata will be available in the self.inMetaStreams property as a Dictiinary"
"""
def __importInputMetadata(self):
try:
self.inMetaStreams=self.input["metadata"]["streams"];
except Exception,err:
None
"""
TBD: Produces a bulk output with data,location,format,metadata: to be used in exclusion of
self.streamItemsLocations
self.streamItemsFormat
self.outputstreams
"""
def addOutput(self,data, location="", format="", metadata={},control={}):
self.outputstreams.append(data)
self.streamItemsLocations[str(id(data))]=location
self.streamItemsFormat[str(id(data))]=format
self.streamItemsMeta[str(id(data))]=metadata
self.streamItemsControl[str(id(data))]=control
"""
Reads and formats the steream's metadata
"""
def __getMetadataWrapper(self):
try:
if (len(self.outputstreams)==0):
self.outputstreams=self.streams
if self.provon!=False:
self.metadata.update({"streams":self.getMetadata()})
except Exception, err:
streamlist=list()
streamItem={}
streammeta=list()
streamItem.update({"content": streammeta})
streamItem.update({"id":self.__getUniqueId()});
streamlist.append(streamItem)
self.metadata.update({"streams":streamlist});
self.error+=self.name+" Metadata extraction Error: "+str(err);
# sys.stderr.write('ERROR: '+ self.name+' Metadata extraction Error: %s\n' % str(err))
# traceback.print_exc(file=sys.stderr)
def extractItemMetadata(self,st):
try:
streammeta=list()
for tr in st:
metadic={}
metadic.update({"id":str(uuid.uuid1())});
for attr, value in tr.stats.__dict__.iteritems():
if attr=="mseed":
mseed={}
for a,v in value.__dict__.iteritems():
try:
if type(v)==obspy.core.utcdatetime.UTCDateTime:
mseed.update({a:str(v)});
else:
mseed.update({a:float(v)});
except Exception,e:
mseed.update({a:str(v)});
metadic.update({"mseed":mseed});
else:
try:
if type(value)==obspy.core.utcdatetime.UTCDateTime:
metadic.update({attr:str(value)});
else:
metadic.update({attr:float(value)});
except Exception,e:
metadic.update({attr:str(value)});
streammeta.append(metadic);
except Exception, err:
streammeta=str(st);
return streammeta
    def getMetadata(self):
        """Build the list of per-stream metadata items for all output streams.

        For each output stream: uses the metadata registered via addOutput()
        when present, otherwise extracts it from the stream itself; then
        attaches id, format, location, port, control, and annotation fields
        keyed by ``str(id(st))``.

        NOTE(review): the local ``i`` is assigned but never used.
        """
        streamlist=list()
        i=0
        for st in self.outputstreams:
            streamItem={}
            streammeta={}
#            try:
#                if len(st)>0:
            try:
                # Prefer metadata explicitly registered for this stream.
                if (len(self.streamItemsMeta[str(id(st))].keys())!=0):
                    streammeta=self.streamItemsMeta[str(id(st))]
                else:
                    streammeta=self.extractItemMetadata(st);
            except Exception, err:
                # No registered entry for this stream: extract instead.
                traceback.print_exc(file=sys.stderr)
                streammeta=self.extractItemMetadata(st);
            # "content" is always a list, even for scalar/string metadata.
            if type(streammeta) != list:
                streamItem.update({"content": [streammeta]})
            else:
                streamItem.update({"content": streammeta})
#            except Exception, err:
#                streammeta=str(st)
            streamItem.update({"id":self.__getUniqueId()});
            streamItem.update({"format":""})
            streamItem.update({"location":""})
            streamItem.update({"annotations":[]})
            # Ports are consumed in registration order (destructive pop).
            if (len(self.streamItemsPorts)!=0):
                streamItem.update({"port": self.streamItemsPorts.pop(0)})
            if (self.streamItemsControl!={}):
                streamItem.update(self.streamItemsControl[str(id(st))])
            if (self.streamItemsLocations!={}):
                streamItem.update({"location": self.streamItemsLocations[str(id(st))]})
            if (self.streamItemsFormat!={}):
                streamItem.update({"format": self.streamItemsFormat[str(id(st))]})
            if (self.streamItemsAnnotations!={}):
                streamItem.update({"annotations": self.dicToKeyVal(self.streamItemsAnnotations[str(id(st))],True)})
            streamlist.append(streamItem)
        return streamlist
def buildDerivation(self,data):
try:
derivation={'DerivedFromDatasetID':data['id'],'TriggeredByProcessIterationID':data['TriggeredByProcessIterationID']}
self.derivationIds.append(derivation)
except Exception, err:
if self.provon!=False:
self.error+= "Build Derivation Error:"+str(err);
"""
    Read the input data from a base64 encoded seed stream; if that fails, pass the data to the user, who will take care of decoding it according to the expected input
"""
    def importInputData(self):
        """Decode the input message's stream items into self.streams.

        Consumes self.input["streams"] destructively (pop(0) until empty).
        Each item's "data" field is decoded as base64+obspy read() when
        stdout redirection is active, as a pickle otherwise, and kept raw
        if both fail.  Items without a "data" field are skipped.  With
        provenance enabled, a derivation record is built per decoded item.
        """
        try:
            i=0
            self.streams=list();
            # Destructive read: loop until the input stream list is empty.
            while (self.input["streams"]):
                streamItem=self.input["streams"].pop(0);
                data=None
                if "data" not in streamItem:
                    continue
                try:
                    if self.stdoutredirect==True:
                        # Payload travelled as base64-encoded miniSEED text.
                        data=read(cStringIO.StringIO(base64.b64decode(streamItem["data"])))
                    else:
                        # In-memory transfer: payload is a pickle.
                        data=pickle.loads(streamItem["data"])
                except Exception, err:
                    # Undecodable: hand the raw payload to the user code.
                    data=streamItem["data"];
                finally:
                    self.streams.append(data)
                    if self.provon!=False:
                        self.buildDerivation(streamItem)
        except Exception, err:
            self.output="";
            self.error+= "Reading Input Error:"+str(err);
            # sys.stderr.write('ERROR: '+ self.name+' Reading Input Error: %s\n' % str(err))
            traceback.print_exc(file=sys.stderr)
"""
Performs the actual analysis
"""
    def __computewrapper(self):
        """Run the user's compute() step, timing it and capturing failures.

        With exactly one input stream, exposes it as self.st for
        convenience.  Errors from compute() are appended to self.error
        rather than propagated; metadata is always (re)built afterwards
        via the finally clause.
        """
        try:
            if len(self.streams)==1:
                self.st=self.streams[0];
            self.startTime=datetime.datetime.utcnow()
            try:
                self.compute()
                # The next line is a stray no-op string statement left in the
                # original source (kept verbatim; it has no effect).
                'checks if there are subprocesses still running and joins them'
            except Exception, err:
                traceback.print_exc(file=sys.stderr)
                self.error+=" Compute Error: %s\n" % traceback.format_exc()
            self.endTime=datetime.datetime.utcnow()
        except Exception, err:
            self.error+=" Compute Wrapper Error: %s\n" % traceback.format_exc()
            # sys.stderr.write(self.name+" Compute Wrapper Error: %s\n" % str(err))
            traceback.print_exc(file=sys.stderr)
        finally:
            # Always rebuild the streams metadata, even on failure.
            self.__getMetadataWrapper()
def compute(self):
try:
output=None;
except Exception, err:
self.error+=self.name+" Computation Failed: "+str(err);
sys.stderr.write(self.name+" Computation Failed: %s\n" % str(err))
return output
"""
Controls all the different phases of the execution of the script
"""
    def process (self):
        """Drive one full iteration: import, compute, write, and package.

        On any failure, metadata is still rebuilt and a minimal error
        payload (streams=[{"data": None}]) is written to stdout (when
        redirection is active) before packaging.

        Returns the packaged output dict from packageAll().
        """
        try:
            self.iterationIndex += 1
            self.importInputData()
            self.__importInputMetadata()
            self.__computewrapper()
            self.__writeOutputwrapper();
            return self.packageAll();
        except Exception, err:
            self.__getMetadataWrapper()
            # Minimal error envelope: no stream data, but metadata + error text.
            output={"class":"eu.admire.seismo.metadata.Verce","streams":[{"data":None}],"metadata":self.metadata,"error":self.error,"pid":"%s" % (os.getpid(),)}
            # sys.stderr.write(self.name+" pid= %s" % (os.getpid(),)+" : metadata: "+json.dumps(self.metadata)+"\n")
            if self.stdoutredirect==True:
                # NOTE(review): stdout is closed here, so nothing further can
                # be written in this process after an error — confirm intended.
                sys.stdout.write(json.dumps(output)+"\n");
                sys.stdout.flush();
                sys.stdout.close();
            return self.packageAll();
"""
Writes the output results in memory
"""
    def __writeOutputwrapper(self):
        """Invoke writeOutput(), recording any failure before re-raising.

        The error text goes to both self.error and stderr; the exception
        then propagates to process(), which produces the error envelope.
        """
        try:
            self.writeOutput()
        except Exception, err:
            self.error+=self.name+" Writing output Error: "+str(err);
            sys.stderr.write('ERROR: '+self.name+' Writing output Error: '+str(err))
            # self.map.put("output","");
            traceback.print_exc(file=sys.stderr)
            raise
    def writeOutput(self):
        """Serialize every output stream into self.output as pickle payloads.

        With provenance enabled, each transfer record also carries the
        matching metadata stream id and the triggering iteration id
        (indexes into self.metadata["streams"] in registration order).
        Raises on failure after logging to self.error/stderr.
        """
        try:
            self.memory_file = cStringIO.StringIO();
            # if (self.st!=None):
            # for tr in self.st:
            # tr.data=np.int32(tr.data);
            i=0
            for st in self.outputstreams:
                self.streamtransfer={}
                if self.provon!=False:
                    # Link the payload to its metadata entry by position.
                    self.streamtransfer={"data":pickle.dumps(st),"id":self.metadata["streams"][i]["id"],"TriggeredByProcessIterationID":self.iterationId}
                else:
                    self.streamtransfer={"data":pickle.dumps(st)}
                self.output.append(self.streamtransfer);
                i=i+1
        except Exception, err:
            self.error+=self.name+" Writing output Error: "+str(err);
            sys.stderr.write('ERROR: '+self.name+' Writing output Error: '+str(err))
            # self.map.put("output","");
            traceback.print_exc(file=sys.stderr)
            raise
def dicToKeyVal(self,dict,valueToString=False):
try:
alist=list()
for k, v in dict.iteritems():
adic={}
adic.update({"key":str(k)})
if valueToString:
adic.update({"val":str(v)})
else:
try:
v =num(v)
adic.update({"val":v})
except Exception,e:
adic.update({"val":str(v)})
alist.append(adic)
return alist
except Exception, err:
self.error+=self.name+" dicToKeyVal output Error: "+str(err);
sys.stderr.write('ERROR: '+self.name+' dicToKeyVal output Error: '+str(err))
# self.map.put("output","");
traceback.print_exc(file=sys.stderr)
"""
Packages everything into the JSON. Its content will be handed over to the workflow engine after the script execution terminates.
"""
    def packageAll(self):
        """Assemble the final JSON-serializable result envelope.

        With provenance enabled, enriches self.metadata with the lineage
        fields (identity, timing, parameters, derivations, errors) before
        building the output dict.  When stdout redirection is active the
        envelope is also written to stdout (which is then closed).
        Re-raises on packaging failure after recording it in self.error.

        Returns the output dict: class, streams, metadata, error, pid.
        """
        try:
            if self.provon!=False:
                self.metadata.update({'actedOnBehalfOf': self.name})
                self.metadata.update({'_id':self.iterationId})
                self.metadata.update({'worker': socket.gethostname()})
                self.metadata.update({'iterationIndex':self.iterationIndex})
                self.metadata.update({'iterationId':self.iterationId})
                self.metadata.update({'instanceId':self.name+"-Instance-"+socket.gethostname()+self.__getUniqueId()})
                self.metadata.update({'annotations':self.dicToKeyVal(self.annotations,True)})
                self.metadata.update({'stateful':self.stateful})
                self.metadata.update({'site':str("")})
                self.metadata.update({'parameters':self.dicToKeyVal(self.parameters)})
                # Error text is truncated to keep the envelope bounded.
                self.metadata.update({'errors':self.error[:500]})
                self.metadata.update({'pid':'%s' % (os.getpid())})
                self.metadata.update({'derivationIds':self.derivationIds})
                self.metadata.update({'name':self.name})
                self.metadata.update({'runId':self.runId})
                self.metadata.update({'username':self.username})
                self.metadata.update({'startTime':str(self.startTime)})
                self.metadata.update({'endTime':str(self.endTime)})
                self.metadata.update({'mapping': self.mapping})
                self.metadata.update({'type':'lineage'})
                #if self.provon=="W3C":
                #    self.w3c_prov=self.__toW3Cprov_api()
            output={"class":"eu.admire.seismo.metadata.Verce","streams": self.output,"metadata":self.metadata,"error":self.error[:500],"pid":"%s" % (os.getpid(),)}
            if self.stdoutredirect==True:
                # NOTE(review): stdout is closed after writing — nothing can
                # be printed by this process afterwards.
                sys.stdout.write(json.dumps(output));
                sys.stdout.flush();
                sys.stdout.close();
            return output;
        except Exception, err:
            self.error+=" Packaging Error: "+str(err)+"\n" ;
            # sys.stderr.write('ERROR: '+self.name+' Packaging output Error: '+str(err))
            # traceback.print_exc(file=sys.stderr)
            raise
def joinChains(self):
# print "NUM process a="+str(len(self.processes))
# print "joining...."
if len(self.processes)>0:
for p,q in self.processes:
stdoutdata, stderrdata = q.get()
# print "READ process ="+str(p)
self.error+=stderrdata
self.processes=[]
# print "NUM process b="+str(len(self.processes))
def launchParallelCommandsChain(self,arguments):
queue = multiprocessing.Queue()
arguments.append(queue)
p = multiprocessing.Process(target=commandChain, args=tuple(arguments))
self.processes.append((p,queue))
p.start()
print p
#
| |
# Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test AsyncIOMotorCursor."""
import asyncio
import sys
import unittest
from functools import partial
from unittest import SkipTest
import greenlet
from pymongo.errors import InvalidOperation, ExecutionTimeout
from pymongo.errors import OperationFailure
from motor import motor_asyncio
from test import one, env
from test.asyncio_tests import (asyncio_test, AsyncIOTestCase,
server_is_mongos, at_least, get_command_line)
def setup_package():
    """Run once before any test in this package executes.

    Initializes the shared test environment (connection settings and
    server state) via ``test.env``.
    """
    env.setup()
class TestAsyncIOCursor(AsyncIOTestCase):
    """Tests for AsyncIOMotorCursor: iteration (fetch_next/each/to_list),
    counting, slicing and indexing, rewind, cursor lifecycle (explicit
    close and __del__ on main/child greenlets), and exhaust cursors.

    Fix: the final wait_for_cursor() call in test_exhaust passed its last
    two arguments in the wrong order (retrieved, cursor_id) — inconsistent
    with every other call in this class.
    """

    def setUp(self):
        # TODO: fix this
        if not env.initialized:
            env.setup()
        super().setUp()

    def test_cursor(self):
        cursor = self.collection.find()
        self.assertTrue(isinstance(cursor, motor_asyncio.AsyncIOMotorCursor))
        self.assertFalse(cursor.started, "Cursor shouldn't start immediately")

    @asyncio_test
    def test_count(self):
        yield from self.make_test_data()
        coll = self.collection
        self.assertEqual(200, (yield from coll.find().count()))
        self.assertEqual(
            100,
            (yield from coll.find({'_id': {'$gt': 99}}).count()))
        where = 'this._id % 2 == 0 && this._id >= 50'
        self.assertEqual(75, (yield from coll.find().where(where).count()))

    @asyncio_test
    def test_fetch_next(self):
        yield from self.make_test_data()
        coll = self.collection
        # 200 results, only including _id field, sorted by _id.
        cursor = coll.find({}, {'_id': 1}).sort('_id').batch_size(75)
        self.assertEqual(None, cursor.cursor_id)
        self.assertEqual(None, cursor.next_object())  # Haven't fetched yet.
        i = 0
        while (yield from cursor.fetch_next):
            self.assertEqual({'_id': i}, cursor.next_object())
            i += 1
            # With batch_size 75 and 200 results, cursor should be exhausted on
            # the server by third fetch.
            if i <= 150:
                self.assertNotEqual(0, cursor.cursor_id)
            else:
                self.assertEqual(0, cursor.cursor_id)
        self.assertEqual(False, (yield from cursor.fetch_next))
        self.assertEqual(None, cursor.next_object())
        self.assertEqual(0, cursor.cursor_id)
        self.assertEqual(200, i)

    @asyncio_test
    def test_fetch_next_delete(self):
        coll = self.collection
        yield from coll.insert({})
        # Decref'ing the cursor eventually closes it on the server.
        cursor = coll.find()
        yield from cursor.fetch_next
        cursor_id = cursor.cursor_id
        retrieved = cursor.delegate._Cursor__retrieved
        del cursor
        yield from self.wait_for_cursor(coll, cursor_id, retrieved)

    @asyncio_test
    def test_fetch_next_without_results(self):
        coll = self.collection
        # Nothing matches this query.
        cursor = coll.find({'foo': 'bar'})
        self.assertEqual(None, cursor.next_object())
        self.assertEqual(False, (yield from cursor.fetch_next))
        self.assertEqual(None, cursor.next_object())
        # Now cursor knows it's exhausted.
        self.assertEqual(0, cursor.cursor_id)

    @asyncio_test
    def test_fetch_next_is_idempotent(self):
        # Subsequent calls to fetch_next don't do anything
        yield from self.make_test_data()
        coll = self.collection
        cursor = coll.find()
        self.assertEqual(None, cursor.cursor_id)
        yield from cursor.fetch_next
        self.assertTrue(cursor.cursor_id)
        self.assertEqual(101, cursor._buffer_size())
        yield from cursor.fetch_next  # Does nothing
        self.assertEqual(101, cursor._buffer_size())

    @asyncio_test
    def test_fetch_next_exception(self):
        coll = self.collection
        cursor = coll.find()
        cursor.delegate._Cursor__id = 1234  # Not valid on server.
        with self.assertRaises(OperationFailure):
            yield from cursor.fetch_next
        # Avoid the cursor trying to close itself when it goes out of scope.
        cursor.delegate._Cursor__id = None

    @asyncio_test
    def test_each(self):
        yield from self.make_test_data()
        cursor = self.collection.find({}, {'_id': 1}).sort('_id')
        future = asyncio.Future(loop=self.loop)
        results = []

        def callback(result, error):
            if error:
                raise error
            if result is not None:
                results.append(result)
            else:
                # Done iterating.
                future.set_result(True)

        cursor.each(callback)
        yield from future
        expected = [{'_id': i} for i in range(200)]
        self.assertEqual(expected, results)

    @asyncio_test
    def test_to_list_argument_checking(self):
        # We need more than 10 documents so the cursor stays alive.
        yield from self.make_test_data()
        coll = self.collection
        cursor = coll.find()
        with self.assertRaises(ValueError):
            yield from cursor.to_list(-1)
        with self.assertRaises(TypeError):
            yield from cursor.to_list('foo')

    @asyncio_test
    def test_to_list_with_length(self):
        yield from self.make_test_data()
        coll = self.collection
        cursor = coll.find().sort('_id')

        def expected(start, stop):
            return [{'_id': i} for i in range(start, stop)]

        self.assertEqual(expected(0, 10), (yield from cursor.to_list(10)))
        self.assertEqual(expected(10, 100), (yield from cursor.to_list(90)))
        # Test particularly rigorously around the 101-doc mark, since this is
        # where the first batch ends
        self.assertEqual(expected(100, 101), (yield from cursor.to_list(1)))
        self.assertEqual(expected(101, 102), (yield from cursor.to_list(1)))
        self.assertEqual(expected(102, 103), (yield from cursor.to_list(1)))
        self.assertEqual([], (yield from cursor.to_list(0)))
        self.assertEqual(expected(103, 105), (yield from cursor.to_list(2)))
        # Only 95 docs left, make sure length=100 doesn't error or hang
        self.assertEqual(expected(105, 200), (yield from cursor.to_list(100)))
        self.assertEqual(0, cursor.cursor_id)
        yield from cursor.close()

    @asyncio_test
    def test_to_list_with_length_of_none(self):
        yield from self.make_test_data()
        collection = self.collection
        cursor = collection.find()
        docs = yield from cursor.to_list(None)  # Unlimited.
        count = yield from collection.count()
        self.assertEqual(count, len(docs))

    @asyncio_test
    def test_to_list_tailable(self):
        coll = self.collection
        cursor = coll.find(tailable=True)
        # Can't call to_list on tailable cursor.
        with self.assertRaises(InvalidOperation):
            yield from cursor.to_list(10)

    @asyncio_test
    def test_limit_zero(self):
        # Limit of 0 is a weird case that PyMongo handles specially, make sure
        # Motor does too. cursor.limit(0) means "remove limit", but cursor[:0]
        # or cursor[5:5] sets the cursor to "empty".
        coll = self.collection
        # make sure we do not have _id: 1
        yield from coll.remove({'_id': 1})
        yield from coll.insert({'_id': 1})
        resp = yield from coll.find()[:0].fetch_next
        self.assertEqual(False, resp)
        resp = yield from coll.find()[5:5].fetch_next
        self.assertEqual(False, resp)
        resp = yield from coll.find()[:0].to_list(length=1000)
        self.assertEqual([], resp)
        resp = yield from coll.find()[5:5].to_list(length=1000)
        self.assertEqual([], resp)

    @asyncio_test
    def test_cursor_explicit_close(self):
        yield from self.make_test_data()
        collection = self.collection
        cursor = collection.find()
        yield from cursor.fetch_next
        self.assertTrue(cursor.alive)
        yield from cursor.close()
        # Cursor reports it's alive because it has buffered data, even though
        # it's killed on the server
        self.assertTrue(cursor.alive)
        retrieved = cursor.delegate._Cursor__retrieved
        yield from self.wait_for_cursor(collection, cursor.cursor_id,
                                        retrieved)

    @asyncio_test
    def test_each_cancel(self):
        yield from self.make_test_data()
        loop = self.loop
        collection = self.collection
        results = []
        future = asyncio.Future(loop=self.loop)

        def cancel(result, error):
            if error:
                future.set_exception(error)
            else:
                results.append(result)
                loop.call_soon(canceled)
                return False  # Cancel iteration.

        def canceled():
            try:
                self.assertFalse(cursor.delegate._Cursor__killed)
                self.assertTrue(cursor.alive)
                # Resume iteration
                cursor.each(each)
            except Exception as e:
                future.set_exception(e)

        def each(result, error):
            if error:
                future.set_exception(error)
            elif result:
                # (A stray no-op `pass` statement was removed here.)
                results.append(result)
            else:
                # Complete
                future.set_result(None)

        cursor = collection.find()
        cursor.each(cancel)
        yield from future
        self.assertEqual((yield from collection.count()), len(results))

    @asyncio_test
    def test_each_close(self):
        yield from self.make_test_data()  # 200 documents.
        loop = self.loop
        collection = self.collection
        results = []
        future = asyncio.Future(loop=self.loop)

        def callback(result, error):
            if error:
                future.set_exception(error)
            else:
                results.append(result)
                if len(results) == 50:
                    # Prevent further calls.
                    # NOTE(review): close() is invoked twice here — once
                    # directly (its returned future is never awaited) and
                    # once wrapped in a Task. Looks redundant; confirm
                    # before removing either line.
                    cursor.close()
                    asyncio.Task(cursor.close(), loop=self.loop)
                    # Soon, finish this test. Leave a little time for further
                    # calls to ensure we've really canceled them by calling
                    # cursor.close().
                    loop.call_later(0.1, partial(future.set_result, None))

        cursor = collection.find()
        cursor.each(callback)
        yield from future
        self.assertGreater(150, len(results))

    def test_cursor_slice_argument_checking(self):
        collection = self.collection
        for arg in '', None, {}, []:
            self.assertRaises(TypeError, lambda: collection.find()[arg])
        self.assertRaises(IndexError, lambda: collection.find()[-1])

    @asyncio_test
    def test_cursor_slice(self):
        # This is an asynchronous copy of PyMongo's test_getitem_slice_index in
        # test_cursor.py
        yield from self.make_test_data()
        coll = self.collection
        self.assertRaises(IndexError, lambda: coll.find()[-1])
        self.assertRaises(IndexError, lambda: coll.find()[1:2:2])
        self.assertRaises(IndexError, lambda: coll.find()[2:1])
        result = yield from coll.find()[0:].to_list(length=1000)
        self.assertEqual(200, len(result))
        result = yield from coll.find()[20:].to_list(length=1000)
        self.assertEqual(180, len(result))
        result = yield from coll.find()[99:].to_list(length=1000)
        self.assertEqual(101, len(result))
        result = yield from coll.find()[1000:].to_list(length=1000)
        self.assertEqual(0, len(result))
        result = yield from coll.find()[20:25].to_list(length=1000)
        self.assertEqual(5, len(result))
        # Any slice overrides all previous slices
        result = yield from coll.find()[20:25][20:].to_list(length=1000)
        self.assertEqual(180, len(result))
        result = yield from coll.find()[20:25].limit(0).skip(20).to_list(
            length=1000)
        self.assertEqual(180, len(result))
        result = yield from coll.find().limit(0).skip(20)[20:25].to_list(
            length=1000)
        self.assertEqual(5, len(result))
        result = yield from coll.find()[:1].to_list(length=1000)
        self.assertEqual(1, len(result))
        result = yield from coll.find()[:5].to_list(length=1000)
        self.assertEqual(5, len(result))

    @asyncio_test(timeout=30)
    def test_cursor_index(self):
        yield from self.make_test_data()
        coll = self.collection
        cursor = coll.find().sort([('_id', 1)])[0]
        yield from cursor.fetch_next
        self.assertEqual({'_id': 0}, cursor.next_object())
        self.assertEqual(
            [{'_id': 5}],
            (yield from coll.find().sort([('_id', 1)])[5].to_list(100)))
        # Only 200 documents, so 1000th doc doesn't exist. PyMongo raises
        # IndexError here, but Motor simply returns None.
        cursor = coll.find()[1000]
        self.assertFalse((yield from cursor.fetch_next))
        self.assertEqual(None, cursor.next_object())
        self.assertEqual([], (yield from coll.find()[1000].to_list(100)))

    @asyncio_test
    def test_cursor_index_each(self):
        yield from self.make_test_data()
        coll = self.collection
        results = set()
        futures = [asyncio.Future(loop=self.loop) for _ in range(3)]

        def each(result, error):
            if error:
                raise error
            if result:
                results.add(result['_id'])
            else:
                futures.pop().set_result(None)

        coll.find({}, {'_id': 1}).sort([('_id', 1)])[0].each(each)
        coll.find({}, {'_id': 1}).sort([('_id', 1)])[5].each(each)
        # Only 200 documents, so 1000th doc doesn't exist. PyMongo raises
        # IndexError here, but Motor simply returns None, which won't show up
        # in results.
        coll.find()[1000].each(each)
        yield from asyncio.gather(*futures, loop=self.loop)
        self.assertEqual(set([0, 5]), results)

    @asyncio_test
    def test_rewind(self):
        yield from self.collection.insert([{}, {}, {}])
        cursor = self.collection.find().limit(2)
        count = 0
        while (yield from cursor.fetch_next):
            cursor.next_object()
            count += 1
        self.assertEqual(2, count)
        cursor.rewind()
        count = 0
        while (yield from cursor.fetch_next):
            cursor.next_object()
            count += 1
        self.assertEqual(2, count)
        cursor.rewind()
        count = 0
        while (yield from cursor.fetch_next):
            cursor.next_object()
            break
        cursor.rewind()
        while (yield from cursor.fetch_next):
            cursor.next_object()
            count += 1
        self.assertEqual(2, count)
        self.assertEqual(cursor, cursor.rewind())

    @asyncio_test
    def test_del_on_main_greenlet(self):
        # Since __del__ can happen on any greenlet, cursor must be
        # prepared to close itself correctly on main or a child.
        yield from self.make_test_data()
        collection = self.collection
        cursor = collection.find()
        yield from cursor.fetch_next
        cursor_id = cursor.cursor_id
        retrieved = cursor.delegate._Cursor__retrieved
        del cursor
        yield from self.wait_for_cursor(collection, cursor_id, retrieved)

    @asyncio_test
    def test_del_on_child_greenlet(self):
        # Since __del__ can happen on any greenlet, cursor must be
        # prepared to close itself correctly on main or a child.
        yield from self.make_test_data()
        collection = self.collection
        cursor = [collection.find().batch_size(1)]
        yield from cursor[0].fetch_next
        cursor_id = cursor[0].cursor_id
        retrieved = cursor[0].delegate._Cursor__retrieved

        def f():
            # Last ref, should trigger __del__ immediately in CPython and
            # allow eventual __del__ in PyPy.
            del cursor[0]
            return

        greenlet.greenlet(f).switch()
        yield from self.wait_for_cursor(collection, cursor_id, retrieved)

    @asyncio_test
    def test_exhaust(self):
        if (yield from server_is_mongos(self.cx)):
            self.assertRaises(InvalidOperation,
                              self.db.test.find, exhaust=True)
            return
        self.assertRaises(TypeError, self.db.test.find, exhaust=5)
        cur = self.db.test.find(exhaust=True)
        self.assertRaises(InvalidOperation, cur.limit, 5)
        cur = self.db.test.find(limit=5)
        self.assertRaises(InvalidOperation, cur.add_option, 64)
        cur = self.db.test.find()
        cur.add_option(64)
        self.assertRaises(InvalidOperation, cur.limit, 5)
        yield from self.db.drop_collection("test")
        # Insert enough documents to require more than one batch.
        yield from self.db.test.insert([{} for _ in range(150)])
        client = self.asyncio_client(max_pool_size=1)
        # Ensure a pool.
        yield from client.db.collection.find_one()
        socks = client._get_primary_pool().sockets
        # Make sure the socket is returned after exhaustion.
        cur = client[self.db.name].test.find(exhaust=True)
        has_next = yield from cur.fetch_next
        self.assertTrue(has_next)
        self.assertEqual(0, len(socks))
        while (yield from cur.fetch_next):
            cur.next_object()
        self.assertEqual(1, len(socks))
        # Same as previous but with to_list instead of next_object.
        docs = yield from client[self.db.name].test.find(exhaust=True).to_list(
            None)
        self.assertEqual(1, len(socks))
        self.assertEqual(
            (yield from self.db.test.count()),
            len(docs))
        # If the Cursor instance is discarded before being
        # completely iterated we have to close and
        # discard the socket.
        sock = one(socks)
        cur = client[self.db.name].test.find(exhaust=True).batch_size(1)
        has_next = yield from cur.fetch_next
        self.assertTrue(has_next)
        self.assertEqual(0, len(socks))
        if 'PyPy' in sys.version:
            # Don't wait for GC or use gc.collect(), it's unreliable.
            cur.close()
        cursor_id = cur.cursor_id
        retrieved = cur.delegate._Cursor__retrieved
        cur = None
        yield from asyncio.sleep(0.1, loop=self.loop)
        # The exhaust cursor's socket was discarded, although another may
        # already have been opened to send OP_KILLCURSORS.
        self.assertNotIn(sock, socks)
        self.assertTrue(sock.closed)
        # Fixed: arguments were passed as (collection, retrieved, cursor_id),
        # but wait_for_cursor takes (collection, cursor_id, retrieved) — see
        # every other call in this class.
        yield from self.wait_for_cursor(self.collection, cursor_id, retrieved)
class MotorCursorMaxTimeMSTest(AsyncIOTestCase):
    """Tests that the cursor surfaces server-side maxTimeMS timeouts, using
    the server's maxTimeAlwaysTimeOut failpoint (requires test commands).

    Fixes: tearDown referenced ``self.io_loop`` (the Tornado test-case
    attribute) although this asyncio test case uses ``self.loop``
    everywhere else; and disable_timeout never awaited the admin command,
    so the failpoint was not reliably switched off between tests.
    """

    def setUp(self):
        super(MotorCursorMaxTimeMSTest, self).setUp()
        self.loop.run_until_complete(self.maybe_skip())

    def tearDown(self):
        # Fixed: was self.loop's Tornado-era name, self.io_loop.
        self.loop.run_until_complete(self.disable_timeout())
        super(MotorCursorMaxTimeMSTest, self).tearDown()

    @asyncio.coroutine
    def maybe_skip(self):
        if not (yield from at_least(self.cx, (2, 5, 3, -1))):
            raise SkipTest("maxTimeMS requires MongoDB >= 2.5.3")
        cmd_line = yield from get_command_line(self.cx)
        if "enableTestCommands=1" not in cmd_line:
            raise SkipTest("testing maxTimeMS requires failpoints")

    @asyncio.coroutine
    def enable_timeout(self):
        yield from self.cx.admin.command("configureFailPoint",
                                         "maxTimeAlwaysTimeOut",
                                         mode="alwaysOn")

    @asyncio.coroutine
    def disable_timeout(self):
        # Fixed: the command's future was never awaited (missing yield from),
        # so the failpoint could remain enabled for the next test.
        yield from self.cx.admin.command("configureFailPoint",
                                         "maxTimeAlwaysTimeOut",
                                         mode="off")

    @asyncio_test
    def test_max_time_ms_query(self):
        # Cursor parses server timeout error in response to initial query.
        yield from self.enable_timeout()
        cursor = self.collection.find().max_time_ms(100000)
        with self.assertRaises(ExecutionTimeout):
            yield from cursor.fetch_next
        cursor = self.collection.find().max_time_ms(100000)
        with self.assertRaises(ExecutionTimeout):
            yield from cursor.to_list(10)
        with self.assertRaises(ExecutionTimeout):
            yield from self.collection.find_one(max_time_ms=100000)

    @asyncio_test(timeout=60)
    def test_max_time_ms_getmore(self):
        # Cursor handles server timeout during getmore, also.
        yield from self.collection.insert({} for _ in range(200))
        try:
            # Send initial query.
            cursor = self.collection.find().max_time_ms(100000)
            yield from cursor.fetch_next
            cursor.next_object()
            # Test getmore timeout.
            yield from self.enable_timeout()
            with self.assertRaises(ExecutionTimeout):
                while (yield from cursor.fetch_next):
                    cursor.next_object()
            yield from cursor.close()
            # Send another initial query.
            yield from self.disable_timeout()
            cursor = self.collection.find().max_time_ms(100000)
            yield from cursor.fetch_next
            cursor.next_object()
            # Test getmore timeout.
            yield from self.enable_timeout()
            with self.assertRaises(ExecutionTimeout):
                yield from cursor.to_list(None)
            # Avoid 'IOLoop is closing' warning.
            yield from cursor.close()
        finally:
            # Cleanup.
            yield from self.disable_timeout()
            yield from self.collection.remove()

    @asyncio_test
    def test_max_time_ms_each_query(self):
        # Cursor.each() handles server timeout during initial query.
        yield from self.enable_timeout()
        cursor = self.collection.find().max_time_ms(100000)
        future = asyncio.Future(loop=self.loop)

        def callback(result, error):
            if error:
                future.set_exception(error)
            elif not result:
                # Done.
                future.set_result(None)

        with self.assertRaises(ExecutionTimeout):
            cursor.each(callback)
            yield from future

    @asyncio_test(timeout=30)
    def test_max_time_ms_each_getmore(self):
        # Cursor.each() handles server timeout during getmore.
        yield from self.collection.insert({} for _ in range(200))
        try:
            # Send initial query.
            cursor = self.collection.find().max_time_ms(100000)
            yield from cursor.fetch_next
            cursor.next_object()
            future = asyncio.Future(loop=self.loop)

            def callback(result, error):
                if error:
                    future.set_exception(error)
                elif not result:
                    # Done.
                    future.set_result(None)

            yield from self.enable_timeout()
            with self.assertRaises(ExecutionTimeout):
                cursor.each(callback)
                yield from future
            yield from cursor.close()
        finally:
            # Cleanup.
            yield from self.disable_timeout()
            yield from self.collection.remove()
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| |
import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import struct # get_image_size
import imghdr # get_image_size
def sigmoid(x):
    """Logistic sigmoid of a scalar: 1 / (1 + e^-x)."""
    return 1.0 / (1. + math.exp(-x))
def softmax(x):
    """Numerically stable softmax over all elements of tensor x.

    Subtracts the global max before exponentiating to avoid overflow,
    then normalizes so the result sums to 1.
    """
    shifted = torch.exp(x - torch.max(x))
    return shifted / shifted.sum()
def bbox_iou(box1, box2, x1y1x2y2=True):
    """Intersection-over-union of two boxes.

    Boxes are (x1, y1, x2, y2) corner form when x1y1x2y2 is True,
    otherwise (cx, cy, w, h) center form.  Returns 0.0 when the boxes do
    not overlap.  (The original also carried a dead ``carea = 0``
    assignment before the early return; it is simply omitted here.)
    """
    if x1y1x2y2:
        left = min(box1[0], box2[0])
        right = max(box1[2], box2[2])
        top = min(box1[1], box2[1])
        bottom = max(box1[3], box2[3])
        w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
        w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    else:
        left = min(box1[0] - box1[2] / 2.0, box2[0] - box2[2] / 2.0)
        right = max(box1[0] + box1[2] / 2.0, box2[0] + box2[2] / 2.0)
        top = min(box1[1] - box1[3] / 2.0, box2[1] - box2[3] / 2.0)
        bottom = max(box1[1] + box1[3] / 2.0, box2[1] + box2[3] / 2.0)
        w1, h1 = box1[2], box1[3]
        w2, h2 = box2[2], box2[3]
    # Overlap extents: union width/height minus the two box extents.
    inter_w = w1 + w2 - (right - left)
    inter_h = h1 + h2 - (bottom - top)
    if inter_w <= 0 or inter_h <= 0:
        return 0.0
    inter = inter_w * inter_h
    union = w1 * h1 + w2 * h2 - inter
    return inter / union
def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Vectorized IoU between corresponding columns of two 4xN box tensors.

    Boxes are corner form (x1, y1, x2, y2) rows when x1y1x2y2 is True,
    otherwise center form (cx, cy, w, h).  Returns a length-N tensor of
    IoUs, with 0 for non-overlapping pairs.

    Fix: the non-overlap mask was built with ``(cw <= 0) + (ch <= 0) > 0``,
    i.e. addition of two boolean tensors, which modern PyTorch rejects;
    the elementwise logical OR ``|`` expresses the same condition and
    works on old and new versions.
    """
    if x1y1x2y2:
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
        Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
        my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
        My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    # True where the pair does not overlap (non-positive overlap extent).
    mask = (cw <= 0) | (ch <= 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea/uarea
def nms(boxes, nms_thresh):
    """Greedy non-maximum suppression over detection boxes.

    Boxes are center-form lists whose index 4 is the detection confidence.
    Boxes are visited in decreasing confidence order; each kept box
    suppresses later boxes whose IoU with it exceeds nms_thresh.

    NOTE: suppression is done by zeroing box[4] IN PLACE, so the caller's
    box lists are mutated.  Returns the surviving boxes.
    """
    if len(boxes) == 0:
        return boxes
    det_confs = torch.zeros(len(boxes))
    for i in range(len(boxes)):
        # Negate so an ascending sort yields decreasing confidence.
        det_confs[i] = 1-boxes[i][4]
    _,sortIds = torch.sort(det_confs)
    out_boxes = []
    for i in range(len(boxes)):
        box_i = boxes[sortIds[i]]
        # Skip boxes already suppressed (confidence zeroed) below.
        if box_i[4] > 0:
            out_boxes.append(box_i)
            for j in range(i+1, len(boxes)):
                box_j = boxes[sortIds[j]]
                if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
                    #print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
                    box_j[4] = 0
    return out_boxes
def convert2cpu(gpu_matrix):
    """Copy a (possibly GPU-resident) float tensor into a fresh CPU FloatTensor."""
    cpu_copy = torch.FloatTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)
    return cpu_copy
def convert2cpu_long(gpu_matrix):
    """Copy a (possibly GPU-resident) integer tensor into a fresh CPU LongTensor."""
    cpu_copy = torch.LongTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)
    return cpu_copy
def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
    """Decode a YOLOv2 region-layer output tensor into candidate boxes.

    Parameters: output is the raw network tensor (B, A*(5+C), H, W) or
    (A*(5+C), H, W); anchors is a flat list of A*anchor_step values.
    Returns one list of boxes per batch image; each box is
    [cx, cy, w, h, det_conf, cls_conf, cls_id] in grid-normalized coords,
    optionally extended with (conf, class) pairs in validation mode.

    NOTE(review): grids are moved with .cuda() unconditionally, so this
    function requires a CUDA device — confirm before running on CPU.

    Fix: ``anchor_step`` used true division, which yields a float on
    Python 3 and breaks the ``.view(num_anchors, anchor_step)`` calls;
    floor division gives the same integer result on both versions.
    """
    anchor_step = len(anchors)//num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)
    t0 = time.time()
    all_boxes = []
    # Flatten to (5+C, B*A*H*W) so each prediction field is one row.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)
    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    # Box centers: sigmoid offset plus the cell's grid coordinate.
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y
    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    # Box sizes: exponential scaling of the matching anchor dimensions.
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h
    det_confs = torch.sigmoid(output[4])
    cls_confs = torch.nn.Softmax()(Variable(output[5:5+num_classes].transpose(0,1))).data
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()
    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]
                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            # Also record every other class whose combined
                            # confidence clears the threshold.
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        # Flip to True for a timing breakdown of the three phases.
        print('---------------------------------')
        print('matrix computation : %f' % (t1-t0))
        print('        gpu to cpu : %f' % (t2-t1))
        print('      boxes filter : %f' % (t3-t2))
        print('---------------------------------')
    return all_boxes
def plot_boxes(img, boxes, savename, class_names=None):
    """Draw detection boxes (and class labels, when given) on a PIL image.

    Each box is [cx, cy, w, h, det_conf, cls_conf, cls_id] with the first
    four values relative to image size.  The annotated image is written
    to *savename*.
    """
    palette = torch.FloatTensor([[1, 0, 1], [0, 0, 1], [0, 1, 1],
                                 [0, 1, 0], [1, 1, 0], [1, 0, 0]])

    def channel_color(channel, value, max_val):
        # Linearly interpolate between the two neighbouring palette rows.
        pos = float(value) / max_val * 5
        lo = int(math.floor(pos))
        hi = int(math.ceil(pos))
        frac = pos - lo
        mixed = (1 - frac) * palette[lo][channel] + frac * palette[hi][channel]
        return int(mixed * 255)

    canvas = ImageDraw.Draw(img)
    img_w = img.width
    img_h = img.height
    for box in boxes:
        # Convert center/size (relative) to corner coordinates (pixels).
        left = (box[0] - box[2] / 2.0) * img_w
        top = (box[1] - box[3] / 2.0) * img_h
        right = (box[0] + box[2] / 2.0) * img_w
        bottom = (box[1] + box[3] / 2.0) * img_h
        rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            # Deterministic per-class hue from the class id.
            offset = cls_id * 123457 % classes
            rgb = (channel_color(2, offset, classes),
                   channel_color(1, offset, classes),
                   channel_color(0, offset, classes))
            canvas.text((left, top), class_names[cls_id], fill=rgb)
        canvas.rectangle([left, top, right, bottom], outline=rgb)
    img.save(savename)
def read_truths(lab_path):
    """Load ground-truth boxes from a whitespace-separated label file.

    Parameters
    ----------
    lab_path : str
        Path of the label file; each row holds 5 numbers per box.

    Returns
    -------
    numpy.ndarray
        An (n, 5) array of boxes, or an empty array when the file is empty.
    """
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # Use floor division: ``truths.size / 5`` is a float on Python 3
        # and makes ``reshape`` raise.  The reshape also fixes the
        # single-row case, where loadtxt returns a 1-D array.
        truths = truths.reshape(truths.size // 5, 5)
        return truths
    else:
        return np.array([])
def read_truths_args(lab_path, min_box_scale):
    """Read ground truths from *lab_path*, dropping too-small boxes.

    Rows whose column 3 (presumably the box width/scale — confirm against
    the label format) falls below *min_box_scale* are discarded.
    """
    truths = read_truths(lab_path)
    kept = [[row[0], row[1], row[2], row[3], row[4]]
            for row in truths
            if row[3] >= min_box_scale]
    return np.array(kept)
def load_class_names(namesfile):
    """Return the list of class names, one per line of *namesfile*.

    Trailing whitespace (including the newline) is stripped from each
    line; lines are otherwise kept verbatim and in order.
    """
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp.readlines()]
def image2torch(img):
    """Convert an RGB PIL image into a 1x3xHxW float tensor in [0, 1].

    The raw HWC byte buffer is permuted to CHW, a batch dimension is
    added, and values are scaled from 0-255 down to 0-1.
    """
    w = img.width
    h = img.height
    raw = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    chw = raw.view(h, w, 3).transpose(0, 1).transpose(0, 2).contiguous()
    return chw.view(1, 3, h, w).float().div(255.0)
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    """Run the detector on a single image and return NMS-filtered boxes.

    *img* may be a PIL image (converted to a tensor here) or an
    already-prepared tensor.  Timing for each stage is collected and can
    be printed by flipping the dead ``if False`` guard below.
    """
    model.eval()
    t0 = time.time()
    if isinstance(img, Image.Image):
        # PIL image -> normalised 1x3xHxW float tensor.
        w = img.width
        h = img.height
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
        img = img.view(h, w, 3).transpose(0, 1).transpose(0, 2).contiguous()
        img = img.view(1, 3, h, w)
        img = img.float().div(255.0)
    t1 = time.time()
    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)
    t2 = time.time()
    output = model(img).data
    t3 = time.time()
    boxes = get_region_boxes(output, conf_thresh, model.num_classes,
                             model.anchors, model.num_anchors)[0]
    t4 = time.time()
    boxes = nms(boxes, nms_thresh)
    t5 = time.time()
    if False:  # flip to True for per-stage timing output
        print('-----------------------------------')
        print(' image to tensor : %f' % (t1 - t0))
        print(' tensor to cuda : %f' % (t2 - t1))
        print(' predict : %f' % (t3 - t2))
        print('get_region_boxes : %f' % (t4 - t3))
        print(' nms : %f' % (t5 - t4))
        print(' total : %f' % (t5 - t0))
        print('-----------------------------------')
    return boxes
def read_data_cfg(datacfg):
    """Parse a darknet-style ``key = value`` data config file into a dict.

    Blank lines and ``#`` comment lines are skipped.  Defaults are
    pre-seeded for ``gpus`` and ``num_workers``.

    Parameters
    ----------
    datacfg : str
        Path of the config file.

    Returns
    -------
    dict
        Mapping of option name to its (string) value.
    """
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    # ``with`` guarantees the handle closes even if parsing raises.
    with open(datacfg, 'r') as fp:
        for line in fp:
            line = line.strip()
            # Skip blanks and comment lines (the original crashed on '#').
            if line == '' or line.startswith('#'):
                continue
            # maxsplit=1 so values containing '=' (paths, URLs) parse;
            # a bare split('=') raised ValueError on such lines.
            key, value = line.split('=', 1)
            options[key.strip()] = value.strip()
    return options
def file_lines(thefilepath):
    """Count newline characters in a file (i.e. its number of lines).

    Reads in 8 MB chunks so arbitrarily large files use bounded memory.

    Parameters
    ----------
    thefilepath : str
        Path of the file to count.

    Returns
    -------
    int
        Number of ``\\n`` bytes found.
    """
    count = 0
    # The file is binary, so count the byte b'\n'.  The original counted
    # the str '\n' in a bytes buffer, a TypeError on Python 3.  ``with``
    # also guarantees the handle closes on any exception.
    with open(thefilepath, 'rb') as thefile:
        while True:
            buffer = thefile.read(8192 * 1024)
            if not buffer:
                break
            count += buffer.count(b'\n')
    return count
def get_image_size(fname):
'''Determine the image type of fhandle and return its size.
from draco'''
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
return
else:
return
return width, height
def logging(message):
    """Print *message* prefixed with a local-time timestamp.

    NOTE: the name shadows the stdlib ``logging`` module; kept for
    backward compatibility with existing callers.
    """
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('%s %s' % (stamp, message))
# NOTE(review): removed trailing non-Python text ("Subsets and Splits" /
# "No community queries yet..." — dataset-viewer web-scrape residue) that
# was appended to the file and broke module parsing.